author    Anthony G. Basile <blueness@gentoo.org>  2015-08-02 17:35:32 -0400
committer Anthony G. Basile <blueness@gentoo.org>  2015-08-02 17:35:32 -0400
commit    cac43c138723f39170dbf2989d04225ec1f58b6e (patch)
tree      cb72f4cdc3cf53e685841c25af5ab1d79eb46a75 /3.2.69/4420_grsecurity-3.1-3.2.69-201507251415.patch
parent    grsecurity-3.1-4.1.3-201507281943 (diff)
download  hardened-patchset-cac43c138723f39170dbf2989d04225ec1f58b6e.tar.gz
          hardened-patchset-cac43c138723f39170dbf2989d04225ec1f58b6e.tar.bz2
          hardened-patchset-cac43c138723f39170dbf2989d04225ec1f58b6e.zip
grsecurity-{3.2.69,3.14.48,4.1.3}-201508020901 (tag: 20150802)
Diffstat (limited to '3.2.69/4420_grsecurity-3.1-3.2.69-201507251415.patch')
-rw-r--r--  3.2.69/4420_grsecurity-3.1-3.2.69-201507251415.patch | 134248
1 file changed, 0 insertions, 134248 deletions
diff --git a/3.2.69/4420_grsecurity-3.1-3.2.69-201507251415.patch b/3.2.69/4420_grsecurity-3.1-3.2.69-201507251415.patch
deleted file mode 100644
index 11686d8..0000000
--- a/3.2.69/4420_grsecurity-3.1-3.2.69-201507251415.patch
+++ /dev/null
@@ -1,134248 +0,0 @@
-diff --git a/Documentation/dontdiff b/Documentation/dontdiff
-index dfa6fc6..ccbfbf3 100644
---- a/Documentation/dontdiff
-+++ b/Documentation/dontdiff
-@@ -2,9 +2,11 @@
- *.aux
- *.bin
- *.bz2
-+*.c.[012]*.*
- *.cis
- *.cpio
- *.csp
-+*.dbg
- *.dsp
- *.dvi
- *.elf
-@@ -14,6 +16,7 @@
- *.gcov
- *.gen.S
- *.gif
-+*.gmo
- *.grep
- *.grp
- *.gz
-@@ -48,14 +51,17 @@
- *.tab.h
- *.tex
- *.ver
-+*.vim
- *.xml
- *.xz
- *_MODULES
-+*_reg_safe.h
- *_vga16.c
- *~
- \#*#
- *.9
--.*
-+.[^g]*
-+.gen*
- .*.d
- .mm
- 53c700_d.h
-@@ -70,9 +76,11 @@ Kerntypes
- Module.markers
- Module.symvers
- PENDING
-+PERF*
- SCCS
- System.map*
- TAGS
-+TRACEEVENT-CFLAGS
- aconf
- af_names.h
- aic7*reg.h*
-@@ -81,6 +89,7 @@ aic7*seq.h*
- aicasm
- aicdb.h*
- altivec*.c
-+ashldi3.S
- asm-offsets.h
- asm_offsets.h
- autoconf.h*
-@@ -93,19 +102,24 @@ bounds.h
- bsetup
- btfixupprep
- build
-+builtin-policy.h
- bvmlinux
- bzImage*
- capability_names.h
- capflags.c
- classlist.h*
-+clut_vga16.c
-+common-cmds.h
- comp*.log
- compile.h*
- conf
- config
- config-*
- config_data.h*
-+config.c
- config.mak
- config.mak.autogen
-+config.tmp
- conmakehash
- consolemap_deftbl.c*
- cpustr.h
-@@ -116,9 +130,11 @@ devlist.h*
- dnotify_test
- docproc
- dslm
-+dtc-lexer.lex.c
- elf2ecoff
- elfconfig.h*
- evergreen_reg_safe.h
-+exception_policy.conf
- fixdep
- flask.h
- fore200e_mkfirm
-@@ -126,12 +142,15 @@ fore200e_pca_fw.c*
- gconf
- gconf.glade.h
- gen-devlist
-+gen-kdb_cmds.c
- gen_crc32table
- gen_init_cpio
- generated
- genheaders
- genksyms
- *_gray256.c
-+hash
-+hid-example
- hpet_example
- hugepage-mmap
- hugepage-shm
-@@ -146,7 +165,7 @@ int32.c
- int4.c
- int8.c
- kallsyms
--kconfig
-+kern_constants.h
- keywords.c
- ksym.c*
- ksym.h*
-@@ -154,7 +173,7 @@ kxgettext
- lkc_defs.h
- lex.c
- lex.*.c
--linux
-+lib1funcs.S
- logo_*.c
- logo_*_clut224.c
- logo_*_mono.c
-@@ -166,14 +185,15 @@ machtypes.h
- map
- map_hugetlb
- maui_boot.h
--media
- mconf
-+mdp
- miboot*
- mk_elfconfig
- mkboot
- mkbugboot
- mkcpustr
- mkdep
-+mkpiggy
- mkprep
- mkregtable
- mktables
-@@ -208,7 +228,10 @@ r200_reg_safe.h
- r300_reg_safe.h
- r420_reg_safe.h
- r600_reg_safe.h
-+randomize_layout_hash.h
-+randomize_layout_seed.h
- recordmcount
-+regdb.c
- relocs
- rlim_names.h
- rn50_reg_safe.h
-@@ -218,7 +241,10 @@ series
- setup
- setup.bin
- setup.elf
-+signing_key*
-+size_overflow_hash.h
- sImage
-+slabinfo
- sm_tbl*
- split-include
- syscalltab.h
-@@ -229,6 +255,7 @@ tftpboot.img
- timeconst.h
- times.h*
- trix_boot.h
-+user_constants.h
- utsrelease.h*
- vdso-syms.lds
- vdso.lds
-@@ -246,7 +273,9 @@ vmlinux
- vmlinux-*
- vmlinux.aout
- vmlinux.bin.all
-+vmlinux.bin.bz2
- vmlinux.lds
-+vmlinux.relocs
- vmlinuz
- voffset.h
- vsyscall.lds
-@@ -254,9 +283,12 @@ vsyscall_32.lds
- wanxlfw.inc
- uImage
- unifdef
-+utsrelease.h
- wakeup.bin
- wakeup.elf
- wakeup.lds
-+x509*
- zImage*
- zconf.hash.c
-+zconf.lex.c
- zoffset.h
-diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index f0001eb..8f5703b 100644
---- a/Documentation/kernel-parameters.txt
-+++ b/Documentation/kernel-parameters.txt
-@@ -859,6 +859,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
- gpt [EFI] Forces disk with valid GPT signature but
- invalid Protective MBR to be treated as GPT.
-
-+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
-+ ignore grsecurity's /proc restrictions
-+
-+ grsec_sysfs_restrict= Format: 0 | 1
-+ Default: 1
-+ Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
-+
- hashdist= [KNL,NUMA] Large hashes allocated during boot
- are distributed across NUMA nodes. Defaults on
- for 64-bit NUMA, off otherwise.
-@@ -1963,6 +1970,27 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
- the specified number of seconds. This is to be used if
- your oopses keep scrolling off the screen.
-
-+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
-+ virtualization environments that don't cope well with the
-+ expand down segment used by UDEREF on X86-32 or the frequent
-+ page table updates on X86-64.
-+
-+ pax_sanitize_slab=
-+ Format: { 0 | 1 | off | fast | full }
-+ Options '0' and '1' are only provided for backward
-+ compatibility, 'off' or 'fast' should be used instead.
-+ 0|off : disable slab object sanitization
-+ 1|fast: enable slab object sanitization excluding
-+ whitelisted slabs (default)
-+ full : sanitize all slabs, even the whitelisted ones
-+
-+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
-+
-+ pax_extra_latent_entropy
-+ Enable a very simple form of latent entropy extraction
-+ from the first 4GB of memory as the bootmem allocator
-+ passes the memory pages to the buddy allocator.
-+
- pcbit= [HW,ISDN]
-
- pcd. [PARIDE]
-diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
-index 88fd7f5..b318a78 100644
---- a/Documentation/sysctl/fs.txt
-+++ b/Documentation/sysctl/fs.txt
-@@ -163,16 +163,22 @@ This value can be used to query and set the core dump mode for setuid
- or otherwise protected/tainted binaries. The modes are
-
- 0 - (default) - traditional behaviour. Any process which has changed
-- privilege levels or is execute only will not be dumped
-+ privilege levels or is execute only will not be dumped.
- 1 - (debug) - all processes dump core when possible. The core dump is
- owned by the current user and no security is applied. This is
- intended for system debugging situations only. Ptrace is unchecked.
-+ This is insecure as it allows regular users to examine the memory
-+ contents of privileged processes.
- 2 - (suidsafe) - any binary which normally would not be dumped is dumped
-- readable by root only. This allows the end user to remove
-- such a dump but not access it directly. For security reasons
-- core dumps in this mode will not overwrite one another or
-- other files. This mode is appropriate when administrators are
-- attempting to debug problems in a normal environment.
-+ anyway, but only if the "core_pattern" kernel sysctl is set to
-+ either a pipe handler or a fully qualified path. (For more details
-+ on this limitation, see CVE-2006-2451.) This mode is appropriate
-+ when administrators are attempting to debug problems in a normal
-+ environment, and either have a core dump pipe handler that knows
-+ to treat privileged core dumps with care, or specific directory
-+ defined for catching core dumps. If a core dump happens without
-+ a pipe handler or fully qualifid path, a message will be emitted
-+ to syslog warning about the lack of a correct setting.
-
- ==============================================================
-
-diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
-index 2a68089..b3300e1 100644
---- a/Documentation/sysctl/kernel.txt
-+++ b/Documentation/sysctl/kernel.txt
-@@ -36,6 +36,7 @@ show up in /proc/sys/kernel:
- - kptr_restrict
- - kstack_depth_to_print [ X86 only ]
- - l2cr [ PPC only ]
-+- modify_ldt [ X86 only ]
- - modprobe ==> Documentation/debugging-modules.txt
- - modules_disabled
- - msgmax
-@@ -318,6 +319,20 @@ This flag controls the L2 cache of G3 processor boards. If
-
- ==============================================================
-
-+modify_ldt: (X86 only)
-+
-+Enables (1) or disables (0) the modify_ldt syscall. Modifying the LDT
-+(Local Descriptor Table) may be needed to run a 16-bit or segmented code
-+such as Dosemu or Wine. This is done via a system call which is not needed
-+to run portable applications, and which can sometimes be abused to exploit
-+some weaknesses of the architecture, opening new vulnerabilities.
-+
-+This sysctl allows one to increase the system's security by disabling the
-+system call, or to restore compatibility with specific applications when it
-+was already disabled.
-+
-+==============================================================
-+
- modules_disabled:
-
- A toggle value indicating if modules are allowed to be loaded
-diff --git a/Makefile b/Makefile
-index 8071888..b024b7b 100644
---- a/Makefile
-+++ b/Makefile
-@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
-
- HOSTCC = gcc
- HOSTCXX = g++
--HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
--HOSTCXXFLAGS = -O2
-+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks -std=gnu89
-+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
-+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
-
- # Decide whether to build built-in, modular, or both.
- # Normally, just do built-in.
-@@ -312,9 +313,15 @@ endif
- # If the user is running make -s (silent mode), suppress echoing of
- # commands
-
-+ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
-+ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
-+ quiet=silent_
-+endif
-+else # make-3.8x
- ifneq ($(findstring s,$(MAKEFLAGS)),)
- quiet=silent_
- endif
-+endif
-
- export quiet Q KBUILD_VERBOSE
-
-@@ -407,8 +414,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
- # Rules shared between *config targets and build targets
-
- # Basic helpers built in scripts/
--PHONY += scripts_basic
--scripts_basic:
-+PHONY += scripts_basic gcc-plugins
-+scripts_basic: gcc-plugins
- $(Q)$(MAKE) $(build)=scripts/basic
- $(Q)rm -f .tmp_quiet_recordmcount
-
-@@ -564,6 +571,75 @@ else
- KBUILD_CFLAGS += -O2
- endif
-
-+# Tell gcc to never replace conditional load with a non-conditional one
-+KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
-+
-+ifndef DISABLE_PAX_PLUGINS
-+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
-+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
-+else
-+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
-+endif
-+ifneq ($(PLUGINCC),)
-+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
-+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
-+endif
-+ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
-+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
-+endif
-+ifdef CONFIG_KALLOCSTAT_PLUGIN
-+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
-+endif
-+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
-+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
-+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
-+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
-+endif
-+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
-+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
-+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
-+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
-+endif
-+endif
-+ifdef CONFIG_CHECKER_PLUGIN
-+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
-+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
-+endif
-+endif
-+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
-+ifdef CONFIG_PAX_SIZE_OVERFLOW
-+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
-+endif
-+ifdef CONFIG_PAX_LATENT_ENTROPY
-+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
-+endif
-+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
-+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
-+endif
-+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
-+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
-+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
-+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
-+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
-+export PLUGINCC CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
-+ifeq ($(KBUILD_EXTMOD),)
-+gcc-plugins:
-+ $(Q)$(MAKE) $(build)=tools/gcc
-+else
-+gcc-plugins: ;
-+endif
-+else
-+gcc-plugins:
-+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
-+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
-+else
-+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
-+endif
-+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
-+endif
-+endif
-+
- include $(srctree)/arch/$(SRCARCH)/Makefile
-
- ifneq ($(CONFIG_FRAME_WARN),0)
-@@ -596,7 +672,7 @@ KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments)
-
- ifdef CONFIG_DEBUG_INFO
- KBUILD_CFLAGS += -g
--KBUILD_AFLAGS += -gdwarf-2
-+KBUILD_AFLAGS += -Wa,--gdwarf-2
- endif
-
- ifdef CONFIG_DEBUG_INFO_REDUCED
-@@ -710,7 +786,7 @@ export mod_strip_cmd
-
-
- ifeq ($(KBUILD_EXTMOD),)
--core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
-+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
-
- vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
- $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
-@@ -934,6 +1010,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
-
- # The actual objects are generated when descending,
- # make sure no implicit rule kicks in
-+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
-+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
- $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
-
- # Handle descending into subdirectories listed in $(vmlinux-dirs)
-@@ -943,7 +1021,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
- # Error messages still appears in the original language
-
- PHONY += $(vmlinux-dirs)
--$(vmlinux-dirs): prepare scripts
-+$(vmlinux-dirs): gcc-plugins prepare scripts
- $(Q)$(MAKE) $(build)=$@
-
- # Store (new) KERNELRELASE string in include/config/kernel.release
-@@ -983,10 +1061,13 @@ prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
-
- archprepare: archscripts prepare1 scripts_basic
-
-+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
-+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
- prepare0: archprepare FORCE
- $(Q)$(MAKE) $(build)=.
-
- # All the preparing..
-+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
- prepare: prepare0
-
- # Generate some files
-@@ -1091,6 +1172,8 @@ all: modules
- # using awk while concatenating to the final file.
-
- PHONY += modules
-+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
-+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
- modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
- $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
- @$(kecho) ' Building modules, stage 2.';
-@@ -1106,7 +1189,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
-
- # Target to prepare building external modules
- PHONY += modules_prepare
--modules_prepare: prepare scripts
-+modules_prepare: gcc-plugins prepare scripts
-
- # Target to install modules
- PHONY += modules_install
-@@ -1130,7 +1213,7 @@ _modinst_:
- # boot a modules.dep even before / is mounted read-write. However the
- # boot script depmod is the master version.
- PHONY += _modinst_post
--_modinst_post: _modinst_
-+_modinst_post: include/config/kernel.release _modinst_
- $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modinst
- $(call cmd,depmod)
-
-@@ -1166,6 +1249,9 @@ MRPROPER_DIRS += include/config usr/include include/generated \
- arch/*/include/generated
- MRPROPER_FILES += .config .config.old .version .old_version \
- include/linux/version.h \
-+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
-+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
-+ tools/gcc/randomize_layout_seed.h \
- Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
-
- # clean - Delete most, but leave enough to build external modules
-@@ -1202,7 +1288,7 @@ distclean: mrproper
- @find $(srctree) $(RCS_FIND_IGNORE) \
- \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
- -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
-- -o -name '.*.rej' \
-+ -o -name '.*.rej' -o -name '*.so' \
- -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
- -type f -print | xargs rm -f
-
-@@ -1256,6 +1342,7 @@ help:
- @echo ' gtags - Generate GNU GLOBAL index'
- @echo ' kernelrelease - Output the release version string'
- @echo ' kernelversion - Output the version stored in Makefile'
-+ @echo ' image_name - Output the image name'
- @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
- echo ' (default: $(INSTALL_HDR_PATH))'; \
- echo ''
-@@ -1363,6 +1450,8 @@ PHONY += $(module-dirs) modules
- $(module-dirs): crmodverdir $(objtree)/Module.symvers
- $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
-
-+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
-+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
- modules: $(module-dirs)
- @$(kecho) ' Building modules, stage 2.';
- $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
-@@ -1449,7 +1538,7 @@ export_report:
- endif #ifeq ($(config-targets),1)
- endif #ifeq ($(mixed-targets),1)
-
--PHONY += checkstack kernelrelease kernelversion
-+PHONY += checkstack kernelrelease kernelversion image_name
-
- # UML needs a little special treatment here. It wants to use the host
- # toolchain, so needs $(SUBARCH) passed to checkstack.pl. Everyone
-@@ -1470,6 +1559,18 @@ kernelrelease:
- kernelversion:
- @echo $(KERNELVERSION)
-
-+image_name:
-+ @echo $(KBUILD_IMAGE)
-+
-+# Clear a bunch of variables before executing the submake
-+tools/: FORCE
-+ $(Q)mkdir -p $(objtree)/tools
-+ $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(objtree) subdir=tools -C $(src)/tools/
-+
-+tools/%: FORCE
-+ $(Q)mkdir -p $(objtree)/tools
-+ $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(objtree) subdir=tools -C $(src)/tools/ $*
-+
- # Single targets
- # ---------------------------------------------------------------------------
- # Single targets are compatible with:
-@@ -1489,17 +1590,21 @@ else
- target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
- endif
-
--%.s: %.c prepare scripts FORCE
-+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
-+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
-+%.s: %.c gcc-plugins prepare scripts FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
- %.i: %.c prepare scripts FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
--%.o: %.c prepare scripts FORCE
-+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
-+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
-+%.o: %.c gcc-plugins prepare scripts FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
- %.lst: %.c prepare scripts FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
--%.s: %.S prepare scripts FORCE
-+%.s: %.S gcc-plugins prepare scripts FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
--%.o: %.S prepare scripts FORCE
-+%.o: %.S gcc-plugins prepare scripts FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
- %.symtypes: %.c prepare scripts FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-@@ -1509,11 +1614,15 @@ endif
- $(cmd_crmodverdir)
- $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
- $(build)=$(build-dir)
--%/: prepare scripts FORCE
-+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
-+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
-+%/: gcc-plugins prepare scripts FORCE
- $(cmd_crmodverdir)
- $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
- $(build)=$(build-dir)
--%.ko: prepare scripts FORCE
-+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
-+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
-+%.ko: gcc-plugins prepare scripts FORCE
- $(cmd_crmodverdir)
- $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
- $(build)=$(build-dir) $(@:.ko=.o)
-diff --git a/arch/Kconfig b/arch/Kconfig
-index 4b0669c..7389b35 100644
---- a/arch/Kconfig
-+++ b/arch/Kconfig
-@@ -181,4 +181,28 @@ config HAVE_RCU_TABLE_FREE
- config ARCH_HAVE_NMI_SAFE_CMPXCHG
- bool
-
-+config HAVE_ARCH_SECCOMP_FILTER
-+ bool
-+ help
-+ An arch should select this symbol if it provides all of these things:
-+ - syscall_get_arch()
-+ - syscall_get_arguments()
-+ - syscall_rollback()
-+ - syscall_set_return_value()
-+ - SIGSYS siginfo_t support
-+ - uses __secure_computing_int() or secure_computing()
-+ - secure_computing is called from a ptrace_event()-safe context
-+ - secure_computing return value is checked and a return value of -1
-+ results in the system call being skipped immediately.
-+
-+config SECCOMP_FILTER
-+ def_bool y
-+ depends on HAVE_ARCH_SECCOMP_FILTER && SECCOMP && NET
-+ help
-+ Enable tasks to build secure computing environments defined
-+ in terms of Berkeley Packet Filter programs which implement
-+ task-defined system call filtering polices.
-+
-+ See Documentation/prctl/seccomp_filter.txt for details.
-+
- source "kernel/gcov/Kconfig"
-diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
-index 6f1aca7..fa956e0 100644
---- a/arch/alpha/include/asm/atomic.h
-+++ b/arch/alpha/include/asm/atomic.h
-@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
- #define atomic_dec(v) atomic_sub(1,(v))
- #define atomic64_dec(v) atomic64_sub(1,(v))
-
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
-+
- #define smp_mb__before_atomic_dec() smp_mb()
- #define smp_mb__after_atomic_dec() smp_mb()
- #define smp_mb__before_atomic_inc() smp_mb()
-diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
-index ad368a9..fbe0f25 100644
---- a/arch/alpha/include/asm/cache.h
-+++ b/arch/alpha/include/asm/cache.h
-@@ -4,19 +4,19 @@
- #ifndef __ARCH_ALPHA_CACHE_H
- #define __ARCH_ALPHA_CACHE_H
-
-+#include <linux/const.h>
-
- /* Bytes per L1 (data) cache line. */
- #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
--# define L1_CACHE_BYTES 64
- # define L1_CACHE_SHIFT 6
- #else
- /* Both EV4 and EV5 are write-through, read-allocate,
- direct-mapped, physical.
- */
--# define L1_CACHE_BYTES 32
- # define L1_CACHE_SHIFT 5
- #endif
-
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
- #define SMP_CACHE_BYTES L1_CACHE_BYTES
-
- #endif
-diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
-index da5449e..7418343 100644
---- a/arch/alpha/include/asm/elf.h
-+++ b/arch/alpha/include/asm/elf.h
-@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
-
- #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
-+
-+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
-+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
-+#endif
-+
- /* $0 is set by ld.so to a pointer to a function which might be
- registered using atexit. This provides a mean for the dynamic
- linker to call DT_FINI functions for shared libraries that have
-diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
-index bc2a0da..8ad11ee 100644
---- a/arch/alpha/include/asm/pgalloc.h
-+++ b/arch/alpha/include/asm/pgalloc.h
-@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
- pgd_set(pgd, pmd);
- }
-
-+static inline void
-+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
-+{
-+ pgd_populate(mm, pgd, pmd);
-+}
-+
- extern pgd_t *pgd_alloc(struct mm_struct *mm);
-
- static inline void
-diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
-index de98a73..bd4f1f8 100644
---- a/arch/alpha/include/asm/pgtable.h
-+++ b/arch/alpha/include/asm/pgtable.h
-@@ -101,6 +101,17 @@ struct vm_area_struct;
- #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
- #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
- #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
-+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
-+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
-+#else
-+# define PAGE_SHARED_NOEXEC PAGE_SHARED
-+# define PAGE_COPY_NOEXEC PAGE_COPY
-+# define PAGE_READONLY_NOEXEC PAGE_READONLY
-+#endif
-+
- #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
-
- #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
-diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
-index 2fd00b7..cfd5069 100644
---- a/arch/alpha/kernel/module.c
-+++ b/arch/alpha/kernel/module.c
-@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
-
- /* The small sections were sorted to the end of the segment.
- The following should definitely cover them. */
-- gp = (u64)me->module_core + me->core_size - 0x8000;
-+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
- got = sechdrs[me->arch.gotsecindex].sh_addr;
-
- for (i = 0; i < n; i++) {
-diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
-index 01e8715..6a5a03b 100644
---- a/arch/alpha/kernel/osf_sys.c
-+++ b/arch/alpha/kernel/osf_sys.c
-@@ -1138,16 +1138,16 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
- generic version except that we know how to honor ADDR_LIMIT_32BIT. */
-
- static unsigned long
--arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
-- unsigned long limit)
-+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
-+ unsigned long limit, unsigned long flags)
- {
- struct vm_area_struct *vma = find_vma(current->mm, addr);
--
-+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
- while (1) {
- /* At this point: (!vma || addr < vma->vm_end). */
- if (limit - len < addr)
- return -ENOMEM;
-- if (!vma || addr + len <= vma->vm_start)
-+ if (check_heap_stack_gap(vma, &addr, len, offset))
- return addr;
- addr = vma->vm_end;
- vma = vma->vm_next;
-@@ -1183,20 +1183,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
- merely specific addresses, but regions of memory -- perhaps
- this feature should be incorporated into all ports? */
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (addr) {
-- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
-+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
- if (addr != (unsigned long) -ENOMEM)
- return addr;
- }
-
- /* Next, try allocating at TASK_UNMAPPED_BASE. */
-- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
-- len, limit);
-+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
-+
- if (addr != (unsigned long) -ENOMEM)
- return addr;
-
- /* Finally, try allocating in low memory. */
-- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
-+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
-
- return addr;
- }
-diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
-index e576b91..9b43be9 100644
---- a/arch/alpha/mm/fault.c
-+++ b/arch/alpha/mm/fault.c
-@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
- __reload_thread(pcb);
- }
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+/*
-+ * PaX: decide what to do with offenders (regs->pc = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ * 2 when patched PLT trampoline was detected
-+ * 3 when unpatched PLT trampoline was detected
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+ int err;
-+
-+ do { /* PaX: patched PLT emulation #1 */
-+ unsigned int ldah, ldq, jmp;
-+
-+ err = get_user(ldah, (unsigned int *)regs->pc);
-+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
-+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
-+
-+ if (err)
-+ break;
-+
-+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
-+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
-+ jmp == 0x6BFB0000U)
-+ {
-+ unsigned long r27, addr;
-+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
-+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
-+
-+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
-+ err = get_user(r27, (unsigned long *)addr);
-+ if (err)
-+ break;
-+
-+ regs->r27 = r27;
-+ regs->pc = r27;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: patched PLT emulation #2 */
-+ unsigned int ldah, lda, br;
-+
-+ err = get_user(ldah, (unsigned int *)regs->pc);
-+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
-+ err |= get_user(br, (unsigned int *)(regs->pc+8));
-+
-+ if (err)
-+ break;
-+
-+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
-+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
-+ (br & 0xFFE00000U) == 0xC3E00000U)
-+ {
-+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
-+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
-+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
-+
-+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
-+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: unpatched PLT emulation */
-+ unsigned int br;
-+
-+ err = get_user(br, (unsigned int *)regs->pc);
-+
-+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
-+ unsigned int br2, ldq, nop, jmp;
-+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
-+
-+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
-+ err = get_user(br2, (unsigned int *)addr);
-+ err |= get_user(ldq, (unsigned int *)(addr+4));
-+ err |= get_user(nop, (unsigned int *)(addr+8));
-+ err |= get_user(jmp, (unsigned int *)(addr+12));
-+ err |= get_user(resolver, (unsigned long *)(addr+16));
-+
-+ if (err)
-+ break;
-+
-+ if (br2 == 0xC3600000U &&
-+ ldq == 0xA77B000CU &&
-+ nop == 0x47FF041FU &&
-+ jmp == 0x6B7B0000U)
-+ {
-+ regs->r28 = regs->pc+4;
-+ regs->r27 = addr+16;
-+ regs->pc = resolver;
-+ return 3;
-+ }
-+ }
-+ } while (0);
-+#endif
-+
-+ return 1;
-+}
-+
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ unsigned long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 5; i++) {
-+ unsigned int c;
-+ if (get_user(c, (unsigned int *)pc+i))
-+ printk(KERN_CONT "???????? ");
-+ else
-+ printk(KERN_CONT "%08x ", c);
-+ }
-+ printk("\n");
-+}
-+#endif
-
- /*
- * This routine handles page faults. It determines the address,
-@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
- good_area:
- si_code = SEGV_ACCERR;
- if (cause < 0) {
-- if (!(vma->vm_flags & VM_EXEC))
-+ if (!(vma->vm_flags & VM_EXEC)) {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
-+ goto bad_area;
-+
-+ up_read(&mm->mmap_sem);
-+ switch (pax_handle_fetch_fault(regs)) {
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+ case 2:
-+ case 3:
-+ return;
-+#endif
-+
-+ }
-+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
-+ do_group_exit(SIGKILL);
-+#else
- goto bad_area;
-+#endif
-+
-+ }
- } else if (!cause) {
- /* Allow reads even for write-only mappings */
- if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
-diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
-index 082bd36..da47cc5 100644
---- a/arch/arm/Kconfig
-+++ b/arch/arm/Kconfig
-@@ -2013,6 +2013,7 @@ config XIP_PHYS_ADDR
- config KEXEC
- bool "Kexec system call (EXPERIMENTAL)"
- depends on EXPERIMENTAL
-+ depends on !GRKERNSEC_KMEM
- help
- kexec is a system call that implements the ability to shutdown your
- current kernel, and to start another kernel. It is like a reboot
-diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
-index b7c5d5d..4b0c4ed 100644
---- a/arch/arm/include/asm/assembler.h
-+++ b/arch/arm/include/asm/assembler.h
-@@ -231,7 +231,7 @@
- */
- #ifdef CONFIG_THUMB2_KERNEL
-
-- .macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T()
-+ .macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
- 9999:
- .if \inc == 1
- \instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
-@@ -271,7 +271,7 @@
-
- #else /* !CONFIG_THUMB2_KERNEL */
-
-- .macro usracc, instr, reg, ptr, inc, cond, rept, abort, t=T()
-+ .macro usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
- .rept \rept
- 9999:
- .if \inc == 1
-diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
-index 86976d0..15420e6 100644
---- a/arch/arm/include/asm/atomic.h
-+++ b/arch/arm/include/asm/atomic.h
-@@ -15,6 +15,10 @@
- #include <linux/types.h>
- #include <asm/system.h>
-
-+#ifdef CONFIG_GENERIC_ATOMIC64
-+#include <asm-generic/atomic64.h>
-+#endif
-+
- #define ATOMIC_INIT(i) { (i) }
-
- #ifdef __KERNEL__
-@@ -25,7 +29,15 @@
- * atomic_set() is the clrex or dummy strex done on every exception return.
- */
- #define atomic_read(v) (*(volatile int *)&(v)->counter)
-+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
-+{
-+ return *(const volatile int *)&v->counter;
-+}
- #define atomic_set(v,i) (((v)->counter) = (i))
-+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
-+{
-+ v->counter = i;
-+}
-
- #if __LINUX_ARM_ARCH__ >= 6
-
-@@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v)
- int result;
-
- __asm__ __volatile__("@ atomic_add\n"
-+"1: ldrex %1, [%3]\n"
-+" adds %0, %1, %4\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" bvc 3f\n"
-+"2: bkpt 0xf103\n"
-+"3:\n"
-+#endif
-+
-+" strex %1, %0, [%3]\n"
-+" teq %1, #0\n"
-+" bne 1b"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+"\n4:\n"
-+ _ASM_EXTABLE(2b, 4b)
-+#endif
-+
-+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-+ : "r" (&v->counter), "Ir" (i)
-+ : "cc");
-+}
-+
-+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
-+{
-+ unsigned long tmp;
-+ int result;
-+
-+ __asm__ __volatile__("@ atomic_add_unchecked\n"
- "1: ldrex %0, [%3]\n"
- " add %0, %0, %4\n"
- " strex %1, %0, [%3]\n"
-@@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
- smp_mb();
-
- __asm__ __volatile__("@ atomic_add_return\n"
-+"1: ldrex %1, [%3]\n"
-+" adds %0, %1, %4\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" bvc 3f\n"
-+" mov %0, %1\n"
-+"2: bkpt 0xf103\n"
-+"3:\n"
-+#endif
-+
-+" strex %1, %0, [%3]\n"
-+" teq %1, #0\n"
-+" bne 1b"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+"\n4:\n"
-+ _ASM_EXTABLE(2b, 4b)
-+#endif
-+
-+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-+ : "r" (&v->counter), "Ir" (i)
-+ : "cc");
-+
-+ smp_mb();
-+
-+ return result;
-+}
-+
-+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
-+{
-+ unsigned long tmp;
-+ int result;
-+
-+ smp_mb();
-+
-+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
- "1: ldrex %0, [%3]\n"
- " add %0, %0, %4\n"
- " strex %1, %0, [%3]\n"
-@@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v)
- int result;
-
- __asm__ __volatile__("@ atomic_sub\n"
-+"1: ldrex %1, [%3]\n"
-+" subs %0, %1, %4\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" bvc 3f\n"
-+"2: bkpt 0xf103\n"
-+"3:\n"
-+#endif
-+
-+" strex %1, %0, [%3]\n"
-+" teq %1, #0\n"
-+" bne 1b"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+"\n4:\n"
-+ _ASM_EXTABLE(2b, 4b)
-+#endif
-+
-+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-+ : "r" (&v->counter), "Ir" (i)
-+ : "cc");
-+}
-+
-+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
-+{
-+ unsigned long tmp;
-+ int result;
-+
-+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
- "1: ldrex %0, [%3]\n"
- " sub %0, %0, %4\n"
- " strex %1, %0, [%3]\n"
-@@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
- smp_mb();
-
- __asm__ __volatile__("@ atomic_sub_return\n"
--"1: ldrex %0, [%3]\n"
--" sub %0, %0, %4\n"
-+"1: ldrex %1, [%3]\n"
-+" subs %0, %1, %4\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" bvc 3f\n"
-+" mov %0, %1\n"
-+"2: bkpt 0xf103\n"
-+"3:\n"
-+#endif
-+
- " strex %1, %0, [%3]\n"
- " teq %1, #0\n"
- " bne 1b"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+"\n4:\n"
-+ _ASM_EXTABLE(2b, 4b)
-+#endif
-+
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
-@@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
- return oldval;
- }
-
-+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
-+{
-+ unsigned long oldval, res;
-+
-+ smp_mb();
-+
-+ do {
-+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
-+ "ldrex %1, [%3]\n"
-+ "mov %0, #0\n"
-+ "teq %1, %4\n"
-+ "strexeq %0, %5, [%3]\n"
-+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
-+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
-+ : "cc");
-+ } while (res);
-+
-+ smp_mb();
-+
-+ return oldval;
-+}
-+
- static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
- {
- unsigned long tmp, tmp2;
-@@ -165,7 +307,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
-
- return val;
- }
-+
-+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
-+{
-+ return atomic_add_return(i, v);
-+}
-+
- #define atomic_add(i, v) (void) atomic_add_return(i, v)
-+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
-+{
-+ (void) atomic_add_return(i, v);
-+}
-
- static inline int atomic_sub_return(int i, atomic_t *v)
- {
-@@ -180,6 +332,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
- return val;
- }
- #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
-+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
-+{
-+ (void) atomic_sub_return(i, v);
-+}
-
- static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
- {
-@@ -195,6 +351,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
- return ret;
- }
-
-+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
-+{
-+ return atomic_cmpxchg(v, old, new);
-+}
-+
- static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
- {
- unsigned long flags;
-@@ -207,6 +368,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
- #endif /* __LINUX_ARM_ARCH__ */
-
- #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
-+{
-+ return xchg(&v->counter, new);
-+}
-
- static inline int __atomic_add_unless(atomic_t *v, int a, int u)
- {
-@@ -219,11 +384,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
- }
-
- #define atomic_inc(v) atomic_add(1, v)
-+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
-+{
-+ atomic_add_unchecked(1, v);
-+}
- #define atomic_dec(v) atomic_sub(1, v)
-+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
-+{
-+ atomic_sub_unchecked(1, v);
-+}
-
- #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
-+{
-+ return atomic_add_return_unchecked(1, v) == 0;
-+}
- #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
- #define atomic_inc_return(v) (atomic_add_return(1, v))
-+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
-+{
-+ return atomic_add_return_unchecked(1, v);
-+}
- #define atomic_dec_return(v) (atomic_sub_return(1, v))
- #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-@@ -239,6 +420,14 @@ typedef struct {
- u64 __aligned(8) counter;
- } atomic64_t;
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+typedef struct {
-+ u64 __aligned(8) counter;
-+} atomic64_unchecked_t;
-+#else
-+typedef atomic64_t atomic64_unchecked_t;
-+#endif
-+
- #define ATOMIC64_INIT(i) { (i) }
-
- static inline u64 atomic64_read(atomic64_t *v)
-@@ -254,6 +443,19 @@ static inline u64 atomic64_read(atomic64_t *v)
- return result;
- }
-
-+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
-+{
-+ u64 result;
-+
-+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
-+" ldrexd %0, %H0, [%1]"
-+ : "=&r" (result)
-+ : "r" (&v->counter), "Qo" (v->counter)
-+ );
-+
-+ return result;
-+}
-+
- static inline void atomic64_set(atomic64_t *v, u64 i)
- {
- u64 tmp;
-@@ -268,6 +470,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
- : "cc");
- }
-
-+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
-+{
-+ u64 tmp;
-+
-+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
-+"1: ldrexd %0, %H0, [%2]\n"
-+" strexd %0, %3, %H3, [%2]\n"
-+" teq %0, #0\n"
-+" bne 1b"
-+ : "=&r" (tmp), "=Qo" (v->counter)
-+ : "r" (&v->counter), "r" (i)
-+ : "cc");
-+}
-+
- static inline void atomic64_add(u64 i, atomic64_t *v)
- {
- u64 result;
-@@ -276,6 +492,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
- __asm__ __volatile__("@ atomic64_add\n"
- "1: ldrexd %0, %H0, [%3]\n"
- " adds %0, %0, %4\n"
-+" adcs %H0, %H0, %H4\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" bvc 3f\n"
-+"2: bkpt 0xf103\n"
-+"3:\n"
-+#endif
-+
-+" strexd %1, %0, %H0, [%3]\n"
-+" teq %1, #0\n"
-+" bne 1b"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+"\n4:\n"
-+ _ASM_EXTABLE(2b, 4b)
-+#endif
-+
-+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-+ : "r" (&v->counter), "r" (i)
-+ : "cc");
-+}
-+
-+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
-+{
-+ u64 result;
-+ unsigned long tmp;
-+
-+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
-+"1: ldrexd %0, %H0, [%3]\n"
-+" adds %0, %0, %4\n"
- " adc %H0, %H0, %H4\n"
- " strexd %1, %0, %H0, [%3]\n"
- " teq %1, #0\n"
-@@ -287,12 +533,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
-
- static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
- {
-- u64 result;
-- unsigned long tmp;
-+ u64 result, tmp;
-
- smp_mb();
-
- __asm__ __volatile__("@ atomic64_add_return\n"
-+"1: ldrexd %1, %H1, [%3]\n"
-+" adds %0, %1, %4\n"
-+" adcs %H0, %H1, %H4\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" bvc 3f\n"
-+" mov %0, %1\n"
-+" mov %H0, %H1\n"
-+"2: bkpt 0xf103\n"
-+"3:\n"
-+#endif
-+
-+" strexd %1, %0, %H0, [%3]\n"
-+" teq %1, #0\n"
-+" bne 1b"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+"\n4:\n"
-+ _ASM_EXTABLE(2b, 4b)
-+#endif
-+
-+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-+ : "r" (&v->counter), "r" (i)
-+ : "cc");
-+
-+ smp_mb();
-+
-+ return result;
-+}
-+
-+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
-+{
-+ u64 result;
-+ unsigned long tmp;
-+
-+ smp_mb();
-+
-+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
- "1: ldrexd %0, %H0, [%3]\n"
- " adds %0, %0, %4\n"
- " adc %H0, %H0, %H4\n"
-@@ -316,6 +599,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
- __asm__ __volatile__("@ atomic64_sub\n"
- "1: ldrexd %0, %H0, [%3]\n"
- " subs %0, %0, %4\n"
-+" sbcs %H0, %H0, %H4\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" bvc 3f\n"
-+"2: bkpt 0xf103\n"
-+"3:\n"
-+#endif
-+
-+" strexd %1, %0, %H0, [%3]\n"
-+" teq %1, #0\n"
-+" bne 1b"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+"\n4:\n"
-+ _ASM_EXTABLE(2b, 4b)
-+#endif
-+
-+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-+ : "r" (&v->counter), "r" (i)
-+ : "cc");
-+}
-+
-+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
-+{
-+ u64 result;
-+ unsigned long tmp;
-+
-+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
-+"1: ldrexd %0, %H0, [%3]\n"
-+" subs %0, %0, %4\n"
- " sbc %H0, %H0, %H4\n"
- " strexd %1, %0, %H0, [%3]\n"
- " teq %1, #0\n"
-@@ -327,18 +640,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
-
- static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
- {
-- u64 result;
-- unsigned long tmp;
-+ u64 result, tmp;
-
- smp_mb();
-
- __asm__ __volatile__("@ atomic64_sub_return\n"
--"1: ldrexd %0, %H0, [%3]\n"
--" subs %0, %0, %4\n"
--" sbc %H0, %H0, %H4\n"
-+"1: ldrexd %1, %H1, [%3]\n"
-+" subs %0, %1, %4\n"
-+" sbcs %H0, %H1, %H4\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" bvc 3f\n"
-+" mov %0, %1\n"
-+" mov %H0, %H1\n"
-+"2: bkpt 0xf103\n"
-+"3:\n"
-+#endif
-+
- " strexd %1, %0, %H0, [%3]\n"
- " teq %1, #0\n"
- " bne 1b"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+"\n4:\n"
-+ _ASM_EXTABLE(2b, 4b)
-+#endif
-+
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "r" (i)
- : "cc");
-@@ -372,6 +699,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
- return oldval;
- }
-
-+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
-+{
-+ u64 oldval;
-+ unsigned long res;
-+
-+ smp_mb();
-+
-+ do {
-+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
-+ "ldrexd %1, %H1, [%3]\n"
-+ "mov %0, #0\n"
-+ "teq %1, %4\n"
-+ "teqeq %H1, %H4\n"
-+ "strexdeq %0, %5, %H5, [%3]"
-+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
-+ : "r" (&ptr->counter), "r" (old), "r" (new)
-+ : "cc");
-+ } while (res);
-+
-+ smp_mb();
-+
-+ return oldval;
-+}
-+
- static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
- {
- u64 result;
-@@ -395,21 +746,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
-
- static inline u64 atomic64_dec_if_positive(atomic64_t *v)
- {
-- u64 result;
-- unsigned long tmp;
-+ u64 result, tmp;
-
- smp_mb();
-
- __asm__ __volatile__("@ atomic64_dec_if_positive\n"
--"1: ldrexd %0, %H0, [%3]\n"
--" subs %0, %0, #1\n"
--" sbc %H0, %H0, #0\n"
-+"1: ldrexd %1, %H1, [%3]\n"
-+" subs %0, %1, #1\n"
-+" sbcs %H0, %H1, #0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" bvc 3f\n"
-+" mov %0, %1\n"
-+" mov %H0, %H1\n"
-+"2: bkpt 0xf103\n"
-+"3:\n"
-+#endif
-+
- " teq %H0, #0\n"
--" bmi 2f\n"
-+" bmi 4f\n"
- " strexd %1, %0, %H0, [%3]\n"
- " teq %1, #0\n"
- " bne 1b\n"
--"2:"
-+"4:\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ _ASM_EXTABLE(2b, 4b)
-+#endif
-+
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter)
- : "cc");
-@@ -432,13 +796,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
- " teq %0, %5\n"
- " teqeq %H0, %H5\n"
- " moveq %1, #0\n"
--" beq 2f\n"
-+" beq 4f\n"
- " adds %0, %0, %6\n"
--" adc %H0, %H0, %H6\n"
-+" adcs %H0, %H0, %H6\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" bvc 3f\n"
-+"2: bkpt 0xf103\n"
-+"3:\n"
-+#endif
-+
- " strexd %2, %0, %H0, [%4]\n"
- " teq %2, #0\n"
- " bne 1b\n"
--"2:"
-+"4:\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ _ASM_EXTABLE(2b, 4b)
-+#endif
-+
- : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "r" (u), "r" (a)
- : "cc");
-@@ -451,10 +827,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
-
- #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
- #define atomic64_inc(v) atomic64_add(1LL, (v))
-+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
- #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
-+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
- #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
- #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
- #define atomic64_dec(v) atomic64_sub(1LL, (v))
-+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
- #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
- #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
- #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
-diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
-index 75fe66b..2255c86 100644
---- a/arch/arm/include/asm/cache.h
-+++ b/arch/arm/include/asm/cache.h
-@@ -4,8 +4,10 @@
- #ifndef __ASMARM_CACHE_H
- #define __ASMARM_CACHE_H
-
-+#include <linux/const.h>
-+
- #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- /*
- * Memory returned by kmalloc() may be used for DMA, so we must make
-diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
-index b1e0e07..071fe7c 100644
---- a/arch/arm/include/asm/cacheflush.h
-+++ b/arch/arm/include/asm/cacheflush.h
-@@ -108,7 +108,7 @@ struct cpu_cache_fns {
- void (*dma_unmap_area)(const void *, size_t, int);
-
- void (*dma_flush_range)(const void *, const void *);
--};
-+} __no_const;
-
- /*
- * Select the calling method
-diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
-index af18cea..b5dc173 100644
---- a/arch/arm/include/asm/domain.h
-+++ b/arch/arm/include/asm/domain.h
-@@ -83,9 +83,9 @@
- * instructions (inline assembly)
- */
- #ifdef CONFIG_CPU_USE_DOMAINS
--#define T(instr) #instr "t"
-+#define TUSER(instr) #instr "t"
- #else
--#define T(instr) #instr
-+#define TUSER(instr) #instr
- #endif
-
- #else /* __ASSEMBLY__ */
-@@ -95,9 +95,9 @@
- * instructions
- */
- #ifdef CONFIG_CPU_USE_DOMAINS
--#define T(instr) instr ## t
-+#define TUSER(instr) instr ## t
- #else
--#define T(instr) instr
-+#define TUSER(instr) instr
- #endif
-
- #endif /* __ASSEMBLY__ */
-diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
-index 0e9ce8d..6ef1e03 100644
---- a/arch/arm/include/asm/elf.h
-+++ b/arch/arm/include/asm/elf.h
-@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
--#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
-+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
-+
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
-+
-+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
-+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
-+#endif
-
- /* When the program starts, a1 contains a pointer to a function to be
- registered with atexit, as per the SVR4 ABI. A value of 0 means we
-@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
- extern void elf_set_personality(const struct elf32_hdr *);
- #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
-
--struct mm_struct;
--extern unsigned long arch_randomize_brk(struct mm_struct *mm);
--#define arch_randomize_brk arch_randomize_brk
--
- extern int vectors_user_mapping(void);
- #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
- #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
-diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
-index aefd459..e37af12 100644
---- a/arch/arm/include/asm/futex.h
-+++ b/arch/arm/include/asm/futex.h
-@@ -70,9 +70,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
-
- #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
- __asm__ __volatile__( \
-- "1: " T(ldr) " %1, [%3]\n" \
-+ "1: " TUSER(ldr) " %1, [%3]\n" \
- " " insn "\n" \
-- "2: " T(str) " %0, [%3]\n" \
-+ "2: " TUSER(str) " %0, [%3]\n" \
- " mov %0, #0\n" \
- __futex_atomic_ex_table("%5") \
- : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
-@@ -90,10 +90,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- return -EFAULT;
-
- __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
-- "1: " T(ldr) " %1, [%4]\n"
-+ "1: " TUSER(ldr) " %1, [%4]\n"
- " teq %1, %2\n"
- " it eq @ explicit IT needed for the 2b label\n"
-- "2: " T(streq) " %3, [%4]\n"
-+ "2: " TUSER(streq) " %3, [%4]\n"
- __futex_atomic_ex_table("%5")
- : "+r" (ret), "=&r" (val)
- : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
-diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
-index e51b1e8..32a3113 100644
---- a/arch/arm/include/asm/kmap_types.h
-+++ b/arch/arm/include/asm/kmap_types.h
-@@ -21,6 +21,7 @@ enum km_type {
- KM_L1_CACHE,
- KM_L2_CACHE,
- KM_KDB,
-+ KM_CLEARPAGE,
- KM_TYPE_NR
- };
-
-diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
-index a8997d7..f0a29154 100644
---- a/arch/arm/include/asm/memory.h
-+++ b/arch/arm/include/asm/memory.h
-@@ -268,7 +268,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
- #define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
-
- #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
--#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
-+#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
-+ && pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
-
- /*
- * Optional coherency support. Currently used only by selected
-diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
-index 53426c6..c7baff3 100644
---- a/arch/arm/include/asm/outercache.h
-+++ b/arch/arm/include/asm/outercache.h
-@@ -35,7 +35,7 @@ struct outer_cache_fns {
- #endif
- void (*set_debug)(unsigned long);
- void (*resume)(void);
--};
-+} __no_const;
-
- #ifdef CONFIG_OUTER_CACHE
-
-diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
-index ca94653..9c398dc 100644
---- a/arch/arm/include/asm/page.h
-+++ b/arch/arm/include/asm/page.h
-@@ -23,6 +23,7 @@
-
- #else
-
-+#include <linux/compiler.h>
- #include <asm/glue.h>
-
- /*
-@@ -123,7 +124,7 @@ struct cpu_user_fns {
- void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
- void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
- unsigned long vaddr, struct vm_area_struct *vma);
--};
-+} __no_const;
-
- #ifdef MULTI_USER
- extern struct cpu_user_fns cpu_user;
-diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
-index 3e08fd3..3f14f89 100644
---- a/arch/arm/include/asm/pgalloc.h
-+++ b/arch/arm/include/asm/pgalloc.h
-@@ -31,6 +31,7 @@
- #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
- #define pmd_free(mm, pmd) do { } while (0)
- #define pgd_populate(mm,pmd,pte) BUG()
-+#define pgd_populate_kernel(mm,pmd,pte) BUG()
-
- extern pgd_t *pgd_alloc(struct mm_struct *mm);
- extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
-index fcbac3c..6e9b464 100644
---- a/arch/arm/include/asm/pgtable.h
-+++ b/arch/arm/include/asm/pgtable.h
-@@ -26,6 +26,9 @@
-
- #include <asm/pgtable-2level.h>
-
-+#define ktla_ktva(addr) (addr)
-+#define ktva_ktla(addr) (addr)
-+
- /*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
-diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
-index 96187ff..7a9b049 100644
---- a/arch/arm/include/asm/ptrace.h
-+++ b/arch/arm/include/asm/ptrace.h
-@@ -72,7 +72,7 @@
- * ARMv7 groups of PSR bits
- */
- #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
--#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
-+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
- #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
- #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
-
-diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
-index 984014b..a6d914f 100644
---- a/arch/arm/include/asm/system.h
-+++ b/arch/arm/include/asm/system.h
-@@ -90,6 +90,8 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
-
- #define xchg(ptr,x) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-+#define xchg_unchecked(ptr,x) \
-+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
- extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
-
-@@ -101,7 +103,7 @@ extern int __pure cpu_architecture(void);
- extern void cpu_init(void);
-
- void arm_machine_restart(char mode, const char *cmd);
--extern void (*arm_pm_restart)(char str, const char *cmd);
-+extern void (*arm_pm_restart)(char str, const char *cmd) __noreturn;
-
- #define UDBG_UNDEFINED (1 << 0)
- #define UDBG_SYSCALL (1 << 1)
-@@ -526,6 +528,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
-
- #endif /* __LINUX_ARM_ARCH__ >= 6 */
-
-+#define _ASM_EXTABLE(from, to) \
-+" .pushsection __ex_table,\"a\"\n"\
-+" .align 3\n" \
-+" .long " #from ", " #to"\n" \
-+" .popsection"
-+
-+
- #endif /* __ASSEMBLY__ */
-
- #define arch_align_stack(x) (x)
-diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
-index 7b5cc8d..5d70d88 100644
---- a/arch/arm/include/asm/thread_info.h
-+++ b/arch/arm/include/asm/thread_info.h
-@@ -139,6 +139,12 @@ extern void vfp_flush_hwstate(struct thread_info *);
- #define TIF_NEED_RESCHED 1
- #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
- #define TIF_SYSCALL_TRACE 8
-+
-+/* within 8 bits of TIF_SYSCALL_TRACE
-+ to meet flexible second operand requirements
-+*/
-+#define TIF_GRSEC_SETXID 9
-+
- #define TIF_POLLING_NRFLAG 16
- #define TIF_USING_IWMMXT 17
- #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-@@ -155,6 +161,10 @@ extern void vfp_flush_hwstate(struct thread_info *);
- #define _TIF_FREEZE (1 << TIF_FREEZE)
- #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
- #define _TIF_SECCOMP (1 << TIF_SECCOMP)
-+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
-+
-+/* Checks for any syscall work in entry-common.S */
-+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_GRSEC_SETXID)
-
- /*
- * Change these and you break ASM code in entry-common.S
-diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
-index 18a2858..21db277 100644
---- a/arch/arm/include/asm/uaccess.h
-+++ b/arch/arm/include/asm/uaccess.h
-@@ -202,6 +202,7 @@ static inline void set_fs(mm_segment_t fs)
-
- #endif /* CONFIG_MMU */
-
-+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
- #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
-
- /*
-@@ -242,7 +243,7 @@ do { \
-
- #define __get_user_asm_byte(x,addr,err) \
- __asm__ __volatile__( \
-- "1: " T(ldrb) " %1,[%2],#0\n" \
-+ "1: " TUSER(ldrb) " %1,[%2],#0\n" \
- "2:\n" \
- " .pushsection .fixup,\"ax\"\n" \
- " .align 2\n" \
-@@ -278,7 +279,7 @@ do { \
-
- #define __get_user_asm_word(x,addr,err) \
- __asm__ __volatile__( \
-- "1: " T(ldr) " %1,[%2],#0\n" \
-+ "1: " TUSER(ldr) " %1,[%2],#0\n" \
- "2:\n" \
- " .pushsection .fixup,\"ax\"\n" \
- " .align 2\n" \
-@@ -323,7 +324,7 @@ do { \
-
- #define __put_user_asm_byte(x,__pu_addr,err) \
- __asm__ __volatile__( \
-- "1: " T(strb) " %1,[%2],#0\n" \
-+ "1: " TUSER(strb) " %1,[%2],#0\n" \
- "2:\n" \
- " .pushsection .fixup,\"ax\"\n" \
- " .align 2\n" \
-@@ -356,7 +357,7 @@ do { \
-
- #define __put_user_asm_word(x,__pu_addr,err) \
- __asm__ __volatile__( \
-- "1: " T(str) " %1,[%2],#0\n" \
-+ "1: " TUSER(str) " %1,[%2],#0\n" \
- "2:\n" \
- " .pushsection .fixup,\"ax\"\n" \
- " .align 2\n" \
-@@ -381,10 +382,10 @@ do { \
-
- #define __put_user_asm_dword(x,__pu_addr,err) \
- __asm__ __volatile__( \
-- ARM( "1: " T(str) " " __reg_oper1 ", [%1], #4\n" ) \
-- ARM( "2: " T(str) " " __reg_oper0 ", [%1]\n" ) \
-- THUMB( "1: " T(str) " " __reg_oper1 ", [%1]\n" ) \
-- THUMB( "2: " T(str) " " __reg_oper0 ", [%1, #4]\n" ) \
-+ ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \
-+ ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \
-+ THUMB( "1: " TUSER(str) " " __reg_oper1 ", [%1]\n" ) \
-+ THUMB( "2: " TUSER(str) " " __reg_oper0 ", [%1, #4]\n" ) \
- "3:\n" \
- " .pushsection .fixup,\"ax\"\n" \
- " .align 2\n" \
-@@ -402,8 +403,21 @@ do { \
-
-
- #ifdef CONFIG_MMU
--extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
--extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
-+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
-+
-+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
-+{
-+ check_object_size(to, n, false);
-+ return ___copy_from_user(to, from, n);
-+}
-+
-+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
-+{
-+ check_object_size(from, n, true);
-+ return ___copy_to_user(to, from, n);
-+}
-+
- extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
- extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
- extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
-@@ -418,6 +432,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
-
- static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
- {
-+ if ((long)n < 0)
-+ return n;
-+
- if (access_ok(VERIFY_READ, from, n))
- n = __copy_from_user(to, from, n);
- else /* security hole - plug it */
-@@ -427,6 +444,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
-
- static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
- {
-+ if ((long)n < 0)
-+ return n;
-+
- if (access_ok(VERIFY_WRITE, to, n))
- n = __copy_to_user(to, from, n);
- return n;
-diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
-index 5b0bce6..becd81c 100644
---- a/arch/arm/kernel/armksyms.c
-+++ b/arch/arm/kernel/armksyms.c
-@@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
- #ifdef CONFIG_MMU
- EXPORT_SYMBOL(copy_page);
-
--EXPORT_SYMBOL(__copy_from_user);
--EXPORT_SYMBOL(__copy_to_user);
-+EXPORT_SYMBOL(___copy_from_user);
-+EXPORT_SYMBOL(___copy_to_user);
- EXPORT_SYMBOL(__clear_user);
-
- EXPORT_SYMBOL(__get_user_1);
-diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
-index b2a27b6..520889c 100644
---- a/arch/arm/kernel/entry-common.S
-+++ b/arch/arm/kernel/entry-common.S
-@@ -87,7 +87,7 @@ ENTRY(ret_from_fork)
- get_thread_info tsk
- ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing
- mov why, #1
-- tst r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
-+ tst r1, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
- beq ret_slow_syscall
- mov r1, sp
- mov r0, #1 @ trace exit [IP = 1]
-@@ -443,7 +443,7 @@ ENTRY(vector_swi)
- 1:
- #endif
-
-- tst r10, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
-+ tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
- bne __sys_trace
-
- cmp scno, #NR_syscalls @ check upper syscall limit
-diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
-index 3606e85..44ba19d 100644
---- a/arch/arm/kernel/head.S
-+++ b/arch/arm/kernel/head.S
-@@ -46,7 +46,9 @@
- .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
-
- .macro pgtbl, rd, phys
-- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
-+ mov \rd, #TEXT_OFFSET
-+ sub \rd, #PG_DIR_SIZE
-+ add \rd, \rd, \phys
- .endm
-
- #ifdef CONFIG_XIP_KERNEL
-diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
-index 2bc1a8e..f433c88 100644
---- a/arch/arm/kernel/hw_breakpoint.c
-+++ b/arch/arm/kernel/hw_breakpoint.c
-@@ -986,7 +986,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata dbg_reset_nb = {
-+static struct notifier_block dbg_reset_nb = {
- .notifier_call = dbg_reset_notify,
- };
-
-diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
-index 1e9be5d..4e0f470 100644
---- a/arch/arm/kernel/module.c
-+++ b/arch/arm/kernel/module.c
-@@ -39,6 +39,8 @@
- #ifdef CONFIG_MMU
- void *module_alloc(unsigned long size)
- {
-+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
-+ return NULL;
- return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
- __builtin_return_address(0));
-diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index d9e3c61..9cf9513 100644
---- a/arch/arm/kernel/process.c
-+++ b/arch/arm/kernel/process.c
-@@ -28,7 +28,6 @@
- #include <linux/tick.h>
- #include <linux/utsname.h>
- #include <linux/uaccess.h>
--#include <linux/random.h>
- #include <linux/hw_breakpoint.h>
- #include <linux/cpuidle.h>
-
-@@ -92,7 +91,7 @@ static int __init hlt_setup(char *__unused)
- __setup("nohlt", nohlt_setup);
- __setup("hlt", hlt_setup);
-
--void arm_machine_restart(char mode, const char *cmd)
-+__noreturn void arm_machine_restart(char mode, const char *cmd)
- {
- /* Disable interrupts first */
- local_irq_disable();
-@@ -135,7 +134,7 @@ void arm_machine_restart(char mode, const char *cmd)
- void (*pm_power_off)(void);
- EXPORT_SYMBOL(pm_power_off);
-
--void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
-+void (*arm_pm_restart)(char str, const char *cmd) __noreturn = arm_machine_restart;
- EXPORT_SYMBOL_GPL(arm_pm_restart);
-
- static void do_nothing(void *unused)
-@@ -250,6 +249,7 @@ void machine_power_off(void)
- machine_shutdown();
- if (pm_power_off)
- pm_power_off();
-+ BUG();
- }
-
- void machine_restart(char *cmd)
-@@ -268,8 +268,8 @@ void __show_regs(struct pt_regs *regs)
- init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
-- print_symbol("PC is at %s\n", instruction_pointer(regs));
-- print_symbol("LR is at %s\n", regs->ARM_lr);
-+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
-+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
- printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
- "sp : %08lx ip : %08lx fp : %08lx\n",
- regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
-@@ -489,12 +489,6 @@ unsigned long get_wchan(struct task_struct *p)
- return 0;
- }
-
--unsigned long arch_randomize_brk(struct mm_struct *mm)
--{
-- unsigned long range_end = mm->brk + 0x02000000;
-- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
--}
--
- #ifdef CONFIG_MMU
- /*
- * The vectors page is always readable from user space for the
-diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
-index 90fa8b3..a3a2212 100644
---- a/arch/arm/kernel/ptrace.c
-+++ b/arch/arm/kernel/ptrace.c
-@@ -904,10 +904,19 @@ long arch_ptrace(struct task_struct *child, long request,
- return ret;
- }
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+extern void gr_delayed_cred_worker(void);
-+#endif
-+
- asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
- {
- unsigned long ip;
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
-+ gr_delayed_cred_worker();
-+#endif
-+
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- return scno;
- if (!(current->ptrace & PT_PTRACED))
-diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
-index d45fd22..1f2471d 100644
---- a/arch/arm/kernel/traps.c
-+++ b/arch/arm/kernel/traps.c
-@@ -63,7 +63,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
- void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
- {
- #ifdef CONFIG_KALLSYMS
-- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
-+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
- #else
- printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
- #endif
-@@ -265,6 +265,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
-
- static DEFINE_RAW_SPINLOCK(die_lock);
-
-+extern void gr_handle_kernel_exploit(void);
-+
- /*
- * This function is protected against re-entrancy.
- */
-@@ -294,6 +296,9 @@ void die(const char *str, struct pt_regs *regs, int err)
- panic("Fatal exception in interrupt");
- if (panic_on_oops)
- panic("Fatal exception");
-+
-+ gr_handle_kernel_exploit();
-+
- if (ret != NOTIFY_STOP)
- do_exit(SIGSEGV);
- }
-diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
-index 20b3041..da44b1f 100644
---- a/arch/arm/kernel/vmlinux.lds.S
-+++ b/arch/arm/kernel/vmlinux.lds.S
-@@ -103,6 +103,8 @@ SECTIONS
- ARM_CPU_KEEP(PROC_INFO)
- }
-
-+ _etext = .; /* End of text section */
-+
- RO_DATA(PAGE_SIZE)
-
- #ifdef CONFIG_ARM_UNWIND
-@@ -122,8 +124,6 @@ SECTIONS
- }
- #endif
-
-- _etext = .; /* End of text and rodata section */
--
- #ifndef CONFIG_XIP_KERNEL
- . = ALIGN(PAGE_SIZE);
- __init_begin = .;
-diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
-index 66a477a..bee61d3 100644
---- a/arch/arm/lib/copy_from_user.S
-+++ b/arch/arm/lib/copy_from_user.S
-@@ -16,7 +16,7 @@
- /*
- * Prototype:
- *
-- * size_t __copy_from_user(void *to, const void *from, size_t n)
-+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
- *
- * Purpose:
- *
-@@ -84,11 +84,11 @@
-
- .text
-
--ENTRY(__copy_from_user)
-+ENTRY(___copy_from_user)
-
- #include "copy_template.S"
-
--ENDPROC(__copy_from_user)
-+ENDPROC(___copy_from_user)
-
- .pushsection .fixup,"ax"
- .align 0
-diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
-index 6ee2f67..d1cce76 100644
---- a/arch/arm/lib/copy_page.S
-+++ b/arch/arm/lib/copy_page.S
-@@ -10,6 +10,7 @@
- * ASM optimised string functions
- */
- #include <linux/linkage.h>
-+#include <linux/const.h>
- #include <asm/assembler.h>
- #include <asm/asm-offsets.h>
- #include <asm/cache.h>
-diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
-index d066df6..df28194 100644
---- a/arch/arm/lib/copy_to_user.S
-+++ b/arch/arm/lib/copy_to_user.S
-@@ -16,7 +16,7 @@
- /*
- * Prototype:
- *
-- * size_t __copy_to_user(void *to, const void *from, size_t n)
-+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
- *
- * Purpose:
- *
-@@ -88,11 +88,11 @@
- .text
-
- ENTRY(__copy_to_user_std)
--WEAK(__copy_to_user)
-+WEAK(___copy_to_user)
-
- #include "copy_template.S"
-
--ENDPROC(__copy_to_user)
-+ENDPROC(___copy_to_user)
- ENDPROC(__copy_to_user_std)
-
- .pushsection .fixup,"ax"
-diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
-index 4306fbf..9b06bb4 100644
---- a/arch/arm/lib/getuser.S
-+++ b/arch/arm/lib/getuser.S
-@@ -34,7 +34,7 @@
-
- ENTRY(__get_user_1)
- check_uaccess r0, 1, r1, r2, __get_user_bad
--1: T(ldrb) r2, [r0]
-+1: TUSER(ldrb) r2, [r0]
- mov r0, #0
- mov pc, lr
- ENDPROC(__get_user_1)
-@@ -61,7 +61,7 @@ ENDPROC(__get_user_2)
-
- ENTRY(__get_user_4)
- check_uaccess r0, 4, r1, r2, __get_user_bad
--4: T(ldr) r2, [r0]
-+4: TUSER(ldr) r2, [r0]
- mov r0, #0
- mov pc, lr
- ENDPROC(__get_user_4)
-diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
-index 9a897fa..3d73dcb 100644
---- a/arch/arm/lib/putuser.S
-+++ b/arch/arm/lib/putuser.S
-@@ -34,7 +34,7 @@
-
- ENTRY(__put_user_1)
- check_uaccess r0, 1, r1, ip, __put_user_bad
--1: T(strb) r2, [r0]
-+1: TUSER(strb) r2, [r0]
- mov r0, #0
- mov pc, lr
- ENDPROC(__put_user_1)
-@@ -44,19 +44,19 @@ ENTRY(__put_user_2)
- mov ip, r2, lsr #8
- #ifdef CONFIG_THUMB2_KERNEL
- #ifndef __ARMEB__
--2: T(strb) r2, [r0]
--3: T(strb) ip, [r0, #1]
-+2: TUSER(strb) r2, [r0]
-+3: TUSER(strb) ip, [r0, #1]
- #else
--2: T(strb) ip, [r0]
--3: T(strb) r2, [r0, #1]
-+2: TUSER(strb) ip, [r0]
-+3: TUSER(strb) r2, [r0, #1]
- #endif
- #else /* !CONFIG_THUMB2_KERNEL */
- #ifndef __ARMEB__
--2: T(strb) r2, [r0], #1
--3: T(strb) ip, [r0]
-+2: TUSER(strb) r2, [r0], #1
-+3: TUSER(strb) ip, [r0]
- #else
--2: T(strb) ip, [r0], #1
--3: T(strb) r2, [r0]
-+2: TUSER(strb) ip, [r0], #1
-+3: TUSER(strb) r2, [r0]
- #endif
- #endif /* CONFIG_THUMB2_KERNEL */
- mov r0, #0
-@@ -65,7 +65,7 @@ ENDPROC(__put_user_2)
-
- ENTRY(__put_user_4)
- check_uaccess r0, 4, r1, ip, __put_user_bad
--4: T(str) r2, [r0]
-+4: TUSER(str) r2, [r0]
- mov r0, #0
- mov pc, lr
- ENDPROC(__put_user_4)
-@@ -73,11 +73,11 @@ ENDPROC(__put_user_4)
- ENTRY(__put_user_8)
- check_uaccess r0, 8, r1, ip, __put_user_bad
- #ifdef CONFIG_THUMB2_KERNEL
--5: T(str) r2, [r0]
--6: T(str) r3, [r0, #4]
-+5: TUSER(str) r2, [r0]
-+6: TUSER(str) r3, [r0, #4]
- #else
--5: T(str) r2, [r0], #4
--6: T(str) r3, [r0]
-+5: TUSER(str) r2, [r0], #4
-+6: TUSER(str) r3, [r0]
- #endif
- mov r0, #0
- mov pc, lr
-diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
-index d0ece2a..e712687 100644
---- a/arch/arm/lib/uaccess.S
-+++ b/arch/arm/lib/uaccess.S
-@@ -20,7 +20,7 @@
-
- #define PAGE_SHIFT 12
-
--/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
-+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
- * Purpose : copy a block to user memory from kernel memory
- * Params : to - user memory
- * : from - kernel memory
-@@ -32,15 +32,15 @@
- rsb ip, ip, #4
- cmp ip, #2
- ldrb r3, [r1], #1
--USER( T(strb) r3, [r0], #1) @ May fault
-+USER( TUSER( strb) r3, [r0], #1) @ May fault
- ldrgeb r3, [r1], #1
--USER( T(strgeb) r3, [r0], #1) @ May fault
-+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
- ldrgtb r3, [r1], #1
--USER( T(strgtb) r3, [r0], #1) @ May fault
-+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
- sub r2, r2, ip
- b .Lc2u_dest_aligned
-
--ENTRY(__copy_to_user)
-+ENTRY(___copy_to_user)
- stmfd sp!, {r2, r4 - r7, lr}
- cmp r2, #4
- blt .Lc2u_not_enough
-@@ -59,7 +59,7 @@ ENTRY(__copy_to_user)
- addmi ip, r2, #4
- bmi .Lc2u_0nowords
- ldr r3, [r1], #4
--USER( T(str) r3, [r0], #4) @ May fault
-+USER( TUSER( str) r3, [r0], #4) @ May fault
- mov ip, r0, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction
- rsb ip, ip, #0
- movs ip, ip, lsr #32 - PAGE_SHIFT
-@@ -88,18 +88,18 @@ USER( T(str) r3, [r0], #4) @ May fault
- stmneia r0!, {r3 - r4} @ Shouldnt fault
- tst ip, #4
- ldrne r3, [r1], #4
-- T(strne) r3, [r0], #4 @ Shouldnt fault
-+ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
- ands ip, ip, #3
- beq .Lc2u_0fupi
- .Lc2u_0nowords: teq ip, #0
- beq .Lc2u_finished
- .Lc2u_nowords: cmp ip, #2
- ldrb r3, [r1], #1
--USER( T(strb) r3, [r0], #1) @ May fault
-+USER( TUSER( strb) r3, [r0], #1) @ May fault
- ldrgeb r3, [r1], #1
--USER( T(strgeb) r3, [r0], #1) @ May fault
-+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
- ldrgtb r3, [r1], #1
--USER( T(strgtb) r3, [r0], #1) @ May fault
-+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
- b .Lc2u_finished
-
- .Lc2u_not_enough:
-@@ -120,7 +120,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
- mov r3, r7, pull #8
- ldr r7, [r1], #4
- orr r3, r3, r7, push #24
--USER( T(str) r3, [r0], #4) @ May fault
-+USER( TUSER( str) r3, [r0], #4) @ May fault
- mov ip, r0, lsl #32 - PAGE_SHIFT
- rsb ip, ip, #0
- movs ip, ip, lsr #32 - PAGE_SHIFT
-@@ -155,18 +155,18 @@ USER( T(str) r3, [r0], #4) @ May fault
- movne r3, r7, pull #8
- ldrne r7, [r1], #4
- orrne r3, r3, r7, push #24
-- T(strne) r3, [r0], #4 @ Shouldnt fault
-+ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
- ands ip, ip, #3
- beq .Lc2u_1fupi
- .Lc2u_1nowords: mov r3, r7, get_byte_1
- teq ip, #0
- beq .Lc2u_finished
- cmp ip, #2
--USER( T(strb) r3, [r0], #1) @ May fault
-+USER( TUSER( strb) r3, [r0], #1) @ May fault
- movge r3, r7, get_byte_2
--USER( T(strgeb) r3, [r0], #1) @ May fault
-+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
- movgt r3, r7, get_byte_3
--USER( T(strgtb) r3, [r0], #1) @ May fault
-+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
- b .Lc2u_finished
-
- .Lc2u_2fupi: subs r2, r2, #4
-@@ -175,7 +175,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
- mov r3, r7, pull #16
- ldr r7, [r1], #4
- orr r3, r3, r7, push #16
--USER( T(str) r3, [r0], #4) @ May fault
-+USER( TUSER( str) r3, [r0], #4) @ May fault
- mov ip, r0, lsl #32 - PAGE_SHIFT
- rsb ip, ip, #0
- movs ip, ip, lsr #32 - PAGE_SHIFT
-@@ -210,18 +210,18 @@ USER( T(str) r3, [r0], #4) @ May fault
- movne r3, r7, pull #16
- ldrne r7, [r1], #4
- orrne r3, r3, r7, push #16
-- T(strne) r3, [r0], #4 @ Shouldnt fault
-+ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
- ands ip, ip, #3
- beq .Lc2u_2fupi
- .Lc2u_2nowords: mov r3, r7, get_byte_2
- teq ip, #0
- beq .Lc2u_finished
- cmp ip, #2
--USER( T(strb) r3, [r0], #1) @ May fault
-+USER( TUSER( strb) r3, [r0], #1) @ May fault
- movge r3, r7, get_byte_3
--USER( T(strgeb) r3, [r0], #1) @ May fault
-+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
- ldrgtb r3, [r1], #0
--USER( T(strgtb) r3, [r0], #1) @ May fault
-+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
- b .Lc2u_finished
-
- .Lc2u_3fupi: subs r2, r2, #4
-@@ -230,7 +230,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
- mov r3, r7, pull #24
- ldr r7, [r1], #4
- orr r3, r3, r7, push #8
--USER( T(str) r3, [r0], #4) @ May fault
-+USER( TUSER( str) r3, [r0], #4) @ May fault
- mov ip, r0, lsl #32 - PAGE_SHIFT
- rsb ip, ip, #0
- movs ip, ip, lsr #32 - PAGE_SHIFT
-@@ -265,27 +265,27 @@ USER( T(str) r3, [r0], #4) @ May fault
- movne r3, r7, pull #24
- ldrne r7, [r1], #4
- orrne r3, r3, r7, push #8
-- T(strne) r3, [r0], #4 @ Shouldnt fault
-+ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
- ands ip, ip, #3
- beq .Lc2u_3fupi
- .Lc2u_3nowords: mov r3, r7, get_byte_3
- teq ip, #0
- beq .Lc2u_finished
- cmp ip, #2
--USER( T(strb) r3, [r0], #1) @ May fault
-+USER( TUSER( strb) r3, [r0], #1) @ May fault
- ldrgeb r3, [r1], #1
--USER( T(strgeb) r3, [r0], #1) @ May fault
-+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
- ldrgtb r3, [r1], #0
--USER( T(strgtb) r3, [r0], #1) @ May fault
-+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
- b .Lc2u_finished
--ENDPROC(__copy_to_user)
-+ENDPROC(___copy_to_user)
-
- .pushsection .fixup,"ax"
- .align 0
- 9001: ldmfd sp!, {r0, r4 - r7, pc}
- .popsection
-
--/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
-+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
- * Purpose : copy a block from user memory to kernel memory
- * Params : to - kernel memory
- * : from - user memory
-@@ -295,16 +295,16 @@ ENDPROC(__copy_to_user)
- .Lcfu_dest_not_aligned:
- rsb ip, ip, #4
- cmp ip, #2
--USER( T(ldrb) r3, [r1], #1) @ May fault
-+USER( TUSER( ldrb) r3, [r1], #1) @ May fault
- strb r3, [r0], #1
--USER( T(ldrgeb) r3, [r1], #1) @ May fault
-+USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
- strgeb r3, [r0], #1
--USER( T(ldrgtb) r3, [r1], #1) @ May fault
-+USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
- strgtb r3, [r0], #1
- sub r2, r2, ip
- b .Lcfu_dest_aligned
-
--ENTRY(__copy_from_user)
-+ENTRY(___copy_from_user)
- stmfd sp!, {r0, r2, r4 - r7, lr}
- cmp r2, #4
- blt .Lcfu_not_enough
-@@ -322,7 +322,7 @@ ENTRY(__copy_from_user)
- .Lcfu_0fupi: subs r2, r2, #4
- addmi ip, r2, #4
- bmi .Lcfu_0nowords
--USER( T(ldr) r3, [r1], #4)
-+USER( TUSER( ldr) r3, [r1], #4)
- str r3, [r0], #4
- mov ip, r1, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction
- rsb ip, ip, #0
-@@ -351,18 +351,18 @@ USER( T(ldr) r3, [r1], #4)
- ldmneia r1!, {r3 - r4} @ Shouldnt fault
- stmneia r0!, {r3 - r4}
- tst ip, #4
-- T(ldrne) r3, [r1], #4 @ Shouldnt fault
-+ TUSER( ldrne) r3, [r1], #4 @ Shouldnt fault
- strne r3, [r0], #4
- ands ip, ip, #3
- beq .Lcfu_0fupi
- .Lcfu_0nowords: teq ip, #0
- beq .Lcfu_finished
- .Lcfu_nowords: cmp ip, #2
--USER( T(ldrb) r3, [r1], #1) @ May fault
-+USER( TUSER( ldrb) r3, [r1], #1) @ May fault
- strb r3, [r0], #1
--USER( T(ldrgeb) r3, [r1], #1) @ May fault
-+USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
- strgeb r3, [r0], #1
--USER( T(ldrgtb) r3, [r1], #1) @ May fault
-+USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
- strgtb r3, [r0], #1
- b .Lcfu_finished
-
-@@ -375,7 +375,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
-
- .Lcfu_src_not_aligned:
- bic r1, r1, #3
--USER( T(ldr) r7, [r1], #4) @ May fault
-+USER( TUSER( ldr) r7, [r1], #4) @ May fault
- cmp ip, #2
- bgt .Lcfu_3fupi
- beq .Lcfu_2fupi
-@@ -383,7 +383,7 @@ USER( T(ldr) r7, [r1], #4) @ May fault
- addmi ip, r2, #4
- bmi .Lcfu_1nowords
- mov r3, r7, pull #8
--USER( T(ldr) r7, [r1], #4) @ May fault
-+USER( TUSER( ldr) r7, [r1], #4) @ May fault
- orr r3, r3, r7, push #24
- str r3, [r0], #4
- mov ip, r1, lsl #32 - PAGE_SHIFT
-@@ -418,7 +418,7 @@ USER( T(ldr) r7, [r1], #4) @ May fault
- stmneia r0!, {r3 - r4}
- tst ip, #4
- movne r3, r7, pull #8
--USER( T(ldrne) r7, [r1], #4) @ May fault
-+USER( TUSER( ldrne) r7, [r1], #4) @ May fault
- orrne r3, r3, r7, push #24
- strne r3, [r0], #4
- ands ip, ip, #3
-@@ -438,7 +438,7 @@ USER( T(ldrne) r7, [r1], #4) @ May fault
- addmi ip, r2, #4
- bmi .Lcfu_2nowords
- mov r3, r7, pull #16
--USER( T(ldr) r7, [r1], #4) @ May fault
-+USER( TUSER( ldr) r7, [r1], #4) @ May fault
- orr r3, r3, r7, push #16
- str r3, [r0], #4
- mov ip, r1, lsl #32 - PAGE_SHIFT
-@@ -474,7 +474,7 @@ USER( T(ldr) r7, [r1], #4) @ May fault
- stmneia r0!, {r3 - r4}
- tst ip, #4
- movne r3, r7, pull #16
--USER( T(ldrne) r7, [r1], #4) @ May fault
-+USER( TUSER( ldrne) r7, [r1], #4) @ May fault
- orrne r3, r3, r7, push #16
- strne r3, [r0], #4
- ands ip, ip, #3
-@@ -486,7 +486,7 @@ USER( T(ldrne) r7, [r1], #4) @ May fault
- strb r3, [r0], #1
- movge r3, r7, get_byte_3
- strgeb r3, [r0], #1
--USER( T(ldrgtb) r3, [r1], #0) @ May fault
-+USER( TUSER( ldrgtb) r3, [r1], #0) @ May fault
- strgtb r3, [r0], #1
- b .Lcfu_finished
-
-@@ -494,7 +494,7 @@ USER( T(ldrgtb) r3, [r1], #0) @ May fault
- addmi ip, r2, #4
- bmi .Lcfu_3nowords
- mov r3, r7, pull #24
--USER( T(ldr) r7, [r1], #4) @ May fault
-+USER( TUSER( ldr) r7, [r1], #4) @ May fault
- orr r3, r3, r7, push #8
- str r3, [r0], #4
- mov ip, r1, lsl #32 - PAGE_SHIFT
-@@ -529,7 +529,7 @@ USER( T(ldr) r7, [r1], #4) @ May fault
- stmneia r0!, {r3 - r4}
- tst ip, #4
- movne r3, r7, pull #24
--USER( T(ldrne) r7, [r1], #4) @ May fault
-+USER( TUSER( ldrne) r7, [r1], #4) @ May fault
- orrne r3, r3, r7, push #8
- strne r3, [r0], #4
- ands ip, ip, #3
-@@ -539,12 +539,12 @@ USER( T(ldrne) r7, [r1], #4) @ May fault
- beq .Lcfu_finished
- cmp ip, #2
- strb r3, [r0], #1
--USER( T(ldrgeb) r3, [r1], #1) @ May fault
-+USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
- strgeb r3, [r0], #1
--USER( T(ldrgtb) r3, [r1], #1) @ May fault
-+USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
- strgtb r3, [r0], #1
- b .Lcfu_finished
--ENDPROC(__copy_from_user)
-+ENDPROC(___copy_from_user)
-
- .pushsection .fixup,"ax"
- .align 0
-diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
-index 025f742..8432b08 100644
---- a/arch/arm/lib/uaccess_with_memcpy.c
-+++ b/arch/arm/lib/uaccess_with_memcpy.c
-@@ -104,7 +104,7 @@ out:
- }
-
- unsigned long
--__copy_to_user(void __user *to, const void *from, unsigned long n)
-+___copy_to_user(void __user *to, const void *from, unsigned long n)
- {
- /*
- * This test is stubbed out of the main function above to keep
-diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
-index e9d5f4a..f099699 100644
---- a/arch/arm/mach-omap2/board-n8x0.c
-+++ b/arch/arm/mach-omap2/board-n8x0.c
-@@ -593,7 +593,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
- }
- #endif
-
--static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
-+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
- .late_init = n8x0_menelaus_late_init,
- };
-
-diff --git a/arch/arm/mach-omap2/smartreflex.h b/arch/arm/mach-omap2/smartreflex.h
-index 5f35b9e..6d09f99 100644
---- a/arch/arm/mach-omap2/smartreflex.h
-+++ b/arch/arm/mach-omap2/smartreflex.h
-@@ -183,7 +183,7 @@ struct omap_sr_class_data {
- int (*notify)(struct voltagedomain *voltdm, u32 status);
- u8 notify_flags;
- u8 class_type;
--};
-+} __do_const;
-
- /**
- * struct omap_sr_nvalue_table - Smartreflex n-target value info
-diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
-index 2b2d51c..0127490 100644
---- a/arch/arm/mach-ux500/mbox-db5500.c
-+++ b/arch/arm/mach-ux500/mbox-db5500.c
-@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
- return sprintf(buf, "0x%X\n", mbox_value);
- }
-
--static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
-+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
-
- static int mbox_show(struct seq_file *s, void *data)
- {
-diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
-index 4b0bc37..d556b08 100644
---- a/arch/arm/mm/fault.c
-+++ b/arch/arm/mm/fault.c
-@@ -386,6 +386,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
- }
- #endif /* CONFIG_MMU */
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 20; i++) {
-+ unsigned char c;
-+ if (get_user(c, (__force unsigned char __user *)pc+i))
-+ printk(KERN_CONT "?? ");
-+ else
-+ printk(KERN_CONT "%02x ", c);
-+ }
-+ printk("\n");
-+
-+ printk(KERN_ERR "PAX: bytes at SP-4: ");
-+ for (i = -1; i < 20; i++) {
-+ unsigned long c;
-+ if (get_user(c, (__force unsigned long __user *)sp+i))
-+ printk(KERN_CONT "???????? ");
-+ else
-+ printk(KERN_CONT "%08lx ", c);
-+ }
-+ printk("\n");
-+}
-+#endif
-+
- /*
- * First Level Translation Fault Handler
- *
-@@ -630,6 +657,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
- const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
- struct siginfo info;
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+ if (fsr_fs(ifsr) == 2) {
-+ unsigned int bkpt;
-+
-+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
-+ current->thread.error_code = ifsr;
-+ current->thread.trap_no = 0;
-+ pax_report_refcount_overflow(regs);
-+ fixup_exception(regs);
-+ return;
-+ }
-+ }
-+#endif
-+
- if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
- return;
-
-diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
-index 44b628e..be706ee 100644
---- a/arch/arm/mm/mmap.c
-+++ b/arch/arm/mm/mmap.c
-@@ -33,6 +33,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long start_addr;
- int do_align = 0;
- int aliasing = cache_is_vipt_aliasing();
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
-
- /*
- * We only need to do colour alignment if either the I or D
-@@ -54,6 +55,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
- if (len > TASK_SIZE)
- return -ENOMEM;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (addr) {
- if (do_align)
- addr = COLOUR_ALIGN(addr, pgoff);
-@@ -61,16 +66,20 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma(mm, addr);
-- if (TASK_SIZE - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
- return addr;
- }
- if (len > mm->cached_hole_size) {
-- start_addr = addr = mm->free_area_cache;
-+ start_addr = addr = mm->free_area_cache;
- } else {
-- start_addr = addr = TASK_UNMAPPED_BASE;
-- mm->cached_hole_size = 0;
-+ start_addr = addr = mm->mmap_base;
-+ mm->cached_hole_size = 0;
- }
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- /* 8 bits of randomness in 20 address space bits */
- if ((current->flags & PF_RANDOMIZE) &&
- !(current->personality & ADDR_NO_RANDOMIZE))
-@@ -89,14 +98,14 @@ full_search:
- * Start a new search - just in case we missed
- * some holes.
- */
-- if (start_addr != TASK_UNMAPPED_BASE) {
-- start_addr = addr = TASK_UNMAPPED_BASE;
-+ if (start_addr != mm->mmap_base) {
-+ start_addr = addr = mm->mmap_base;
- mm->cached_hole_size = 0;
- goto full_search;
- }
- return -ENOMEM;
- }
-- if (!vma || addr + len <= vma->vm_start) {
-+ if (check_heap_stack_gap(vma, &addr, len, offset)) {
- /*
- * Remember the place where we stopped the search:
- */
-@@ -111,7 +120,6 @@ full_search:
- }
- }
-
--
- /*
- * You really shouldn't be using read() or write() on /dev/mem. This
- * might go away in the future.
-diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
-index 4c1a363..df311d0 100644
---- a/arch/arm/plat-samsung/include/plat/dma-ops.h
-+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
-@@ -41,7 +41,7 @@ struct samsung_dma_ops {
- int (*started)(unsigned ch);
- int (*flush)(unsigned ch);
- int (*stop)(unsigned ch);
--};
-+} __no_const;
-
- extern void *samsung_dmadev_get_ops(void);
- extern void *s3c_dma_get_ops(void);
-diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
-index 5f28cae..3d23723 100644
---- a/arch/arm/plat-samsung/include/plat/ehci.h
-+++ b/arch/arm/plat-samsung/include/plat/ehci.h
-@@ -14,7 +14,7 @@
- struct s5p_ehci_platdata {
- int (*phy_init)(struct platform_device *pdev, int type);
- int (*phy_exit)(struct platform_device *pdev, int type);
--};
-+} __no_const;
-
- extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
-
-diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
-index c3a58a1..78fbf54 100644
---- a/arch/avr32/include/asm/cache.h
-+++ b/arch/avr32/include/asm/cache.h
-@@ -1,8 +1,10 @@
- #ifndef __ASM_AVR32_CACHE_H
- #define __ASM_AVR32_CACHE_H
-
-+#include <linux/const.h>
-+
- #define L1_CACHE_SHIFT 5
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- /*
- * Memory returned by kmalloc() may be used for DMA, so we must make
-diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
-index 3b3159b..425ea94d 100644
---- a/arch/avr32/include/asm/elf.h
-+++ b/arch/avr32/include/asm/elf.h
-@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
--#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
-+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
-+
-+#define PAX_DELTA_MMAP_LEN 15
-+#define PAX_DELTA_STACK_LEN 15
-+#endif
-
- /* This yields a mask that user programs can use to figure out what
- instruction set this CPU supports. This could be done in user space,
-diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
-index b7f5c68..556135c 100644
---- a/arch/avr32/include/asm/kmap_types.h
-+++ b/arch/avr32/include/asm/kmap_types.h
-@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
- D(11) KM_IRQ1,
- D(12) KM_SOFTIRQ0,
- D(13) KM_SOFTIRQ1,
--D(14) KM_TYPE_NR
-+D(14) KM_CLEARPAGE,
-+D(15) KM_TYPE_NR
- };
-
- #undef D
-diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
-index 632b649..043ddd2 100644
---- a/arch/avr32/mm/fault.c
-+++ b/arch/avr32/mm/fault.c
-@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
-
- int exception_trace = 1;
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ unsigned long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 20; i++) {
-+ unsigned char c;
-+ if (get_user(c, (unsigned char *)pc+i))
-+ printk(KERN_CONT "???????? ");
-+ else
-+ printk(KERN_CONT "%02x ", c);
-+ }
-+ printk("\n");
-+}
-+#endif
-+
- /*
- * This routine handles page faults. It determines the address and the
- * problem, and then passes it off to one of the appropriate routines.
-@@ -158,6 +175,16 @@ bad_area:
- up_read(&mm->mmap_sem);
-
- if (user_mode(regs)) {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
-+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
-+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
-+ do_group_exit(SIGKILL);
-+ }
-+ }
-+#endif
-+
- if (exception_trace && printk_ratelimit())
- printk("%s%s[%d]: segfault at %08lx pc %08lx "
- "sp %08lx ecr %lu\n",
-diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
-index 568885a..f8008df 100644
---- a/arch/blackfin/include/asm/cache.h
-+++ b/arch/blackfin/include/asm/cache.h
-@@ -7,6 +7,7 @@
- #ifndef __ARCH_BLACKFIN_CACHE_H
- #define __ARCH_BLACKFIN_CACHE_H
-
-+#include <linux/const.h>
- #include <linux/linkage.h> /* for asmlinkage */
-
- /*
-@@ -14,7 +15,7 @@
- * Blackfin loads 32 bytes for cache
- */
- #define L1_CACHE_SHIFT 5
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
- #define SMP_CACHE_BYTES L1_CACHE_BYTES
-
- #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
-diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
-index aea2718..3639a60 100644
---- a/arch/cris/include/arch-v10/arch/cache.h
-+++ b/arch/cris/include/arch-v10/arch/cache.h
-@@ -1,8 +1,9 @@
- #ifndef _ASM_ARCH_CACHE_H
- #define _ASM_ARCH_CACHE_H
-
-+#include <linux/const.h>
- /* Etrax 100LX have 32-byte cache-lines. */
--#define L1_CACHE_BYTES 32
- #define L1_CACHE_SHIFT 5
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #endif /* _ASM_ARCH_CACHE_H */
-diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
-index 1de779f..336fad3 100644
---- a/arch/cris/include/arch-v32/arch/cache.h
-+++ b/arch/cris/include/arch-v32/arch/cache.h
-@@ -1,11 +1,12 @@
- #ifndef _ASM_CRIS_ARCH_CACHE_H
- #define _ASM_CRIS_ARCH_CACHE_H
-
-+#include <linux/const.h>
- #include <arch/hwregs/dma.h>
-
- /* A cache-line is 32 bytes. */
--#define L1_CACHE_BYTES 32
- #define L1_CACHE_SHIFT 5
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #define __read_mostly __attribute__((__section__(".data.read_mostly")))
-
-diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
-index 0d8a7d6..d0c9ff5 100644
---- a/arch/frv/include/asm/atomic.h
-+++ b/arch/frv/include/asm/atomic.h
-@@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
- #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
- #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
-
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
-+
- static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
- {
- int c, old;
-diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
-index 2797163..c2a401df9 100644
---- a/arch/frv/include/asm/cache.h
-+++ b/arch/frv/include/asm/cache.h
-@@ -12,10 +12,11 @@
- #ifndef __ASM_CACHE_H
- #define __ASM_CACHE_H
-
-+#include <linux/const.h>
-
- /* bytes per L1 cache line */
- #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
- #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
-diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
-index f8e16b2..c73ff79 100644
---- a/arch/frv/include/asm/kmap_types.h
-+++ b/arch/frv/include/asm/kmap_types.h
-@@ -23,6 +23,7 @@ enum km_type {
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
-+ KM_CLEARPAGE,
- KM_TYPE_NR
- };
-
-diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
-index 385fd30..27cf8ba 100644
---- a/arch/frv/mm/elf-fdpic.c
-+++ b/arch/frv/mm/elf-fdpic.c
-@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- {
- struct vm_area_struct *vma;
- unsigned long limit;
-+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
-
- if (len > TASK_SIZE)
- return -ENOMEM;
-@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma(current->mm, addr);
-- if (TASK_SIZE - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
- goto success;
- }
-
-@@ -89,7 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- for (; vma; vma = vma->vm_next) {
- if (addr > limit)
- break;
-- if (addr + len <= vma->vm_start)
-+ if (check_heap_stack_gap(vma, &addr, len, offset))
- goto success;
- addr = vma->vm_end;
- }
-@@ -104,7 +104,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- for (; vma; vma = vma->vm_next) {
- if (addr > limit)
- break;
-- if (addr + len <= vma->vm_start)
-+ if (check_heap_stack_gap(vma, &addr, len, offset))
- goto success;
- addr = vma->vm_end;
- }
-diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
-index c635028..6d9445a 100644
---- a/arch/h8300/include/asm/cache.h
-+++ b/arch/h8300/include/asm/cache.h
-@@ -1,8 +1,10 @@
- #ifndef __ARCH_H8300_CACHE_H
- #define __ARCH_H8300_CACHE_H
-
-+#include <linux/const.h>
-+
- /* bytes per L1 cache line */
--#define L1_CACHE_BYTES 4
-+#define L1_CACHE_BYTES _AC(4,UL)
-
- /* m68k-elf-gcc 2.95.2 doesn't like these */
-
-diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
-index 0f01de2..d37d309 100644
---- a/arch/hexagon/include/asm/cache.h
-+++ b/arch/hexagon/include/asm/cache.h
-@@ -21,9 +21,11 @@
- #ifndef __ASM_CACHE_H
- #define __ASM_CACHE_H
-
-+#include <linux/const.h>
-+
- /* Bytes per L1 cache line */
--#define L1_CACHE_SHIFT (5)
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_SHIFT 5
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
- #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
-diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
-index 18c4f0b..2c2d8624 100644
---- a/arch/hexagon/kernel/process.c
-+++ b/arch/hexagon/kernel/process.c
-@@ -264,7 +264,7 @@ void free_thread_info(struct thread_info *ti)
- void thread_info_cache_init(void)
- {
- thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
-- THREAD_SIZE, 0, NULL);
-+ THREAD_SIZE, SLAB_USERCOPY, NULL);
- BUG_ON(thread_info_cache == NULL);
- }
-
-diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
-index 27489b6..45ab736 100644
---- a/arch/ia64/Kconfig
-+++ b/arch/ia64/Kconfig
-@@ -570,6 +570,7 @@ source "drivers/sn/Kconfig"
- config KEXEC
- bool "kexec system call (EXPERIMENTAL)"
- depends on EXPERIMENTAL && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
-+ depends on !GRKERNSEC_KMEM
- help
- kexec is a system call that implements the ability to shutdown your
- current kernel, and to start another kernel. It is like a reboot
-diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
-index be7bfa1..32098c5 100644
---- a/arch/ia64/Makefile
-+++ b/arch/ia64/Makefile
-@@ -101,5 +101,6 @@ endef
- archprepare: make_nr_irqs_h FORCE
- PHONY += make_nr_irqs_h FORCE
-
-+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
- make_nr_irqs_h: FORCE
- $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
-diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
-index 2fc214b..7597423 100644
---- a/arch/ia64/include/asm/atomic.h
-+++ b/arch/ia64/include/asm/atomic.h
-@@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
- #define atomic64_inc(v) atomic64_add(1, (v))
- #define atomic64_dec(v) atomic64_sub(1, (v))
-
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
-+
- /* Atomic operations are already serializing */
- #define smp_mb__before_atomic_dec() barrier()
- #define smp_mb__after_atomic_dec() barrier()
-diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
-index 988254a..e1ee885 100644
---- a/arch/ia64/include/asm/cache.h
-+++ b/arch/ia64/include/asm/cache.h
-@@ -1,6 +1,7 @@
- #ifndef _ASM_IA64_CACHE_H
- #define _ASM_IA64_CACHE_H
-
-+#include <linux/const.h>
-
- /*
- * Copyright (C) 1998-2000 Hewlett-Packard Co
-@@ -9,7 +10,7 @@
-
- /* Bytes per L1 (data) cache line. */
- #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #ifdef CONFIG_SMP
- # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
-diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
-index b5298eb..67c6e62 100644
---- a/arch/ia64/include/asm/elf.h
-+++ b/arch/ia64/include/asm/elf.h
-@@ -42,6 +42,13 @@
- */
- #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
-+
-+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
-+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
-+#endif
-+
- #define PT_IA_64_UNWIND 0x70000001
-
- /* IA-64 relocations: */
-diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
-index 96a8d92..617a1cf 100644
---- a/arch/ia64/include/asm/pgalloc.h
-+++ b/arch/ia64/include/asm/pgalloc.h
-@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
- pgd_val(*pgd_entry) = __pa(pud);
- }
-
-+static inline void
-+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
-+{
-+ pgd_populate(mm, pgd_entry, pud);
-+}
-+
- static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
- {
- return quicklist_alloc(0, GFP_KERNEL, NULL);
-@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
- pud_val(*pud_entry) = __pa(pmd);
- }
-
-+static inline void
-+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
-+{
-+ pud_populate(mm, pud_entry, pmd);
-+}
-+
- static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
- {
- return quicklist_alloc(0, GFP_KERNEL, NULL);
-diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
-index 1a97af3..7529d31 100644
---- a/arch/ia64/include/asm/pgtable.h
-+++ b/arch/ia64/include/asm/pgtable.h
-@@ -12,7 +12,7 @@
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
--
-+#include <linux/const.h>
- #include <asm/mman.h>
- #include <asm/page.h>
- #include <asm/processor.h>
-@@ -143,6 +143,17 @@
- #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
- #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
- #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
-+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
-+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
-+#else
-+# define PAGE_SHARED_NOEXEC PAGE_SHARED
-+# define PAGE_READONLY_NOEXEC PAGE_READONLY
-+# define PAGE_COPY_NOEXEC PAGE_COPY
-+#endif
-+
- #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
- #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
- #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
-diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
-index fba7696..a7650fd 100644
---- a/arch/ia64/include/asm/processor.h
-+++ b/arch/ia64/include/asm/processor.h
-@@ -320,7 +320,7 @@ struct thread_struct {
- regs->loadrs = 0; \
- regs->r8 = get_dumpable(current->mm); /* set "don't zap registers" flag */ \
- regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
-- if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) { \
-+ if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) { \
- /* \
- * Zap scratch regs to avoid leaking bits between processes with different \
- * uid/privileges. \
-diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
-index b77768d..e0795eb 100644
---- a/arch/ia64/include/asm/spinlock.h
-+++ b/arch/ia64/include/asm/spinlock.h
-@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
- unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
-
- asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
-- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
-+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
- }
-
- static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
-diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
-index 449c8c0..3d4b1e9 100644
---- a/arch/ia64/include/asm/uaccess.h
-+++ b/arch/ia64/include/asm/uaccess.h
-@@ -70,6 +70,7 @@
- && ((segment).seg == KERNEL_DS.seg \
- || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
- })
-+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
- #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
-
- /*
-@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
- static inline unsigned long
- __copy_to_user (void __user *to, const void *from, unsigned long count)
- {
-+ if (count > INT_MAX)
-+ return count;
-+
-+ if (!__builtin_constant_p(count))
-+ check_object_size(from, count, true);
-+
- return __copy_user(to, (__force void __user *) from, count);
- }
-
- static inline unsigned long
- __copy_from_user (void *to, const void __user *from, unsigned long count)
- {
-+ if (count > INT_MAX)
-+ return count;
-+
-+ if (!__builtin_constant_p(count))
-+ check_object_size(to, count, false);
-+
- return __copy_user((__force void __user *) to, from, count);
- }
-
-@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
- ({ \
- void __user *__cu_to = (to); \
- const void *__cu_from = (from); \
-- long __cu_len = (n); \
-+ unsigned long __cu_len = (n); \
- \
-- if (__access_ok(__cu_to, __cu_len, get_fs())) \
-+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
-+ if (!__builtin_constant_p(n)) \
-+ check_object_size(__cu_from, __cu_len, true); \
- __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
-+ } \
- __cu_len; \
- })
-
-@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
- ({ \
- void *__cu_to = (to); \
- const void __user *__cu_from = (from); \
-- long __cu_len = (n); \
-+ unsigned long __cu_len = (n); \
- \
- __chk_user_ptr(__cu_from); \
-- if (__access_ok(__cu_from, __cu_len, get_fs())) \
-+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
-+ if (!__builtin_constant_p(n)) \
-+ check_object_size(__cu_to, __cu_len, false); \
- __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
-+ } \
- __cu_len; \
- })
-
-diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
-index c539c68..c95d3db 100644
---- a/arch/ia64/kernel/err_inject.c
-+++ b/arch/ia64/kernel/err_inject.c
-@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
-+static struct notifier_block err_inject_cpu_notifier =
- {
- .notifier_call = err_inject_cpu_callback,
- };
-diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
-index 782c3a35..3540c5e 100644
---- a/arch/ia64/kernel/irq_ia64.c
-+++ b/arch/ia64/kernel/irq_ia64.c
-@@ -23,7 +23,6 @@
- #include <linux/ioport.h>
- #include <linux/kernel_stat.h>
- #include <linux/ptrace.h>
--#include <linux/random.h> /* for rand_initialize_irq() */
- #include <linux/signal.h>
- #include <linux/smp.h>
- #include <linux/threads.h>
-diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
-index 9b97303..69464a9 100644
---- a/arch/ia64/kernel/mca.c
-+++ b/arch/ia64/kernel/mca.c
-@@ -1919,7 +1919,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
- return NOTIFY_OK;
- }
-
--static struct notifier_block mca_cpu_notifier __cpuinitdata = {
-+static struct notifier_block mca_cpu_notifier = {
- .notifier_call = mca_cpu_callback
- };
-
-diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
-index 24603be..948052d 100644
---- a/arch/ia64/kernel/module.c
-+++ b/arch/ia64/kernel/module.c
-@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
- void
- module_free (struct module *mod, void *module_region)
- {
-- if (mod && mod->arch.init_unw_table &&
-- module_region == mod->module_init) {
-+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
- unw_remove_unwind_table(mod->arch.init_unw_table);
- mod->arch.init_unw_table = NULL;
- }
-@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
- }
-
- static inline int
-+in_init_rx (const struct module *mod, uint64_t addr)
-+{
-+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
-+}
-+
-+static inline int
-+in_init_rw (const struct module *mod, uint64_t addr)
-+{
-+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
-+}
-+
-+static inline int
- in_init (const struct module *mod, uint64_t addr)
- {
-- return addr - (uint64_t) mod->module_init < mod->init_size;
-+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
-+}
-+
-+static inline int
-+in_core_rx (const struct module *mod, uint64_t addr)
-+{
-+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
-+}
-+
-+static inline int
-+in_core_rw (const struct module *mod, uint64_t addr)
-+{
-+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
- }
-
- static inline int
- in_core (const struct module *mod, uint64_t addr)
- {
-- return addr - (uint64_t) mod->module_core < mod->core_size;
-+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
- }
-
- static inline int
-@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
- break;
-
- case RV_BDREL:
-- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
-+ if (in_init_rx(mod, val))
-+ val -= (uint64_t) mod->module_init_rx;
-+ else if (in_init_rw(mod, val))
-+ val -= (uint64_t) mod->module_init_rw;
-+ else if (in_core_rx(mod, val))
-+ val -= (uint64_t) mod->module_core_rx;
-+ else if (in_core_rw(mod, val))
-+ val -= (uint64_t) mod->module_core_rw;
- break;
-
- case RV_LTV:
-@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
- * addresses have been selected...
- */
- uint64_t gp;
-- if (mod->core_size > MAX_LTOFF)
-+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
- /*
- * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
- * at the end of the module.
- */
-- gp = mod->core_size - MAX_LTOFF / 2;
-+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
- else
-- gp = mod->core_size / 2;
-- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
-+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
-+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
- mod->arch.gp = gp;
- DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
- }
-diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
-index 77597e5..189dd62f 100644
---- a/arch/ia64/kernel/palinfo.c
-+++ b/arch/ia64/kernel/palinfo.c
-@@ -977,7 +977,7 @@ create_palinfo_proc_entries(unsigned int cpu)
- struct proc_dir_entry **pdir;
- struct proc_dir_entry *cpu_dir;
- int j;
-- char cpustr[sizeof(CPUSTR)];
-+ char cpustr[3+4+1];
-
-
- /*
-@@ -1045,7 +1045,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __refdata palinfo_cpu_notifier =
-+static struct notifier_block palinfo_cpu_notifier =
- {
- .notifier_call = palinfo_cpu_callback,
- .priority = 0,
-diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
-index 89accc6..236d389 100644
---- a/arch/ia64/kernel/perfmon.c
-+++ b/arch/ia64/kernel/perfmon.c
-@@ -632,6 +632,7 @@ static struct file_system_type pfm_fs_type = {
- .mount = pfmfs_mount,
- .kill_sb = kill_anon_super,
- };
-+MODULE_ALIAS_FS("pfmfs");
-
- DEFINE_PER_CPU(unsigned long, pfm_syst_info);
- DEFINE_PER_CPU(struct task_struct *, pmu_owner);
-@@ -2370,7 +2371,6 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
- */
- insert_vm_struct(mm, vma);
-
-- mm->total_vm += size >> PAGE_SHIFT;
- vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
- vma_pages(vma));
- up_write(&task->mm->mmap_sem);
-diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
-index 79802e5..1a89ec5 100644
---- a/arch/ia64/kernel/salinfo.c
-+++ b/arch/ia64/kernel/salinfo.c
-@@ -616,7 +616,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
- return NOTIFY_OK;
- }
-
--static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
-+static struct notifier_block salinfo_cpu_notifier =
- {
- .notifier_call = salinfo_cpu_callback,
- .priority = 0,
-diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
-index 609d500..254a3d7 100644
---- a/arch/ia64/kernel/sys_ia64.c
-+++ b/arch/ia64/kernel/sys_ia64.c
-@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
- unsigned long start_addr, align_mask = PAGE_SIZE - 1;
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
-
- if (len > RGN_MAP_LIMIT)
- return -ENOMEM;
-@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
- if (REGION_NUMBER(addr) == RGN_HPAGE)
- addr = 0;
- #endif
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ addr = mm->free_area_cache;
-+ else
-+#endif
-+
- if (!addr)
- addr = mm->free_area_cache;
-
-@@ -61,14 +69,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
- for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
- /* At this point: (!vma || addr < vma->vm_end). */
- if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
-- if (start_addr != TASK_UNMAPPED_BASE) {
-+ if (start_addr != mm->mmap_base) {
- /* Start a new search --- just in case we missed some holes. */
-- addr = TASK_UNMAPPED_BASE;
-+ addr = mm->mmap_base;
- goto full_search;
- }
- return -ENOMEM;
- }
-- if (!vma || addr + len <= vma->vm_start) {
-+ if (check_heap_stack_gap(vma, &addr, len, offset)) {
- /* Remember the address where we stopped this search: */
- mm->free_area_cache = addr + len;
- return addr;
-diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
-index 9be1f11..f2eef30 100644
---- a/arch/ia64/kernel/topology.c
-+++ b/arch/ia64/kernel/topology.c
-@@ -444,7 +444,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata cache_cpu_notifier =
-+static struct notifier_block cache_cpu_notifier =
- {
- .notifier_call = cache_cpu_callback
- };
-diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
-index 53c0ba0..2accdde 100644
---- a/arch/ia64/kernel/vmlinux.lds.S
-+++ b/arch/ia64/kernel/vmlinux.lds.S
-@@ -199,7 +199,7 @@ SECTIONS {
- /* Per-cpu data: */
- . = ALIGN(PERCPU_PAGE_SIZE);
- PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
-- __phys_per_cpu_start = __per_cpu_load;
-+ __phys_per_cpu_start = per_cpu_load;
- /*
- * ensure percpu data fits
- * into percpu page size
-diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
-index 1e362cd..3ad6444 100644
---- a/arch/ia64/mm/fault.c
-+++ b/arch/ia64/mm/fault.c
-@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
- return pte_present(pte);
- }
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ unsigned long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 8; i++) {
-+ unsigned int c;
-+ if (get_user(c, (unsigned int *)pc+i))
-+ printk(KERN_CONT "???????? ");
-+ else
-+ printk(KERN_CONT "%08x ", c);
-+ }
-+ printk("\n");
-+}
-+#endif
-+
- void __kprobes
- ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
- {
-@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
- mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
- | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
-
-- if ((vma->vm_flags & mask) != mask)
-+ if ((vma->vm_flags & mask) != mask) {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
-+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
-+ goto bad_area;
-+
-+ up_read(&mm->mmap_sem);
-+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
-+ do_group_exit(SIGKILL);
-+ }
-+#endif
-+
- goto bad_area;
-
-+ }
-+
- /*
- * If for any reason at all we couldn't handle the fault, make
- * sure we exit gracefully rather than endlessly redo the
-diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
-index 5ca674b..0d1395a 100644
---- a/arch/ia64/mm/hugetlbpage.c
-+++ b/arch/ia64/mm/hugetlbpage.c
-@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
- unsigned long pgoff, unsigned long flags)
- {
- struct vm_area_struct *vmm;
-+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
-
- if (len > RGN_MAP_LIMIT)
- return -ENOMEM;
-@@ -171,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
- /* At this point: (!vmm || addr < vmm->vm_end). */
- if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
- return -ENOMEM;
-- if (!vmm || (addr + len) <= vmm->vm_start)
-+ if (check_heap_stack_gap(vmm, &addr, len, offset))
- return addr;
- addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
- }
-diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
-index 00cb0e2..2ad8024 100644
---- a/arch/ia64/mm/init.c
-+++ b/arch/ia64/mm/init.c
-@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
- vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
- vma->vm_end = vma->vm_start + PAGE_SIZE;
- vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
-+ vma->vm_flags &= ~VM_EXEC;
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
-+ vma->vm_flags &= ~VM_MAYEXEC;
-+#endif
-+
-+ }
-+#endif
-+
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- down_write(&current->mm->mmap_sem);
- if (insert_vm_struct(current->mm, vma)) {
-diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
-index 40b3ee98..8c2c112 100644
---- a/arch/m32r/include/asm/cache.h
-+++ b/arch/m32r/include/asm/cache.h
-@@ -1,8 +1,10 @@
- #ifndef _ASM_M32R_CACHE_H
- #define _ASM_M32R_CACHE_H
-
-+#include <linux/const.h>
-+
- /* L1 cache line size */
- #define L1_CACHE_SHIFT 4
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #endif /* _ASM_M32R_CACHE_H */
-diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
-index 82abd15..d95ae5d 100644
---- a/arch/m32r/lib/usercopy.c
-+++ b/arch/m32r/lib/usercopy.c
-@@ -14,6 +14,9 @@
- unsigned long
- __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
- {
-+ if ((long)n < 0)
-+ return n;
-+
- prefetch(from);
- if (access_ok(VERIFY_WRITE, to, n))
- __copy_user(to,from,n);
-@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
- unsigned long
- __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
- {
-+ if ((long)n < 0)
-+ return n;
-+
- prefetchw(to);
- if (access_ok(VERIFY_READ, from, n))
- __copy_user_zeroing(to,from,n);
-diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
-index 0395c51..5f26031 100644
---- a/arch/m68k/include/asm/cache.h
-+++ b/arch/m68k/include/asm/cache.h
-@@ -4,9 +4,11 @@
- #ifndef __ARCH_M68K_CACHE_H
- #define __ARCH_M68K_CACHE_H
-
-+#include <linux/const.h>
-+
- /* bytes per L1 cache line */
- #define L1_CACHE_SHIFT 4
--#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
-
-diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
-index 4efe96a..60e8699 100644
---- a/arch/microblaze/include/asm/cache.h
-+++ b/arch/microblaze/include/asm/cache.h
-@@ -13,11 +13,12 @@
- #ifndef _ASM_MICROBLAZE_CACHE_H
- #define _ASM_MICROBLAZE_CACHE_H
-
-+#include <linux/const.h>
- #include <asm/registers.h>
-
- #define L1_CACHE_SHIFT 5
- /* word-granular cache in microblaze */
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #define SMP_CACHE_BYTES L1_CACHE_BYTES
-
-diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
-index d46f1da..d72dc10 100644
---- a/arch/mips/Kconfig
-+++ b/arch/mips/Kconfig
-@@ -2254,6 +2254,7 @@ source "kernel/Kconfig.preempt"
- config KEXEC
- bool "Kexec system call (EXPERIMENTAL)"
- depends on EXPERIMENTAL
-+ depends on !GRKERNSEC_KMEM
- help
- kexec is a system call that implements the ability to shutdown your
- current kernel, and to start another kernel. It is like a reboot
-diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
-index 1d93f81..67794d0 100644
---- a/arch/mips/include/asm/atomic.h
-+++ b/arch/mips/include/asm/atomic.h
-@@ -21,6 +21,10 @@
- #include <asm/war.h>
- #include <asm/system.h>
-
-+#ifdef CONFIG_GENERIC_ATOMIC64
-+#include <asm-generic/atomic64.h>
-+#endif
-+
- #define ATOMIC_INIT(i) { (i) }
-
- /*
-@@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
- */
- #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
-
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
-+
- #endif /* CONFIG_64BIT */
-
- /*
-diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
-index b4db69f..8f3b093 100644
---- a/arch/mips/include/asm/cache.h
-+++ b/arch/mips/include/asm/cache.h
-@@ -9,10 +9,11 @@
- #ifndef _ASM_CACHE_H
- #define _ASM_CACHE_H
-
-+#include <linux/const.h>
- #include <kmalloc.h>
-
- #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
- #define SMP_CACHE_BYTES L1_CACHE_BYTES
-diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
-index 455c0ac..ad65fbe 100644
---- a/arch/mips/include/asm/elf.h
-+++ b/arch/mips/include/asm/elf.h
-@@ -372,13 +372,16 @@ extern const char *__elf_platform;
- #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
- #endif
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
-+
-+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
-+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
-+#endif
-+
- #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
- struct linux_binprm;
- extern int arch_setup_additional_pages(struct linux_binprm *bprm,
- int uses_interp);
-
--struct mm_struct;
--extern unsigned long arch_randomize_brk(struct mm_struct *mm);
--#define arch_randomize_brk arch_randomize_brk
--
- #endif /* _ASM_ELF_H */
-diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
-index 9e8ef59..1139d6b 100644
---- a/arch/mips/include/asm/hw_irq.h
-+++ b/arch/mips/include/asm/hw_irq.h
-@@ -10,7 +10,7 @@
-
- #include <linux/atomic.h>
-
--extern atomic_t irq_err_count;
-+extern atomic_unchecked_t irq_err_count;
-
- /*
- * interrupt-retrigger: NOP for now. This may not be appropriate for all
-diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
-index 94fde8d..d5825cf 100644
---- a/arch/mips/include/asm/local.h
-+++ b/arch/mips/include/asm/local.h
-@@ -12,15 +12,25 @@ typedef struct
- atomic_long_t a;
- } local_t;
-
-+typedef struct {
-+ atomic_long_unchecked_t a;
-+} local_unchecked_t;
-+
- #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
-
- #define local_read(l) atomic_long_read(&(l)->a)
-+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
- #define local_set(l, i) atomic_long_set(&(l)->a, (i))
-+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
-
- #define local_add(i, l) atomic_long_add((i), (&(l)->a))
-+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
- #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
-+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
- #define local_inc(l) atomic_long_inc(&(l)->a)
-+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
- #define local_dec(l) atomic_long_dec(&(l)->a)
-+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
-
- /*
- * Same as above, but return the result value
-@@ -69,6 +79,7 @@ static __inline__ long local_add_return(long i, local_t * l)
-
- return result;
- }
-+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
-
- static __inline__ long local_sub_return(long i, local_t * l)
- {
-@@ -114,9 +125,12 @@ static __inline__ long local_sub_return(long i, local_t * l)
-
- return result;
- }
-+#define local_sub_return_unchecked(i, l) atomic_long_sub_return_unchecked((i), (&(l)->a))
-
- #define local_cmpxchg(l, o, n) \
- ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
-+#define local_cmpxchg_unchecked(l, o, n) \
-+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
- #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
-
- /**
-diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
-index e59cd1a..8e329d6 100644
---- a/arch/mips/include/asm/page.h
-+++ b/arch/mips/include/asm/page.h
-@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
- #ifdef CONFIG_CPU_MIPS32
- typedef struct { unsigned long pte_low, pte_high; } pte_t;
- #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
-- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
-+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
- #else
- typedef struct { unsigned long long pte; } pte_t;
- #define pte_val(x) ((x).pte)
-diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
-index 881d18b4..cea38bc 100644
---- a/arch/mips/include/asm/pgalloc.h
-+++ b/arch/mips/include/asm/pgalloc.h
-@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
- {
- set_pud(pud, __pud((unsigned long)pmd));
- }
-+
-+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
-+{
-+ pud_populate(mm, pud, pmd);
-+}
- #endif
-
- /*
-diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
-index b2202a6..6780030 100644
---- a/arch/mips/include/asm/pgtable.h
-+++ b/arch/mips/include/asm/pgtable.h
-@@ -18,6 +18,9 @@
- #include <asm/io.h>
- #include <asm/pgtable-bits.h>
-
-+#define ktla_ktva(addr) (addr)
-+#define ktva_ktla(addr) (addr)
-+
- struct mm_struct;
- struct vm_area_struct;
-
-diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
-index 6018c80..7c37203 100644
---- a/arch/mips/include/asm/system.h
-+++ b/arch/mips/include/asm/system.h
-@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
- */
- #define __ARCH_WANT_UNLOCKED_CTXSW
-
--extern unsigned long arch_align_stack(unsigned long sp);
-+#define arch_align_stack(x) ((x) & ~0xfUL)
-
- #endif /* _ASM_SYSTEM_H */
-diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
-index 35d1b47..b16efed 100644
---- a/arch/mips/include/asm/thread_info.h
-+++ b/arch/mips/include/asm/thread_info.h
-@@ -114,6 +114,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
- #define TIF_SECCOMP 4 /* secure computing */
- #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
- #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
-+/* li takes a 32bit immediate */
-+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
- #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
- #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
- #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-@@ -148,17 +150,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
- #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
- #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
- #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
-+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
-
--#define _TIF_WORK_SYSCALL_ENTRY (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP)
-+#define _TIF_WORK_SYSCALL_ENTRY (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
-
- /* work to do in syscall_trace_leave() */
--#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
-+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
-
- /* work to do on interrupt/exception return */
- #define _TIF_WORK_MASK (0x0000ffef & \
- ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
- /* work to do on any return to u-space */
--#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
-+#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
-
- #endif /* __KERNEL__ */
-
-diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
-index 653a412..f97cc92 100644
---- a/arch/mips/include/asm/uaccess.h
-+++ b/arch/mips/include/asm/uaccess.h
-@@ -119,6 +119,7 @@ extern u64 __ua_limit;
- __ok == 0; \
- })
-
-+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
- #define access_ok(type, addr, size) \
- likely(__access_ok((addr), (size), __access_mask))
-
-diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
-index 9fdd8bc..4bd7f1a 100644
---- a/arch/mips/kernel/binfmt_elfn32.c
-+++ b/arch/mips/kernel/binfmt_elfn32.c
-@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
- #undef ELF_ET_DYN_BASE
- #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
-+
-+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
-+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
-+#endif
-+
- #include <asm/processor.h>
- #include <linux/module.h>
- #include <linux/elfcore.h>
-diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
-index ff44823..97f8906 100644
---- a/arch/mips/kernel/binfmt_elfo32.c
-+++ b/arch/mips/kernel/binfmt_elfo32.c
-@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
- #undef ELF_ET_DYN_BASE
- #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
-+
-+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
-+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
-+#endif
-+
- #include <asm/processor.h>
-
- /*
-diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
-index 32b397b..3a5143a 100644
---- a/arch/mips/kernel/i8259.c
-+++ b/arch/mips/kernel/i8259.c
-@@ -205,7 +205,7 @@ spurious_8259A_irq:
- printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
- spurious_irq_mask |= irqmask;
- }
-- atomic_inc(&irq_err_count);
-+ atomic_inc_unchecked(&irq_err_count);
- /*
- * Theoretically we do not have to handle this IRQ,
- * but in Linux this does not cause problems and is
-diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
-index 883fc6c..28c0acd 100644
---- a/arch/mips/kernel/irq-gt641xx.c
-+++ b/arch/mips/kernel/irq-gt641xx.c
-@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
- }
- }
-
-- atomic_inc(&irq_err_count);
-+ atomic_inc_unchecked(&irq_err_count);
- }
-
- void __init gt641xx_irq_init(void)
-diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
-index 7f50318..20685b9 100644
---- a/arch/mips/kernel/irq.c
-+++ b/arch/mips/kernel/irq.c
-@@ -111,7 +111,10 @@ void __init init_IRQ(void)
- #endif
- }
-
-+
- #ifdef DEBUG_STACKOVERFLOW
-+extern void gr_handle_kernel_exploit(void);
-+
- static inline void check_stack_overflow(void)
- {
- unsigned long sp;
-@@ -127,6 +130,7 @@ static inline void check_stack_overflow(void)
- printk("do_IRQ: stack overflow: %ld\n",
- sp - sizeof(struct thread_info));
- dump_stack();
-+ gr_handle_kernel_exploit();
- }
- }
- #else
-diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
-index bf128d7..bc244d6 100644
---- a/arch/mips/kernel/process.c
-+++ b/arch/mips/kernel/process.c
-@@ -479,15 +479,3 @@ unsigned long get_wchan(struct task_struct *task)
- out:
- return pc;
- }
--
--/*
-- * Don't forget that the stack pointer must be aligned on a 8 bytes
-- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
-- */
--unsigned long arch_align_stack(unsigned long sp)
--{
-- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-- sp -= get_random_int() & ~PAGE_MASK;
--
-- return sp & ALMASK;
--}
-diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
-index e56c692..c994648 100644
---- a/arch/mips/kernel/ptrace.c
-+++ b/arch/mips/kernel/ptrace.c
-@@ -530,6 +530,10 @@ static inline int audit_arch(void)
- return arch;
- }
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+extern void gr_delayed_cred_worker(void);
-+#endif
-+
- /*
- * Notification of system call entry/exit
- * - triggered by current->work.syscall_trace
-@@ -539,6 +543,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
- /* do the secure computing check first */
- secure_computing(regs->regs[2]);
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
-+ gr_delayed_cred_worker();
-+#endif
-+
- if (!(current->ptrace & PT_PTRACED))
- goto out;
-
-diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
-index 07fc524..b9d7f28 100644
---- a/arch/mips/kernel/reset.c
-+++ b/arch/mips/kernel/reset.c
-@@ -13,6 +13,7 @@
- #include <linux/reboot.h>
-
- #include <asm/reboot.h>
-+#include <asm/bug.h>
-
- /*
- * Urgs ... Too many MIPS machines to handle this in a generic way.
-@@ -29,16 +30,19 @@ void machine_restart(char *command)
- {
- if (_machine_restart)
- _machine_restart(command);
-+ BUG();
- }
-
- void machine_halt(void)
- {
- if (_machine_halt)
- _machine_halt();
-+ BUG();
- }
-
- void machine_power_off(void)
- {
- if (pm_power_off)
- pm_power_off();
-+ BUG();
- }
-diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
-index b8314cfe..5bfa31a 100644
---- a/arch/mips/mm/fault.c
-+++ b/arch/mips/mm/fault.c
-@@ -28,6 +28,23 @@
- #include <asm/highmem.h> /* For VMALLOC_END */
- #include <linux/kdebug.h>
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ unsigned long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 5; i++) {
-+ unsigned int c;
-+ if (get_user(c, (unsigned int *)pc+i))
-+ printk(KERN_CONT "???????? ");
-+ else
-+ printk(KERN_CONT "%08x ", c);
-+ }
-+ printk("\n");
-+}
-+#endif
-+
- /*
- * This routine handles page faults. It determines the address,
- * and the problem, and then passes it off to one of the appropriate
-diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
-index 302d779..b8b4e97 100644
---- a/arch/mips/mm/mmap.c
-+++ b/arch/mips/mm/mmap.c
-@@ -71,6 +71,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
- struct vm_area_struct *vma;
- unsigned long addr = addr0;
- int do_color_align;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
-
- if (unlikely(len > TASK_SIZE))
- return -ENOMEM;
-@@ -95,6 +96,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
- do_color_align = 1;
-
- /* requesting a specific address */
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (addr) {
- if (do_color_align)
- addr = COLOUR_ALIGN(addr, pgoff);
-@@ -102,8 +108,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma(mm, addr);
-- if (TASK_SIZE - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
- return addr;
- }
-
-@@ -118,7 +123,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
- /* At this point: (!vma || addr < vma->vm_end). */
- if (TASK_SIZE - len < addr)
- return -ENOMEM;
-- if (!vma || addr + len <= vma->vm_start)
-+ if (check_heap_stack_gap(vma, &addr, len, offset))
- return addr;
- addr = vma->vm_end;
- if (do_color_align)
-@@ -144,10 +149,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
-
- /* make sure it can fit in the remaining address space */
- if (likely(addr > len)) {
-- vma = find_vma(mm, addr - len);
-- if (!vma || addr <= vma->vm_start) {
-+ addr -= len;
-+ vma = find_vma(mm, addr);
-+ if (check_heap_stack_gap(vma, &addr, len, offset)) {
- /* cache the address as a hint for next time */
-- return mm->free_area_cache = addr - len;
-+ return (mm->free_area_cache = addr);
- }
- }
-
-@@ -155,17 +161,17 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
- goto bottomup;
-
- addr = mm->mmap_base - len;
-- if (do_color_align)
-- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
- do {
-+ if (do_color_align)
-+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
- /*
- * Lookup failure means no vma is above this address,
- * else if new region fits below vma->vm_start,
- * return with success:
- */
- vma = find_vma(mm, addr);
-- if (likely(!vma || addr + len <= vma->vm_start)) {
-+ if (check_heap_stack_gap(vma, &addr, len, offset)) {
- /* cache the address as a hint for next time */
- return mm->free_area_cache = addr;
- }
-@@ -175,10 +181,8 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
- mm->cached_hole_size = vma->vm_start - addr;
-
- /* try just below the current vma->vm_start */
-- addr = vma->vm_start - len;
-- if (do_color_align)
-- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-- } while (likely(len < vma->vm_start));
-+ addr = skip_heap_stack_gap(vma, len, offset);
-+ } while (!IS_ERR_VALUE(addr));
-
- bottomup:
- /*
-@@ -223,6 +227,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- {
- unsigned long random_factor = 0UL;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (current->flags & PF_RANDOMIZE) {
- random_factor = get_random_int();
- random_factor = random_factor << PAGE_SHIFT;
-@@ -234,38 +242,23 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
-
- if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
- } else {
- mm->mmap_base = mmap_base(random_factor);
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- mm->unmap_area = arch_unmap_area_topdown;
- }
- }
--
--static inline unsigned long brk_rnd(void)
--{
-- unsigned long rnd = get_random_int();
--
-- rnd = rnd << PAGE_SHIFT;
-- /* 8MB for 32bit, 256MB for 64bit */
-- if (TASK_IS_32BIT_ADDR)
-- rnd = rnd & 0x7ffffful;
-- else
-- rnd = rnd & 0xffffffful;
--
-- return rnd;
--}
--
--unsigned long arch_randomize_brk(struct mm_struct *mm)
--{
-- unsigned long base = mm->brk;
-- unsigned long ret;
--
-- ret = PAGE_ALIGN(base + brk_rnd());
--
-- if (ret < mm->brk)
-- return mm->brk;
--
-- return ret;
--}
-diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
-index ed1c542..88552ac 100644
---- a/arch/mips/pci/pci-octeon.c
-+++ b/arch/mips/pci/pci-octeon.c
-@@ -335,8 +335,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
-
-
- static struct pci_ops octeon_pci_ops = {
-- octeon_read_config,
-- octeon_write_config,
-+ .read = octeon_read_config,
-+ .write = octeon_write_config,
- };
-
- static struct resource octeon_pci_mem_resource = {
-diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
-index 0583c463..c07a38e 100644
---- a/arch/mips/pci/pcie-octeon.c
-+++ b/arch/mips/pci/pcie-octeon.c
-@@ -1238,8 +1238,8 @@ static int octeon_pcie1_write_config(struct pci_bus *bus, unsigned int devfn,
- }
-
- static struct pci_ops octeon_pcie0_ops = {
-- octeon_pcie0_read_config,
-- octeon_pcie0_write_config,
-+ .read = octeon_pcie0_read_config,
-+ .write = octeon_pcie0_write_config,
- };
-
- static struct resource octeon_pcie0_mem_resource = {
-@@ -1259,8 +1259,8 @@ static struct pci_controller octeon_pcie0_controller = {
- };
-
- static struct pci_ops octeon_pcie1_ops = {
-- octeon_pcie1_read_config,
-- octeon_pcie1_write_config,
-+ .read = octeon_pcie1_read_config,
-+ .write = octeon_pcie1_write_config,
- };
-
- static struct resource octeon_pcie1_mem_resource = {
-diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
-index 3ab5b5d..67145ff 100644
---- a/arch/mips/sni/rm200.c
-+++ b/arch/mips/sni/rm200.c
-@@ -270,7 +270,7 @@ spurious_8259A_irq:
- "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
- spurious_irq_mask |= irqmask;
- }
-- atomic_inc(&irq_err_count);
-+ atomic_inc_unchecked(&irq_err_count);
- /*
- * Theoretically we do not have to handle this IRQ,
- * but in Linux this does not cause problems and is
-diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
-index a39ef32..98c4860 100644
---- a/arch/mips/vr41xx/common/icu.c
-+++ b/arch/mips/vr41xx/common/icu.c
-@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
-
- printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
-
-- atomic_inc(&irq_err_count);
-+ atomic_inc_unchecked(&irq_err_count);
-
- return -1;
- }
-diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
-index fad2bef..6499c27 100644
---- a/arch/mips/vr41xx/common/irq.c
-+++ b/arch/mips/vr41xx/common/irq.c
-@@ -65,7 +65,7 @@ static void irq_dispatch(unsigned int irq)
- irq_cascade_t *cascade;
-
- if (irq >= NR_IRQS) {
-- atomic_inc(&irq_err_count);
-+ atomic_inc_unchecked(&irq_err_count);
- return;
- }
-
-@@ -85,7 +85,7 @@ static void irq_dispatch(unsigned int irq)
- ret = cascade->get_irq(irq);
- irq = ret;
- if (ret < 0)
-- atomic_inc(&irq_err_count);
-+ atomic_inc_unchecked(&irq_err_count);
- else
- irq_dispatch(irq);
- if (!irqd_irq_disabled(idata) && chip->irq_unmask)
-diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
-index 967d144..db12197 100644
---- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
-+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
-@@ -11,12 +11,14 @@
- #ifndef _ASM_PROC_CACHE_H
- #define _ASM_PROC_CACHE_H
-
-+#include <linux/const.h>
-+
- /* L1 cache */
-
- #define L1_CACHE_NWAYS 4 /* number of ways in caches */
- #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
--#define L1_CACHE_BYTES 16 /* bytes per entry */
- #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
- #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
-
- #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
-diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
-index bcb5df2..84fabd2 100644
---- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
-+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
-@@ -16,13 +16,15 @@
- #ifndef _ASM_PROC_CACHE_H
- #define _ASM_PROC_CACHE_H
-
-+#include <linux/const.h>
-+
- /*
- * L1 cache
- */
- #define L1_CACHE_NWAYS 4 /* number of ways in caches */
- #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
--#define L1_CACHE_BYTES 32 /* bytes per entry */
- #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
- #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
-
- #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
-diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
-index 4ce7a01..449202a 100644
---- a/arch/openrisc/include/asm/cache.h
-+++ b/arch/openrisc/include/asm/cache.h
-@@ -19,11 +19,13 @@
- #ifndef __ASM_OPENRISC_CACHE_H
- #define __ASM_OPENRISC_CACHE_H
-
-+#include <linux/const.h>
-+
- /* FIXME: How can we replace these with values from the CPU...
- * they shouldn't be hard-coded!
- */
-
--#define L1_CACHE_BYTES 16
- #define L1_CACHE_SHIFT 4
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #endif /* __ASM_OPENRISC_CACHE_H */
-diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
-index c4b779b..775b66b 100644
---- a/arch/parisc/include/asm/atomic.h
-+++ b/arch/parisc/include/asm/atomic.h
-@@ -335,6 +335,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
-
- #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
-+
- #endif /* !CONFIG_64BIT */
-
-
-diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
-index 47f11c7..3420df2 100644
---- a/arch/parisc/include/asm/cache.h
-+++ b/arch/parisc/include/asm/cache.h
-@@ -5,6 +5,7 @@
- #ifndef __ARCH_PARISC_CACHE_H
- #define __ARCH_PARISC_CACHE_H
-
-+#include <linux/const.h>
-
- /*
- * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
-@@ -15,13 +16,13 @@
- * just ruin performance.
- */
- #ifdef CONFIG_PA20
--#define L1_CACHE_BYTES 64
- #define L1_CACHE_SHIFT 6
- #else
--#define L1_CACHE_BYTES 32
- #define L1_CACHE_SHIFT 5
- #endif
-
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-+
- #ifndef __ASSEMBLY__
-
- #define SMP_CACHE_BYTES L1_CACHE_BYTES
-diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
-index 19f6cb1..6c78cf2 100644
---- a/arch/parisc/include/asm/elf.h
-+++ b/arch/parisc/include/asm/elf.h
-@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
-
- #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE 0x10000UL
-+
-+#define PAX_DELTA_MMAP_LEN 16
-+#define PAX_DELTA_STACK_LEN 16
-+#endif
-+
- /* This yields a mask that user programs can use to figure out what
- instruction set this CPU supports. This could be done in user space,
- but it's not easy, and we've already done it here. */
-diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
-index fc987a1..6e068ef 100644
---- a/arch/parisc/include/asm/pgalloc.h
-+++ b/arch/parisc/include/asm/pgalloc.h
-@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
- (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
- }
-
-+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
-+{
-+ pgd_populate(mm, pgd, pmd);
-+}
-+
- static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
- {
- pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
-@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
- #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
- #define pmd_free(mm, x) do { } while (0)
- #define pgd_populate(mm, pmd, pte) BUG()
-+#define pgd_populate_kernel(mm, pmd, pte) BUG()
-
- #endif
-
-diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
-index 9d35a3e..af9b6d3 100644
---- a/arch/parisc/include/asm/pgtable.h
-+++ b/arch/parisc/include/asm/pgtable.h
-@@ -16,6 +16,8 @@
- #include <asm/processor.h>
- #include <asm/cache.h>
-
-+extern spinlock_t pa_dbit_lock;
-+
- /*
- * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
- * memory. For the return value to be meaningful, ADDR must be >=
-@@ -44,8 +46,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
-
- #define set_pte_at(mm, addr, ptep, pteval) \
- do { \
-+ unsigned long flags; \
-+ spin_lock_irqsave(&pa_dbit_lock, flags); \
- set_pte(ptep, pteval); \
- purge_tlb_entries(mm, addr); \
-+ spin_unlock_irqrestore(&pa_dbit_lock, flags); \
- } while (0)
-
- #endif /* !__ASSEMBLY__ */
-@@ -216,6 +221,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
- #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
- #define PAGE_COPY PAGE_EXECREAD
- #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
-+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
-+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
-+#else
-+# define PAGE_SHARED_NOEXEC PAGE_SHARED
-+# define PAGE_COPY_NOEXEC PAGE_COPY
-+# define PAGE_READONLY_NOEXEC PAGE_READONLY
-+#endif
-+
- #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
- #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
- #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
-@@ -433,48 +449,46 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
-
- static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
- {
--#ifdef CONFIG_SMP
-+ pte_t pte;
-+ unsigned long flags;
-+
- if (!pte_young(*ptep))
- return 0;
-- return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
--#else
-- pte_t pte = *ptep;
-- if (!pte_young(pte))
-+
-+ spin_lock_irqsave(&pa_dbit_lock, flags);
-+ pte = *ptep;
-+ if (!pte_young(pte)) {
-+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
- return 0;
-- set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
-+ }
-+ set_pte(ptep, pte_mkold(pte));
-+ purge_tlb_entries(vma->vm_mm, addr);
-+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
- return 1;
--#endif
- }
-
--extern spinlock_t pa_dbit_lock;
--
- struct mm_struct;
- static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
- {
- pte_t old_pte;
-+ unsigned long flags;
-
-- spin_lock(&pa_dbit_lock);
-+ spin_lock_irqsave(&pa_dbit_lock, flags);
- old_pte = *ptep;
- pte_clear(mm,addr,ptep);
-- spin_unlock(&pa_dbit_lock);
-+ purge_tlb_entries(mm, addr);
-+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
-
- return old_pte;
- }
-
- static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
- {
--#ifdef CONFIG_SMP
-- unsigned long new, old;
--
-- do {
-- old = pte_val(*ptep);
-- new = pte_val(pte_wrprotect(__pte (old)));
-- } while (cmpxchg((unsigned long *) ptep, old, new) != old);
-+ unsigned long flags;
-+ spin_lock_irqsave(&pa_dbit_lock, flags);
-+ set_pte(ptep, pte_wrprotect(*ptep));
- purge_tlb_entries(mm, addr);
--#else
-- pte_t old_pte = *ptep;
-- set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
--#endif
-+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
- }
-
- #define pte_same(A,B) (pte_val(A) == pte_val(B))
-diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
-index ff4cf9d..c0564bb 100644
---- a/arch/parisc/include/asm/uaccess.h
-+++ b/arch/parisc/include/asm/uaccess.h
-@@ -253,10 +253,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
- const void __user *from,
- unsigned long n)
- {
-- int sz = __compiletime_object_size(to);
-+ size_t sz = __compiletime_object_size(to);
- int ret = -EFAULT;
-
-- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
-+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
- ret = __copy_from_user(to, from, n);
- else
- copy_from_user_overflow();
-diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
-index 5241698..91dcb12 100644
---- a/arch/parisc/kernel/cache.c
-+++ b/arch/parisc/kernel/cache.c
-@@ -428,14 +428,11 @@ void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
- /* Note: purge_tlb_entries can be called at startup with
- no context. */
-
-- /* Disable preemption while we play with %sr1. */
-- preempt_disable();
-+ purge_tlb_start(flags);
- mtsp(mm->context, 1);
-- purge_tlb_start(flags);
- pdtlb(addr);
- pitlb(addr);
- purge_tlb_end(flags);
-- preempt_enable();
- }
- EXPORT_SYMBOL(purge_tlb_entries);
-
-diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
-index 5709c5e..14285ca 100644
---- a/arch/parisc/kernel/drivers.c
-+++ b/arch/parisc/kernel/drivers.c
-@@ -394,7 +394,7 @@ EXPORT_SYMBOL(print_pci_hwpath);
- static void setup_bus_id(struct parisc_device *padev)
- {
- struct hardware_path path;
-- char name[20];
-+ char name[28];
- char *output = name;
- int i;
-
-diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
-index 5e34ccf..672bc9c 100644
---- a/arch/parisc/kernel/module.c
-+++ b/arch/parisc/kernel/module.c
-@@ -98,16 +98,38 @@
-
- /* three functions to determine where in the module core
- * or init pieces the location is */
-+static inline int in_init_rx(struct module *me, void *loc)
-+{
-+ return (loc >= me->module_init_rx &&
-+ loc < (me->module_init_rx + me->init_size_rx));
-+}
-+
-+static inline int in_init_rw(struct module *me, void *loc)
-+{
-+ return (loc >= me->module_init_rw &&
-+ loc < (me->module_init_rw + me->init_size_rw));
-+}
-+
- static inline int in_init(struct module *me, void *loc)
- {
-- return (loc >= me->module_init &&
-- loc <= (me->module_init + me->init_size));
-+ return in_init_rx(me, loc) || in_init_rw(me, loc);
-+}
-+
-+static inline int in_core_rx(struct module *me, void *loc)
-+{
-+ return (loc >= me->module_core_rx &&
-+ loc < (me->module_core_rx + me->core_size_rx));
-+}
-+
-+static inline int in_core_rw(struct module *me, void *loc)
-+{
-+ return (loc >= me->module_core_rw &&
-+ loc < (me->module_core_rw + me->core_size_rw));
- }
-
- static inline int in_core(struct module *me, void *loc)
- {
-- return (loc >= me->module_core &&
-- loc <= (me->module_core + me->core_size));
-+ return in_core_rx(me, loc) || in_core_rw(me, loc);
- }
-
- static inline int in_local(struct module *me, void *loc)
-@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
- }
-
- /* align things a bit */
-- me->core_size = ALIGN(me->core_size, 16);
-- me->arch.got_offset = me->core_size;
-- me->core_size += gots * sizeof(struct got_entry);
-+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
-+ me->arch.got_offset = me->core_size_rw;
-+ me->core_size_rw += gots * sizeof(struct got_entry);
-
-- me->core_size = ALIGN(me->core_size, 16);
-- me->arch.fdesc_offset = me->core_size;
-- me->core_size += fdescs * sizeof(Elf_Fdesc);
-+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
-+ me->arch.fdesc_offset = me->core_size_rw;
-+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
-
- me->arch.got_max = gots;
- me->arch.fdesc_max = fdescs;
-@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
-
- BUG_ON(value == 0);
-
-- got = me->module_core + me->arch.got_offset;
-+ got = me->module_core_rw + me->arch.got_offset;
- for (i = 0; got[i].addr; i++)
- if (got[i].addr == value)
- goto out;
-@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
- #ifdef CONFIG_64BIT
- static Elf_Addr get_fdesc(struct module *me, unsigned long value)
- {
-- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
-+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
-
- if (!value) {
- printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
-@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
-
- /* Create new one */
- fdesc->addr = value;
-- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
-+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
- return (Elf_Addr)fdesc;
- }
- #endif /* CONFIG_64BIT */
-@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
-
- table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
- end = table + sechdrs[me->arch.unwind_section].sh_size;
-- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
-+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
-
- DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
- me->arch.unwind_section, table, end, gp);
-diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
-index a3328c2..3b812eb 100644
---- a/arch/parisc/kernel/setup.c
-+++ b/arch/parisc/kernel/setup.c
-@@ -69,7 +69,8 @@ void __init setup_cmdline(char **cmdline_p)
- /* called from hpux boot loader */
- boot_command_line[0] = '\0';
- } else {
-- strcpy(boot_command_line, (char *)__va(boot_args[1]));
-+ strlcpy(boot_command_line, (char *)__va(boot_args[1]),
-+ COMMAND_LINE_SIZE);
-
- #ifdef CONFIG_BLK_DEV_INITRD
- if (boot_args[2] != 0) /* did palo pass us a ramdisk? */
-diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
-index 7ea75d1..5075226 100644
---- a/arch/parisc/kernel/sys_parisc.c
-+++ b/arch/parisc/kernel/sys_parisc.c
-@@ -33,9 +33,11 @@
- #include <linux/utsname.h>
- #include <linux/personality.h>
-
--static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
-+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
-+ unsigned long flags)
- {
- struct vm_area_struct *vma;
-+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
-
- addr = PAGE_ALIGN(addr);
-
-@@ -43,7 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
- /* At this point: (!vma || addr < vma->vm_end). */
- if (TASK_SIZE - len < addr)
- return -ENOMEM;
-- if (!vma || addr + len <= vma->vm_start)
-+ if (check_heap_stack_gap(vma, &addr, len, offset))
- return addr;
- addr = vma->vm_end;
- }
-@@ -67,11 +69,12 @@ static int get_offset(struct address_space *mapping)
- return offset & 0x3FF000;
- }
-
--static unsigned long get_shared_area(struct address_space *mapping,
-- unsigned long addr, unsigned long len, unsigned long pgoff)
-+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
-+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
- {
- struct vm_area_struct *vma;
- int offset = mapping ? get_offset(mapping) : 0;
-+ unsigned long rand_offset = gr_rand_threadstack_offset(current->mm, filp, flags);
-
- offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
-
-@@ -81,7 +84,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
- /* At this point: (!vma || addr < vma->vm_end). */
- if (TASK_SIZE - len < addr)
- return -ENOMEM;
-- if (!vma || addr + len <= vma->vm_start)
-+ if (check_heap_stack_gap(vma, &addr, len, rand_offset))
- return addr;
- addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
- if (addr < vma->vm_end) /* handle wraparound */
-@@ -100,14 +103,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- if (flags & MAP_FIXED)
- return addr;
- if (!addr)
-- addr = TASK_UNMAPPED_BASE;
-+ addr = current->mm->mmap_base;
-
- if (filp) {
-- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
-+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
- } else if(flags & MAP_SHARED) {
-- addr = get_shared_area(NULL, addr, len, pgoff);
-+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
- } else {
-- addr = get_unshared_area(addr, len);
-+ addr = get_unshared_area(filp, addr, len, flags);
- }
- return addr;
- }
-diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
-index cd8b02f..543008b 100644
---- a/arch/parisc/kernel/traps.c
-+++ b/arch/parisc/kernel/traps.c
-@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
-
- down_read(&current->mm->mmap_sem);
- vma = find_vma(current->mm,regs->iaoq[0]);
-- if (vma && (regs->iaoq[0] >= vma->vm_start)
-- && (vma->vm_flags & VM_EXEC)) {
--
-+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
- fault_address = regs->iaoq[0];
- fault_space = regs->iasq[0];
-
-diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
-index a9b765a..e78ae8e 100644
---- a/arch/parisc/mm/fault.c
-+++ b/arch/parisc/mm/fault.c
-@@ -15,6 +15,7 @@
- #include <linux/sched.h>
- #include <linux/interrupt.h>
- #include <linux/module.h>
-+#include <linux/unistd.h>
-
- #include <asm/uaccess.h>
- #include <asm/traps.h>
-@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
- static unsigned long
- parisc_acctyp(unsigned long code, unsigned int inst)
- {
-- if (code == 6 || code == 16)
-+ if (code == 6 || code == 7 || code == 16)
- return VM_EXEC;
-
- switch (inst & 0xf0000000) {
-@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
- }
- #endif
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+/*
-+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ * 2 when rt_sigreturn trampoline was detected
-+ * 3 when unpatched PLT trampoline was detected
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+ int err;
-+
-+ do { /* PaX: unpatched PLT emulation */
-+ unsigned int bl, depwi;
-+
-+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
-+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
-+
-+ if (err)
-+ break;
-+
-+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
-+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
-+
-+ err = get_user(ldw, (unsigned int *)addr);
-+ err |= get_user(bv, (unsigned int *)(addr+4));
-+ err |= get_user(ldw2, (unsigned int *)(addr+8));
-+
-+ if (err)
-+ break;
-+
-+ if (ldw == 0x0E801096U &&
-+ bv == 0xEAC0C000U &&
-+ ldw2 == 0x0E881095U)
-+ {
-+ unsigned int resolver, map;
-+
-+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
-+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
-+ if (err)
-+ break;
-+
-+ regs->gr[20] = instruction_pointer(regs)+8;
-+ regs->gr[21] = map;
-+ regs->gr[22] = resolver;
-+ regs->iaoq[0] = resolver | 3UL;
-+ regs->iaoq[1] = regs->iaoq[0] + 4;
-+ return 3;
-+ }
-+ }
-+ } while (0);
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+
-+#ifndef CONFIG_PAX_EMUSIGRT
-+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
-+ return 1;
-+#endif
-+
-+ do { /* PaX: rt_sigreturn emulation */
-+ unsigned int ldi1, ldi2, bel, nop;
-+
-+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
-+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
-+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
-+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
-+
-+ if (err)
-+ break;
-+
-+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
-+ ldi2 == 0x3414015AU &&
-+ bel == 0xE4008200U &&
-+ nop == 0x08000240U)
-+ {
-+ regs->gr[25] = (ldi1 & 2) >> 1;
-+ regs->gr[20] = __NR_rt_sigreturn;
-+ regs->gr[31] = regs->iaoq[1] + 16;
-+ regs->sr[0] = regs->iasq[1];
-+ regs->iaoq[0] = 0x100UL;
-+ regs->iaoq[1] = regs->iaoq[0] + 4;
-+ regs->iasq[0] = regs->sr[2];
-+ regs->iasq[1] = regs->sr[2];
-+ return 2;
-+ }
-+ } while (0);
-+#endif
-+
-+ return 1;
-+}
-+
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ unsigned long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 5; i++) {
-+ unsigned int c;
-+ if (get_user(c, (unsigned int *)pc+i))
-+ printk(KERN_CONT "???????? ");
-+ else
-+ printk(KERN_CONT "%08x ", c);
-+ }
-+ printk("\n");
-+}
-+#endif
-+
- int fixup_exception(struct pt_regs *regs)
- {
- const struct exception_table_entry *fix;
-@@ -192,8 +303,33 @@ good_area:
-
- acc_type = parisc_acctyp(code,regs->iir);
-
-- if ((vma->vm_flags & acc_type) != acc_type)
-+ if ((vma->vm_flags & acc_type) != acc_type) {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
-+ (address & ~3UL) == instruction_pointer(regs))
-+ {
-+ up_read(&mm->mmap_sem);
-+ switch (pax_handle_fetch_fault(regs)) {
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+ case 3:
-+ return;
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+ case 2:
-+ return;
-+#endif
-+
-+ }
-+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
-+ do_group_exit(SIGKILL);
-+ }
-+#endif
-+
- goto bad_area;
-+ }
-
- /*
- * If for any reason at all we couldn't handle the fault, make
-diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
-index bec952d..f6dbe5d 100644
---- a/arch/powerpc/Kconfig
-+++ b/arch/powerpc/Kconfig
-@@ -347,6 +347,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
- config KEXEC
- bool "kexec system call (EXPERIMENTAL)"
- depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP && !PPC_47x)) && EXPERIMENTAL
-+ depends on !GRKERNSEC_KMEM
- help
- kexec is a system call that implements the ability to shutdown your
- current kernel, and to start another kernel. It is like a reboot
-diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
-index 02e41b5..ec6e26c 100644
---- a/arch/powerpc/include/asm/atomic.h
-+++ b/arch/powerpc/include/asm/atomic.h
-@@ -469,6 +469,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
-
- #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
-+
- #endif /* __powerpc64__ */
-
- #endif /* __KERNEL__ */
-diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
-index 4b50941..5605819 100644
---- a/arch/powerpc/include/asm/cache.h
-+++ b/arch/powerpc/include/asm/cache.h
-@@ -3,6 +3,7 @@
-
- #ifdef __KERNEL__
-
-+#include <linux/const.h>
-
- /* bytes per L1 cache line */
- #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
-@@ -22,7 +23,7 @@
- #define L1_CACHE_SHIFT 7
- #endif
-
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #define SMP_CACHE_BYTES L1_CACHE_BYTES
-
-diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
-index 3bf9cca..e7457d0 100644
---- a/arch/powerpc/include/asm/elf.h
-+++ b/arch/powerpc/include/asm/elf.h
-@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
--extern unsigned long randomize_et_dyn(unsigned long base);
--#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
-+#define ELF_ET_DYN_BASE (0x20000000)
-+
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
-+
-+#ifdef __powerpc64__
-+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
-+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
-+#else
-+#define PAX_DELTA_MMAP_LEN 15
-+#define PAX_DELTA_STACK_LEN 15
-+#endif
-+#endif
-
- /*
- * Our registers are always unsigned longs, whether we're a 32 bit
-@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
- (0x7ff >> (PAGE_SHIFT - 12)) : \
- (0x3ffff >> (PAGE_SHIFT - 12)))
-
--extern unsigned long arch_randomize_brk(struct mm_struct *mm);
--#define arch_randomize_brk arch_randomize_brk
--
- #endif /* __KERNEL__ */
-
- /*
-diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
-index bca8fdc..61e9580 100644
---- a/arch/powerpc/include/asm/kmap_types.h
-+++ b/arch/powerpc/include/asm/kmap_types.h
-@@ -27,6 +27,7 @@ enum km_type {
- KM_PPC_SYNC_PAGE,
- KM_PPC_SYNC_ICACHE,
- KM_KDB,
-+ KM_CLEARPAGE,
- KM_TYPE_NR
- };
-
-diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
-index b8da913..60b608a 100644
---- a/arch/powerpc/include/asm/local.h
-+++ b/arch/powerpc/include/asm/local.h
-@@ -9,15 +9,26 @@ typedef struct
- atomic_long_t a;
- } local_t;
-
-+typedef struct
-+{
-+ atomic_long_unchecked_t a;
-+} local_unchecked_t;
-+
- #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
-
- #define local_read(l) atomic_long_read(&(l)->a)
-+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
- #define local_set(l,i) atomic_long_set(&(l)->a, (i))
-+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
-
- #define local_add(i,l) atomic_long_add((i),(&(l)->a))
-+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
- #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
-+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
- #define local_inc(l) atomic_long_inc(&(l)->a)
-+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
- #define local_dec(l) atomic_long_dec(&(l)->a)
-+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
-
- static __inline__ long local_add_return(long a, local_t *l)
- {
-@@ -35,6 +46,7 @@ static __inline__ long local_add_return(long a, local_t *l)
-
- return t;
- }
-+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
-
- #define local_add_negative(a, l) (local_add_return((a), (l)) < 0)
-
-@@ -54,6 +66,7 @@ static __inline__ long local_sub_return(long a, local_t *l)
-
- return t;
- }
-+#define local_sub_return_unchecked(i, l) atomic_long_sub_return_unchecked((i), (&(l)->a))
-
- static __inline__ long local_inc_return(local_t *l)
- {
-@@ -101,6 +114,8 @@ static __inline__ long local_dec_return(local_t *l)
-
- #define local_cmpxchg(l, o, n) \
- (cmpxchg_local(&((l)->a.counter), (o), (n)))
-+#define local_cmpxchg_unchecked(l, o, n) \
-+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
- #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
-
- /**
-diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
-index d4a7f64..451de1c 100644
---- a/arch/powerpc/include/asm/mman.h
-+++ b/arch/powerpc/include/asm/mman.h
-@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
- }
- #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
-
--static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
-+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
- {
- return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
- }
-diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
-index 5b0bde2..9f83e1a 100644
---- a/arch/powerpc/include/asm/page.h
-+++ b/arch/powerpc/include/asm/page.h
-@@ -151,8 +151,9 @@ extern phys_addr_t kernstart_addr;
- * and needs to be executable. This means the whole heap ends
- * up being executable.
- */
--#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
-- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-+#define VM_DATA_DEFAULT_FLAGS32 \
-+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
-+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
- #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-@@ -180,6 +181,9 @@ extern phys_addr_t kernstart_addr;
- #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
- #endif
-
-+#define ktla_ktva(addr) (addr)
-+#define ktva_ktla(addr) (addr)
-+
- /*
- * Use the top bit of the higher-level page table entries to indicate whether
- * the entries we point to contain hugepages. This works because we know that
-diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
-index fb40ede..d3ce956 100644
---- a/arch/powerpc/include/asm/page_64.h
-+++ b/arch/powerpc/include/asm/page_64.h
-@@ -144,15 +144,18 @@ do { \
- * stack by default, so in the absence of a PT_GNU_STACK program header
- * we turn execute permission off.
- */
--#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
-- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-+#define VM_STACK_DEFAULT_FLAGS32 \
-+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
-+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
- #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-+#ifndef CONFIG_PAX_PAGEEXEC
- #define VM_STACK_DEFAULT_FLAGS \
- (is_32bit_task() ? \
- VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
-+#endif
-
- #include <asm-generic/getorder.h>
-
-diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
-index 292725c..f87ae14 100644
---- a/arch/powerpc/include/asm/pgalloc-64.h
-+++ b/arch/powerpc/include/asm/pgalloc-64.h
-@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
- #ifndef CONFIG_PPC_64K_PAGES
-
- #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
-+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
-
- static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
- {
-@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
- pud_set(pud, (unsigned long)pmd);
- }
-
-+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
-+{
-+ pud_populate(mm, pud, pmd);
-+}
-+
- #define pmd_populate(mm, pmd, pte_page) \
- pmd_populate_kernel(mm, pmd, page_address(pte_page))
- #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
-@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
- #else /* CONFIG_PPC_64K_PAGES */
-
- #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
-+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
-
- static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
- pte_t *pte)
-diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
-index 88b0bd9..e32bc67 100644
---- a/arch/powerpc/include/asm/pgtable.h
-+++ b/arch/powerpc/include/asm/pgtable.h
-@@ -2,6 +2,7 @@
- #define _ASM_POWERPC_PGTABLE_H
- #ifdef __KERNEL__
-
-+#include <linux/const.h>
- #ifndef __ASSEMBLY__
- #include <asm/processor.h> /* For TASK_SIZE */
- #include <asm/mmu.h>
-diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
-index 4aad413..85d86bf 100644
---- a/arch/powerpc/include/asm/pte-hash32.h
-+++ b/arch/powerpc/include/asm/pte-hash32.h
-@@ -21,6 +21,7 @@
- #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
- #define _PAGE_USER 0x004 /* usermode access allowed */
- #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
-+#define _PAGE_EXEC _PAGE_GUARDED
- #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
- #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
- #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
-diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
-index 578e5a0..2ab6a8a 100644
---- a/arch/powerpc/include/asm/reg.h
-+++ b/arch/powerpc/include/asm/reg.h
-@@ -212,6 +212,7 @@
- #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
- #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
- #define DSISR_NOHPTE 0x40000000 /* no translation found */
-+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
- #define DSISR_PROTFAULT 0x08000000 /* protection fault */
- #define DSISR_ISSTORE 0x02000000 /* access was a store */
- #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
-diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
-index adba970..ef0d917 100644
---- a/arch/powerpc/include/asm/smp.h
-+++ b/arch/powerpc/include/asm/smp.h
-@@ -50,7 +50,7 @@ struct smp_ops_t {
- int (*cpu_disable)(void);
- void (*cpu_die)(unsigned int nr);
- int (*cpu_bootable)(unsigned int nr);
--};
-+} __no_const;
-
- extern void smp_send_debugger_break(void);
- extern void start_secondary_resume(void);
-diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
-index e30a13d..2b7d994 100644
---- a/arch/powerpc/include/asm/system.h
-+++ b/arch/powerpc/include/asm/system.h
-@@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
- #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
- #endif
-
--extern unsigned long arch_align_stack(unsigned long sp);
-+#define arch_align_stack(x) ((x) & ~0xfUL)
-
- /* Used in very early kernel initialization. */
- extern unsigned long reloc_offset(void);
-diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
-index 836f231..39d0b94 100644
---- a/arch/powerpc/include/asm/thread_info.h
-+++ b/arch/powerpc/include/asm/thread_info.h
-@@ -104,7 +104,6 @@ static inline struct thread_info *current_thread_info(void)
- #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
- #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
- #define TIF_SINGLESTEP 8 /* singlestepping active */
--#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
- #define TIF_SECCOMP 10 /* secure computing */
- #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
- #define TIF_NOERROR 12 /* Force successful syscall return */
-@@ -112,6 +111,9 @@ static inline struct thread_info *current_thread_info(void)
- #define TIF_FREEZE 14 /* Freezing for suspend */
- #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
- #define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
-+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
-+#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
-
- /* as above, but as bit values */
- #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-@@ -130,8 +132,11 @@ static inline struct thread_info *current_thread_info(void)
- #define _TIF_FREEZE (1<<TIF_FREEZE)
- #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
- #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
-+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
-+
- #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
-+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
-+ _TIF_GRSEC_SETXID)
-
- #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
- _TIF_NOTIFY_RESUME)
-diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
-index bd0fb84..a40ed3a 100644
---- a/arch/powerpc/include/asm/uaccess.h
-+++ b/arch/powerpc/include/asm/uaccess.h
-@@ -56,6 +56,7 @@
-
- #endif
-
-+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
- #define access_ok(type, addr, size) \
- (__chk_user_ptr(addr), \
- __access_ok((__force unsigned long)(addr), (size), get_fs()))
-@@ -327,52 +328,6 @@ do { \
- extern unsigned long __copy_tofrom_user(void __user *to,
- const void __user *from, unsigned long size);
-
--#ifndef __powerpc64__
--
--static inline unsigned long copy_from_user(void *to,
-- const void __user *from, unsigned long n)
--{
-- unsigned long over;
--
-- if (access_ok(VERIFY_READ, from, n))
-- return __copy_tofrom_user((__force void __user *)to, from, n);
-- if ((unsigned long)from < TASK_SIZE) {
-- over = (unsigned long)from + n - TASK_SIZE;
-- return __copy_tofrom_user((__force void __user *)to, from,
-- n - over) + over;
-- }
-- return n;
--}
--
--static inline unsigned long copy_to_user(void __user *to,
-- const void *from, unsigned long n)
--{
-- unsigned long over;
--
-- if (access_ok(VERIFY_WRITE, to, n))
-- return __copy_tofrom_user(to, (__force void __user *)from, n);
-- if ((unsigned long)to < TASK_SIZE) {
-- over = (unsigned long)to + n - TASK_SIZE;
-- return __copy_tofrom_user(to, (__force void __user *)from,
-- n - over) + over;
-- }
-- return n;
--}
--
--#else /* __powerpc64__ */
--
--#define __copy_in_user(to, from, size) \
-- __copy_tofrom_user((to), (from), (size))
--
--extern unsigned long copy_from_user(void *to, const void __user *from,
-- unsigned long n);
--extern unsigned long copy_to_user(void __user *to, const void *from,
-- unsigned long n);
--extern unsigned long copy_in_user(void __user *to, const void __user *from,
-- unsigned long n);
--
--#endif /* __powerpc64__ */
--
- static inline unsigned long __copy_from_user_inatomic(void *to,
- const void __user *from, unsigned long n)
- {
-@@ -396,6 +351,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
- if (ret == 0)
- return 0;
- }
-+
-+ if (!__builtin_constant_p(n))
-+ check_object_size(to, n, false);
-+
- return __copy_tofrom_user((__force void __user *)to, from, n);
- }
-
-@@ -422,6 +381,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
- if (ret == 0)
- return 0;
- }
-+
-+ if (!__builtin_constant_p(n))
-+ check_object_size(from, n, true);
-+
- return __copy_tofrom_user(to, (__force const void __user *)from, n);
- }
-
-@@ -439,6 +402,92 @@ static inline unsigned long __copy_to_user(void __user *to,
- return __copy_to_user_inatomic(to, from, size);
- }
-
-+#ifndef __powerpc64__
-+
-+static inline unsigned long __must_check copy_from_user(void *to,
-+ const void __user *from, unsigned long n)
-+{
-+ unsigned long over;
-+
-+ if ((long)n < 0)
-+ return n;
-+
-+ if (access_ok(VERIFY_READ, from, n)) {
-+ if (!__builtin_constant_p(n))
-+ check_object_size(to, n, false);
-+ return __copy_tofrom_user((__force void __user *)to, from, n);
-+ }
-+ if ((unsigned long)from < TASK_SIZE) {
-+ over = (unsigned long)from + n - TASK_SIZE;
-+ if (!__builtin_constant_p(n - over))
-+ check_object_size(to, n - over, false);
-+ return __copy_tofrom_user((__force void __user *)to, from,
-+ n - over) + over;
-+ }
-+ return n;
-+}
-+
-+static inline unsigned long __must_check copy_to_user(void __user *to,
-+ const void *from, unsigned long n)
-+{
-+ unsigned long over;
-+
-+ if ((long)n < 0)
-+ return n;
-+
-+ if (access_ok(VERIFY_WRITE, to, n)) {
-+ if (!__builtin_constant_p(n))
-+ check_object_size(from, n, true);
-+ return __copy_tofrom_user(to, (__force void __user *)from, n);
-+ }
-+ if ((unsigned long)to < TASK_SIZE) {
-+ over = (unsigned long)to + n - TASK_SIZE;
-+ if (!__builtin_constant_p(n))
-+ check_object_size(from, n - over, true);
-+ return __copy_tofrom_user(to, (__force void __user *)from,
-+ n - over) + over;
-+ }
-+ return n;
-+}
-+
-+#else /* __powerpc64__ */
-+
-+#define __copy_in_user(to, from, size) \
-+ __copy_tofrom_user((to), (from), (size))
-+
-+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
-+{
-+ if ((long)n < 0 || n > INT_MAX)
-+ return n;
-+
-+ if (!__builtin_constant_p(n))
-+ check_object_size(to, n, false);
-+
-+ if (likely(access_ok(VERIFY_READ, from, n)))
-+ n = __copy_from_user(to, from, n);
-+ else
-+ memset(to, 0, n);
-+ return n;
-+}
-+
-+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
-+{
-+ if ((long)n < 0 || n > INT_MAX)
-+ return n;
-+
-+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
-+ if (!__builtin_constant_p(n))
-+ check_object_size(from, n, true);
-+ n = __copy_to_user(to, from, n);
-+ }
-+ return n;
-+}
-+
-+extern unsigned long copy_in_user(void __user *to, const void __user *from,
-+ unsigned long n);
-+
-+#endif /* __powerpc64__ */
-+
- extern unsigned long __clear_user(void __user *addr, unsigned long size);
-
- static inline unsigned long clear_user(void __user *addr, unsigned long size)
-diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
-index ce4f7f1..fed0f27 100644
---- a/arch/powerpc/kernel/Makefile
-+++ b/arch/powerpc/kernel/Makefile
-@@ -14,6 +14,11 @@ CFLAGS_prom_init.o += -fPIC
- CFLAGS_btext.o += -fPIC
- endif
-
-+CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
-+CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
-+CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
-+CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
-+
- ifdef CONFIG_FUNCTION_TRACER
- # Do not trace early boot code
- CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
-@@ -26,6 +31,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
- CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
- endif
-
-+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
-+
- obj-y := cputable.o ptrace.o syscalls.o \
- irq.o align.o signal_32.o pmc.o vdso.o \
- init_task.o process.o systbl.o idle.o \
-diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
-index 429983c..7af363b 100644
---- a/arch/powerpc/kernel/exceptions-64e.S
-+++ b/arch/powerpc/kernel/exceptions-64e.S
-@@ -587,6 +587,7 @@ storage_fault_common:
- std r14,_DAR(r1)
- std r15,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
-+ bl .save_nvgprs
- mr r4,r14
- mr r5,r15
- ld r14,PACA_EXGEN+EX_R14(r13)
-@@ -596,8 +597,7 @@ storage_fault_common:
- cmpdi r3,0
- bne- 1f
- b .ret_from_except_lite
--1: bl .save_nvgprs
-- mr r5,r3
-+1: mr r5,r3
- addi r3,r1,STACK_FRAME_OVERHEAD
- ld r4,_DAR(r1)
- bl .bad_page_fault
-diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
-index 8c3baa0..4d8c6f1 100644
---- a/arch/powerpc/kernel/exceptions-64s.S
-+++ b/arch/powerpc/kernel/exceptions-64s.S
-@@ -1004,10 +1004,10 @@ handle_page_fault:
- 11: ld r4,_DAR(r1)
- ld r5,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
-+ bl .save_nvgprs
- bl .do_page_fault
- cmpdi r3,0
- beq+ 13f
-- bl .save_nvgprs
- mr r5,r3
- addi r3,r1,STACK_FRAME_OVERHEAD
- lwz r4,_DAR(r1)
-diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
-index 745c1e7..d231072 100644
---- a/arch/powerpc/kernel/irq.c
-+++ b/arch/powerpc/kernel/irq.c
-@@ -324,6 +324,8 @@ static inline void handle_one_irq(unsigned int irq)
- set_bits(irqtp->flags, &curtp->flags);
- }
-
-+extern void gr_handle_kernel_exploit(void);
-+
- static inline void check_stack_overflow(void)
- {
- #ifdef CONFIG_DEBUG_STACKOVERFLOW
-@@ -336,6 +338,7 @@ static inline void check_stack_overflow(void)
- printk("do_IRQ: stack overflow: %ld\n",
- sp - sizeof(struct thread_info));
- dump_stack();
-+ gr_handle_kernel_exploit();
- }
- #endif
- }
-@@ -547,9 +550,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
- host->ops = ops;
- host->of_node = of_node_get(of_node);
-
-- if (host->ops->match == NULL)
-- host->ops->match = default_irq_host_match;
--
- raw_spin_lock_irqsave(&irq_big_lock, flags);
-
- /* If it's a legacy controller, check for duplicates and
-@@ -622,7 +622,12 @@ struct irq_host *irq_find_host(struct device_node *node)
- */
- raw_spin_lock_irqsave(&irq_big_lock, flags);
- list_for_each_entry(h, &irq_hosts, link)
-- if (h->ops->match(h, node)) {
-+ if (h->ops->match) {
-+ if (h->ops->match(h, node)) {
-+ found = h;
-+ break;
-+ }
-+ } else if (default_irq_host_match(h, node)) {
- found = h;
- break;
- }
-diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
-index 2e3200c..7118986 100644
---- a/arch/powerpc/kernel/module_32.c
-+++ b/arch/powerpc/kernel/module_32.c
-@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
- me->arch.core_plt_section = i;
- }
- if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
-- printk("Module doesn't contain .plt or .init.plt sections.\n");
-+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
- return -ENOEXEC;
- }
-
-@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
-
- DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
- /* Init, or core PLT? */
-- if (location >= mod->module_core
-- && location < mod->module_core + mod->core_size)
-+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
-+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
- entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
-- else
-+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
-+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
- entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
-+ else {
-+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
-+ return ~0UL;
-+ }
-
- /* Find this entry, or if that fails, the next avail. entry */
- while (entry->jump[0]) {
-@@ -300,7 +305,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
- }
- #ifdef CONFIG_DYNAMIC_FTRACE
- module->arch.tramp =
-- do_plt_call(module->module_core,
-+ do_plt_call(module->module_core_rx,
- (unsigned long)ftrace_caller,
- sechdrs, module);
- #endif
-diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
-index d687e3f..d2a6750 100644
---- a/arch/powerpc/kernel/process.c
-+++ b/arch/powerpc/kernel/process.c
-@@ -660,8 +660,8 @@ void show_regs(struct pt_regs * regs)
- * Lookup NIP late so we have the best change of getting the
- * above info out without failing
- */
-- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
-- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
-+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
-+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
- #endif
- show_stack(current, (unsigned long *) regs->gpr[1]);
- if (!user_mode(regs))
-@@ -1157,10 +1157,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
- newsp = stack[0];
- ip = stack[STACK_FRAME_LR_SAVE];
- if (!firstframe || ip != lr) {
-- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
-+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
- #ifdef CONFIG_FUNCTION_GRAPH_TRACER
- if ((ip == rth || ip == mrth) && curr_frame >= 0) {
-- printk(" (%pS)",
-+ printk(" (%pA)",
- (void *)current->ret_stack[curr_frame].ret);
- curr_frame--;
- }
-@@ -1180,7 +1180,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
- struct pt_regs *regs = (struct pt_regs *)
- (sp + STACK_FRAME_OVERHEAD);
- lr = regs->link;
-- printk("--- Exception: %lx at %pS\n LR = %pS\n",
-+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
- regs->trap, (void *)regs->nip, (void *)lr);
- firstframe = 1;
- }
-@@ -1250,63 +1250,8 @@ void free_thread_info(struct thread_info *ti)
- void thread_info_cache_init(void)
- {
- thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
-- THREAD_SIZE, 0, NULL);
-+ THREAD_SIZE, SLAB_USERCOPY, NULL);
- BUG_ON(thread_info_cache == NULL);
- }
-
- #endif /* THREAD_SHIFT < PAGE_SHIFT */
--
--unsigned long arch_align_stack(unsigned long sp)
--{
-- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-- sp -= get_random_int() & ~PAGE_MASK;
-- return sp & ~0xf;
--}
--
--static inline unsigned long brk_rnd(void)
--{
-- unsigned long rnd = 0;
--
-- /* 8MB for 32bit, 1GB for 64bit */
-- if (is_32bit_task())
-- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
-- else
-- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
--
-- return rnd << PAGE_SHIFT;
--}
--
--unsigned long arch_randomize_brk(struct mm_struct *mm)
--{
-- unsigned long base = mm->brk;
-- unsigned long ret;
--
--#ifdef CONFIG_PPC_STD_MMU_64
-- /*
-- * If we are using 1TB segments and we are allowed to randomise
-- * the heap, we can put it above 1TB so it is backed by a 1TB
-- * segment. Otherwise the heap will be in the bottom 1TB
-- * which always uses 256MB segments and this may result in a
-- * performance penalty.
-- */
-- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
-- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
--#endif
--
-- ret = PAGE_ALIGN(base + brk_rnd());
--
-- if (ret < mm->brk)
-- return mm->brk;
--
-- return ret;
--}
--
--unsigned long randomize_et_dyn(unsigned long base)
--{
-- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
--
-- if (ret < base)
-- return base;
--
-- return ret;
--}
-diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
-index 5de73db..a05f61c 100644
---- a/arch/powerpc/kernel/ptrace.c
-+++ b/arch/powerpc/kernel/ptrace.c
-@@ -1702,6 +1702,10 @@ long arch_ptrace(struct task_struct *child, long request,
- return ret;
- }
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+extern void gr_delayed_cred_worker(void);
-+#endif
-+
- /*
- * We must return the syscall number to actually look up in the table.
- * This can be -1L to skip running any syscall at all.
-@@ -1712,6 +1716,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
-
- secure_computing(regs->gpr[0]);
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
-+ gr_delayed_cred_worker();
-+#endif
-+
- if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs))
- /*
-@@ -1748,6 +1757,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
- {
- int step;
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
-+ gr_delayed_cred_worker();
-+#endif
-+
- if (unlikely(current->audit_context))
- audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
- regs->result);
-diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
-index fa1e56b..e8ef867 100644
---- a/arch/powerpc/kernel/signal_32.c
-+++ b/arch/powerpc/kernel/signal_32.c
-@@ -865,7 +865,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
- /* Save user registers on the stack */
- frame = &rt_sf->uc.uc_mcontext;
- addr = frame;
-- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
-+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
- if (save_user_regs(regs, frame, 0, 1))
- goto badframe;
- regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
-diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
-index 60d1f75..2c29348 100644
---- a/arch/powerpc/kernel/signal_64.c
-+++ b/arch/powerpc/kernel/signal_64.c
-@@ -435,7 +435,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
- current->thread.fpscr.val = 0;
-
- /* Set up to return from userspace. */
-- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
-+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
- regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
- } else {
- err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
-diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
-index f2496f2..4e3cc47 100644
---- a/arch/powerpc/kernel/syscalls.c
-+++ b/arch/powerpc/kernel/syscalls.c
-@@ -107,11 +107,11 @@ long ppc64_personality(unsigned long personality)
- long ret;
-
- if (personality(current->personality) == PER_LINUX32
-- && personality == PER_LINUX)
-- personality = PER_LINUX32;
-+ && personality(personality) == PER_LINUX)
-+ personality = (personality & ~PER_MASK) | PER_LINUX32;
- ret = sys_personality(personality);
-- if (ret == PER_LINUX32)
-- ret = PER_LINUX;
-+ if (personality(ret) == PER_LINUX32)
-+ ret = (ret & ~PER_MASK) | PER_LINUX;
- return ret;
- }
- #endif
-diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
-index ca683a1..ab912dd 100644
---- a/arch/powerpc/kernel/sysfs.c
-+++ b/arch/powerpc/kernel/sysfs.c
-@@ -531,7 +531,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
-+static struct notifier_block sysfs_cpu_nb = {
- .notifier_call = sysfs_cpu_notify,
- };
-
-diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
-index 9844662..04a2a1e 100644
---- a/arch/powerpc/kernel/traps.c
-+++ b/arch/powerpc/kernel/traps.c
-@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
- static inline void pmac_backlight_unblank(void) { }
- #endif
-
-+extern void gr_handle_kernel_exploit(void);
-+
- int die(const char *str, struct pt_regs *regs, long err)
- {
- static struct {
-@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
- if (panic_on_oops)
- panic("Fatal exception");
-
-+ gr_handle_kernel_exploit();
-+
- oops_exit();
- do_exit(err);
-
-diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
-index 7d14bb69..1305601 100644
---- a/arch/powerpc/kernel/vdso.c
-+++ b/arch/powerpc/kernel/vdso.c
-@@ -35,6 +35,7 @@
- #include <asm/firmware.h>
- #include <asm/vdso.h>
- #include <asm/vdso_datapage.h>
-+#include <asm/mman.h>
-
- #include "setup.h"
-
-@@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
- vdso_base = VDSO32_MBASE;
- #endif
-
-- current->mm->context.vdso_base = 0;
-+ current->mm->context.vdso_base = ~0UL;
-
- /* vDSO has a problem and was disabled, just don't "enable" it for the
- * process
-@@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
- vdso_base = get_unmapped_area(NULL, vdso_base,
- (vdso_pages << PAGE_SHIFT) +
- ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
-- 0, 0);
-+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
- if (IS_ERR_VALUE(vdso_base)) {
- rc = vdso_base;
- goto fail_mmapsem;
-diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
-index 607fbdf..ac940f3 100644
---- a/arch/powerpc/kvm/powerpc.c
-+++ b/arch/powerpc/kvm/powerpc.c
-@@ -730,7 +730,7 @@ out:
- return r;
- }
-
--int kvm_arch_init(void *opaque)
-+int kvm_arch_init(const void *opaque)
- {
- return 0;
- }
-diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
-index 5eea6f3..5d10396 100644
---- a/arch/powerpc/lib/usercopy_64.c
-+++ b/arch/powerpc/lib/usercopy_64.c
-@@ -9,22 +9,6 @@
- #include <linux/module.h>
- #include <asm/uaccess.h>
-
--unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
--{
-- if (likely(access_ok(VERIFY_READ, from, n)))
-- n = __copy_from_user(to, from, n);
-- else
-- memset(to, 0, n);
-- return n;
--}
--
--unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
--{
-- if (likely(access_ok(VERIFY_WRITE, to, n)))
-- n = __copy_to_user(to, from, n);
-- return n;
--}
--
- unsigned long copy_in_user(void __user *to, const void __user *from,
- unsigned long n)
- {
-@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
- return n;
- }
-
--EXPORT_SYMBOL(copy_from_user);
--EXPORT_SYMBOL(copy_to_user);
- EXPORT_SYMBOL(copy_in_user);
-
-diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
-index 7450843..9f8cfc7 100644
---- a/arch/powerpc/mm/fault.c
-+++ b/arch/powerpc/mm/fault.c
-@@ -32,6 +32,10 @@
- #include <linux/perf_event.h>
- #include <linux/magic.h>
- #include <linux/ratelimit.h>
-+#include <linux/slab.h>
-+#include <linux/pagemap.h>
-+#include <linux/compiler.h>
-+#include <linux/unistd.h>
-
- #include <asm/firmware.h>
- #include <asm/page.h>
-@@ -43,6 +47,7 @@
- #include <asm/tlbflush.h>
- #include <asm/siginfo.h>
- #include <mm/mmu_decl.h>
-+#include <asm/ptrace.h>
-
- #ifdef CONFIG_KPROBES
- static inline int notify_page_fault(struct pt_regs *regs)
-@@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
- }
- #endif
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+/*
-+ * PaX: decide what to do with offenders (regs->nip = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+ return 1;
-+}
-+
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ unsigned long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 5; i++) {
-+ unsigned int c;
-+ if (get_user(c, (unsigned int __user *)pc+i))
-+ printk(KERN_CONT "???????? ");
-+ else
-+ printk(KERN_CONT "%08x ", c);
-+ }
-+ printk("\n");
-+}
-+#endif
-+
- /*
- * Check whether the instruction at regs->nip is a store using
- * an update addressing form which will update r1.
-@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
- * indicate errors in DSISR but can validly be set in SRR1.
- */
- if (trap == 0x400)
-- error_code &= 0x48200000;
-+ error_code &= 0x58200000;
- else
- is_write = error_code & DSISR_ISSTORE;
- #else
-@@ -259,7 +291,7 @@ good_area:
- * "undefined". Of those that can be set, this is the only
- * one which seems bad.
- */
-- if (error_code & 0x10000000)
-+ if (error_code & DSISR_GUARDED)
- /* Guarded storage error. */
- goto bad_area;
- #endif /* CONFIG_8xx */
-@@ -274,7 +306,7 @@ good_area:
- * processors use the same I/D cache coherency mechanism
- * as embedded.
- */
-- if (error_code & DSISR_PROTFAULT)
-+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
- goto bad_area;
- #endif /* CONFIG_PPC_STD_MMU */
-
-@@ -345,6 +377,23 @@ bad_area:
- bad_area_nosemaphore:
- /* User mode accesses cause a SIGSEGV */
- if (user_mode(regs)) {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
-+#ifdef CONFIG_PPC_STD_MMU
-+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
-+#else
-+ if (is_exec && regs->nip == address) {
-+#endif
-+ switch (pax_handle_fetch_fault(regs)) {
-+ }
-+
-+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
-+ do_group_exit(SIGKILL);
-+ }
-+ }
-+#endif
-+
- _exception(SIGSEGV, regs, code, address);
- return 0;
- }
-diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
-index 5a783d8..522eb00 100644
---- a/arch/powerpc/mm/mmap_64.c
-+++ b/arch/powerpc/mm/mmap_64.c
-@@ -61,10 +61,14 @@ static inline int mmap_is_legacy(void)
- *
- * To avoid this we can shift the randomness by 1 bit.
- */
--static unsigned long mmap_rnd(void)
-+static unsigned long mmap_rnd(struct mm_struct *mm)
- {
- unsigned long rnd = 0;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (current->flags & PF_RANDOMIZE) {
- /* 8MB for 32bit, 1GB for 64bit */
- if (is_32bit_task())
-@@ -75,7 +79,7 @@ static unsigned long mmap_rnd(void)
- return (rnd << PAGE_SHIFT) * 2;
- }
-
--static inline unsigned long mmap_base(void)
-+static inline unsigned long mmap_base(struct mm_struct *mm)
- {
- unsigned long gap = rlimit(RLIMIT_STACK);
-
-@@ -84,7 +88,7 @@ static inline unsigned long mmap_base(void)
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
-- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
-+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
- }
-
- /*
-@@ -99,10 +103,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
- } else {
-- mm->mmap_base = mmap_base();
-+ mm->mmap_base = mmap_base(mm);
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- mm->unmap_area = arch_unmap_area_topdown;
- }
-diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
-index 5b63bd3..248942d 100644
---- a/arch/powerpc/mm/mmu_context_nohash.c
-+++ b/arch/powerpc/mm/mmu_context_nohash.c
-@@ -370,7 +370,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
-+static struct notifier_block mmu_context_cpu_nb = {
- .notifier_call = mmu_context_cpu_notify,
- };
-
-diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
-index f4b78a3..a11ee27 100644
---- a/arch/powerpc/mm/numa.c
-+++ b/arch/powerpc/mm/numa.c
-@@ -659,7 +659,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
- unsigned int n, rc, ranges, is_kexec_kdump = 0;
- unsigned long lmb_size, base, size, sz;
- int nid;
-- struct assoc_arrays aa;
-+ struct assoc_arrays aa = { .arrays = NULL };
-
- n = of_get_drconf_memory(memory, &dm);
- if (!n)
-@@ -964,7 +964,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
- return ret;
- }
-
--static struct notifier_block __cpuinitdata ppc64_numa_nb = {
-+static struct notifier_block ppc64_numa_nb = {
- .notifier_call = cpu_numa_callback,
- .priority = 1 /* Must run before sched domains notifier. */
- };
-diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
-index 73709f7..8e825a8 100644
---- a/arch/powerpc/mm/slice.c
-+++ b/arch/powerpc/mm/slice.c
-@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
- if ((mm->task_size - len) < addr)
- return 0;
- vma = find_vma(mm, addr);
-- return (!vma || (addr + len) <= vma->vm_start);
-+ return check_heap_stack_gap(vma, &addr, len, 0);
- }
-
- static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
-@@ -256,7 +256,7 @@ full_search:
- addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
- continue;
- }
-- if (!vma || addr + len <= vma->vm_start) {
-+ if (check_heap_stack_gap(vma, &addr, len, 0)) {
- /*
- * Remember the place where we stopped the search:
- */
-@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
- }
- }
-
-- addr = mm->mmap_base;
-- while (addr > len) {
-+ if (mm->mmap_base < len)
-+ addr = -ENOMEM;
-+ else
-+ addr = mm->mmap_base - len;
-+
-+ while (!IS_ERR_VALUE(addr)) {
- /* Go down by chunk size */
-- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
-+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
-
- /* Check for hit with different page size */
- mask = slice_range_to_mask(addr, len);
-@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
- * return with success:
- */
- vma = find_vma(mm, addr);
-- if (!vma || (addr + len) <= vma->vm_start) {
-+ if (check_heap_stack_gap(vma, &addr, len, 0)) {
- /* remember the address as a hint for next time */
- if (use_cache)
- mm->free_area_cache = addr;
-@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
- mm->cached_hole_size = vma->vm_start - addr;
-
- /* try just below the current vma->vm_start */
-- addr = vma->vm_start;
-+ addr = skip_heap_stack_gap(vma, len, 0);
- }
-
- /*
-@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
- if (fixed && addr > (mm->task_size - len))
- return -EINVAL;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
-+ addr = 0;
-+#endif
-+
- /* If hint, make sure it matches our alignment restrictions */
- if (!fixed && addr) {
- addr = _ALIGN_UP(addr, 1ul << pshift);
-diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
-index 14be2bd..56f51cb 100644
---- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
-+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
-@@ -400,8 +400,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
- }
-
- static struct pci_ops scc_pciex_pci_ops = {
-- scc_pciex_read_config,
-- scc_pciex_write_config,
-+ .read = scc_pciex_read_config,
-+ .write = scc_pciex_write_config,
- };
-
- static void pciex_clear_intr_all(unsigned int __iomem *base)
-diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
-index 0cfece4..2f1a0e5 100644
---- a/arch/powerpc/platforms/cell/spufs/file.c
-+++ b/arch/powerpc/platforms/cell/spufs/file.c
-@@ -281,9 +281,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
- return VM_FAULT_NOPAGE;
- }
-
--static int spufs_mem_mmap_access(struct vm_area_struct *vma,
-+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
- unsigned long address,
-- void *buf, int len, int write)
-+ void *buf, size_t len, int write)
- {
- struct spu_context *ctx = vma->vm_file->private_data;
- unsigned long offset = address - vma->vm_start;
-diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
-index 941d5cb..1803d9e 100644
---- a/arch/powerpc/platforms/cell/spufs/inode.c
-+++ b/arch/powerpc/platforms/cell/spufs/inode.c
-@@ -811,6 +811,7 @@ static struct file_system_type spufs_type = {
- .mount = spufs_mount,
- .kill_sb = kill_litter_super,
- };
-+MODULE_ALIAS_FS("spufs");
-
- static int __init spufs_init(void)
- {
-diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
-index 3394254..8c6825c 100644
---- a/arch/powerpc/platforms/powermac/smp.c
-+++ b/arch/powerpc/platforms/powermac/smp.c
-@@ -886,7 +886,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
-+static struct notifier_block smp_core99_cpu_nb = {
- .notifier_call = smp_core99_cpu_notify,
- };
- #endif /* CONFIG_HOTPLUG_CPU */
-diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
-index d2383cf..a6d33c8 100644
---- a/arch/powerpc/platforms/pseries/eeh_event.c
-+++ b/arch/powerpc/platforms/pseries/eeh_event.c
-@@ -61,7 +61,7 @@ static int eeh_event_handler(void * dummy)
- struct eeh_event *event;
- struct pci_dn *pdn;
-
-- daemonize ("eehd");
-+ set_task_comm(current, "eehd");
- set_current_state(TASK_INTERRUPTIBLE);
-
- spin_lock_irqsave(&eeh_eventlist_lock, flags);
-diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
-index 24bff4f..0248123 100644
---- a/arch/s390/appldata/appldata_base.c
-+++ b/arch/s390/appldata/appldata_base.c
-@@ -610,7 +610,7 @@ static int __cpuinit appldata_cpu_notify(struct notifier_block *self,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata appldata_nb = {
-+static struct notifier_block appldata_nb = {
- .notifier_call = appldata_cpu_notify,
- };
-
-diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
-index 481f4f7..f16ec59 100644
---- a/arch/s390/hypfs/inode.c
-+++ b/arch/s390/hypfs/inode.c
-@@ -454,6 +454,7 @@ static struct file_system_type hypfs_type = {
- .mount = hypfs_mount,
- .kill_sb = hypfs_kill_super
- };
-+MODULE_ALIAS_FS("s390_hypfs");
-
- static const struct super_operations hypfs_s_ops = {
- .statfs = simple_statfs,
-diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
-index 8517d2a..d2738d4 100644
---- a/arch/s390/include/asm/atomic.h
-+++ b/arch/s390/include/asm/atomic.h
-@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
- #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
- #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
-+
- #define smp_mb__before_atomic_dec() smp_mb()
- #define smp_mb__after_atomic_dec() smp_mb()
- #define smp_mb__before_atomic_inc() smp_mb()
-diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
-index 2a30d5a..5e5586f 100644
---- a/arch/s390/include/asm/cache.h
-+++ b/arch/s390/include/asm/cache.h
-@@ -11,8 +11,10 @@
- #ifndef __ARCH_S390_CACHE_H
- #define __ARCH_S390_CACHE_H
-
--#define L1_CACHE_BYTES 256
-+#include <linux/const.h>
-+
- #define L1_CACHE_SHIFT 8
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
- #define NET_SKB_PAD 32
-
- #define __read_mostly __attribute__((__section__(".data..read_mostly")))
-diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
-index 547f1a6..3e6d0a0 100644
---- a/arch/s390/include/asm/elf.h
-+++ b/arch/s390/include/asm/elf.h
-@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
--extern unsigned long randomize_et_dyn(unsigned long base);
--#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
-+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
-+
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
-+
-+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
-+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
-+#endif
-
- /* This yields a mask that user programs can use to figure out what
- instruction set this CPU supports. */
-@@ -183,7 +189,8 @@ extern char elf_platform[];
- #define ELF_PLATFORM (elf_platform)
-
- #ifndef __s390x__
--#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
-+#define SET_PERSONALITY(ex) \
-+ set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
- #else /* __s390x__ */
- #define SET_PERSONALITY(ex) \
- do { \
-@@ -211,7 +218,4 @@ struct linux_binprm;
- #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
- int arch_setup_additional_pages(struct linux_binprm *, int);
-
--extern unsigned long arch_randomize_brk(struct mm_struct *mm);
--#define arch_randomize_brk arch_randomize_brk
--
- #endif
-diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
-index ef573c1..75a1ce6 100644
---- a/arch/s390/include/asm/system.h
-+++ b/arch/s390/include/asm/system.h
-@@ -262,7 +262,7 @@ extern void (*_machine_restart)(char *command);
- extern void (*_machine_halt)(void);
- extern void (*_machine_power_off)(void);
-
--extern unsigned long arch_align_stack(unsigned long sp);
-+#define arch_align_stack(x) ((x) & ~0xfUL)
-
- static inline int tprot(unsigned long addr)
- {
-diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
-index 2b23885..3db7651 100644
---- a/arch/s390/include/asm/uaccess.h
-+++ b/arch/s390/include/asm/uaccess.h
-@@ -55,6 +55,7 @@
- 1; \
- })
-
-+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
- #define access_ok(type, addr, size) __access_ok(addr, size)
-
- /*
-@@ -235,6 +236,10 @@ static inline unsigned long __must_check
- copy_to_user(void __user *to, const void *from, unsigned long n)
- {
- might_fault();
-+
-+ if ((long)n < 0)
-+ return n;
-+
- if (access_ok(VERIFY_WRITE, to, n))
- n = __copy_to_user(to, from, n);
- return n;
-@@ -260,6 +265,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
- static inline unsigned long __must_check
- __copy_from_user(void *to, const void __user *from, unsigned long n)
- {
-+ if ((long)n < 0)
-+ return n;
-+
- if (__builtin_constant_p(n) && (n <= 256))
- return uaccess.copy_from_user_small(n, from, to);
- else
-@@ -291,10 +299,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
- static inline unsigned long __must_check
- copy_from_user(void *to, const void __user *from, unsigned long n)
- {
-- unsigned int sz = __compiletime_object_size(to);
-+ size_t sz = __compiletime_object_size(to);
-
- might_fault();
-- if (unlikely(sz != -1 && sz < n)) {
-+
-+ if ((long)n < 0)
-+ return n;
-+
-+ if (unlikely(sz != (size_t)-1 && sz < n)) {
- copy_from_user_overflow();
- return n;
- }
-diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
-index dfcb343..eda788a 100644
---- a/arch/s390/kernel/module.c
-+++ b/arch/s390/kernel/module.c
-@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
-
- /* Increase core size by size of got & plt and set start
- offsets for got and plt. */
-- me->core_size = ALIGN(me->core_size, 4);
-- me->arch.got_offset = me->core_size;
-- me->core_size += me->arch.got_size;
-- me->arch.plt_offset = me->core_size;
-- me->core_size += me->arch.plt_size;
-+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
-+ me->arch.got_offset = me->core_size_rw;
-+ me->core_size_rw += me->arch.got_size;
-+ me->arch.plt_offset = me->core_size_rx;
-+ me->core_size_rx += me->arch.plt_size;
- return 0;
- }
-
-@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- if (info->got_initialized == 0) {
- Elf_Addr *gotent;
-
-- gotent = me->module_core + me->arch.got_offset +
-+ gotent = me->module_core_rw + me->arch.got_offset +
- info->got_offset;
- *gotent = val;
- info->got_initialized = 1;
-@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- else if (r_type == R_390_GOTENT ||
- r_type == R_390_GOTPLTENT)
- *(unsigned int *) loc =
-- (val + (Elf_Addr) me->module_core - loc) >> 1;
-+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
- else if (r_type == R_390_GOT64 ||
- r_type == R_390_GOTPLT64)
- *(unsigned long *) loc = val;
-@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
- if (info->plt_initialized == 0) {
- unsigned int *ip;
-- ip = me->module_core + me->arch.plt_offset +
-+ ip = me->module_core_rx + me->arch.plt_offset +
- info->plt_offset;
- #ifndef CONFIG_64BIT
- ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
-@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- val - loc + 0xffffUL < 0x1ffffeUL) ||
- (r_type == R_390_PLT32DBL &&
- val - loc + 0xffffffffULL < 0x1fffffffeULL)))
-- val = (Elf_Addr) me->module_core +
-+ val = (Elf_Addr) me->module_core_rx +
- me->arch.plt_offset +
- info->plt_offset;
- val += rela->r_addend - loc;
-@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- case R_390_GOTOFF32: /* 32 bit offset to GOT. */
- case R_390_GOTOFF64: /* 64 bit offset to GOT. */
- val = val + rela->r_addend -
-- ((Elf_Addr) me->module_core + me->arch.got_offset);
-+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
- if (r_type == R_390_GOTOFF16)
- *(unsigned short *) loc = val;
- else if (r_type == R_390_GOTOFF32)
-@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- break;
- case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
- case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
-- val = (Elf_Addr) me->module_core + me->arch.got_offset +
-+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
- rela->r_addend - loc;
- if (r_type == R_390_GOTPC)
- *(unsigned int *) loc = val;
-diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
-index 53088e2..9f44a36 100644
---- a/arch/s390/kernel/process.c
-+++ b/arch/s390/kernel/process.c
-@@ -320,39 +320,3 @@ unsigned long get_wchan(struct task_struct *p)
- }
- return 0;
- }
--
--unsigned long arch_align_stack(unsigned long sp)
--{
-- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-- sp -= get_random_int() & ~PAGE_MASK;
-- return sp & ~0xf;
--}
--
--static inline unsigned long brk_rnd(void)
--{
-- /* 8MB for 32bit, 1GB for 64bit */
-- if (is_32bit_task())
-- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
-- else
-- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
--}
--
--unsigned long arch_randomize_brk(struct mm_struct *mm)
--{
-- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
--
-- if (ret < mm->brk)
-- return mm->brk;
-- return ret;
--}
--
--unsigned long randomize_et_dyn(unsigned long base)
--{
-- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
--
-- if (!(current->flags & PF_RANDOMIZE))
-- return base;
-- if (ret < base)
-- return base;
-- return ret;
--}
-diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
-index 1df64a8..aea2a39 100644
---- a/arch/s390/kernel/smp.c
-+++ b/arch/s390/kernel/smp.c
-@@ -1035,7 +1035,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
- return notifier_from_errno(err);
- }
-
--static struct notifier_block __cpuinitdata smp_cpu_nb = {
-+static struct notifier_block smp_cpu_nb = {
- .notifier_call = smp_cpu_notify,
- };
-
-diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
-index c70b3d8..d7d5b01 100644
---- a/arch/s390/mm/mmap.c
-+++ b/arch/s390/mm/mmap.c
-@@ -60,6 +60,12 @@ static inline int mmap_is_legacy(void)
-
- static unsigned long mmap_rnd(void)
- {
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
-+ return 0;
-+#endif
-+
- if (!(current->flags & PF_RANDOMIZE))
- return 0;
- /* 8MB randomization for mmap_base */
-@@ -92,10 +98,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
- } else {
- mm->mmap_base = mmap_base();
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- mm->unmap_area = arch_unmap_area_topdown;
- }
-@@ -175,10 +193,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- mm->get_unmapped_area = s390_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
- } else {
- mm->mmap_base = mmap_base();
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+#endif
-+
- mm->get_unmapped_area = s390_get_unmapped_area_topdown;
- mm->unmap_area = arch_unmap_area_topdown;
- }
-diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
-index ae3d59f..f65f075 100644
---- a/arch/score/include/asm/cache.h
-+++ b/arch/score/include/asm/cache.h
-@@ -1,7 +1,9 @@
- #ifndef _ASM_SCORE_CACHE_H
- #define _ASM_SCORE_CACHE_H
-
-+#include <linux/const.h>
-+
- #define L1_CACHE_SHIFT 4
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #endif /* _ASM_SCORE_CACHE_H */
-diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
-index 589d5c7..669e274 100644
---- a/arch/score/include/asm/system.h
-+++ b/arch/score/include/asm/system.h
-@@ -17,7 +17,7 @@ do { \
- #define finish_arch_switch(prev) do {} while (0)
-
- typedef void (*vi_handler_t)(void);
--extern unsigned long arch_align_stack(unsigned long sp);
-+#define arch_align_stack(x) (x)
-
- #define mb() barrier()
- #define rmb() barrier()
-diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
-index 25d0803..d6c8e36 100644
---- a/arch/score/kernel/process.c
-+++ b/arch/score/kernel/process.c
-@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
-
- return task_pt_regs(task)->cp0_epc;
- }
--
--unsigned long arch_align_stack(unsigned long sp)
--{
-- return sp;
--}
-diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
-index ef9e555..331bd29 100644
---- a/arch/sh/include/asm/cache.h
-+++ b/arch/sh/include/asm/cache.h
-@@ -9,10 +9,11 @@
- #define __ASM_SH_CACHE_H
- #ifdef __KERNEL__
-
-+#include <linux/const.h>
- #include <linux/init.h>
- #include <cpu/cache.h>
-
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #define __read_mostly __attribute__((__section__(".data..read_mostly")))
-
-diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
-index 03f2b55..b0270327 100644
---- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
-+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
-@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
-+static struct notifier_block shx3_cpu_notifier = {
- .notifier_call = shx3_cpu_callback,
- };
-
-diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
-index 325f98b17..6fdc4f7 100644
---- a/arch/sh/kernel/process.c
-+++ b/arch/sh/kernel/process.c
-@@ -54,7 +54,7 @@ void free_thread_info(struct thread_info *ti)
- void thread_info_cache_init(void)
- {
- thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
-- THREAD_SIZE, SLAB_PANIC, NULL);
-+ THREAD_SIZE, SLAB_PANIC | SLAB_USERCOPY, NULL);
- }
- #else
- struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
-diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
-index afeb710..8da5c79 100644
---- a/arch/sh/mm/mmap.c
-+++ b/arch/sh/mm/mmap.c
-@@ -49,6 +49,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- struct vm_area_struct *vma;
- unsigned long start_addr;
- int do_colour_align;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
-
- if (flags & MAP_FIXED) {
- /* We do not accept a shared mapping if it would violate
-@@ -74,8 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma(mm, addr);
-- if (TASK_SIZE - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
- return addr;
- }
-
-@@ -106,7 +106,7 @@ full_search:
- }
- return -ENOMEM;
- }
-- if (likely(!vma || addr + len <= vma->vm_start)) {
-+ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) {
- /*
- * Remember the place where we stopped the search:
- */
-@@ -131,6 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- struct mm_struct *mm = current->mm;
- unsigned long addr = addr0;
- int do_colour_align;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
-
- if (flags & MAP_FIXED) {
- /* We do not accept a shared mapping if it would violate
-@@ -157,8 +158,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma(mm, addr);
-- if (TASK_SIZE - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
- return addr;
- }
-
-@@ -178,28 +178,29 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
-
- /* make sure it can fit in the remaining address space */
- if (likely(addr > len)) {
-- vma = find_vma(mm, addr-len);
-- if (!vma || addr <= vma->vm_start) {
-+ addr -= len;
-+ vma = find_vma(mm, addr);
-+ if (check_heap_stack_gap(vma, &addr, len, offset)) {
- /* remember the address as a hint for next time */
-- return (mm->free_area_cache = addr-len);
-+ return (mm->free_area_cache = addr);
- }
- }
-
- if (unlikely(mm->mmap_base < len))
- goto bottomup;
-
-- addr = mm->mmap_base-len;
-- if (do_colour_align)
-- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-+ addr = mm->mmap_base - len;
-
- do {
-+ if (do_colour_align)
-+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
- /*
- * Lookup failure means no vma is above this address,
- * else if new region fits below vma->vm_start,
- * return with success:
- */
- vma = find_vma(mm, addr);
-- if (likely(!vma || addr+len <= vma->vm_start)) {
-+ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) {
- /* remember the address as a hint for next time */
- return (mm->free_area_cache = addr);
- }
-@@ -209,10 +210,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- mm->cached_hole_size = vma->vm_start - addr;
-
- /* try just below the current vma->vm_start */
-- addr = vma->vm_start-len;
-- if (do_colour_align)
-- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-- } while (likely(len < vma->vm_start));
-+ addr = skip_heap_stack_gap(vma, len, offset);
-+ } while (!IS_ERR_VALUE(addr));
-
- bottomup:
- /*
-diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
-index eddcfb3..b117d90 100644
---- a/arch/sparc/Makefile
-+++ b/arch/sparc/Makefile
-@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
- # Export what is needed by arch/sparc/boot/Makefile
- export VMLINUX_INIT VMLINUX_MAIN
- VMLINUX_INIT := $(head-y) $(init-y)
--VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
-+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
- VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
- VMLINUX_MAIN += $(drivers-y) $(net-y)
-
-diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
-index 07dd35e..2c6f765 100644
---- a/arch/sparc/include/asm/atomic_32.h
-+++ b/arch/sparc/include/asm/atomic_32.h
-@@ -13,6 +13,8 @@
-
- #include <linux/types.h>
-
-+#include <asm-generic/atomic64.h>
-+
- #ifdef __KERNEL__
-
- #include <asm-generic/atomic64.h>
-diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
-index 9f421df..71e4800 100644
---- a/arch/sparc/include/asm/atomic_64.h
-+++ b/arch/sparc/include/asm/atomic_64.h
-@@ -14,18 +14,40 @@
- #define ATOMIC64_INIT(i) { (i) }
-
- #define atomic_read(v) (*(volatile int *)&(v)->counter)
-+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
-+{
-+ return *(const volatile int *)&v->counter;
-+}
- #define atomic64_read(v) (*(volatile long *)&(v)->counter)
-+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
-+{
-+ return *(const volatile long *)&v->counter;
-+}
-
- #define atomic_set(v, i) (((v)->counter) = i)
-+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
-+{
-+ v->counter = i;
-+}
- #define atomic64_set(v, i) (((v)->counter) = i)
-+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
-+{
-+ v->counter = i;
-+}
-
- extern void atomic_add(int, atomic_t *);
-+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
- extern void atomic64_add(long, atomic64_t *);
-+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
- extern void atomic_sub(int, atomic_t *);
-+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
- extern void atomic64_sub(long, atomic64_t *);
-+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
-
- extern int atomic_add_ret(int, atomic_t *);
-+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
- extern long atomic64_add_ret(long, atomic64_t *);
-+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
- extern int atomic_sub_ret(int, atomic_t *);
- extern long atomic64_sub_ret(long, atomic64_t *);
-
-@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
- #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
-
- #define atomic_inc_return(v) atomic_add_ret(1, v)
-+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
-+{
-+ return atomic_add_ret_unchecked(1, v);
-+}
- #define atomic64_inc_return(v) atomic64_add_ret(1, v)
-+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
-+{
-+ return atomic64_add_ret_unchecked(1, v);
-+}
-
- #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
- #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
-
- #define atomic_add_return(i, v) atomic_add_ret(i, v)
-+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
-+{
-+ return atomic_add_ret_unchecked(i, v);
-+}
- #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
-+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
-+{
-+ return atomic64_add_ret_unchecked(i, v);
-+}
-
- /*
- * atomic_inc_and_test - increment and test
-@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
- * other cases.
- */
- #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
-+{
-+ return atomic_inc_return_unchecked(v) == 0;
-+}
- #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-
- #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
-@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
- #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
-
- #define atomic_inc(v) atomic_add(1, v)
-+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
-+{
-+ atomic_add_unchecked(1, v);
-+}
- #define atomic64_inc(v) atomic64_add(1, v)
-+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
-+{
-+ atomic64_add_unchecked(1, v);
-+}
-
- #define atomic_dec(v) atomic_sub(1, v)
-+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
-+{
-+ atomic_sub_unchecked(1, v);
-+}
- #define atomic64_dec(v) atomic64_sub(1, v)
-+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
-+{
-+ atomic64_sub_unchecked(1, v);
-+}
-
- #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
- #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
-
- #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
-+{
-+ return cmpxchg(&v->counter, old, new);
-+}
- #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
-+{
-+ return xchg(&v->counter, new);
-+}
-
- static inline int __atomic_add_unless(atomic_t *v, int a, int u)
- {
-- int c, old;
-+ int c, old, new;
- c = atomic_read(v);
- for (;;) {
-- if (unlikely(c == (u)))
-+ if (unlikely(c == u))
- break;
-- old = atomic_cmpxchg((v), c, c + (a));
-+
-+ asm volatile("addcc %2, %0, %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "tvs %%icc, 6\n"
-+#endif
-+
-+ : "=r" (new)
-+ : "0" (c), "ir" (a)
-+ : "cc");
-+
-+ old = atomic_cmpxchg(v, c, new);
- if (likely(old == c))
- break;
- c = old;
-@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
- #define atomic64_cmpxchg(v, o, n) \
- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
- #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
-+{
-+ return xchg(&v->counter, new);
-+}
-
- static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
- {
-- long c, old;
-+ long c, old, new;
- c = atomic64_read(v);
- for (;;) {
-- if (unlikely(c == (u)))
-+ if (unlikely(c == u))
- break;
-- old = atomic64_cmpxchg((v), c, c + (a));
-+
-+ asm volatile("addcc %2, %0, %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "tvs %%xcc, 6\n"
-+#endif
-+
-+ : "=r" (new)
-+ : "0" (c), "ir" (a)
-+ : "cc");
-+
-+ old = atomic64_cmpxchg(v, c, new);
- if (likely(old == c))
- break;
- c = old;
- }
-- return c != (u);
-+ return c != u;
- }
-
- #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
-index 69358b5..9d0d492 100644
---- a/arch/sparc/include/asm/cache.h
-+++ b/arch/sparc/include/asm/cache.h
-@@ -7,10 +7,12 @@
- #ifndef _SPARC_CACHE_H
- #define _SPARC_CACHE_H
-
-+#include <linux/const.h>
-+
- #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-
- #define L1_CACHE_SHIFT 5
--#define L1_CACHE_BYTES 32
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #ifdef CONFIG_SPARC32
- #define SMP_CACHE_BYTES_SHIFT 5
-diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
-index 4269ca6..e3da77f 100644
---- a/arch/sparc/include/asm/elf_32.h
-+++ b/arch/sparc/include/asm/elf_32.h
-@@ -114,6 +114,13 @@ typedef struct {
-
- #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE 0x10000UL
-+
-+#define PAX_DELTA_MMAP_LEN 16
-+#define PAX_DELTA_STACK_LEN 16
-+#endif
-+
- /* This yields a mask that user programs can use to figure out what
- instruction set this cpu supports. This can NOT be done in userspace
- on Sparc. */
-diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
-index 7df8b7f..4946269 100644
---- a/arch/sparc/include/asm/elf_64.h
-+++ b/arch/sparc/include/asm/elf_64.h
-@@ -180,6 +180,13 @@ typedef struct {
- #define ELF_ET_DYN_BASE 0x0000010000000000UL
- #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
-+
-+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
-+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
-+#endif
-+
- extern unsigned long sparc64_elf_hwcap;
- #define ELF_HWCAP sparc64_elf_hwcap
-
-diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
-index 97a9047..290b0cd 100644
---- a/arch/sparc/include/asm/oplib_64.h
-+++ b/arch/sparc/include/asm/oplib_64.h
-@@ -62,7 +62,8 @@ struct linux_mem_p1275 {
- /* You must call prom_init() before using any of the library services,
- * preferably as early as possible. Pass it the romvec pointer.
- */
--extern void prom_init(void *cif_handler, void *cif_stack);
-+void prom_init(void *cif_handler);
-+void prom_init_report(void);
-
- /* Boot argument acquisition, returns the boot command line string. */
- extern char *prom_getbootargs(void);
-diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
-index 156707b..aefa786 100644
---- a/arch/sparc/include/asm/page_32.h
-+++ b/arch/sparc/include/asm/page_32.h
-@@ -8,6 +8,8 @@
- #ifndef _SPARC_PAGE_H
- #define _SPARC_PAGE_H
-
-+#include <linux/const.h>
-+
- #define PAGE_SHIFT 12
-
- #ifndef __ASSEMBLY__
-diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
-index ca2b344..c6084f89 100644
---- a/arch/sparc/include/asm/pgalloc_32.h
-+++ b/arch/sparc/include/asm/pgalloc_32.h
-@@ -37,6 +37,7 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
- BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
- #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
- #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
-+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
-
- BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
- #define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
-diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
-index 40b2d7a..22a665b 100644
---- a/arch/sparc/include/asm/pgalloc_64.h
-+++ b/arch/sparc/include/asm/pgalloc_64.h
-@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
- }
-
- #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
-+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
-
- static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
- {
-diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
-index 59ba6f6..4518128 100644
---- a/arch/sparc/include/asm/pgtable.h
-+++ b/arch/sparc/include/asm/pgtable.h
-@@ -5,4 +5,8 @@
- #else
- #include <asm/pgtable_32.h>
- #endif
-+
-+#define ktla_ktva(addr) (addr)
-+#define ktva_ktla(addr) (addr)
-+
- #endif
-diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
-index a790cc6..091ed94 100644
---- a/arch/sparc/include/asm/pgtable_32.h
-+++ b/arch/sparc/include/asm/pgtable_32.h
-@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
- BTFIXUPDEF_INT(page_none)
- BTFIXUPDEF_INT(page_copy)
- BTFIXUPDEF_INT(page_readonly)
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+BTFIXUPDEF_INT(page_shared_noexec)
-+BTFIXUPDEF_INT(page_copy_noexec)
-+BTFIXUPDEF_INT(page_readonly_noexec)
-+#endif
-+
- BTFIXUPDEF_INT(page_kernel)
-
- #define PMD_SHIFT SUN4C_PMD_SHIFT
-@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
- #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
- #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+extern pgprot_t PAGE_SHARED_NOEXEC;
-+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
-+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
-+#else
-+# define PAGE_SHARED_NOEXEC PAGE_SHARED
-+# define PAGE_COPY_NOEXEC PAGE_COPY
-+# define PAGE_READONLY_NOEXEC PAGE_READONLY
-+#endif
-+
- extern unsigned long page_kernel;
-
- #ifdef MODULE
-diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
-index f6ae2b2..b03ffc7 100644
---- a/arch/sparc/include/asm/pgtsrmmu.h
-+++ b/arch/sparc/include/asm/pgtsrmmu.h
-@@ -115,6 +115,13 @@
- SRMMU_EXEC | SRMMU_REF)
- #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
- SRMMU_EXEC | SRMMU_REF)
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
-+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
-+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
-+#endif
-+
- #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
- SRMMU_DIRTY | SRMMU_REF)
-
-diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
-index 64718ba..a7e4178 100644
---- a/arch/sparc/include/asm/setup.h
-+++ b/arch/sparc/include/asm/setup.h
-@@ -21,6 +21,10 @@ extern unsigned char boot_cpu_id;
- extern unsigned char boot_cpu_id4;
- #endif
-
-+#ifdef CONFIG_SPARC64
-+void __init start_early_boot(void);
-+#endif
-+
- #endif /* __KERNEL__ */
-
- #endif /* _SPARC_SETUP_H */
-diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
-index 9689176..63c18ea 100644
---- a/arch/sparc/include/asm/spinlock_64.h
-+++ b/arch/sparc/include/asm/spinlock_64.h
-@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
-
- /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-
--static void inline arch_read_lock(arch_rwlock_t *lock)
-+static inline void arch_read_lock(arch_rwlock_t *lock)
- {
- unsigned long tmp1, tmp2;
-
- __asm__ __volatile__ (
- "1: ldsw [%2], %0\n"
- " brlz,pn %0, 2f\n"
--"4: add %0, 1, %1\n"
-+"4: addcc %0, 1, %1\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" tvs %%icc, 6\n"
-+#endif
-+
- " cas [%2], %0, %1\n"
- " cmp %0, %1\n"
- " bne,pn %%icc, 1b\n"
-@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
- " .previous"
- : "=&r" (tmp1), "=&r" (tmp2)
- : "r" (lock)
-- : "memory");
-+ : "memory", "cc");
- }
-
--static int inline arch_read_trylock(arch_rwlock_t *lock)
-+static inline int arch_read_trylock(arch_rwlock_t *lock)
- {
- int tmp1, tmp2;
-
-@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
- "1: ldsw [%2], %0\n"
- " brlz,a,pn %0, 2f\n"
- " mov 0, %0\n"
--" add %0, 1, %1\n"
-+" addcc %0, 1, %1\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" tvs %%icc, 6\n"
-+#endif
-+
- " cas [%2], %0, %1\n"
- " cmp %0, %1\n"
- " bne,pn %%icc, 1b\n"
-@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
- return tmp1;
- }
-
--static void inline arch_read_unlock(arch_rwlock_t *lock)
-+static inline void arch_read_unlock(arch_rwlock_t *lock)
- {
- unsigned long tmp1, tmp2;
-
- __asm__ __volatile__(
- "1: lduw [%2], %0\n"
--" sub %0, 1, %1\n"
-+" subcc %0, 1, %1\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" tvs %%icc, 6\n"
-+#endif
-+
- " cas [%2], %0, %1\n"
- " cmp %0, %1\n"
- " bne,pn %%xcc, 1b\n"
-@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
- : "memory");
- }
-
--static void inline arch_write_lock(arch_rwlock_t *lock)
-+static inline void arch_write_lock(arch_rwlock_t *lock)
- {
- unsigned long mask, tmp1, tmp2;
-
-@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
- : "memory");
- }
-
--static void inline arch_write_unlock(arch_rwlock_t *lock)
-+static inline void arch_write_unlock(arch_rwlock_t *lock)
- {
- __asm__ __volatile__(
- " stw %%g0, [%0]"
-@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
- : "memory");
- }
-
--static int inline arch_write_trylock(arch_rwlock_t *lock)
-+static inline int arch_write_trylock(arch_rwlock_t *lock)
- {
- unsigned long mask, tmp1, tmp2, result;
-
-diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
-index fa57532..e1a4c53 100644
---- a/arch/sparc/include/asm/thread_info_32.h
-+++ b/arch/sparc/include/asm/thread_info_32.h
-@@ -50,6 +50,8 @@ struct thread_info {
- unsigned long w_saved;
-
- struct restart_block restart_block;
-+
-+ unsigned long lowest_stack;
- };
-
- /*
-diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
-index 60d86be..5e005d8 100644
---- a/arch/sparc/include/asm/thread_info_64.h
-+++ b/arch/sparc/include/asm/thread_info_64.h
-@@ -63,7 +63,10 @@ struct thread_info {
- struct pt_regs *kern_una_regs;
- unsigned int kern_una_insn;
-
-- unsigned long fpregs[0] __attribute__ ((aligned(64)));
-+ unsigned long lowest_stack;
-+
-+ unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
-+ __attribute__ ((aligned(64)));
- };
-
- #endif /* !(__ASSEMBLY__) */
-@@ -104,13 +107,15 @@ struct thread_info {
- #define FAULT_CODE_BLKCOMMIT 0x10 /* Use blk-commit ASI in copy_page */
-
- #if PAGE_SHIFT == 13
--#define THREAD_SIZE (2*PAGE_SIZE)
-+#define THREAD_ORDER 1
- #define THREAD_SHIFT (PAGE_SHIFT + 1)
- #else /* PAGE_SHIFT == 13 */
--#define THREAD_SIZE PAGE_SIZE
-+#define THREAD_ORDER 0
- #define THREAD_SHIFT PAGE_SHIFT
- #endif /* PAGE_SHIFT == 13 */
-
-+#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
-+
- #define PREEMPT_ACTIVE 0x10000000
-
- /*
-@@ -214,10 +219,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
- #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
- /* flag bit 6 is available */
- #define TIF_32BIT 7 /* 32-bit binary */
--/* flag bit 8 is available */
-+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
- #define TIF_SECCOMP 9 /* secure computing */
- #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
- #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
-+
- /* NOTE: Thread flags >= 12 should be ones we have no interest
- * in using in assembly, else we can't use the mask as
- * an immediate value in instructions such as andcc.
-@@ -238,12 +244,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
- #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
- #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
- #define _TIF_FREEZE (1<<TIF_FREEZE)
-+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
-
- #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
- _TIF_DO_NOTIFY_RESUME_MASK | \
- _TIF_NEED_RESCHED)
- #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
-
-+#define _TIF_WORK_SYSCALL \
-+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
-+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
-+
-+
- /*
- * Thread-synchronous status.
- *
-diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
-index e88fbe5..bd0eda7 100644
---- a/arch/sparc/include/asm/uaccess.h
-+++ b/arch/sparc/include/asm/uaccess.h
-@@ -1,5 +1,6 @@
- #ifndef ___ASM_SPARC_UACCESS_H
- #define ___ASM_SPARC_UACCESS_H
-+
- #if defined(__sparc__) && defined(__arch64__)
- #include <asm/uaccess_64.h>
- #else
-diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
-index 8303ac4..d2eec81 100644
---- a/arch/sparc/include/asm/uaccess_32.h
-+++ b/arch/sparc/include/asm/uaccess_32.h
-@@ -46,6 +46,7 @@
- #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
- #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
- #define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
-+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
- #define access_ok(type, addr, size) \
- ({ (void)(type); __access_ok((unsigned long)(addr), size); })
-
-@@ -249,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
-
- static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
- {
-- if (n && __access_ok((unsigned long) to, n))
-+ if ((long)n < 0)
-+ return n;
-+
-+ if (n && __access_ok((unsigned long) to, n)) {
-+ if (!__builtin_constant_p(n))
-+ check_object_size(from, n, true);
- return __copy_user(to, (__force void __user *) from, n);
-- else
-+ } else
- return n;
- }
-
- static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
- {
-+ if ((long)n < 0)
-+ return n;
-+
-+ if (!__builtin_constant_p(n))
-+ check_object_size(from, n, true);
-+
- return __copy_user(to, (__force void __user *) from, n);
- }
-
- static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
- {
-- if (n && __access_ok((unsigned long) from, n))
-+ if ((long)n < 0)
-+ return n;
-+
-+ if (n && __access_ok((unsigned long) from, n)) {
-+ if (!__builtin_constant_p(n))
-+ check_object_size(to, n, false);
- return __copy_user((__force void __user *) to, from, n);
-- else
-+ } else
- return n;
- }
-
- static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
- {
-+ if ((long)n < 0)
-+ return n;
-+
- return __copy_user((__force void __user *) to, from, n);
- }
-
-diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
-index 6d6c731..c55ac5e 100644
---- a/arch/sparc/include/asm/uaccess_64.h
-+++ b/arch/sparc/include/asm/uaccess_64.h
-@@ -10,6 +10,7 @@
- #include <linux/compiler.h>
- #include <linux/string.h>
- #include <linux/thread_info.h>
-+#include <linux/kernel.h>
- #include <asm/asi.h>
- #include <asm/system.h>
- #include <asm/spitfire.h>
-@@ -53,6 +54,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
- return 1;
- }
-
-+static inline int access_ok_noprefault(int type, const void __user * addr, unsigned long size)
-+{
-+ return 1;
-+}
-+
- static inline int access_ok(int type, const void __user * addr, unsigned long size)
- {
- return 1;
-@@ -213,8 +219,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
- static inline unsigned long __must_check
- copy_from_user(void *to, const void __user *from, unsigned long size)
- {
-- unsigned long ret = ___copy_from_user(to, from, size);
-+ unsigned long ret;
-
-+ if ((long)size < 0 || size > INT_MAX)
-+ return size;
-+
-+ if (!__builtin_constant_p(size))
-+ check_object_size(to, size, false);
-+
-+ ret = ___copy_from_user(to, from, size);
- if (unlikely(ret))
- ret = copy_from_user_fixup(to, from, size);
-
-@@ -230,8 +243,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
- static inline unsigned long __must_check
- copy_to_user(void __user *to, const void *from, unsigned long size)
- {
-- unsigned long ret = ___copy_to_user(to, from, size);
-+ unsigned long ret;
-
-+ if ((long)size < 0 || size > INT_MAX)
-+ return size;
-+
-+ if (!__builtin_constant_p(size))
-+ check_object_size(from, size, true);
-+
-+ ret = ___copy_to_user(to, from, size);
- if (unlikely(ret))
- ret = copy_to_user_fixup(to, from, size);
- return ret;
-diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
-index cb85458..e063f17 100644
---- a/arch/sparc/kernel/Makefile
-+++ b/arch/sparc/kernel/Makefile
-@@ -3,7 +3,7 @@
- #
-
- asflags-y := -ansi
--ccflags-y := -Werror
-+#ccflags-y := -Werror
-
- extra-y := head_$(BITS).o
- extra-y += init_task.o
-diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
-index 27728e1..0010e923 100644
---- a/arch/sparc/kernel/ds.c
-+++ b/arch/sparc/kernel/ds.c
-@@ -783,6 +783,16 @@ void ldom_set_var(const char *var, const char *value)
- char *base, *p;
- int msg_len, loops;
-
-+ if (strlen(var) + strlen(value) + 2 >
-+ sizeof(pkt) - sizeof(pkt.header)) {
-+ printk(KERN_ERR PFX
-+ "contents length: %zu, which more than max: %lu,"
-+ "so could not set (%s) variable to (%s).\n",
-+ strlen(var) + strlen(value) + 2,
-+ sizeof(pkt) - sizeof(pkt.header), var, value);
-+ return;
-+ }
-+
- memset(&pkt, 0, sizeof(pkt));
- pkt.header.data.tag.type = DS_DATA;
- pkt.header.data.handle = cp->handle;
-diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
-index 0c218e4..f8125bc 100644
---- a/arch/sparc/kernel/entry.h
-+++ b/arch/sparc/kernel/entry.h
-@@ -59,13 +59,10 @@ struct popc_6insn_patch_entry {
- extern struct popc_6insn_patch_entry __popc_6insn_patch,
- __popc_6insn_patch_end;
-
--extern void __init per_cpu_patch(void);
--extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
-- struct sun4v_1insn_patch_entry *);
--extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
-- struct sun4v_2insn_patch_entry *);
--extern void __init sun4v_patch(void);
--extern void __init boot_cpu_id_too_large(int cpu);
-+void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
-+ struct sun4v_1insn_patch_entry *);
-+void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
-+ struct sun4v_2insn_patch_entry *);
- extern unsigned int dcache_parity_tl1_occurred;
- extern unsigned int icache_parity_tl1_occurred;
-
-diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
-index 0d810c2..fec9fd6 100644
---- a/arch/sparc/kernel/head_64.S
-+++ b/arch/sparc/kernel/head_64.S
-@@ -629,14 +629,12 @@ tlb_fixup_done:
- sethi %hi(init_thread_union), %g6
- or %g6, %lo(init_thread_union), %g6
- ldx [%g6 + TI_TASK], %g4
-- mov %sp, %l6
-
- wr %g0, ASI_P, %asi
- mov 1, %g1
- sllx %g1, THREAD_SHIFT, %g1
- sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1
- add %g6, %g1, %sp
-- mov 0, %fp
-
- /* Set per-cpu pointer initially to zero, this makes
- * the boot-cpu use the in-kernel-image per-cpu areas
-@@ -663,44 +661,14 @@ tlb_fixup_done:
- nop
- #endif
-
-- mov %l6, %o1 ! OpenPROM stack
- call prom_init
- mov %l7, %o0 ! OpenPROM cif handler
-
-- /* Initialize current_thread_info()->cpu as early as possible.
-- * In order to do that accurately we have to patch up the get_cpuid()
-- * assembler sequences. And that, in turn, requires that we know
-- * if we are on a Starfire box or not. While we're here, patch up
-- * the sun4v sequences as well.
-+ /* To create a one-register-window buffer between the kernel's
-+ * initial stack and the last stack frame we use from the firmware,
-+ * do the rest of the boot from a C helper function.
- */
-- call check_if_starfire
-- nop
-- call per_cpu_patch
-- nop
-- call sun4v_patch
-- nop
--
--#ifdef CONFIG_SMP
-- call hard_smp_processor_id
-- nop
-- cmp %o0, NR_CPUS
-- blu,pt %xcc, 1f
-- nop
-- call boot_cpu_id_too_large
-- nop
-- /* Not reached... */
--
--1:
--#else
-- mov 0, %o0
--#endif
-- sth %o0, [%g6 + TI_CPU]
--
-- call prom_init_report
-- nop
--
-- /* Off we go.... */
-- call start_kernel
-+ call start_early_boot
- nop
- /* Not reached... */
-
-diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
-index 9365432..b69d224 100644
---- a/arch/sparc/kernel/hvtramp.S
-+++ b/arch/sparc/kernel/hvtramp.S
-@@ -111,7 +111,6 @@ hv_cpu_startup:
- sllx %g5, THREAD_SHIFT, %g5
- sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
- add %g6, %g5, %sp
-- mov 0, %fp
-
- call init_irqwork_curcpu
- nop
-diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
-index a19c8a0..d04a60b 100644
---- a/arch/sparc/kernel/leon_kernel.c
-+++ b/arch/sparc/kernel/leon_kernel.c
-@@ -53,11 +53,13 @@ static inline unsigned int leon_eirq_get(int cpu)
- static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
- {
- unsigned int eirq;
-+ struct irq_bucket *p;
- int cpu = sparc_leon3_cpuid();
-
- eirq = leon_eirq_get(cpu);
-- if ((eirq & 0x10) && irq_map[eirq]->irq) /* bit4 tells if IRQ happened */
-- generic_handle_irq(irq_map[eirq]->irq);
-+ p = irq_map[eirq];
-+ if ((eirq & 0x10) && p && p->irq) /* bit4 tells if IRQ happened */
-+ generic_handle_irq(p->irq);
- }
-
- /* The extended IRQ controller has been found, this function registers it */
-diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
-index f793742..4d880af 100644
---- a/arch/sparc/kernel/process_32.c
-+++ b/arch/sparc/kernel/process_32.c
-@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
- rw->ins[4], rw->ins[5],
- rw->ins[6],
- rw->ins[7]);
-- printk("%pS\n", (void *) rw->ins[7]);
-+ printk("%pA\n", (void *) rw->ins[7]);
- rw = (struct reg_window32 *) rw->ins[6];
- }
- spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
-@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
-
- printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
- r->psr, r->pc, r->npc, r->y, print_tainted());
-- printk("PC: <%pS>\n", (void *) r->pc);
-+ printk("PC: <%pA>\n", (void *) r->pc);
- printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
- r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
- r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
- printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
- r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
- r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
-- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
-+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
-
- printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
- rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
-@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
- rw = (struct reg_window32 *) fp;
- pc = rw->ins[7];
- printk("[%08lx : ", pc);
-- printk("%pS ] ", (void *) pc);
-+ printk("%pA ] ", (void *) pc);
- fp = rw->ins[6];
- } while (++count < 16);
- printk("\n");
-diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
-index 3739a06..48b2ff0 100644
---- a/arch/sparc/kernel/process_64.c
-+++ b/arch/sparc/kernel/process_64.c
-@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
- printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
- rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
- if (regs->tstate & TSTATE_PRIV)
-- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
-+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
- }
-
- void show_regs(struct pt_regs *regs)
- {
- printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
- regs->tpc, regs->tnpc, regs->y, print_tainted());
-- printk("TPC: <%pS>\n", (void *) regs->tpc);
-+ printk("TPC: <%pA>\n", (void *) regs->tpc);
- printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
- regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
- regs->u_regs[3]);
-@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
- printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
- regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
- regs->u_regs[15]);
-- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
-+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
- show_regwindow(regs);
- show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
- }
-@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
- ((tp && tp->task) ? tp->task->pid : -1));
-
- if (gp->tstate & TSTATE_PRIV) {
-- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
-+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
- (void *) gp->tpc,
- (void *) gp->o7,
- (void *) gp->i7,
-diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
-index 741df91..97cdf05 100644
---- a/arch/sparc/kernel/prom_common.c
-+++ b/arch/sparc/kernel/prom_common.c
-@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
-
- unsigned int prom_early_allocated __initdata;
-
--static struct of_pdt_ops prom_sparc_ops __initdata = {
-+static struct of_pdt_ops prom_sparc_ops __initconst = {
- .nextprop = prom_common_nextprop,
- .getproplen = prom_getproplen,
- .getproperty = prom_getproperty,
-diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
-index 96ee50a..68ce124 100644
---- a/arch/sparc/kernel/ptrace_64.c
-+++ b/arch/sparc/kernel/ptrace_64.c
-@@ -1058,6 +1058,10 @@ long arch_ptrace(struct task_struct *child, long request,
- return ret;
- }
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+extern void gr_delayed_cred_worker(void);
-+#endif
-+
- asmlinkage int syscall_trace_enter(struct pt_regs *regs)
- {
- int ret = 0;
-@@ -1065,6 +1069,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
- /* do the secure computing check first */
- secure_computing(regs->u_regs[UREG_G1]);
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
-+ gr_delayed_cred_worker();
-+#endif
-+
- if (test_thread_flag(TIF_SYSCALL_TRACE))
- ret = tracehook_report_syscall_entry(regs);
-
-@@ -1086,6 +1095,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
-
- asmlinkage void syscall_trace_leave(struct pt_regs *regs)
- {
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
-+ gr_delayed_cred_worker();
-+#endif
-+
- #ifdef CONFIG_AUDITSYSCALL
- if (unlikely(current->audit_context)) {
- unsigned long tstate = regs->tstate;
-diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
-index a854a1c..52488a5 100644
---- a/arch/sparc/kernel/setup_64.c
-+++ b/arch/sparc/kernel/setup_64.c
-@@ -30,6 +30,7 @@
- #include <linux/cpu.h>
- #include <linux/initrd.h>
- #include <linux/module.h>
-+#include <linux/start_kernel.h>
-
- #include <asm/system.h>
- #include <asm/io.h>
-@@ -174,7 +175,7 @@ char reboot_command[COMMAND_LINE_SIZE];
-
- static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
-
--void __init per_cpu_patch(void)
-+static void __init per_cpu_patch(void)
- {
- struct cpuid_patch_entry *p;
- unsigned long ver;
-@@ -266,7 +267,7 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
- }
- }
-
--void __init sun4v_patch(void)
-+static void __init sun4v_patch(void)
- {
- extern void sun4v_hvapi_init(void);
-
-@@ -316,14 +317,25 @@ static void __init popc_patch(void)
- }
- }
-
--#ifdef CONFIG_SMP
--void __init boot_cpu_id_too_large(int cpu)
-+void __init start_early_boot(void)
- {
-- prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
-- cpu, NR_CPUS);
-- prom_halt();
-+ int cpu;
-+
-+ check_if_starfire();
-+ per_cpu_patch();
-+ sun4v_patch();
-+
-+ cpu = hard_smp_processor_id();
-+ if (cpu >= NR_CPUS) {
-+ prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
-+ cpu, NR_CPUS);
-+ prom_halt();
-+ }
-+ current_thread_info()->cpu = cpu;
-+
-+ prom_init_report();
-+ start_kernel();
- }
--#endif
-
- /* On Ultra, we support all of the v8 capabilities. */
- unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
-diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
-index ffd1245..948b0b7 100644
---- a/arch/sparc/kernel/smp_64.c
-+++ b/arch/sparc/kernel/smp_64.c
-@@ -871,8 +871,8 @@ extern unsigned long xcall_flush_dcache_page_cheetah;
- extern unsigned long xcall_flush_dcache_page_spitfire;
-
- #ifdef CONFIG_DEBUG_DCFLUSH
--extern atomic_t dcpage_flushes;
--extern atomic_t dcpage_flushes_xcall;
-+extern atomic_unchecked_t dcpage_flushes;
-+extern atomic_unchecked_t dcpage_flushes_xcall;
- #endif
-
- static inline void __local_flush_dcache_page(struct page *page)
-@@ -896,7 +896,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
- return;
-
- #ifdef CONFIG_DEBUG_DCFLUSH
-- atomic_inc(&dcpage_flushes);
-+ atomic_inc_unchecked(&dcpage_flushes);
- #endif
-
- this_cpu = get_cpu();
-@@ -920,7 +920,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
- xcall_deliver(data0, __pa(pg_addr),
- (u64) pg_addr, cpumask_of(cpu));
- #ifdef CONFIG_DEBUG_DCFLUSH
-- atomic_inc(&dcpage_flushes_xcall);
-+ atomic_inc_unchecked(&dcpage_flushes_xcall);
- #endif
- }
- }
-@@ -939,7 +939,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
- preempt_disable();
-
- #ifdef CONFIG_DEBUG_DCFLUSH
-- atomic_inc(&dcpage_flushes);
-+ atomic_inc_unchecked(&dcpage_flushes);
- #endif
- data0 = 0;
- pg_addr = page_address(page);
-@@ -956,7 +956,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
- xcall_deliver(data0, __pa(pg_addr),
- (u64) pg_addr, cpu_online_mask);
- #ifdef CONFIG_DEBUG_DCFLUSH
-- atomic_inc(&dcpage_flushes_xcall);
-+ atomic_inc_unchecked(&dcpage_flushes_xcall);
- #endif
- }
- __local_flush_dcache_page(page);
-diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
-index 42b282f..408977c 100644
---- a/arch/sparc/kernel/sys_sparc_32.c
-+++ b/arch/sparc/kernel/sys_sparc_32.c
-@@ -39,6 +39,7 @@ asmlinkage unsigned long sys_getpagesize(void)
- unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
- {
- struct vm_area_struct * vmm;
-+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
-
- if (flags & MAP_FIXED) {
- /* We do not accept a shared mapping if it would violate
-@@ -56,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- if (ARCH_SUN4C && len > 0x20000000)
- return -ENOMEM;
- if (!addr)
-- addr = TASK_UNMAPPED_BASE;
-+ addr = current->mm->mmap_base;
-
- if (flags & MAP_SHARED)
- addr = COLOUR_ALIGN(addr);
-@@ -71,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- }
- if (TASK_SIZE - PAGE_SIZE - len < addr)
- return -ENOMEM;
-- if (!vmm || addr + len <= vmm->vm_start)
-+ if (check_heap_stack_gap(vmm, &addr, len, offset))
- return addr;
- addr = vmm->vm_end;
- if (flags & MAP_SHARED)
-diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
-index 5e4252b..379f84f 100644
---- a/arch/sparc/kernel/sys_sparc_64.c
-+++ b/arch/sparc/kernel/sys_sparc_64.c
-@@ -119,12 +119,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- unsigned long task_size = TASK_SIZE;
- unsigned long start_addr;
- int do_color_align;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
-
- if (flags & MAP_FIXED) {
- /* We do not accept a shared mapping if it would violate
- * cache aliasing constraints.
- */
-- if ((flags & MAP_SHARED) &&
-+ if ((filp || (flags & MAP_SHARED)) &&
- ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
- return -EINVAL;
- return addr;
-@@ -139,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- if (filp || (flags & MAP_SHARED))
- do_color_align = 1;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (addr) {
- if (do_color_align)
- addr = COLOUR_ALIGN(addr, pgoff);
-@@ -146,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma(mm, addr);
-- if (task_size - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (task_size - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
- return addr;
- }
-
- if (len > mm->cached_hole_size) {
-- start_addr = addr = mm->free_area_cache;
-+ start_addr = addr = mm->free_area_cache;
- } else {
-- start_addr = addr = TASK_UNMAPPED_BASE;
-+ start_addr = addr = mm->mmap_base;
- mm->cached_hole_size = 0;
- }
-
-@@ -174,14 +178,14 @@ full_search:
- vma = find_vma(mm, VA_EXCLUDE_END);
- }
- if (unlikely(task_size < addr)) {
-- if (start_addr != TASK_UNMAPPED_BASE) {
-- start_addr = addr = TASK_UNMAPPED_BASE;
-+ if (start_addr != mm->mmap_base) {
-+ start_addr = addr = mm->mmap_base;
- mm->cached_hole_size = 0;
- goto full_search;
- }
- return -ENOMEM;
- }
-- if (likely(!vma || addr + len <= vma->vm_start)) {
-+ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) {
- /*
- * Remember the place where we stopped the search:
- */
-@@ -207,6 +211,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- unsigned long task_size = STACK_TOP32;
- unsigned long addr = addr0;
- int do_color_align;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
-
- /* This should only ever run for 32-bit processes. */
- BUG_ON(!test_thread_flag(TIF_32BIT));
-@@ -215,7 +220,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- /* We do not accept a shared mapping if it would violate
- * cache aliasing constraints.
- */
-- if ((flags & MAP_SHARED) &&
-+ if ((filp || (flags & MAP_SHARED)) &&
- ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
- return -EINVAL;
- return addr;
-@@ -236,8 +241,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma(mm, addr);
-- if (task_size - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (task_size - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
- return addr;
- }
-
-@@ -257,28 +261,29 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
-
- /* make sure it can fit in the remaining address space */
- if (likely(addr > len)) {
-- vma = find_vma(mm, addr-len);
-- if (!vma || addr <= vma->vm_start) {
-+ addr -= len;
-+ vma = find_vma(mm, addr);
-+ if (check_heap_stack_gap(vma, &addr, len, offset)) {
- /* remember the address as a hint for next time */
-- return (mm->free_area_cache = addr-len);
-+ return (mm->free_area_cache = addr);
- }
- }
-
- if (unlikely(mm->mmap_base < len))
- goto bottomup;
-
-- addr = mm->mmap_base-len;
-- if (do_color_align)
-- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-+ addr = mm->mmap_base - len;
-
- do {
-+ if (do_color_align)
-+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
- /*
- * Lookup failure means no vma is above this address,
- * else if new region fits below vma->vm_start,
- * return with success:
- */
- vma = find_vma(mm, addr);
-- if (likely(!vma || addr+len <= vma->vm_start)) {
-+ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) {
- /* remember the address as a hint for next time */
- return (mm->free_area_cache = addr);
- }
-@@ -288,10 +293,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- mm->cached_hole_size = vma->vm_start - addr;
-
- /* try just below the current vma->vm_start */
-- addr = vma->vm_start-len;
-- if (do_color_align)
-- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-- } while (likely(len < vma->vm_start));
-+ addr = skip_heap_stack_gap(vma, len, offset);
-+ } while (!IS_ERR_VALUE(addr));
-
- bottomup:
- /*
-@@ -361,10 +364,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
- EXPORT_SYMBOL(get_fb_unmapped_area);
-
- /* Essentially the same as PowerPC. */
--static unsigned long mmap_rnd(void)
-+static unsigned long mmap_rnd(struct mm_struct *mm)
- {
- unsigned long rnd = 0UL;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (current->flags & PF_RANDOMIZE) {
- unsigned long val = get_random_int();
- if (test_thread_flag(TIF_32BIT))
-@@ -377,7 +384,7 @@ static unsigned long mmap_rnd(void)
-
- void arch_pick_mmap_layout(struct mm_struct *mm)
- {
-- unsigned long random_factor = mmap_rnd();
-+ unsigned long random_factor = mmap_rnd(mm);
- unsigned long gap;
-
- /*
-@@ -390,6 +397,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- gap == RLIM_INFINITY ||
- sysctl_legacy_va_layout) {
- mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
- } else {
-@@ -402,6 +415,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- gap = (task_size / 6 * 5);
-
- mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- mm->unmap_area = arch_unmap_area_topdown;
- }
-diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
-index 557212c..2cc50b0 100644
---- a/arch/sparc/kernel/syscalls.S
-+++ b/arch/sparc/kernel/syscalls.S
-@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
- #endif
- .align 32
- 1: ldx [%g6 + TI_FLAGS], %l5
-- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
-+ andcc %l5, _TIF_WORK_SYSCALL, %g0
- be,pt %icc, rtrap
- nop
- call syscall_trace_leave
-@@ -179,7 +179,7 @@ linux_sparc_syscall32:
-
- srl %i3, 0, %o3 ! IEU0
- srl %i2, 0, %o2 ! IEU0 Group
-- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
-+ andcc %l0, _TIF_WORK_SYSCALL, %g0
- bne,pn %icc, linux_syscall_trace32 ! CTI
- mov %i0, %l5 ! IEU1
- 5: call %l7 ! CTI Group brk forced
-@@ -203,7 +203,7 @@ linux_sparc_syscall:
-
- mov %i3, %o3 ! IEU1
- mov %i4, %o4 ! IEU0 Group
-- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
-+ andcc %l0, _TIF_WORK_SYSCALL, %g0
- bne,pn %icc, linux_syscall_trace ! CTI Group
- mov %i0, %l5 ! IEU0
- 2: call %l7 ! CTI Group brk forced
-@@ -218,7 +218,7 @@ ret_sys_call:
-
- cmp %o0, -ERESTART_RESTARTBLOCK
- bgeu,pn %xcc, 1f
-- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
-+ andcc %l0, _TIF_WORK_SYSCALL, %g0
- ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
-
- 2:
-diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
-index 7408201..b349841 100644
---- a/arch/sparc/kernel/sysfs.c
-+++ b/arch/sparc/kernel/sysfs.c
-@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
-+static struct notifier_block sysfs_cpu_nb = {
- .notifier_call = sysfs_cpu_notify,
- };
-
-diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
-index 8fa84a3..3fc8ad5 100644
---- a/arch/sparc/kernel/trampoline_64.S
-+++ b/arch/sparc/kernel/trampoline_64.S
-@@ -112,10 +112,13 @@ startup_continue:
- brnz,pn %g1, 1b
- nop
-
-- sethi %hi(p1275buf), %g2
-- or %g2, %lo(p1275buf), %g2
-- ldx [%g2 + 0x10], %l2
-- add %l2, -(192 + 128), %sp
-+ /* Get onto temporary stack which will be in the locked
-+ * kernel image.
-+ */
-+ sethi %hi(tramp_stack), %g1
-+ or %g1, %lo(tramp_stack), %g1
-+ add %g1, TRAMP_STACK_SIZE, %g1
-+ sub %g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
- flushw
-
- /* Setup the loop variables:
-@@ -397,7 +400,6 @@ after_lock_tlb:
- sllx %g5, THREAD_SHIFT, %g5
- sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
- add %g6, %g5, %sp
-- mov 0, %fp
-
- rdpr %pstate, %o1
- or %o1, PSTATE_IE, %o1
-diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
-index 591f20c..0f1b925 100644
---- a/arch/sparc/kernel/traps_32.c
-+++ b/arch/sparc/kernel/traps_32.c
-@@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
- #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
- #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
-
-+extern void gr_handle_kernel_exploit(void);
-+
- void die_if_kernel(char *str, struct pt_regs *regs)
- {
- static int die_counter;
-@@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
- count++ < 30 &&
- (((unsigned long) rw) >= PAGE_OFFSET) &&
- !(((unsigned long) rw) & 0x7)) {
-- printk("Caller[%08lx]: %pS\n", rw->ins[7],
-+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
- (void *) rw->ins[7]);
- rw = (struct reg_window32 *)rw->ins[6];
- }
- }
- printk("Instruction DUMP:");
- instruction_dump ((unsigned long *) regs->pc);
-- if(regs->psr & PSR_PS)
-+ if(regs->psr & PSR_PS) {
-+ gr_handle_kernel_exploit();
- do_exit(SIGKILL);
-+ }
- do_exit(SIGSEGV);
- }
-
-diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
-index 0cbdaa4..f37a97c 100644
---- a/arch/sparc/kernel/traps_64.c
-+++ b/arch/sparc/kernel/traps_64.c
-@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
- i + 1,
- p->trapstack[i].tstate, p->trapstack[i].tpc,
- p->trapstack[i].tnpc, p->trapstack[i].tt);
-- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
-+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
- }
- }
-
-@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
-
- lvl -= 0x100;
- if (regs->tstate & TSTATE_PRIV) {
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ if (lvl == 6)
-+ pax_report_refcount_overflow(regs);
-+#endif
-+
- sprintf(buffer, "Kernel bad sw trap %lx", lvl);
- die_if_kernel(buffer, regs);
- }
-@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
- void bad_trap_tl1(struct pt_regs *regs, long lvl)
- {
- char buffer[32];
--
-+
- if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
- 0, lvl, SIGTRAP) == NOTIFY_STOP)
- return;
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+ if (lvl == 6)
-+ pax_report_refcount_overflow(regs);
-+#endif
-+
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
-
- sprintf (buffer, "Bad trap %lx at tl>0", lvl);
-@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
- regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
- printk("%s" "ERROR(%d): ",
- (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
-- printk("TPC<%pS>\n", (void *) regs->tpc);
-+ printk("TPC<%pA>\n", (void *) regs->tpc);
- printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
- (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
- (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
-@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
- smp_processor_id(),
- (type & 0x1) ? 'I' : 'D',
- regs->tpc);
-- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
-+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
- panic("Irrecoverable Cheetah+ parity error.");
- }
-
-@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
- smp_processor_id(),
- (type & 0x1) ? 'I' : 'D',
- regs->tpc);
-- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
-+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
- }
-
- struct sun4v_error_entry {
-@@ -1786,8 +1797,8 @@ struct sun4v_error_entry {
- u16 err_pad;
- };
-
--static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
--static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
-+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
-+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
-
- static const char *sun4v_err_type_to_str(u32 type)
- {
-@@ -1807,7 +1818,7 @@ static const char *sun4v_err_type_to_str(u32 type)
- }
- }
-
--static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
-+static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_unchecked_t *ocnt)
- {
- int cnt;
-
-@@ -1842,8 +1853,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
-
- show_regs(regs);
-
-- if ((cnt = atomic_read(ocnt)) != 0) {
-- atomic_set(ocnt, 0);
-+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
-+ atomic_set_unchecked(ocnt, 0);
- wmb();
- printk("%s: Queue overflowed %d times.\n",
- pfx, cnt);
-@@ -1895,7 +1906,7 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
- */
- void sun4v_resum_overflow(struct pt_regs *regs)
- {
-- atomic_inc(&sun4v_resum_oflow_cnt);
-+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
- }
-
- /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
-@@ -1948,7 +1959,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
- /* XXX Actually even this can make not that much sense. Perhaps
- * XXX we should just pull the plug and panic directly from here?
- */
-- atomic_inc(&sun4v_nonresum_oflow_cnt);
-+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
- }
-
- unsigned long sun4v_err_itlb_vaddr;
-@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
-
- printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
- regs->tpc, tl);
-- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
-+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
- printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
-- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
-+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
- (void *) regs->u_regs[UREG_I7]);
- printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
- "pte[%lx] error[%lx]\n",
-@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
-
- printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
- regs->tpc, tl);
-- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
-+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
- printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
-- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
-+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
- (void *) regs->u_regs[UREG_I7]);
- printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
- "pte[%lx] error[%lx]\n",
-@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
- fp = (unsigned long)sf->fp + STACK_BIAS;
- }
-
-- printk(" [%016lx] %pS\n", pc, (void *) pc);
-+ printk(" [%016lx] %pA\n", pc, (void *) pc);
- #ifdef CONFIG_FUNCTION_GRAPH_TRACER
- if ((pc + 8UL) == (unsigned long) &return_to_handler) {
- int index = tsk->curr_ret_stack;
- if (tsk->ret_stack && index >= graph) {
- pc = tsk->ret_stack[index - graph].ret;
-- printk(" [%016lx] %pS\n", pc, (void *) pc);
-+ printk(" [%016lx] %pA\n", pc, (void *) pc);
- graph++;
- }
- }
-@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
- return (struct reg_window *) (fp + STACK_BIAS);
- }
-
-+extern void gr_handle_kernel_exploit(void);
-+
- void die_if_kernel(char *str, struct pt_regs *regs)
- {
- static int die_counter;
-@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
- while (rw &&
- count++ < 30 &&
- kstack_valid(tp, (unsigned long) rw)) {
-- printk("Caller[%016lx]: %pS\n", rw->ins[7],
-+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
- (void *) rw->ins[7]);
-
- rw = kernel_stack_up(rw);
-@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
- }
- user_instruction_dump ((unsigned int __user *) regs->tpc);
- }
-- if (regs->tstate & TSTATE_PRIV)
-+ if (regs->tstate & TSTATE_PRIV) {
-+ gr_handle_kernel_exploit();
- do_exit(SIGKILL);
-+ }
- do_exit(SIGSEGV);
- }
- EXPORT_SYMBOL(die_if_kernel);
-diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
-index 1c90812..439d7e8 100644
---- a/arch/sparc/kernel/unaligned_64.c
-+++ b/arch/sparc/kernel/unaligned_64.c
-@@ -285,7 +285,7 @@ static void log_unaligned(struct pt_regs *regs)
- static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
-
- if (__ratelimit(&ratelimit)) {
-- printk("Kernel unaligned access at TPC[%lx] %pS\n",
-+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
- regs->tpc, (void *) regs->tpc);
- }
- }
-diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c
-index eb1624b..55100de 100644
---- a/arch/sparc/kernel/us3_cpufreq.c
-+++ b/arch/sparc/kernel/us3_cpufreq.c
-@@ -18,14 +18,12 @@
- #include <asm/head.h>
- #include <asm/timer.h>
-
--static struct cpufreq_driver *cpufreq_us3_driver;
--
- struct us3_freq_percpu_info {
- struct cpufreq_frequency_table table[4];
- };
-
- /* Indexed by cpu number. */
--static struct us3_freq_percpu_info *us3_freq_table;
-+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
-
- /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
- * in the Safari config register.
-@@ -191,12 +189,25 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
-
- static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
- {
-- if (cpufreq_us3_driver)
-- us3_set_cpu_divider_index(policy->cpu, 0);
-+ us3_set_cpu_divider_index(policy->cpu, 0);
-
- return 0;
- }
-
-+static int __init us3_freq_init(void);
-+static void __exit us3_freq_exit(void);
-+
-+static struct cpufreq_driver cpufreq_us3_driver = {
-+ .init = us3_freq_cpu_init,
-+ .verify = us3_freq_verify,
-+ .target = us3_freq_target,
-+ .get = us3_freq_get,
-+ .exit = us3_freq_cpu_exit,
-+ .owner = THIS_MODULE,
-+ .name = "UltraSPARC-III",
-+
-+};
-+
- static int __init us3_freq_init(void)
- {
- unsigned long manuf, impl, ver;
-@@ -213,57 +224,15 @@ static int __init us3_freq_init(void)
- (impl == CHEETAH_IMPL ||
- impl == CHEETAH_PLUS_IMPL ||
- impl == JAGUAR_IMPL ||
-- impl == PANTHER_IMPL)) {
-- struct cpufreq_driver *driver;
--
-- ret = -ENOMEM;
-- driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
-- if (!driver)
-- goto err_out;
--
-- us3_freq_table = kzalloc(
-- (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
-- GFP_KERNEL);
-- if (!us3_freq_table)
-- goto err_out;
--
-- driver->init = us3_freq_cpu_init;
-- driver->verify = us3_freq_verify;
-- driver->target = us3_freq_target;
-- driver->get = us3_freq_get;
-- driver->exit = us3_freq_cpu_exit;
-- driver->owner = THIS_MODULE,
-- strcpy(driver->name, "UltraSPARC-III");
--
-- cpufreq_us3_driver = driver;
-- ret = cpufreq_register_driver(driver);
-- if (ret)
-- goto err_out;
--
-- return 0;
--
--err_out:
-- if (driver) {
-- kfree(driver);
-- cpufreq_us3_driver = NULL;
-- }
-- kfree(us3_freq_table);
-- us3_freq_table = NULL;
-- return ret;
-- }
-+ impl == PANTHER_IMPL))
-+ return cpufreq_register_driver(&cpufreq_us3_driver);
-
- return -ENODEV;
- }
-
- static void __exit us3_freq_exit(void)
- {
-- if (cpufreq_us3_driver) {
-- cpufreq_unregister_driver(cpufreq_us3_driver);
-- kfree(cpufreq_us3_driver);
-- cpufreq_us3_driver = NULL;
-- kfree(us3_freq_table);
-- us3_freq_table = NULL;
-- }
-+ cpufreq_unregister_driver(&cpufreq_us3_driver);
- }
-
- MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
-diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
-index 4961516..f82ff86 100644
---- a/arch/sparc/lib/Makefile
-+++ b/arch/sparc/lib/Makefile
-@@ -2,7 +2,7 @@
- #
-
- asflags-y := -ansi -DST_DIV0=0x02
--ccflags-y := -Werror
-+#ccflags-y := -Werror
-
- lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
- lib-$(CONFIG_SPARC32) += memcpy.o memset.o
-diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
-index 59186e0..f747d7a 100644
---- a/arch/sparc/lib/atomic_64.S
-+++ b/arch/sparc/lib/atomic_64.S
-@@ -18,7 +18,12 @@
- atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
- 1: lduw [%o1], %g1
-- add %g1, %o0, %g7
-+ addcc %g1, %o0, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %icc, 6
-+#endif
-+
- cas [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %icc, BACKOFF_LABEL(2f, 1b)
-@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
- 2: BACKOFF_SPIN(%o2, %o3, 1b)
- .size atomic_add, .-atomic_add
-
-+ .globl atomic_add_unchecked
-+ .type atomic_add_unchecked,#function
-+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
-+ BACKOFF_SETUP(%o2)
-+1: lduw [%o1], %g1
-+ add %g1, %o0, %g7
-+ cas [%o1], %g1, %g7
-+ cmp %g1, %g7
-+ bne,pn %icc, 2f
-+ nop
-+ retl
-+ nop
-+2: BACKOFF_SPIN(%o2, %o3, 1b)
-+ .size atomic_add_unchecked, .-atomic_add_unchecked
-+
- .globl atomic_sub
- .type atomic_sub,#function
- atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
- 1: lduw [%o1], %g1
-- sub %g1, %o0, %g7
-+ subcc %g1, %o0, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %icc, 6
-+#endif
-+
- cas [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %icc, BACKOFF_LABEL(2f, 1b)
-@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
- 2: BACKOFF_SPIN(%o2, %o3, 1b)
- .size atomic_sub, .-atomic_sub
-
-+ .globl atomic_sub_unchecked
-+ .type atomic_sub_unchecked,#function
-+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
-+ BACKOFF_SETUP(%o2)
-+1: lduw [%o1], %g1
-+ sub %g1, %o0, %g7
-+ cas [%o1], %g1, %g7
-+ cmp %g1, %g7
-+ bne,pn %icc, 2f
-+ nop
-+ retl
-+ nop
-+2: BACKOFF_SPIN(%o2, %o3, 1b)
-+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
-+
- .globl atomic_add_ret
- .type atomic_add_ret,#function
- atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
- 1: lduw [%o1], %g1
-- add %g1, %o0, %g7
-+ addcc %g1, %o0, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %icc, 6
-+#endif
-+
- cas [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %icc, BACKOFF_LABEL(2f, 1b)
-@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
- 2: BACKOFF_SPIN(%o2, %o3, 1b)
- .size atomic_add_ret, .-atomic_add_ret
-
-+ .globl atomic_add_ret_unchecked
-+ .type atomic_add_ret_unchecked,#function
-+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
-+ BACKOFF_SETUP(%o2)
-+1: lduw [%o1], %g1
-+ addcc %g1, %o0, %g7
-+ cas [%o1], %g1, %g7
-+ cmp %g1, %g7
-+ bne,pn %icc, 2f
-+ add %g7, %o0, %g7
-+ sra %g7, 0, %o0
-+ retl
-+ nop
-+2: BACKOFF_SPIN(%o2, %o3, 1b)
-+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
-+
- .globl atomic_sub_ret
- .type atomic_sub_ret,#function
- atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
- 1: lduw [%o1], %g1
-- sub %g1, %o0, %g7
-+ subcc %g1, %o0, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %icc, 6
-+#endif
-+
- cas [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %icc, BACKOFF_LABEL(2f, 1b)
-@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
- atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
- 1: ldx [%o1], %g1
-- add %g1, %o0, %g7
-+ addcc %g1, %o0, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %xcc, 6
-+#endif
-+
- casx [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
-@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
- 2: BACKOFF_SPIN(%o2, %o3, 1b)
- .size atomic64_add, .-atomic64_add
-
-+ .globl atomic64_add_unchecked
-+ .type atomic64_add_unchecked,#function
-+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
-+ BACKOFF_SETUP(%o2)
-+1: ldx [%o1], %g1
-+ addcc %g1, %o0, %g7
-+ casx [%o1], %g1, %g7
-+ cmp %g1, %g7
-+ bne,pn %xcc, 2f
-+ nop
-+ retl
-+ nop
-+2: BACKOFF_SPIN(%o2, %o3, 1b)
-+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
-+
- .globl atomic64_sub
- .type atomic64_sub,#function
- atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
- 1: ldx [%o1], %g1
-- sub %g1, %o0, %g7
-+ subcc %g1, %o0, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %xcc, 6
-+#endif
-+
- casx [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
-@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
- 2: BACKOFF_SPIN(%o2, %o3, 1b)
- .size atomic64_sub, .-atomic64_sub
-
-+ .globl atomic64_sub_unchecked
-+ .type atomic64_sub_unchecked,#function
-+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
-+ BACKOFF_SETUP(%o2)
-+1: ldx [%o1], %g1
-+ subcc %g1, %o0, %g7
-+ casx [%o1], %g1, %g7
-+ cmp %g1, %g7
-+ bne,pn %xcc, 2f
-+ nop
-+ retl
-+ nop
-+2: BACKOFF_SPIN(%o2, %o3, 1b)
-+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
-+
- .globl atomic64_add_ret
- .type atomic64_add_ret,#function
- atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
- 1: ldx [%o1], %g1
-- add %g1, %o0, %g7
-+ addcc %g1, %o0, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %xcc, 6
-+#endif
-+
- casx [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
-@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
- 2: BACKOFF_SPIN(%o2, %o3, 1b)
- .size atomic64_add_ret, .-atomic64_add_ret
-
-+ .globl atomic64_add_ret_unchecked
-+ .type atomic64_add_ret_unchecked,#function
-+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
-+ BACKOFF_SETUP(%o2)
-+1: ldx [%o1], %g1
-+ addcc %g1, %o0, %g7
-+ casx [%o1], %g1, %g7
-+ cmp %g1, %g7
-+ bne,pn %xcc, 2f
-+ add %g7, %o0, %g7
-+ mov %g7, %o0
-+ retl
-+ nop
-+2: BACKOFF_SPIN(%o2, %o3, 1b)
-+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
-+
- .globl atomic64_sub_ret
- .type atomic64_sub_ret,#function
- atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
- 1: ldx [%o1], %g1
-- sub %g1, %o0, %g7
-+ subcc %g1, %o0, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %xcc, 6
-+#endif
-+
- casx [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
-diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
-index fbb8005..984a269 100644
---- a/arch/sparc/lib/ksyms.c
-+++ b/arch/sparc/lib/ksyms.c
-@@ -133,12 +133,18 @@ EXPORT_SYMBOL(__clear_user);
-
- /* Atomic counter implementation. */
- EXPORT_SYMBOL(atomic_add);
-+EXPORT_SYMBOL(atomic_add_unchecked);
- EXPORT_SYMBOL(atomic_add_ret);
-+EXPORT_SYMBOL(atomic_add_ret_unchecked);
- EXPORT_SYMBOL(atomic_sub);
-+EXPORT_SYMBOL(atomic_sub_unchecked);
- EXPORT_SYMBOL(atomic_sub_ret);
- EXPORT_SYMBOL(atomic64_add);
-+EXPORT_SYMBOL(atomic64_add_unchecked);
- EXPORT_SYMBOL(atomic64_add_ret);
-+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
- EXPORT_SYMBOL(atomic64_sub);
-+EXPORT_SYMBOL(atomic64_sub_unchecked);
- EXPORT_SYMBOL(atomic64_sub_ret);
-
- /* Atomic bit operations. */
-diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
-index 301421c1..e2535d1 100644
---- a/arch/sparc/mm/Makefile
-+++ b/arch/sparc/mm/Makefile
-@@ -2,7 +2,7 @@
- #
-
- asflags-y := -ansi
--ccflags-y := -Werror
-+#ccflags-y := -Werror
-
- obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
- obj-y += fault_$(BITS).o
-diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
-index 802b806..483d6e9 100644
---- a/arch/sparc/mm/fault_32.c
-+++ b/arch/sparc/mm/fault_32.c
-@@ -21,6 +21,9 @@
- #include <linux/perf_event.h>
- #include <linux/interrupt.h>
- #include <linux/kdebug.h>
-+#include <linux/slab.h>
-+#include <linux/pagemap.h>
-+#include <linux/compiler.h>
-
- #include <asm/system.h>
- #include <asm/page.h>
-@@ -208,6 +211,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
- return safe_compute_effective_address(regs, insn);
- }
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+#ifdef CONFIG_PAX_DLRESOLVE
-+static void pax_emuplt_close(struct vm_area_struct *vma)
-+{
-+ vma->vm_mm->call_dl_resolve = 0UL;
-+}
-+
-+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-+{
-+ unsigned int *kaddr;
-+
-+ vmf->page = alloc_page(GFP_HIGHUSER);
-+ if (!vmf->page)
-+ return VM_FAULT_OOM;
-+
-+ kaddr = kmap(vmf->page);
-+ memset(kaddr, 0, PAGE_SIZE);
-+ kaddr[0] = 0x9DE3BFA8U; /* save */
-+ flush_dcache_page(vmf->page);
-+ kunmap(vmf->page);
-+ return VM_FAULT_MAJOR;
-+}
-+
-+static const struct vm_operations_struct pax_vm_ops = {
-+ .close = pax_emuplt_close,
-+ .fault = pax_emuplt_fault
-+};
-+
-+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
-+{
-+ int ret;
-+
-+ INIT_LIST_HEAD(&vma->anon_vma_chain);
-+ vma->vm_mm = current->mm;
-+ vma->vm_start = addr;
-+ vma->vm_end = addr + PAGE_SIZE;
-+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
-+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-+ vma->vm_ops = &pax_vm_ops;
-+
-+ ret = insert_vm_struct(current->mm, vma);
-+ if (ret)
-+ return ret;
-+
-+ ++current->mm->total_vm;
-+ return 0;
-+}
-+#endif
-+
-+/*
-+ * PaX: decide what to do with offenders (regs->pc = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ * 2 when patched PLT trampoline was detected
-+ * 3 when unpatched PLT trampoline was detected
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+ int err;
-+
-+ do { /* PaX: patched PLT emulation #1 */
-+ unsigned int sethi1, sethi2, jmpl;
-+
-+ err = get_user(sethi1, (unsigned int *)regs->pc);
-+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
-+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
-+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
-+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
-+ {
-+ unsigned int addr;
-+
-+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
-+ addr = regs->u_regs[UREG_G1];
-+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
-+ regs->pc = addr;
-+ regs->npc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: patched PLT emulation #2 */
-+ unsigned int ba;
-+
-+ err = get_user(ba, (unsigned int *)regs->pc);
-+
-+ if (err)
-+ break;
-+
-+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
-+ unsigned int addr;
-+
-+ if ((ba & 0xFFC00000U) == 0x30800000U)
-+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
-+ else
-+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
-+ regs->pc = addr;
-+ regs->npc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: patched PLT emulation #3 */
-+ unsigned int sethi, bajmpl, nop;
-+
-+ err = get_user(sethi, (unsigned int *)regs->pc);
-+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
-+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
-+ nop == 0x01000000U)
-+ {
-+ unsigned int addr;
-+
-+ addr = (sethi & 0x003FFFFFU) << 10;
-+ regs->u_regs[UREG_G1] = addr;
-+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
-+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
-+ else
-+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
-+ regs->pc = addr;
-+ regs->npc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: unpatched PLT emulation step 1 */
-+ unsigned int sethi, ba, nop;
-+
-+ err = get_user(sethi, (unsigned int *)regs->pc);
-+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
-+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
-+ nop == 0x01000000U)
-+ {
-+ unsigned int addr, save, call;
-+
-+ if ((ba & 0xFFC00000U) == 0x30800000U)
-+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
-+ else
-+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
-+
-+ err = get_user(save, (unsigned int *)addr);
-+ err |= get_user(call, (unsigned int *)(addr+4));
-+ err |= get_user(nop, (unsigned int *)(addr+8));
-+ if (err)
-+ break;
-+
-+#ifdef CONFIG_PAX_DLRESOLVE
-+ if (save == 0x9DE3BFA8U &&
-+ (call & 0xC0000000U) == 0x40000000U &&
-+ nop == 0x01000000U)
-+ {
-+ struct vm_area_struct *vma;
-+ unsigned long call_dl_resolve;
-+
-+ down_read(&current->mm->mmap_sem);
-+ call_dl_resolve = current->mm->call_dl_resolve;
-+ up_read(&current->mm->mmap_sem);
-+ if (likely(call_dl_resolve))
-+ goto emulate;
-+
-+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-+
-+ down_write(&current->mm->mmap_sem);
-+ if (current->mm->call_dl_resolve) {
-+ call_dl_resolve = current->mm->call_dl_resolve;
-+ up_write(&current->mm->mmap_sem);
-+ if (vma)
-+ kmem_cache_free(vm_area_cachep, vma);
-+ goto emulate;
-+ }
-+
-+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
-+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
-+ up_write(&current->mm->mmap_sem);
-+ if (vma)
-+ kmem_cache_free(vm_area_cachep, vma);
-+ return 1;
-+ }
-+
-+ if (pax_insert_vma(vma, call_dl_resolve)) {
-+ up_write(&current->mm->mmap_sem);
-+ kmem_cache_free(vm_area_cachep, vma);
-+ return 1;
-+ }
-+
-+ current->mm->call_dl_resolve = call_dl_resolve;
-+ up_write(&current->mm->mmap_sem);
-+
-+emulate:
-+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
-+ regs->pc = call_dl_resolve;
-+ regs->npc = addr+4;
-+ return 3;
-+ }
-+#endif
-+
-+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
-+ if ((save & 0xFFC00000U) == 0x05000000U &&
-+ (call & 0xFFFFE000U) == 0x85C0A000U &&
-+ nop == 0x01000000U)
-+ {
-+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
-+ regs->u_regs[UREG_G2] = addr + 4;
-+ addr = (save & 0x003FFFFFU) << 10;
-+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
-+ regs->pc = addr;
-+ regs->npc = addr+4;
-+ return 3;
-+ }
-+ }
-+ } while (0);
-+
-+ do { /* PaX: unpatched PLT emulation step 2 */
-+ unsigned int save, call, nop;
-+
-+ err = get_user(save, (unsigned int *)(regs->pc-4));
-+ err |= get_user(call, (unsigned int *)regs->pc);
-+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
-+ if (err)
-+ break;
-+
-+ if (save == 0x9DE3BFA8U &&
-+ (call & 0xC0000000U) == 0x40000000U &&
-+ nop == 0x01000000U)
-+ {
-+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
-+
-+ regs->u_regs[UREG_RETPC] = regs->pc;
-+ regs->pc = dl_resolve;
-+ regs->npc = dl_resolve+4;
-+ return 3;
-+ }
-+ } while (0);
-+#endif
-+
-+ return 1;
-+}
-+
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ unsigned long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 8; i++) {
-+ unsigned int c;
-+ if (get_user(c, (unsigned int *)pc+i))
-+ printk(KERN_CONT "???????? ");
-+ else
-+ printk(KERN_CONT "%08x ", c);
-+ }
-+ printk("\n");
-+}
-+#endif
-+
- static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
- int text_fault)
- {
-@@ -280,6 +554,24 @@ good_area:
- if(!(vma->vm_flags & VM_WRITE))
- goto bad_area;
- } else {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
-+ up_read(&mm->mmap_sem);
-+ switch (pax_handle_fetch_fault(regs)) {
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+ case 2:
-+ case 3:
-+ return;
-+#endif
-+
-+ }
-+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
-+ do_group_exit(SIGKILL);
-+ }
-+#endif
-+
- /* Allow reads even for write-only mappings */
- if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
- goto bad_area;
-diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
-index bfd7c02..6e941d8 100644
---- a/arch/sparc/mm/fault_64.c
-+++ b/arch/sparc/mm/fault_64.c
-@@ -21,6 +21,9 @@
- #include <linux/kprobes.h>
- #include <linux/kdebug.h>
- #include <linux/percpu.h>
-+#include <linux/slab.h>
-+#include <linux/pagemap.h>
-+#include <linux/compiler.h>
-
- #include <asm/page.h>
- #include <asm/pgtable.h>
-@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
- printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
- regs->tpc);
- printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
-- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
-+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
- printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
- dump_stack();
- unhandled_fault(regs->tpc, current, regs);
-@@ -282,6 +285,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
- show_regs(regs);
- }
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+#ifdef CONFIG_PAX_DLRESOLVE
-+static void pax_emuplt_close(struct vm_area_struct *vma)
-+{
-+ vma->vm_mm->call_dl_resolve = 0UL;
-+}
-+
-+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-+{
-+ unsigned int *kaddr;
-+
-+ vmf->page = alloc_page(GFP_HIGHUSER);
-+ if (!vmf->page)
-+ return VM_FAULT_OOM;
-+
-+ kaddr = kmap(vmf->page);
-+ memset(kaddr, 0, PAGE_SIZE);
-+ kaddr[0] = 0x9DE3BFA8U; /* save */
-+ flush_dcache_page(vmf->page);
-+ kunmap(vmf->page);
-+ return VM_FAULT_MAJOR;
-+}
-+
-+static const struct vm_operations_struct pax_vm_ops = {
-+ .close = pax_emuplt_close,
-+ .fault = pax_emuplt_fault
-+};
-+
-+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
-+{
-+ int ret;
-+
-+ INIT_LIST_HEAD(&vma->anon_vma_chain);
-+ vma->vm_mm = current->mm;
-+ vma->vm_start = addr;
-+ vma->vm_end = addr + PAGE_SIZE;
-+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
-+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-+ vma->vm_ops = &pax_vm_ops;
-+
-+ ret = insert_vm_struct(current->mm, vma);
-+ if (ret)
-+ return ret;
-+
-+ ++current->mm->total_vm;
-+ return 0;
-+}
-+#endif
-+
-+/*
-+ * PaX: decide what to do with offenders (regs->tpc = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ * 2 when patched PLT trampoline was detected
-+ * 3 when unpatched PLT trampoline was detected
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+ int err;
-+
-+ do { /* PaX: patched PLT emulation #1 */
-+ unsigned int sethi1, sethi2, jmpl;
-+
-+ err = get_user(sethi1, (unsigned int *)regs->tpc);
-+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
-+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
-+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
-+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
-+ {
-+ unsigned long addr;
-+
-+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
-+ addr = regs->u_regs[UREG_G1];
-+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
-+
-+ if (test_thread_flag(TIF_32BIT))
-+ addr &= 0xFFFFFFFFUL;
-+
-+ regs->tpc = addr;
-+ regs->tnpc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: patched PLT emulation #2 */
-+ unsigned int ba;
-+
-+ err = get_user(ba, (unsigned int *)regs->tpc);
-+
-+ if (err)
-+ break;
-+
-+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
-+ unsigned long addr;
-+
-+ if ((ba & 0xFFC00000U) == 0x30800000U)
-+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
-+ else
-+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
-+
-+ if (test_thread_flag(TIF_32BIT))
-+ addr &= 0xFFFFFFFFUL;
-+
-+ regs->tpc = addr;
-+ regs->tnpc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: patched PLT emulation #3 */
-+ unsigned int sethi, bajmpl, nop;
-+
-+ err = get_user(sethi, (unsigned int *)regs->tpc);
-+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
-+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
-+ nop == 0x01000000U)
-+ {
-+ unsigned long addr;
-+
-+ addr = (sethi & 0x003FFFFFU) << 10;
-+ regs->u_regs[UREG_G1] = addr;
-+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
-+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
-+ else
-+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
-+
-+ if (test_thread_flag(TIF_32BIT))
-+ addr &= 0xFFFFFFFFUL;
-+
-+ regs->tpc = addr;
-+ regs->tnpc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: patched PLT emulation #4 */
-+ unsigned int sethi, mov1, call, mov2;
-+
-+ err = get_user(sethi, (unsigned int *)regs->tpc);
-+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
-+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
-+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ mov1 == 0x8210000FU &&
-+ (call & 0xC0000000U) == 0x40000000U &&
-+ mov2 == 0x9E100001U)
-+ {
-+ unsigned long addr;
-+
-+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
-+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
-+
-+ if (test_thread_flag(TIF_32BIT))
-+ addr &= 0xFFFFFFFFUL;
-+
-+ regs->tpc = addr;
-+ regs->tnpc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: patched PLT emulation #5 */
-+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
-+
-+ err = get_user(sethi, (unsigned int *)regs->tpc);
-+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
-+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
-+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
-+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
-+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
-+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
-+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
-+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
-+ (or1 & 0xFFFFE000U) == 0x82106000U &&
-+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
-+ sllx == 0x83287020U &&
-+ jmpl == 0x81C04005U &&
-+ nop == 0x01000000U)
-+ {
-+ unsigned long addr;
-+
-+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
-+ regs->u_regs[UREG_G1] <<= 32;
-+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
-+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
-+ regs->tpc = addr;
-+ regs->tnpc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: patched PLT emulation #6 */
-+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
-+
-+ err = get_user(sethi, (unsigned int *)regs->tpc);
-+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
-+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
-+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
-+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
-+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
-+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
-+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
-+ sllx == 0x83287020U &&
-+ (or & 0xFFFFE000U) == 0x8A116000U &&
-+ jmpl == 0x81C04005U &&
-+ nop == 0x01000000U)
-+ {
-+ unsigned long addr;
-+
-+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
-+ regs->u_regs[UREG_G1] <<= 32;
-+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
-+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
-+ regs->tpc = addr;
-+ regs->tnpc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: unpatched PLT emulation step 1 */
-+ unsigned int sethi, ba, nop;
-+
-+ err = get_user(sethi, (unsigned int *)regs->tpc);
-+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
-+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
-+ nop == 0x01000000U)
-+ {
-+ unsigned long addr;
-+ unsigned int save, call;
-+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
-+
-+ if ((ba & 0xFFC00000U) == 0x30800000U)
-+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
-+ else
-+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
-+
-+ if (test_thread_flag(TIF_32BIT))
-+ addr &= 0xFFFFFFFFUL;
-+
-+ err = get_user(save, (unsigned int *)addr);
-+ err |= get_user(call, (unsigned int *)(addr+4));
-+ err |= get_user(nop, (unsigned int *)(addr+8));
-+ if (err)
-+ break;
-+
-+#ifdef CONFIG_PAX_DLRESOLVE
-+ if (save == 0x9DE3BFA8U &&
-+ (call & 0xC0000000U) == 0x40000000U &&
-+ nop == 0x01000000U)
-+ {
-+ struct vm_area_struct *vma;
-+ unsigned long call_dl_resolve;
-+
-+ down_read(&current->mm->mmap_sem);
-+ call_dl_resolve = current->mm->call_dl_resolve;
-+ up_read(&current->mm->mmap_sem);
-+ if (likely(call_dl_resolve))
-+ goto emulate;
-+
-+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-+
-+ down_write(&current->mm->mmap_sem);
-+ if (current->mm->call_dl_resolve) {
-+ call_dl_resolve = current->mm->call_dl_resolve;
-+ up_write(&current->mm->mmap_sem);
-+ if (vma)
-+ kmem_cache_free(vm_area_cachep, vma);
-+ goto emulate;
-+ }
-+
-+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
-+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
-+ up_write(&current->mm->mmap_sem);
-+ if (vma)
-+ kmem_cache_free(vm_area_cachep, vma);
-+ return 1;
-+ }
-+
-+ if (pax_insert_vma(vma, call_dl_resolve)) {
-+ up_write(&current->mm->mmap_sem);
-+ kmem_cache_free(vm_area_cachep, vma);
-+ return 1;
-+ }
-+
-+ current->mm->call_dl_resolve = call_dl_resolve;
-+ up_write(&current->mm->mmap_sem);
-+
-+emulate:
-+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
-+ regs->tpc = call_dl_resolve;
-+ regs->tnpc = addr+4;
-+ return 3;
-+ }
-+#endif
-+
-+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
-+ if ((save & 0xFFC00000U) == 0x05000000U &&
-+ (call & 0xFFFFE000U) == 0x85C0A000U &&
-+ nop == 0x01000000U)
-+ {
-+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
-+ regs->u_regs[UREG_G2] = addr + 4;
-+ addr = (save & 0x003FFFFFU) << 10;
-+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
-+
-+ if (test_thread_flag(TIF_32BIT))
-+ addr &= 0xFFFFFFFFUL;
-+
-+ regs->tpc = addr;
-+ regs->tnpc = addr+4;
-+ return 3;
-+ }
-+
-+ /* PaX: 64-bit PLT stub */
-+ err = get_user(sethi1, (unsigned int *)addr);
-+ err |= get_user(sethi2, (unsigned int *)(addr+4));
-+ err |= get_user(or1, (unsigned int *)(addr+8));
-+ err |= get_user(or2, (unsigned int *)(addr+12));
-+ err |= get_user(sllx, (unsigned int *)(addr+16));
-+ err |= get_user(add, (unsigned int *)(addr+20));
-+ err |= get_user(jmpl, (unsigned int *)(addr+24));
-+ err |= get_user(nop, (unsigned int *)(addr+28));
-+ if (err)
-+ break;
-+
-+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
-+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
-+ (or1 & 0xFFFFE000U) == 0x88112000U &&
-+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
-+ sllx == 0x89293020U &&
-+ add == 0x8A010005U &&
-+ jmpl == 0x89C14000U &&
-+ nop == 0x01000000U)
-+ {
-+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
-+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
-+ regs->u_regs[UREG_G4] <<= 32;
-+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
-+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
-+ regs->u_regs[UREG_G4] = addr + 24;
-+ addr = regs->u_regs[UREG_G5];
-+ regs->tpc = addr;
-+ regs->tnpc = addr+4;
-+ return 3;
-+ }
-+ }
-+ } while (0);
-+
-+#ifdef CONFIG_PAX_DLRESOLVE
-+ do { /* PaX: unpatched PLT emulation step 2 */
-+ unsigned int save, call, nop;
-+
-+ err = get_user(save, (unsigned int *)(regs->tpc-4));
-+ err |= get_user(call, (unsigned int *)regs->tpc);
-+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
-+ if (err)
-+ break;
-+
-+ if (save == 0x9DE3BFA8U &&
-+ (call & 0xC0000000U) == 0x40000000U &&
-+ nop == 0x01000000U)
-+ {
-+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
-+
-+ if (test_thread_flag(TIF_32BIT))
-+ dl_resolve &= 0xFFFFFFFFUL;
-+
-+ regs->u_regs[UREG_RETPC] = regs->tpc;
-+ regs->tpc = dl_resolve;
-+ regs->tnpc = dl_resolve+4;
-+ return 3;
-+ }
-+ } while (0);
-+#endif
-+
-+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
-+ unsigned int sethi, ba, nop;
-+
-+ err = get_user(sethi, (unsigned int *)regs->tpc);
-+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
-+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ (ba & 0xFFF00000U) == 0x30600000U &&
-+ nop == 0x01000000U)
-+ {
-+ unsigned long addr;
-+
-+ addr = (sethi & 0x003FFFFFU) << 10;
-+ regs->u_regs[UREG_G1] = addr;
-+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
-+
-+ if (test_thread_flag(TIF_32BIT))
-+ addr &= 0xFFFFFFFFUL;
-+
-+ regs->tpc = addr;
-+ regs->tnpc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+#endif
-+
-+ return 1;
-+}
-+
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ unsigned long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 8; i++) {
-+ unsigned int c;
-+ if (get_user(c, (unsigned int *)pc+i))
-+ printk(KERN_CONT "???????? ");
-+ else
-+ printk(KERN_CONT "%08x ", c);
-+ }
-+ printk("\n");
-+}
-+#endif
-+
- asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
- {
- struct mm_struct *mm = current->mm;
-@@ -348,6 +811,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
- if (!vma)
- goto bad_area;
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ /* PaX: detect ITLB misses on non-exec pages */
-+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
-+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
-+ {
-+ if (address != regs->tpc)
-+ goto good_area;
-+
-+ up_read(&mm->mmap_sem);
-+ switch (pax_handle_fetch_fault(regs)) {
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+ case 2:
-+ case 3:
-+ return;
-+#endif
-+
-+ }
-+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
-+ do_group_exit(SIGKILL);
-+ }
-+#endif
-+
- /* Pure DTLB misses do not tell us whether the fault causing
- * load/store/atomic was a write or not, it only says that there
- * was no match. So in such a case we (carefully) read the
-diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
-index 42c55df..20da942 100644
---- a/arch/sparc/mm/gup.c
-+++ b/arch/sparc/mm/gup.c
-@@ -106,6 +106,36 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
- return 1;
- }
-
-+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
-+ struct page **pages)
-+{
-+ struct mm_struct *mm = current->mm;
-+ unsigned long addr, len, end;
-+ unsigned long next, flags;
-+ pgd_t *pgdp;
-+ int nr = 0;
-+
-+ start &= PAGE_MASK;
-+ addr = start;
-+ len = (unsigned long) nr_pages << PAGE_SHIFT;
-+ end = start + len;
-+
-+ local_irq_save(flags);
-+ pgdp = pgd_offset(mm, addr);
-+ do {
-+ pgd_t pgd = *pgdp;
-+
-+ next = pgd_addr_end(addr, end);
-+ if (pgd_none(pgd))
-+ break;
-+ if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
-+ break;
-+ } while (pgdp++, addr = next, addr != end);
-+ local_irq_restore(flags);
-+
-+ return nr;
-+}
-+
- int get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
- {
-diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
-index 07e1453..ae6e02e 100644
---- a/arch/sparc/mm/hugetlbpage.c
-+++ b/arch/sparc/mm/hugetlbpage.c
-@@ -28,7 +28,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
- unsigned long addr,
- unsigned long len,
- unsigned long pgoff,
-- unsigned long flags)
-+ unsigned long flags,
-+ unsigned long offset)
- {
- struct mm_struct *mm = current->mm;
- struct vm_area_struct * vma;
-@@ -67,7 +68,7 @@ full_search:
- }
- return -ENOMEM;
- }
-- if (likely(!vma || addr + len <= vma->vm_start)) {
-+ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) {
- /*
- * Remember the place where we stopped the search:
- */
-@@ -85,7 +86,8 @@ static unsigned long
- hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- const unsigned long len,
- const unsigned long pgoff,
-- const unsigned long flags)
-+ const unsigned long flags,
-+ const unsigned long offset)
- {
- struct vm_area_struct *vma;
- struct mm_struct *mm = current->mm;
-@@ -105,26 +107,28 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
-
- /* make sure it can fit in the remaining address space */
- if (likely(addr > len)) {
-- vma = find_vma(mm, addr-len);
-- if (!vma || addr <= vma->vm_start) {
-+ addr -= len;
-+ vma = find_vma(mm, addr);
-+ if (check_heap_stack_gap(vma, &addr, len, offset)) {
- /* remember the address as a hint for next time */
-- return (mm->free_area_cache = addr-len);
-+ return (mm->free_area_cache = addr);
- }
- }
-
- if (unlikely(mm->mmap_base < len))
- goto bottomup;
-
-- addr = (mm->mmap_base-len) & HPAGE_MASK;
-+ addr = mm->mmap_base - len;
-
- do {
-+ addr &= HPAGE_MASK;
- /*
- * Lookup failure means no vma is above this address,
- * else if new region fits below vma->vm_start,
- * return with success:
- */
- vma = find_vma(mm, addr);
-- if (likely(!vma || addr+len <= vma->vm_start)) {
-+ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) {
- /* remember the address as a hint for next time */
- return (mm->free_area_cache = addr);
- }
-@@ -134,8 +138,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- mm->cached_hole_size = vma->vm_start - addr;
-
- /* try just below the current vma->vm_start */
-- addr = (vma->vm_start-len) & HPAGE_MASK;
-- } while (likely(len < vma->vm_start));
-+ addr = skip_heap_stack_gap(vma, len, offset);
-+ } while (!IS_ERR_VALUE(addr));
-
- bottomup:
- /*
-@@ -163,6 +167,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long task_size = TASK_SIZE;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
-
- if (test_thread_flag(TIF_32BIT))
- task_size = STACK_TOP32;
-@@ -181,16 +186,15 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- if (addr) {
- addr = ALIGN(addr, HPAGE_SIZE);
- vma = find_vma(mm, addr);
-- if (task_size - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (task_size - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
- return addr;
- }
- if (mm->get_unmapped_area == arch_get_unmapped_area)
- return hugetlb_get_unmapped_area_bottomup(file, addr, len,
-- pgoff, flags);
-+ pgoff, flags, offset);
- else
- return hugetlb_get_unmapped_area_topdown(file, addr, len,
-- pgoff, flags);
-+ pgoff, flags, offset);
- }
-
- pte_t *huge_pte_alloc(struct mm_struct *mm,
-diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
-index 7b00de6..78239f4 100644
---- a/arch/sparc/mm/init_32.c
-+++ b/arch/sparc/mm/init_32.c
-@@ -316,6 +316,9 @@ extern void device_scan(void);
- pgprot_t PAGE_SHARED __read_mostly;
- EXPORT_SYMBOL(PAGE_SHARED);
-
-+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
-+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
-+
- void __init paging_init(void)
- {
- switch(sparc_cpu_model) {
-@@ -344,17 +347,17 @@ void __init paging_init(void)
-
- /* Initialize the protection map with non-constant, MMU dependent values. */
- protection_map[0] = PAGE_NONE;
-- protection_map[1] = PAGE_READONLY;
-- protection_map[2] = PAGE_COPY;
-- protection_map[3] = PAGE_COPY;
-+ protection_map[1] = PAGE_READONLY_NOEXEC;
-+ protection_map[2] = PAGE_COPY_NOEXEC;
-+ protection_map[3] = PAGE_COPY_NOEXEC;
- protection_map[4] = PAGE_READONLY;
- protection_map[5] = PAGE_READONLY;
- protection_map[6] = PAGE_COPY;
- protection_map[7] = PAGE_COPY;
- protection_map[8] = PAGE_NONE;
-- protection_map[9] = PAGE_READONLY;
-- protection_map[10] = PAGE_SHARED;
-- protection_map[11] = PAGE_SHARED;
-+ protection_map[9] = PAGE_READONLY_NOEXEC;
-+ protection_map[10] = PAGE_SHARED_NOEXEC;
-+ protection_map[11] = PAGE_SHARED_NOEXEC;
- protection_map[12] = PAGE_READONLY;
- protection_map[13] = PAGE_READONLY;
- protection_map[14] = PAGE_SHARED;
-diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
-index d2c5737..05a3e21 100644
---- a/arch/sparc/mm/init_64.c
-+++ b/arch/sparc/mm/init_64.c
-@@ -170,9 +170,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
- int num_kernel_image_mappings;
-
- #ifdef CONFIG_DEBUG_DCFLUSH
--atomic_t dcpage_flushes = ATOMIC_INIT(0);
-+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
- #ifdef CONFIG_SMP
--atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
-+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
- #endif
- #endif
-
-@@ -180,7 +180,7 @@ inline void flush_dcache_page_impl(struct page *page)
- {
- BUG_ON(tlb_type == hypervisor);
- #ifdef CONFIG_DEBUG_DCFLUSH
-- atomic_inc(&dcpage_flushes);
-+ atomic_inc_unchecked(&dcpage_flushes);
- #endif
-
- #ifdef DCACHE_ALIASING_POSSIBLE
-@@ -421,10 +421,10 @@ void mmu_info(struct seq_file *m)
-
- #ifdef CONFIG_DEBUG_DCFLUSH
- seq_printf(m, "DCPageFlushes\t: %d\n",
-- atomic_read(&dcpage_flushes));
-+ atomic_read_unchecked(&dcpage_flushes));
- #ifdef CONFIG_SMP
- seq_printf(m, "DCPageFlushesXC\t: %d\n",
-- atomic_read(&dcpage_flushes_xcall));
-+ atomic_read_unchecked(&dcpage_flushes_xcall));
- #endif /* CONFIG_SMP */
- #endif /* CONFIG_DEBUG_DCFLUSH */
- }
-diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
-index cbef74e..c38fead 100644
---- a/arch/sparc/mm/srmmu.c
-+++ b/arch/sparc/mm/srmmu.c
-@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
- PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
- BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
- BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
-+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
-+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
-+#endif
-+
- BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
- page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
-
-diff --git a/arch/sparc/prom/cif.S b/arch/sparc/prom/cif.S
-index 9c86b4b..8050f38 100644
---- a/arch/sparc/prom/cif.S
-+++ b/arch/sparc/prom/cif.S
-@@ -11,11 +11,10 @@
- .text
- .globl prom_cif_direct
- prom_cif_direct:
-+ save %sp, -192, %sp
- sethi %hi(p1275buf), %o1
- or %o1, %lo(p1275buf), %o1
-- ldx [%o1 + 0x0010], %o2 ! prom_cif_stack
-- save %o2, -192, %sp
-- ldx [%i1 + 0x0008], %l2 ! prom_cif_handler
-+ ldx [%o1 + 0x0008], %l2 ! prom_cif_handler
- mov %g4, %l0
- mov %g5, %l1
- mov %g6, %l3
-diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c
-index 5016c5e..ffb1cc5 100644
---- a/arch/sparc/prom/init_64.c
-+++ b/arch/sparc/prom/init_64.c
-@@ -26,13 +26,13 @@ phandle prom_chosen_node;
- * failure. It gets passed the pointer to the PROM vector.
- */
-
--extern void prom_cif_init(void *, void *);
-+extern void prom_cif_init(void *);
-
--void __init prom_init(void *cif_handler, void *cif_stack)
-+void __init prom_init(void *cif_handler)
- {
- phandle node;
-
-- prom_cif_init(cif_handler, cif_stack);
-+ prom_cif_init(cif_handler);
-
- prom_chosen_node = prom_finddevice(prom_chosen_path);
- if (!prom_chosen_node || (s32)prom_chosen_node == -1)
-diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
-index d9850c2..5bbbc23 100644
---- a/arch/sparc/prom/p1275.c
-+++ b/arch/sparc/prom/p1275.c
-@@ -21,7 +21,6 @@
- struct {
- long prom_callback; /* 0x00 */
- void (*prom_cif_handler)(long *); /* 0x08 */
-- unsigned long prom_cif_stack; /* 0x10 */
- } p1275buf;
-
- extern void prom_world(int);
-@@ -53,5 +52,4 @@ void p1275_cmd_direct(unsigned long *args)
- void prom_cif_init(void *cif_handler, void *cif_stack)
- {
- p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
-- p1275buf.prom_cif_stack = (unsigned long)cif_stack;
- }
-diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
-index 6cb8319..ee12bac 100644
---- a/arch/tile/Kconfig
-+++ b/arch/tile/Kconfig
-@@ -142,6 +142,7 @@ source "kernel/Kconfig.hz"
-
- config KEXEC
- bool "kexec system call"
-+ depends on !GRKERNSEC_KMEM
- ---help---
- kexec is a system call that implements the ability to shutdown your
- current kernel, and to start another kernel. It is like a reboot
-diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
-index 27fe667..36d474c 100644
---- a/arch/tile/include/asm/atomic_64.h
-+++ b/arch/tile/include/asm/atomic_64.h
-@@ -142,6 +142,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
-
- #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
-+
- /* Atomic dec and inc don't implement barrier, so provide them if needed. */
- #define smp_mb__before_atomic_dec() smp_mb()
- #define smp_mb__after_atomic_dec() smp_mb()
-diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
-index 392e533..536b092 100644
---- a/arch/tile/include/asm/cache.h
-+++ b/arch/tile/include/asm/cache.h
-@@ -15,11 +15,12 @@
- #ifndef _ASM_TILE_CACHE_H
- #define _ASM_TILE_CACHE_H
-
-+#include <linux/const.h>
- #include <arch/chip.h>
-
- /* bytes per L1 data cache line */
- #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- /* bytes per L2 cache line */
- #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
-diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
-index ef34d2caa..d6ce60c 100644
---- a/arch/tile/include/asm/uaccess.h
-+++ b/arch/tile/include/asm/uaccess.h
-@@ -361,9 +361,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
- const void __user *from,
- unsigned long n)
- {
-- int sz = __compiletime_object_size(to);
-+ size_t sz = __compiletime_object_size(to);
-
-- if (likely(sz == -1 || sz >= n))
-+ if (likely(sz == (size_t)-1 || sz >= n))
- n = _copy_from_user(to, from, n);
- else
- copy_from_user_overflow();
-diff --git a/arch/um/Makefile b/arch/um/Makefile
-index 7730af6..880804f 100644
---- a/arch/um/Makefile
-+++ b/arch/um/Makefile
-@@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
- $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
- $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
-
-+ifdef CONSTIFY_PLUGIN
-+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
-+endif
-+
- #This will adjust *FLAGS accordingly to the platform.
- include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
-
-diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
-index 19e1bdd..3665b77 100644
---- a/arch/um/include/asm/cache.h
-+++ b/arch/um/include/asm/cache.h
-@@ -1,6 +1,7 @@
- #ifndef __UM_CACHE_H
- #define __UM_CACHE_H
-
-+#include <linux/const.h>
-
- #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
- # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
-@@ -12,6 +13,6 @@
- # define L1_CACHE_SHIFT 5
- #endif
-
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #endif
-diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
-index 6c03acd..a5e0215 100644
---- a/arch/um/include/asm/kmap_types.h
-+++ b/arch/um/include/asm/kmap_types.h
-@@ -23,6 +23,7 @@ enum km_type {
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
-+ KM_CLEARPAGE,
- KM_TYPE_NR
- };
-
-diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
-index 7cfc3ce..cbd1a58 100644
---- a/arch/um/include/asm/page.h
-+++ b/arch/um/include/asm/page.h
-@@ -14,6 +14,9 @@
- #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
- #define PAGE_MASK (~(PAGE_SIZE-1))
-
-+#define ktla_ktva(addr) (addr)
-+#define ktva_ktla(addr) (addr)
-+
- #ifndef __ASSEMBLY__
-
- struct page;
-diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
-index 0032f92..cd151e0 100644
---- a/arch/um/include/asm/pgtable-3level.h
-+++ b/arch/um/include/asm/pgtable-3level.h
-@@ -58,6 +58,7 @@
- #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
- #define pud_populate(mm, pud, pmd) \
- set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
-+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
-
- #ifdef CONFIG_64BIT
- #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
-diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
-index c533835..84db18e 100644
---- a/arch/um/kernel/process.c
-+++ b/arch/um/kernel/process.c
-@@ -406,22 +406,6 @@ int singlestepping(void * t)
- return 2;
- }
-
--/*
-- * Only x86 and x86_64 have an arch_align_stack().
-- * All other arches have "#define arch_align_stack(x) (x)"
-- * in their asm/system.h
-- * As this is included in UML from asm-um/system-generic.h,
-- * we can use it to behave as the subarch does.
-- */
--#ifndef arch_align_stack
--unsigned long arch_align_stack(unsigned long sp)
--{
-- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-- sp -= get_random_int() % 8192;
-- return sp & ~0xf;
--}
--#endif
--
- unsigned long get_wchan(struct task_struct *p)
- {
- unsigned long stack_page, sp, ip;
-diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
-index ad8f795..2c7eec6 100644
---- a/arch/unicore32/include/asm/cache.h
-+++ b/arch/unicore32/include/asm/cache.h
-@@ -12,8 +12,10 @@
- #ifndef __UNICORE_CACHE_H__
- #define __UNICORE_CACHE_H__
-
--#define L1_CACHE_SHIFT (5)
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#include <linux/const.h>
-+
-+#define L1_CACHE_SHIFT 5
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- /*
- * Memory returned by kmalloc() may be used for DMA, so we must make
-diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 28a1bca..0443883 100644
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -75,6 +75,7 @@ config X86
- select HAVE_BPF_JIT if (X86_64 && NET)
- select CLKEVT_I8253
- select ARCH_HAVE_NMI_SAFE_CMPXCHG
-+ select HAVE_ARCH_SECCOMP_FILTER
- select ARCH_SUPPORTS_ATOMIC_RMW
-
- config INSTRUCTION_DECODER
-@@ -236,7 +237,7 @@ config X86_HT
-
- config X86_32_LAZY_GS
- def_bool y
-- depends on X86_32 && !CC_STACKPROTECTOR
-+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
-
- config ARCH_HWEIGHT_CFLAGS
- string
-@@ -526,6 +527,7 @@ config SCHED_OMIT_FRAME_POINTER
-
- menuconfig PARAVIRT_GUEST
- bool "Paravirtualized guest support"
-+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
- ---help---
- Say Y here to get to see options related to running Linux under
- various hypervisors. This option alone does not add any kernel code.
-@@ -903,6 +905,7 @@ config VM86
-
- config X86_16BIT
- bool "Enable support for 16-bit segments" if EXPERT
-+ depends on !GRKERNSEC
- default y
- ---help---
- This option is required by programs like Wine to run 16-bit
-@@ -1040,7 +1043,7 @@ choice
-
- config NOHIGHMEM
- bool "off"
-- depends on !X86_NUMAQ
-+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
- ---help---
- Linux can use up to 64 Gigabytes of physical memory on x86 systems.
- However, the address space of 32-bit x86 processors is only 4
-@@ -1077,7 +1080,7 @@ config NOHIGHMEM
-
- config HIGHMEM4G
- bool "4GB"
-- depends on !X86_NUMAQ
-+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
- ---help---
- Select this if you have a 32-bit processor and between 1 and 4
- gigabytes of physical RAM.
-@@ -1131,7 +1134,7 @@ config PAGE_OFFSET
- hex
- default 0xB0000000 if VMSPLIT_3G_OPT
- default 0x80000000 if VMSPLIT_2G
-- default 0x78000000 if VMSPLIT_2G_OPT
-+ default 0x70000000 if VMSPLIT_2G_OPT
- default 0x40000000 if VMSPLIT_1G
- default 0xC0000000
- depends on X86_32
-@@ -1514,6 +1517,7 @@ config SECCOMP
-
- config CC_STACKPROTECTOR
- bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
-+ depends on X86_64 || !PAX_MEMORY_UDEREF
- ---help---
- This option turns on the -fstack-protector GCC feature. This
- feature puts, at the beginning of functions, a canary value on
-@@ -1532,6 +1536,7 @@ source kernel/Kconfig.hz
-
- config KEXEC
- bool "kexec system call"
-+ depends on !GRKERNSEC_KMEM
- ---help---
- kexec is a system call that implements the ability to shutdown your
- current kernel, and to start another kernel. It is like a reboot
-@@ -1634,6 +1639,8 @@ config X86_NEED_RELOCS
- config PHYSICAL_ALIGN
- hex "Alignment value to which kernel should be aligned" if X86_32
- default "0x1000000"
-+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
-+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
- range 0x2000 0x1000000
- ---help---
- This value puts the alignment restrictions on physical address
-@@ -1665,9 +1672,10 @@ config HOTPLUG_CPU
- Say N if you want to disable CPU hotplug.
-
- config COMPAT_VDSO
-- def_bool y
-+ def_bool n
- prompt "Compat VDSO support"
- depends on X86_32 || IA32_EMULATION
-+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
- ---help---
- Map the 32-bit VDSO to the predictable old-style address too.
-
-@@ -1720,6 +1728,22 @@ config CMDLINE_OVERRIDE
- This is used to work around broken boot loaders. This should
- be set to 'N' under normal conditions.
-
-+config DEFAULT_MODIFY_LDT_SYSCALL
-+ bool "Allow userspace to modify the LDT by default"
-+ default y
-+
-+ ---help---
-+ Modifying the LDT (Local Descriptor Table) may be needed to run a
-+ 16-bit or segmented code such as Dosemu or Wine. This is done via
-+ a system call which is not needed to run portable applications,
-+ and which can sometimes be abused to exploit some weaknesses of
-+ the architecture, opening new vulnerabilities.
-+
-+ For this reason this option allows one to enable or disable the
-+ feature at runtime. It is recommended to say 'N' here to leave
-+ the system protected, and to enable it at runtime only if needed
-+ by setting the sys.kernel.modify_ldt sysctl.
-+
- endmenu
-
- config ARCH_ENABLE_MEMORY_HOTPLUG
-diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
-index e3ca7e0..b30b28a 100644
---- a/arch/x86/Kconfig.cpu
-+++ b/arch/x86/Kconfig.cpu
-@@ -341,7 +341,7 @@ config X86_PPRO_FENCE
-
- config X86_F00F_BUG
- def_bool y
-- depends on M586MMX || M586TSC || M586 || M486 || M386
-+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
-
- config X86_INVD_BUG
- def_bool y
-@@ -365,7 +365,7 @@ config X86_POPAD_OK
-
- config X86_ALIGNMENT_16
- def_bool y
-- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
-+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
-
- config X86_INTEL_USERCOPY
- def_bool y
-@@ -411,7 +411,7 @@ config X86_CMPXCHG64
- # generates cmov.
- config X86_CMOV
- def_bool y
-- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
-+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
-
- config X86_MINIMUM_CPU_FAMILY
- int
-diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
-index bf56e17..91465a1 100644
---- a/arch/x86/Kconfig.debug
-+++ b/arch/x86/Kconfig.debug
-@@ -81,7 +81,7 @@ config X86_PTDUMP
- config DEBUG_RODATA
- bool "Write protect kernel read-only data structures"
- default y
-- depends on DEBUG_KERNEL
-+ depends on DEBUG_KERNEL && BROKEN
- ---help---
- Mark the kernel read-only data as write-protected in the pagetables,
- in order to catch accidental (and incorrect) writes to such const
-@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
-
- config DEBUG_SET_MODULE_RONX
- bool "Set loadable kernel module data as NX and text as RO"
-- depends on MODULES
-+ depends on MODULES && BROKEN
- ---help---
- This option helps catch unintended modifications to loadable
- kernel module's text and read-only data. It also prevents execution
-@@ -272,7 +272,7 @@ config OPTIMIZE_INLINING
-
- config DEBUG_STRICT_USER_COPY_CHECKS
- bool "Strict copy size checks"
-- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
-+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
- ---help---
- Enabling this option turns a certain set of sanity checks for user
- copy operations into compile time failures.
-diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index 03dbc7f5b..e1aa479 100644
---- a/arch/x86/Makefile
-+++ b/arch/x86/Makefile
-@@ -40,12 +40,12 @@ ifeq ($(CONFIG_X86_32),y)
- KBUILD_CFLAGS += $(cflags-y)
-
- # temporary until string.h is fixed
-- KBUILD_CFLAGS += -ffreestanding
- else
- BITS := 64
- UTS_MACHINE := x86_64
- CHECKFLAGS += -D__x86_64__ -m64
-
-+ biarch := $(call cc-option,-m64)
- KBUILD_AFLAGS += -m64
- KBUILD_CFLAGS += -m64
-
-@@ -72,6 +72,8 @@ else
- KBUILD_CFLAGS += -maccumulate-outgoing-args
- endif
-
-+KBUILD_CFLAGS += -ffreestanding
-+
- ifdef CONFIG_CC_STACKPROTECTOR
- cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
- ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
-@@ -199,3 +201,12 @@ define archhelp
- echo ' FDARGS="..." arguments for the booted kernel'
- echo ' FDINITRD=file initrd for the booted kernel'
- endef
-+
-+define OLD_LD
-+
-+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
-+*** Please upgrade your binutils to 2.18 or newer
-+endef
-+
-+archprepare:
-+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
-diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
-index e80542b..c5099c3 100644
---- a/arch/x86/boot/Makefile
-+++ b/arch/x86/boot/Makefile
-@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
- $(call cc-option, -fno-unit-at-a-time)) \
- $(call cc-option, -fno-stack-protector) \
- $(call cc-option, -mpreferred-stack-boundary=2)
-+ifdef CONSTIFY_PLUGIN
-+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
-+endif
- KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
- GCOV_PROFILE := n
-
-diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
-index 878e4b9..20537ab 100644
---- a/arch/x86/boot/bitops.h
-+++ b/arch/x86/boot/bitops.h
-@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
- u8 v;
- const u32 *p = (const u32 *)addr;
-
-- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
-+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
- return v;
- }
-
-@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
-
- static inline void set_bit(int nr, void *addr)
- {
-- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
-+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
- }
-
- #endif /* BOOT_BITOPS_H */
-diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
-index c7093bd..d4247ffe0 100644
---- a/arch/x86/boot/boot.h
-+++ b/arch/x86/boot/boot.h
-@@ -85,7 +85,7 @@ static inline void io_delay(void)
- static inline u16 ds(void)
- {
- u16 seg;
-- asm("movw %%ds,%0" : "=rm" (seg));
-+ asm volatile("movw %%ds,%0" : "=rm" (seg));
- return seg;
- }
-
-@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
- static inline int memcmp(const void *s1, const void *s2, size_t len)
- {
- u8 diff;
-- asm("repe; cmpsb; setnz %0"
-+ asm volatile("repe; cmpsb; setnz %0"
- : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
- return diff;
- }
-diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
-index cda5cef..c1f26aa 100644
---- a/arch/x86/boot/compressed/Makefile
-+++ b/arch/x86/boot/compressed/Makefile
-@@ -15,6 +15,9 @@ KBUILD_CFLAGS += $(cflags-y)
- KBUILD_CFLAGS += -mno-mmx -mno-sse
- KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
- KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
-+ifdef CONSTIFY_PLUGIN
-+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
-+endif
-
- KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
- GCOV_PROFILE := n
-diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
-index 67a655a..b924059 100644
---- a/arch/x86/boot/compressed/head_32.S
-+++ b/arch/x86/boot/compressed/head_32.S
-@@ -76,7 +76,7 @@ ENTRY(startup_32)
- notl %eax
- andl %eax, %ebx
- #else
-- movl $LOAD_PHYSICAL_ADDR, %ebx
-+ movl $____LOAD_PHYSICAL_ADDR, %ebx
- #endif
-
- /* Target address to relocate to for decompression */
-@@ -162,7 +162,7 @@ relocated:
- * and where it was actually loaded.
- */
- movl %ebp, %ebx
-- subl $LOAD_PHYSICAL_ADDR, %ebx
-+ subl $____LOAD_PHYSICAL_ADDR, %ebx
- jz 2f /* Nothing to be done if loaded at compiled addr. */
- /*
- * Process relocations.
-@@ -170,8 +170,7 @@ relocated:
-
- 1: subl $4, %edi
- movl (%edi), %ecx
-- testl %ecx, %ecx
-- jz 2f
-+ jecxz 2f
- addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
- jmp 1b
- 2:
-diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
-index 35af09d..99c9676 100644
---- a/arch/x86/boot/compressed/head_64.S
-+++ b/arch/x86/boot/compressed/head_64.S
-@@ -91,7 +91,7 @@ ENTRY(startup_32)
- notl %eax
- andl %eax, %ebx
- #else
-- movl $LOAD_PHYSICAL_ADDR, %ebx
-+ movl $____LOAD_PHYSICAL_ADDR, %ebx
- #endif
-
- /* Target address to relocate to for decompression */
-@@ -233,7 +233,7 @@ ENTRY(startup_64)
- notq %rax
- andq %rax, %rbp
- #else
-- movq $LOAD_PHYSICAL_ADDR, %rbp
-+ movq $____LOAD_PHYSICAL_ADDR, %rbp
- #endif
-
- /* Target address to relocate to for decompression */
-diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
-index 3a19d04..1bef1d5 100644
---- a/arch/x86/boot/compressed/misc.c
-+++ b/arch/x86/boot/compressed/misc.c
-@@ -226,7 +226,7 @@ void __putstr(int error, const char *s)
-
- void *memset(void *s, int c, size_t n)
- {
-- int i;
-+ size_t i;
- char *ss = s;
-
- for (i = 0; i < n; i++)
-@@ -282,7 +282,7 @@ static void parse_elf(void *output)
- Elf32_Ehdr ehdr;
- Elf32_Phdr *phdrs, *phdr;
- #endif
-- void *dest;
-+ void *dest, *prev;
- int i;
-
- memcpy(&ehdr, output, sizeof(ehdr));
-@@ -310,13 +310,16 @@ static void parse_elf(void *output)
- case PT_LOAD:
- #ifdef CONFIG_RELOCATABLE
- dest = output;
-- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
-+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
- #else
- dest = (void *)(phdr->p_paddr);
- #endif
- memcpy(dest,
- output + phdr->p_offset,
- phdr->p_filesz);
-+ if (i)
-+ memset(prev, 0xff, dest - prev);
-+ prev = dest + phdr->p_filesz;
- break;
- default: /* Ignore other PT_* */ break;
- }
-@@ -363,7 +366,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
- error("Destination address too large");
- #endif
- #ifndef CONFIG_RELOCATABLE
-- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
-+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
- error("Wrong destination address");
- #endif
-
-diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
-index 4d3ff03..e4972ff 100644
---- a/arch/x86/boot/cpucheck.c
-+++ b/arch/x86/boot/cpucheck.c
-@@ -74,7 +74,7 @@ static int has_fpu(void)
- u16 fcw = -1, fsw = -1;
- u32 cr0;
-
-- asm("movl %%cr0,%0" : "=r" (cr0));
-+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
- if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
- cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
- asm volatile("movl %0,%%cr0" : : "r" (cr0));
-@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
- {
- u32 f0, f1;
-
-- asm("pushfl ; "
-+ asm volatile("pushfl ; "
- "pushfl ; "
- "popl %0 ; "
- "movl %0,%1 ; "
-@@ -115,7 +115,7 @@ static void get_flags(void)
- set_bit(X86_FEATURE_FPU, cpu.flags);
-
- if (has_eflag(X86_EFLAGS_ID)) {
-- asm("cpuid"
-+ asm volatile("cpuid"
- : "=a" (max_intel_level),
- "=b" (cpu_vendor[0]),
- "=d" (cpu_vendor[1]),
-@@ -124,7 +124,7 @@ static void get_flags(void)
-
- if (max_intel_level >= 0x00000001 &&
- max_intel_level <= 0x0000ffff) {
-- asm("cpuid"
-+ asm volatile("cpuid"
- : "=a" (tfms),
- "=c" (cpu.flags[4]),
- "=d" (cpu.flags[0])
-@@ -136,7 +136,7 @@ static void get_flags(void)
- cpu.model += ((tfms >> 16) & 0xf) << 4;
- }
-
-- asm("cpuid"
-+ asm volatile("cpuid"
- : "=a" (max_amd_level)
- : "a" (0x80000000)
- : "ebx", "ecx", "edx");
-@@ -144,7 +144,7 @@ static void get_flags(void)
- if (max_amd_level >= 0x80000001 &&
- max_amd_level <= 0x8000ffff) {
- u32 eax = 0x80000001;
-- asm("cpuid"
-+ asm volatile("cpuid"
- : "+a" (eax),
- "=c" (cpu.flags[6]),
- "=d" (cpu.flags[1])
-@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
- u32 ecx = MSR_K7_HWCR;
- u32 eax, edx;
-
-- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
-+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
- eax &= ~(1 << 15);
-- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
-+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
-
- get_flags(); /* Make sure it really did something */
- err = check_flags();
-@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
- u32 ecx = MSR_VIA_FCR;
- u32 eax, edx;
-
-- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
-+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
- eax |= (1<<1)|(1<<7);
-- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
-+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
-
- set_bit(X86_FEATURE_CX8, cpu.flags);
- err = check_flags();
-@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
- u32 eax, edx;
- u32 level = 1;
-
-- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
-- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
-- asm("cpuid"
-+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
-+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
-+ asm volatile("cpuid"
- : "+a" (level), "=d" (cpu.flags[0])
- : : "ecx", "ebx");
-- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
-+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
-
- err = check_flags();
- }
-diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
-index bdb4d45..77703de 100644
---- a/arch/x86/boot/header.S
-+++ b/arch/x86/boot/header.S
-@@ -224,10 +224,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
- # single linked list of
- # struct setup_data
-
--pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
-+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
-
- #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
-+#else
- #define VO_INIT_SIZE (VO__end - VO__text)
-+#endif
- #if ZO_INIT_SIZE > VO_INIT_SIZE
- #define INIT_SIZE ZO_INIT_SIZE
- #else
-diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
-index db75d07..8e6d0af 100644
---- a/arch/x86/boot/memory.c
-+++ b/arch/x86/boot/memory.c
-@@ -19,7 +19,7 @@
-
- static int detect_memory_e820(void)
- {
-- int count = 0;
-+ unsigned int count = 0;
- struct biosregs ireg, oreg;
- struct e820entry *desc = boot_params.e820_map;
- static struct e820entry buf; /* static so it is zeroed */
-diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
-index 11e8c6e..fdbb1ed 100644
---- a/arch/x86/boot/video-vesa.c
-+++ b/arch/x86/boot/video-vesa.c
-@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
-
- boot_params.screen_info.vesapm_seg = oreg.es;
- boot_params.screen_info.vesapm_off = oreg.di;
-+ boot_params.screen_info.vesapm_size = oreg.cx;
- }
-
- /*
-diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
-index 43eda28..5ab5fdb 100644
---- a/arch/x86/boot/video.c
-+++ b/arch/x86/boot/video.c
-@@ -96,7 +96,7 @@ static void store_mode_params(void)
- static unsigned int get_entry(void)
- {
- char entry_buf[4];
-- int i, len = 0;
-+ unsigned int i, len = 0;
- int key;
- unsigned int v;
-
-diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
-index 5b577d5..eb7f25e 100644
---- a/arch/x86/crypto/aes-x86_64-asm_64.S
-+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
-@@ -8,6 +8,8 @@
- * including this sentence is retained in full.
- */
-
-+#include <asm/alternative-asm.h>
-+
- .extern crypto_ft_tab
- .extern crypto_it_tab
- .extern crypto_fl_tab
-@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
- je B192; \
- leaq 32(r9),r9;
-
-+#define ret pax_force_retaddr; ret
-+
- #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
- movq r1,r2; \
- movq r3,r4; \
-diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
-index 3470624..9b476a3 100644
---- a/arch/x86/crypto/aesni-intel_asm.S
-+++ b/arch/x86/crypto/aesni-intel_asm.S
-@@ -31,6 +31,7 @@
-
- #include <linux/linkage.h>
- #include <asm/inst.h>
-+#include <asm/alternative-asm.h>
-
- #ifdef __x86_64__
- .data
-@@ -199,7 +200,7 @@ enc: .octa 0x2
- * num_initial_blocks = b mod 4
- * encrypt the initial num_initial_blocks blocks and apply ghash on
- * the ciphertext
--* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
-+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
- * are clobbered
- * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
- */
-@@ -208,8 +209,8 @@ enc: .octa 0x2
- .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
- XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
- mov arg7, %r10 # %r10 = AAD
-- mov arg8, %r12 # %r12 = aadLen
-- mov %r12, %r11
-+ mov arg8, %r15 # %r15 = aadLen
-+ mov %r15, %r11
- pxor %xmm\i, %xmm\i
- _get_AAD_loop\num_initial_blocks\operation:
- movd (%r10), \TMP1
-@@ -217,15 +218,15 @@ _get_AAD_loop\num_initial_blocks\operation:
- psrldq $4, %xmm\i
- pxor \TMP1, %xmm\i
- add $4, %r10
-- sub $4, %r12
-+ sub $4, %r15
- jne _get_AAD_loop\num_initial_blocks\operation
- cmp $16, %r11
- je _get_AAD_loop2_done\num_initial_blocks\operation
-- mov $16, %r12
-+ mov $16, %r15
- _get_AAD_loop2\num_initial_blocks\operation:
- psrldq $4, %xmm\i
-- sub $4, %r12
-- cmp %r11, %r12
-+ sub $4, %r15
-+ cmp %r11, %r15
- jne _get_AAD_loop2\num_initial_blocks\operation
- _get_AAD_loop2_done\num_initial_blocks\operation:
- movdqa SHUF_MASK(%rip), %xmm14
-@@ -437,7 +438,7 @@ _initial_blocks_done\num_initial_blocks\operation:
- * num_initial_blocks = b mod 4
- * encrypt the initial num_initial_blocks blocks and apply ghash on
- * the ciphertext
--* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
-+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
- * are clobbered
- * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
- */
-@@ -446,8 +447,8 @@ _initial_blocks_done\num_initial_blocks\operation:
- .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
- XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
- mov arg7, %r10 # %r10 = AAD
-- mov arg8, %r12 # %r12 = aadLen
-- mov %r12, %r11
-+ mov arg8, %r15 # %r15 = aadLen
-+ mov %r15, %r11
- pxor %xmm\i, %xmm\i
- _get_AAD_loop\num_initial_blocks\operation:
- movd (%r10), \TMP1
-@@ -455,15 +456,15 @@ _get_AAD_loop\num_initial_blocks\operation:
- psrldq $4, %xmm\i
- pxor \TMP1, %xmm\i
- add $4, %r10
-- sub $4, %r12
-+ sub $4, %r15
- jne _get_AAD_loop\num_initial_blocks\operation
- cmp $16, %r11
- je _get_AAD_loop2_done\num_initial_blocks\operation
-- mov $16, %r12
-+ mov $16, %r15
- _get_AAD_loop2\num_initial_blocks\operation:
- psrldq $4, %xmm\i
-- sub $4, %r12
-- cmp %r11, %r12
-+ sub $4, %r15
-+ cmp %r11, %r15
- jne _get_AAD_loop2\num_initial_blocks\operation
- _get_AAD_loop2_done\num_initial_blocks\operation:
- movdqa SHUF_MASK(%rip), %xmm14
-@@ -1264,7 +1265,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
- *****************************************************************************/
-
- ENTRY(aesni_gcm_dec)
-- push %r12
-+ push %r15
- push %r13
- push %r14
- mov %rsp, %r14
-@@ -1274,8 +1275,8 @@ ENTRY(aesni_gcm_dec)
- */
- sub $VARIABLE_OFFSET, %rsp
- and $~63, %rsp # align rsp to 64 bytes
-- mov %arg6, %r12
-- movdqu (%r12), %xmm13 # %xmm13 = HashKey
-+ mov %arg6, %r15
-+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
- movdqa SHUF_MASK(%rip), %xmm2
- PSHUFB_XMM %xmm2, %xmm13
-
-@@ -1303,10 +1304,10 @@ ENTRY(aesni_gcm_dec)
- movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
- mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
- and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
-- mov %r13, %r12
-- and $(3<<4), %r12
-+ mov %r13, %r15
-+ and $(3<<4), %r15
- jz _initial_num_blocks_is_0_decrypt
-- cmp $(2<<4), %r12
-+ cmp $(2<<4), %r15
- jb _initial_num_blocks_is_1_decrypt
- je _initial_num_blocks_is_2_decrypt
- _initial_num_blocks_is_3_decrypt:
-@@ -1356,16 +1357,16 @@ _zero_cipher_left_decrypt:
- sub $16, %r11
- add %r13, %r11
- movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
-- lea SHIFT_MASK+16(%rip), %r12
-- sub %r13, %r12
-+ lea SHIFT_MASK+16(%rip), %r15
-+ sub %r13, %r15
- # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
- # (%r13 is the number of bytes in plaintext mod 16)
-- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
-+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
- PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
-
- movdqa %xmm1, %xmm2
- pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
-- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
-+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
- # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
- pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
- pand %xmm1, %xmm2
-@@ -1394,9 +1395,9 @@ _less_than_8_bytes_left_decrypt:
- sub $1, %r13
- jne _less_than_8_bytes_left_decrypt
- _multiple_of_16_bytes_decrypt:
-- mov arg8, %r12 # %r13 = aadLen (number of bytes)
-- shl $3, %r12 # convert into number of bits
-- movd %r12d, %xmm15 # len(A) in %xmm15
-+ mov arg8, %r15 # %r13 = aadLen (number of bytes)
-+ shl $3, %r15 # convert into number of bits
-+ movd %r15d, %xmm15 # len(A) in %xmm15
- shl $3, %arg4 # len(C) in bits (*128)
- MOVQ_R64_XMM %arg4, %xmm1
- pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
-@@ -1435,8 +1436,10 @@ _return_T_done_decrypt:
- mov %r14, %rsp
- pop %r14
- pop %r13
-- pop %r12
-+ pop %r15
-+ pax_force_retaddr
- ret
-+ENDPROC(aesni_gcm_dec)
-
-
- /*****************************************************************************
-@@ -1523,7 +1526,7 @@ _return_T_done_decrypt:
- * poly = x^128 + x^127 + x^126 + x^121 + 1
- ***************************************************************************/
- ENTRY(aesni_gcm_enc)
-- push %r12
-+ push %r15
- push %r13
- push %r14
- mov %rsp, %r14
-@@ -1533,8 +1536,8 @@ ENTRY(aesni_gcm_enc)
- #
- sub $VARIABLE_OFFSET, %rsp
- and $~63, %rsp
-- mov %arg6, %r12
-- movdqu (%r12), %xmm13
-+ mov %arg6, %r15
-+ movdqu (%r15), %xmm13
- movdqa SHUF_MASK(%rip), %xmm2
- PSHUFB_XMM %xmm2, %xmm13
-
-@@ -1558,13 +1561,13 @@ ENTRY(aesni_gcm_enc)
- movdqa %xmm13, HashKey(%rsp)
- mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
- and $-16, %r13
-- mov %r13, %r12
-+ mov %r13, %r15
-
- # Encrypt first few blocks
-
-- and $(3<<4), %r12
-+ and $(3<<4), %r15
- jz _initial_num_blocks_is_0_encrypt
-- cmp $(2<<4), %r12
-+ cmp $(2<<4), %r15
- jb _initial_num_blocks_is_1_encrypt
- je _initial_num_blocks_is_2_encrypt
- _initial_num_blocks_is_3_encrypt:
-@@ -1617,14 +1620,14 @@ _zero_cipher_left_encrypt:
- sub $16, %r11
- add %r13, %r11
- movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
-- lea SHIFT_MASK+16(%rip), %r12
-- sub %r13, %r12
-+ lea SHIFT_MASK+16(%rip), %r15
-+ sub %r13, %r15
- # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
- # (%r13 is the number of bytes in plaintext mod 16)
-- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
-+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
- PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
- pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
-- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
-+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
- # get the appropriate mask to mask out top 16-r13 bytes of xmm0
- pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
- movdqa SHUF_MASK(%rip), %xmm10
-@@ -1657,9 +1660,9 @@ _less_than_8_bytes_left_encrypt:
- sub $1, %r13
- jne _less_than_8_bytes_left_encrypt
- _multiple_of_16_bytes_encrypt:
-- mov arg8, %r12 # %r12 = addLen (number of bytes)
-- shl $3, %r12
-- movd %r12d, %xmm15 # len(A) in %xmm15
-+ mov arg8, %r15 # %r15 = addLen (number of bytes)
-+ shl $3, %r15
-+ movd %r15d, %xmm15 # len(A) in %xmm15
- shl $3, %arg4 # len(C) in bits (*128)
- MOVQ_R64_XMM %arg4, %xmm1
- pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
-@@ -1698,8 +1701,10 @@ _return_T_done_encrypt:
- mov %r14, %rsp
- pop %r14
- pop %r13
-- pop %r12
-+ pop %r15
-+ pax_force_retaddr
- ret
-+ENDPROC(aesni_gcm_enc)
-
- #endif
-
-@@ -1714,6 +1719,7 @@ _key_expansion_256a:
- pxor %xmm1, %xmm0
- movaps %xmm0, (TKEYP)
- add $0x10, TKEYP
-+ pax_force_retaddr
- ret
-
- .align 4
-@@ -1738,6 +1744,7 @@ _key_expansion_192a:
- shufps $0b01001110, %xmm2, %xmm1
- movaps %xmm1, 0x10(TKEYP)
- add $0x20, TKEYP
-+ pax_force_retaddr
- ret
-
- .align 4
-@@ -1757,6 +1764,7 @@ _key_expansion_192b:
-
- movaps %xmm0, (TKEYP)
- add $0x10, TKEYP
-+ pax_force_retaddr
- ret
-
- .align 4
-@@ -1769,6 +1777,7 @@ _key_expansion_256b:
- pxor %xmm1, %xmm2
- movaps %xmm2, (TKEYP)
- add $0x10, TKEYP
-+ pax_force_retaddr
- ret
-
- /*
-@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
- #ifndef __x86_64__
- popl KEYP
- #endif
-+ pax_force_retaddr
- ret
-+ENDPROC(aesni_set_key)
-
- /*
- * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
-@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
- popl KLEN
- popl KEYP
- #endif
-+ pax_force_retaddr
- ret
-+ENDPROC(aesni_enc)
-
- /*
- * _aesni_enc1: internal ABI
-@@ -1959,6 +1972,7 @@ _aesni_enc1:
- AESENC KEY STATE
- movaps 0x70(TKEYP), KEY
- AESENCLAST KEY STATE
-+ pax_force_retaddr
- ret
-
- /*
-@@ -2067,6 +2081,7 @@ _aesni_enc4:
- AESENCLAST KEY STATE2
- AESENCLAST KEY STATE3
- AESENCLAST KEY STATE4
-+ pax_force_retaddr
- ret
-
- /*
-@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
- popl KLEN
- popl KEYP
- #endif
-+ pax_force_retaddr
- ret
-+ENDPROC(aesni_dec)
-
- /*
- * _aesni_dec1: internal ABI
-@@ -2146,6 +2163,7 @@ _aesni_dec1:
- AESDEC KEY STATE
- movaps 0x70(TKEYP), KEY
- AESDECLAST KEY STATE
-+ pax_force_retaddr
- ret
-
- /*
-@@ -2254,6 +2272,7 @@ _aesni_dec4:
- AESDECLAST KEY STATE2
- AESDECLAST KEY STATE3
- AESDECLAST KEY STATE4
-+ pax_force_retaddr
- ret
-
- /*
-@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
- popl KEYP
- popl LEN
- #endif
-+ pax_force_retaddr
- ret
-+ENDPROC(aesni_ecb_enc)
-
- /*
- * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
-@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
- popl KEYP
- popl LEN
- #endif
-+ pax_force_retaddr
- ret
-+ENDPROC(aesni_ecb_dec)
-
- /*
- * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
-@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
- popl LEN
- popl IVP
- #endif
-+ pax_force_retaddr
- ret
-+ENDPROC(aesni_cbc_enc)
-
- /*
- * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
-@@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
- popl LEN
- popl IVP
- #endif
-+ pax_force_retaddr
- ret
-+ENDPROC(aesni_cbc_dec)
-
- #ifdef __x86_64__
- .align 16
-@@ -2526,6 +2553,7 @@ _aesni_inc_init:
- mov $1, TCTR_LOW
- MOVQ_R64_XMM TCTR_LOW INC
- MOVQ_R64_XMM CTR TCTR_LOW
-+ pax_force_retaddr
- ret
-
- /*
-@@ -2554,6 +2582,7 @@ _aesni_inc:
- .Linc_low:
- movaps CTR, IV
- PSHUFB_XMM BSWAP_MASK IV
-+ pax_force_retaddr
- ret
-
- /*
-@@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
- .Lctr_enc_ret:
- movups IV, (IVP)
- .Lctr_enc_just_ret:
-+ pax_force_retaddr
- ret
-+ENDPROC(aesni_ctr_enc)
- #endif
-diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
-index 391d245..c73d634 100644
---- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
-+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
-@@ -20,6 +20,8 @@
- *
- */
-
-+#include <asm/alternative-asm.h>
-+
- .file "blowfish-x86_64-asm.S"
- .text
-
-@@ -151,9 +153,11 @@ __blowfish_enc_blk:
- jnz __enc_xor;
-
- write_block();
-+ pax_force_retaddr
- ret;
- __enc_xor:
- xor_block();
-+ pax_force_retaddr
- ret;
-
- .align 8
-@@ -188,6 +192,7 @@ blowfish_dec_blk:
-
- movq %r11, %rbp;
-
-+ pax_force_retaddr
- ret;
-
- /**********************************************************************
-@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
-
- popq %rbx;
- popq %rbp;
-+ pax_force_retaddr
- ret;
-
- __enc_xor4:
-@@ -349,6 +355,7 @@ __enc_xor4:
-
- popq %rbx;
- popq %rbp;
-+ pax_force_retaddr
- ret;
-
- .align 8
-@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
- popq %rbx;
- popq %rbp;
-
-+ pax_force_retaddr
- ret;
-
-diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
-index 6214a9b..5c0f959 100644
---- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
-+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
-@@ -1,3 +1,5 @@
-+#include <asm/alternative-asm.h>
-+
- # enter ECRYPT_encrypt_bytes
- .text
- .p2align 5
-@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
- add %r11,%rsp
- mov %rdi,%rax
- mov %rsi,%rdx
-+ pax_force_retaddr
- ret
- # bytesatleast65:
- ._bytesatleast65:
-@@ -891,6 +894,7 @@ ECRYPT_keysetup:
- add %r11,%rsp
- mov %rdi,%rax
- mov %rsi,%rdx
-+ pax_force_retaddr
- ret
- # enter ECRYPT_ivsetup
- .text
-@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
- add %r11,%rsp
- mov %rdi,%rax
- mov %rsi,%rdx
-+ pax_force_retaddr
- ret
-diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
-index b2c2f57..f30325b 100644
---- a/arch/x86/crypto/sha1_ssse3_asm.S
-+++ b/arch/x86/crypto/sha1_ssse3_asm.S
-@@ -28,6 +28,8 @@
- * (at your option) any later version.
- */
-
-+#include <asm/alternative-asm.h>
-+
- #define CTX %rdi // arg1
- #define BUF %rsi // arg2
- #define CNT %rdx // arg3
-@@ -75,9 +77,9 @@
- \name:
- push %rbx
- push %rbp
-- push %r12
-+ push %r14
-
-- mov %rsp, %r12
-+ mov %rsp, %r14
- sub $64, %rsp # allocate workspace
- and $~15, %rsp # align stack
-
-@@ -99,11 +101,12 @@
- xor %rax, %rax
- rep stosq
-
-- mov %r12, %rsp # deallocate workspace
-+ mov %r14, %rsp # deallocate workspace
-
-- pop %r12
-+ pop %r14
- pop %rbp
- pop %rbx
-+ pax_force_retaddr
- ret
-
- .size \name, .-\name
-diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
-index 5b012a2..9712c31 100644
---- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
-+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
-@@ -20,6 +20,8 @@
- *
- */
-
-+#include <asm/alternative-asm.h>
-+
- .file "twofish-x86_64-asm-3way.S"
- .text
-
-@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
- popq %r13;
- popq %r14;
- popq %r15;
-+ pax_force_retaddr
- ret;
-
- __enc_xor3:
-@@ -271,6 +274,7 @@ __enc_xor3:
- popq %r13;
- popq %r14;
- popq %r15;
-+ pax_force_retaddr
- ret;
-
- .global twofish_dec_blk_3way
-@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
- popq %r13;
- popq %r14;
- popq %r15;
-+ pax_force_retaddr
- ret;
-
-diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
-index 7bcf3fc..560ff4c 100644
---- a/arch/x86/crypto/twofish-x86_64-asm_64.S
-+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
-@@ -21,6 +21,7 @@
- .text
-
- #include <asm/asm-offsets.h>
-+#include <asm/alternative-asm.h>
-
- #define a_offset 0
- #define b_offset 4
-@@ -268,6 +269,7 @@ twofish_enc_blk:
-
- popq R1
- movq $1,%rax
-+ pax_force_retaddr
- ret
-
- twofish_dec_blk:
-@@ -319,4 +321,5 @@ twofish_dec_blk:
-
- popq R1
- movq $1,%rax
-+ pax_force_retaddr
- ret
-diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
-index fd84387..887aa7ef 100644
---- a/arch/x86/ia32/ia32_aout.c
-+++ b/arch/x86/ia32/ia32_aout.c
-@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
- unsigned long dump_start, dump_size;
- struct user32 dump;
-
-+ memset(&dump, 0, sizeof(dump));
-+
- fs = get_fs();
- set_fs(KERNEL_DS);
- has_dumped = 1;
-@@ -315,6 +317,13 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
- current->mm->free_area_cache = TASK_UNMAPPED_BASE;
- current->mm->cached_hole_size = 0;
-
-+ retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
-+ if (retval < 0) {
-+ /* Someone check-me: is this error path enough? */
-+ send_sig(SIGKILL, current, 0);
-+ return retval;
-+ }
-+
- install_exec_creds(bprm);
- current->flags &= ~PF_FORKNOEXEC;
-
-@@ -410,13 +419,6 @@ beyond_if:
-
- set_brk(current->mm->start_brk, current->mm->brk);
-
-- retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
-- if (retval < 0) {
-- /* Someone check-me: is this error path enough? */
-- send_sig(SIGKILL, current, 0);
-- return retval;
-- }
--
- current->mm->start_stack =
- (unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
- /* start thread */
-diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
-index 6557769..c135ca5 100644
---- a/arch/x86/ia32/ia32_signal.c
-+++ b/arch/x86/ia32/ia32_signal.c
-@@ -73,6 +73,10 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
- switch (from->si_code >> 16) {
- case __SI_FAULT >> 16:
- break;
-+ case __SI_SYS >> 16:
-+ put_user_ex(from->si_syscall, &to->si_syscall);
-+ put_user_ex(from->si_arch, &to->si_arch);
-+ break;
- case __SI_CHLD >> 16:
- put_user_ex(from->si_utime, &to->si_utime);
- put_user_ex(from->si_stime, &to->si_stime);
-@@ -169,7 +173,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
- }
- seg = get_fs();
- set_fs(KERNEL_DS);
-- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
-+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
- set_fs(seg);
- if (ret >= 0 && uoss_ptr) {
- if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
-@@ -276,7 +280,7 @@ asmlinkage long sys32_sigreturn(struct pt_regs *regs)
- if (__get_user(set.sig[0], &frame->sc.oldmask)
- || (_COMPAT_NSIG_WORDS > 1
- && __copy_from_user((((char *) &set.sig) + 4),
-- &frame->extramask,
-+ frame->extramask,
- sizeof(frame->extramask))))
- goto badframe;
-
-@@ -370,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
- */
- static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
- size_t frame_size,
-- void **fpstate)
-+ void __user **fpstate)
- {
- unsigned long sp;
-
-@@ -391,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
-
- if (used_math()) {
- sp = sp - sig_xstate_ia32_size;
-- *fpstate = (struct _fpstate_ia32 *) sp;
-+ *fpstate = (struct _fpstate_ia32 __user *) sp;
- if (save_i387_xstate_ia32(*fpstate) < 0)
- return (void __user *) -1L;
- }
-@@ -399,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
- sp -= frame_size;
- /* Align the stack pointer according to the i386 ABI,
- * i.e. so that on function entry ((sp + 4) & 15) == 0. */
-- sp = ((sp + 4) & -16ul) - 4;
-+ sp = ((sp - 12) & -16ul) - 4;
- return (void __user *) sp;
- }
-
-@@ -447,7 +451,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
- sigreturn);
- else
-- restorer = &frame->retcode;
-+ restorer = frame->retcode;
- }
-
- put_user_try {
-@@ -457,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
- * These are actually not used anymore, but left because some
- * gdb versions depend on them as a marker.
- */
-- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
-+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
- } put_user_catch(err);
-
- if (err)
-@@ -499,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- 0xb8,
- __NR_ia32_rt_sigreturn,
- 0x80cd,
-- 0,
-+ 0
- };
-
- frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
-@@ -529,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-
- if (ka->sa.sa_flags & SA_RESTORER)
- restorer = ka->sa.sa_restorer;
-+ else if (current->mm->context.vdso)
-+ /* Return stub is in 32bit vsyscall page */
-+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
- else
-- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
-- rt_sigreturn);
-+ restorer = frame->retcode;
- put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
-
- /*
- * Not actually used anymore, but left because some gdb
- * versions need it.
- */
-- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
-+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
- } put_user_catch(err);
-
- if (err)
-diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
-index 95b4eb3..87e6dc1 100644
---- a/arch/x86/ia32/ia32entry.S
-+++ b/arch/x86/ia32/ia32entry.S
-@@ -13,7 +13,9 @@
- #include <asm/thread_info.h>
- #include <asm/segment.h>
- #include <asm/irqflags.h>
-+#include <asm/pgtable.h>
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
-
- /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
- #include <linux/elf-em.h>
-@@ -61,12 +63,12 @@
- */
- .macro LOAD_ARGS32 offset, _r9=0
- .if \_r9
-- movl \offset+16(%rsp),%r9d
-+ movl \offset+R9(%rsp),%r9d
- .endif
-- movl \offset+40(%rsp),%ecx
-- movl \offset+48(%rsp),%edx
-- movl \offset+56(%rsp),%esi
-- movl \offset+64(%rsp),%edi
-+ movl \offset+RCX(%rsp),%ecx
-+ movl \offset+RDX(%rsp),%edx
-+ movl \offset+RSI(%rsp),%esi
-+ movl \offset+RDI(%rsp),%edi
- movl %eax,%eax /* zero extension */
- .endm
-
-@@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
- ENDPROC(native_irq_enable_sysexit)
- #endif
-
-+ .macro pax_enter_kernel_user
-+ pax_set_fptr_mask
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ call pax_enter_kernel_user
-+#endif
-+ .endm
-+
-+ .macro pax_exit_kernel_user
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ call pax_exit_kernel_user
-+#endif
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ pushq %rax
-+ pushq %r11
-+ call pax_randomize_kstack
-+ popq %r11
-+ popq %rax
-+#endif
-+ .endm
-+
-+.macro pax_erase_kstack
-+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+ call pax_erase_kstack
-+#endif
-+.endm
-+
- /*
- * 32bit SYSENTER instruction entry.
- *
-@@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
- CFI_REGISTER rsp,rbp
- SWAPGS_UNSAFE_STACK
- movq PER_CPU_VAR(kernel_stack), %rsp
-- addq $(KERNEL_STACK_OFFSET),%rsp
-- /*
-- * No need to follow this irqs on/off section: the syscall
-- * disabled irqs, here we enable it straight after entry:
-- */
-- ENABLE_INTERRUPTS(CLBR_NONE)
- movl %ebp,%ebp /* zero extension */
- pushq_cfi $__USER32_DS
- /*CFI_REL_OFFSET ss,0*/
-@@ -134,25 +156,44 @@ ENTRY(ia32_sysenter_target)
- CFI_REL_OFFSET rsp,0
- pushfq_cfi
- /*CFI_REL_OFFSET rflags,0*/
-- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
-- CFI_REGISTER rip,r10
-+ orl $X86_EFLAGS_IF,(%rsp)
-+ GET_THREAD_INFO(%r11)
-+ movl TI_sysenter_return(%r11), %r11d
-+ CFI_REGISTER rip,r11
- pushq_cfi $__USER32_CS
- /*CFI_REL_OFFSET cs,0*/
- movl %eax, %eax
-- pushq_cfi %r10
-+ pushq_cfi %r11
- CFI_REL_OFFSET rip,0
- pushq_cfi %rax
- cld
- SAVE_ARGS 0,1,0
-+ pax_enter_kernel_user
-+
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ pax_erase_kstack
-+#endif
-+
-+ /*
-+ * No need to follow this irqs on/off section: the syscall
-+ * disabled irqs, here we enable it straight after entry:
-+ */
-+ ENABLE_INTERRUPTS(CLBR_NONE)
- /* no need to do an access_ok check here because rbp has been
- 32bit zero extended */
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ mov pax_user_shadow_base,%r11
-+ add %r11,%rbp
-+#endif
-+
- 1: movl (%rbp),%ebp
- .section __ex_table,"a"
- .quad 1b,ia32_badarg
- .previous
-- GET_THREAD_INFO(%r10)
-- orl $TS_COMPAT,TI_status(%r10)
-- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
-+ GET_THREAD_INFO(%r11)
-+ orl $TS_COMPAT,TI_status(%r11)
-+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
- CFI_REMEMBER_STATE
- jnz sysenter_tracesys
- cmpq $(IA32_NR_syscalls-1),%rax
-@@ -162,16 +203,18 @@ sysenter_do_call:
- sysenter_dispatch:
- call *ia32_sys_call_table(,%rax,8)
- movq %rax,RAX-ARGOFFSET(%rsp)
-- GET_THREAD_INFO(%r10)
-+ GET_THREAD_INFO(%r11)
- DISABLE_INTERRUPTS(CLBR_NONE)
- TRACE_IRQS_OFF
-- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
-+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
- jnz sysexit_audit
- sysexit_from_sys_call:
-- andl $~TS_COMPAT,TI_status(%r10)
-+ pax_exit_kernel_user
-+ pax_erase_kstack
-+ andl $~TS_COMPAT,TI_status(%r11)
- /* clear IF, that popfq doesn't enable interrupts early */
-- andl $~0x200,EFLAGS-R11(%rsp)
-- movl RIP-R11(%rsp),%edx /* User %eip */
-+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
-+ movl RIP(%rsp),%edx /* User %eip */
- CFI_REGISTER rip,rdx
- RESTORE_ARGS 0,24,0,0,0,0
- xorq %r8,%r8
-@@ -194,6 +237,9 @@ sysexit_from_sys_call:
- movl %eax,%esi /* 2nd arg: syscall number */
- movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
- call audit_syscall_entry
-+
-+ pax_erase_kstack
-+
- movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
- cmpq $(IA32_NR_syscalls-1),%rax
- ja ia32_badsys
-@@ -205,7 +251,7 @@ sysexit_from_sys_call:
- .endm
-
- .macro auditsys_exit exit
-- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
-+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
- jnz ia32_ret_from_sys_call
- TRACE_IRQS_ON
- ENABLE_INTERRUPTS(CLBR_NONE)
-@@ -215,12 +261,12 @@ sysexit_from_sys_call:
- movzbl %al,%edi /* zero-extend that into %edi */
- inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
- call audit_syscall_exit
-- GET_THREAD_INFO(%r10)
-+ GET_THREAD_INFO(%r11)
- movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
- movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
- DISABLE_INTERRUPTS(CLBR_NONE)
- TRACE_IRQS_OFF
-- testl %edi,TI_flags(%r10)
-+ testl %edi,TI_flags(%r11)
- jz \exit
- CLEAR_RREGS -ARGOFFSET
- jmp int_with_check
-@@ -238,7 +284,7 @@ sysexit_audit:
-
- sysenter_tracesys:
- #ifdef CONFIG_AUDITSYSCALL
-- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
-+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
- jz sysenter_auditsys
- #endif
- SAVE_REST
-@@ -250,6 +296,9 @@ sysenter_tracesys:
- RESTORE_REST
- cmpq $(IA32_NR_syscalls-1),%rax
- ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
-+
-+ pax_erase_kstack
-+
- jmp sysenter_do_call
- CFI_ENDPROC
- ENDPROC(ia32_sysenter_target)
-@@ -277,19 +326,25 @@ ENDPROC(ia32_sysenter_target)
- ENTRY(ia32_cstar_target)
- CFI_STARTPROC32 simple
- CFI_SIGNAL_FRAME
-- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
-+ CFI_DEF_CFA rsp,0
- CFI_REGISTER rip,rcx
- /*CFI_REGISTER rflags,r11*/
- SWAPGS_UNSAFE_STACK
- movl %esp,%r8d
- CFI_REGISTER rsp,r8
- movq PER_CPU_VAR(kernel_stack),%rsp
-+ SAVE_ARGS 8*6,0,0
-+ pax_enter_kernel_user
-+
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ pax_erase_kstack
-+#endif
-+
- /*
- * No need to follow this irqs on/off section: the syscall
- * disabled irqs and here we enable it straight after entry:
- */
- ENABLE_INTERRUPTS(CLBR_NONE)
-- SAVE_ARGS 8,0,0
- movl %eax,%eax /* zero extension */
- movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
- movq %rcx,RIP-ARGOFFSET(%rsp)
-@@ -305,13 +360,19 @@ ENTRY(ia32_cstar_target)
- /* no need to do an access_ok check here because r8 has been
- 32bit zero extended */
- /* hardware stack frame is complete now */
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ mov pax_user_shadow_base,%r11
-+ add %r11,%r8
-+#endif
-+
- 1: movl (%r8),%r9d
- .section __ex_table,"a"
- .quad 1b,ia32_badarg
- .previous
-- GET_THREAD_INFO(%r10)
-- orl $TS_COMPAT,TI_status(%r10)
-- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
-+ GET_THREAD_INFO(%r11)
-+ orl $TS_COMPAT,TI_status(%r11)
-+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
- CFI_REMEMBER_STATE
- jnz cstar_tracesys
- cmpq $IA32_NR_syscalls-1,%rax
-@@ -321,14 +382,16 @@ cstar_do_call:
- cstar_dispatch:
- call *ia32_sys_call_table(,%rax,8)
- movq %rax,RAX-ARGOFFSET(%rsp)
-- GET_THREAD_INFO(%r10)
-+ GET_THREAD_INFO(%r11)
- DISABLE_INTERRUPTS(CLBR_NONE)
- TRACE_IRQS_OFF
-- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
-+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
- jnz sysretl_audit
- sysretl_from_sys_call:
-- andl $~TS_COMPAT,TI_status(%r10)
-- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
-+ pax_exit_kernel_user
-+ pax_erase_kstack
-+ andl $~TS_COMPAT,TI_status(%r11)
-+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
- movl RIP-ARGOFFSET(%rsp),%ecx
- CFI_REGISTER rip,rcx
- movl EFLAGS-ARGOFFSET(%rsp),%r11d
-@@ -355,7 +418,7 @@ sysretl_audit:
-
- cstar_tracesys:
- #ifdef CONFIG_AUDITSYSCALL
-- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
-+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
- jz cstar_auditsys
- #endif
- xchgl %r9d,%ebp
-@@ -369,6 +432,9 @@ cstar_tracesys:
- xchgl %ebp,%r9d
- cmpq $(IA32_NR_syscalls-1),%rax
- ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
-+
-+ pax_erase_kstack
-+
- jmp cstar_do_call
- END(ia32_cstar_target)
-
-@@ -409,20 +475,26 @@ ENTRY(ia32_syscall)
- CFI_REL_OFFSET rip,RIP-RIP
- PARAVIRT_ADJUST_EXCEPTION_FRAME
- SWAPGS
-- /*
-- * No need to follow this irqs on/off section: the syscall
-- * disabled irqs and here we enable it straight after entry:
-- */
-- ENABLE_INTERRUPTS(CLBR_NONE)
- movl %eax,%eax
- pushq_cfi %rax
- cld
- /* note the registers are not zero extended to the sf.
- this could be a problem. */
- SAVE_ARGS 0,1,0
-- GET_THREAD_INFO(%r10)
-- orl $TS_COMPAT,TI_status(%r10)
-- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
-+ pax_enter_kernel_user
-+
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ pax_erase_kstack
-+#endif
-+
-+ /*
-+ * No need to follow this irqs on/off section: the syscall
-+ * disabled irqs and here we enable it straight after entry:
-+ */
-+ ENABLE_INTERRUPTS(CLBR_NONE)
-+ GET_THREAD_INFO(%r11)
-+ orl $TS_COMPAT,TI_status(%r11)
-+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
- jnz ia32_tracesys
- cmpq $(IA32_NR_syscalls-1),%rax
- ja ia32_badsys
-@@ -445,6 +517,9 @@ ia32_tracesys:
- RESTORE_REST
- cmpq $(IA32_NR_syscalls-1),%rax
- ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
-+
-+ pax_erase_kstack
-+
- jmp ia32_do_call
- END(ia32_syscall)
-
-@@ -455,6 +530,7 @@ ia32_badsys:
-
- quiet_ni_syscall:
- movq $-ENOSYS,%rax
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
-
-diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
-index f6f5c53..8e51d70 100644
---- a/arch/x86/ia32/sys_ia32.c
-+++ b/arch/x86/ia32/sys_ia32.c
-@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
- */
- static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
- {
-- typeof(ubuf->st_uid) uid = 0;
-- typeof(ubuf->st_gid) gid = 0;
-+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
-+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
- SET_UID(uid, stat->uid);
- SET_GID(gid, stat->gid);
- if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
-@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
- }
- set_fs(KERNEL_DS);
- ret = sys_rt_sigprocmask(how,
-- set ? (sigset_t __user *)&s : NULL,
-- oset ? (sigset_t __user *)&s : NULL,
-+ set ? (sigset_t __force_user *)&s : NULL,
-+ oset ? (sigset_t __force_user *)&s : NULL,
- sigsetsize);
- set_fs(old_fs);
- if (ret)
-@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
- return alarm_setitimer(seconds);
- }
-
--asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
-+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
- int options)
- {
- return compat_sys_wait4(pid, stat_addr, options, NULL);
-@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
-- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
-+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
- set_fs(old_fs);
- if (put_compat_timespec(&t, interval))
- return -EFAULT;
-@@ -363,13 +363,13 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
- asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
- compat_size_t sigsetsize)
- {
-- sigset_t s;
-+ sigset_t s = { };
- compat_sigset_t s32;
- int ret;
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
-- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
-+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
- set_fs(old_fs);
- if (!ret) {
- switch (_NSIG_WORDS) {
-@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
- if (copy_siginfo_from_user32(&info, uinfo))
- return -EFAULT;
- set_fs(KERNEL_DS);
-- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
-+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
- set_fs(old_fs);
- return ret;
- }
-@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
- return -EFAULT;
-
- set_fs(KERNEL_DS);
-- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
-+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
- count);
- set_fs(old_fs);
-
-diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
-index 091508b..2cc2c2d 100644
---- a/arch/x86/include/asm/alternative-asm.h
-+++ b/arch/x86/include/asm/alternative-asm.h
-@@ -4,10 +4,10 @@
-
- #ifdef CONFIG_SMP
- .macro LOCK_PREFIX
--1: lock
-+672: lock
- .section .smp_locks,"a"
- .balign 4
-- .long 1b - .
-+ .long 672b - .
- .previous
- .endm
- #else
-@@ -15,6 +15,45 @@
- .endm
- #endif
-
-+#ifdef KERNEXEC_PLUGIN
-+ .macro pax_force_retaddr_bts rip=0
-+ btsq $63,\rip(%rsp)
-+ .endm
-+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
-+ .macro pax_force_retaddr rip=0, reload=0
-+ btsq $63,\rip(%rsp)
-+ .endm
-+ .macro pax_force_fptr ptr
-+ btsq $63,\ptr
-+ .endm
-+ .macro pax_set_fptr_mask
-+ .endm
-+#endif
-+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ .macro pax_force_retaddr rip=0, reload=0
-+ .if \reload
-+ pax_set_fptr_mask
-+ .endif
-+ orq %r12,\rip(%rsp)
-+ .endm
-+ .macro pax_force_fptr ptr
-+ orq %r12,\ptr
-+ .endm
-+ .macro pax_set_fptr_mask
-+ movabs $0x8000000000000000,%r12
-+ .endm
-+#endif
-+#else
-+ .macro pax_force_retaddr rip=0, reload=0
-+ .endm
-+ .macro pax_force_fptr ptr
-+ .endm
-+ .macro pax_force_retaddr_bts rip=0
-+ .endm
-+ .macro pax_set_fptr_mask
-+ .endm
-+#endif
-+
- .macro altinstruction_entry orig alt feature orig_len alt_len
- .long \orig - .
- .long \alt - .
-diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
-index 37ad100..7d47faa 100644
---- a/arch/x86/include/asm/alternative.h
-+++ b/arch/x86/include/asm/alternative.h
-@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
- ".section .discard,\"aw\",@progbits\n" \
- " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
- ".previous\n" \
-- ".section .altinstr_replacement, \"ax\"\n" \
-+ ".section .altinstr_replacement, \"a\"\n" \
- "663:\n\t" newinstr "\n664:\n" /* replacement */ \
- ".previous"
-
-diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
-index 1a6c09a..fec2432 100644
---- a/arch/x86/include/asm/apic.h
-+++ b/arch/x86/include/asm/apic.h
-@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
-
- #ifdef CONFIG_X86_LOCAL_APIC
-
--extern unsigned int apic_verbosity;
-+extern int apic_verbosity;
- extern int local_apic_timer_c2_ok;
-
- extern int disable_apic;
-diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
-index 20370c6..a2eb9b0 100644
---- a/arch/x86/include/asm/apm.h
-+++ b/arch/x86/include/asm/apm.h
-@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
- __asm__ __volatile__(APM_DO_ZERO_SEGS
- "pushl %%edi\n\t"
- "pushl %%ebp\n\t"
-- "lcall *%%cs:apm_bios_entry\n\t"
-+ "lcall *%%ss:apm_bios_entry\n\t"
- "setc %%al\n\t"
- "popl %%ebp\n\t"
- "popl %%edi\n\t"
-@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
- __asm__ __volatile__(APM_DO_ZERO_SEGS
- "pushl %%edi\n\t"
- "pushl %%ebp\n\t"
-- "lcall *%%cs:apm_bios_entry\n\t"
-+ "lcall *%%ss:apm_bios_entry\n\t"
- "setc %%bl\n\t"
- "popl %%ebp\n\t"
- "popl %%edi\n\t"
-diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
-index 58cb6d4..a8df22ae 100644
---- a/arch/x86/include/asm/atomic.h
-+++ b/arch/x86/include/asm/atomic.h
-@@ -22,7 +22,18 @@
- */
- static inline int atomic_read(const atomic_t *v)
- {
-- return (*(volatile int *)&(v)->counter);
-+ return (*(volatile const int *)&(v)->counter);
-+}
-+
-+/**
-+ * atomic_read_unchecked - read atomic variable
-+ * @v: pointer of type atomic_unchecked_t
-+ *
-+ * Atomically reads the value of @v.
-+ */
-+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
-+{
-+ return (*(volatile const int *)&(v)->counter);
- }
-
- /**
-@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
- }
-
- /**
-+ * atomic_set_unchecked - set atomic variable
-+ * @v: pointer of type atomic_unchecked_t
-+ * @i: required value
-+ *
-+ * Atomically sets the value of @v to @i.
-+ */
-+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
-+{
-+ v->counter = i;
-+}
-+
-+/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
-@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
- */
- static inline void atomic_add(int i, atomic_t *v)
- {
-- asm volatile(LOCK_PREFIX "addl %1,%0"
-+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "subl %1,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "+m" (v->counter)
-+ : "ir" (i));
-+}
-+
-+/**
-+ * atomic_add_unchecked - add integer to atomic variable
-+ * @i: integer value to add
-+ * @v: pointer of type atomic_unchecked_t
-+ *
-+ * Atomically adds @i to @v.
-+ */
-+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
- : "+m" (v->counter)
- : "ir" (i));
- }
-@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
- */
- static inline void atomic_sub(int i, atomic_t *v)
- {
-- asm volatile(LOCK_PREFIX "subl %1,%0"
-+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "addl %1,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "+m" (v->counter)
-+ : "ir" (i));
-+}
-+
-+/**
-+ * atomic_sub_unchecked - subtract integer from atomic variable
-+ * @i: integer value to subtract
-+ * @v: pointer of type atomic_unchecked_t
-+ *
-+ * Atomically subtracts @i from @v.
-+ */
-+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
- : "+m" (v->counter)
- : "ir" (i));
- }
-@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
- {
- unsigned char c;
-
-- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
-+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "addl %2,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ "sete %1\n"
- : "+m" (v->counter), "=qm" (c)
- : "ir" (i) : "memory");
- return c;
-@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
- */
- static inline void atomic_inc(atomic_t *v)
- {
-- asm volatile(LOCK_PREFIX "incl %0"
-+ asm volatile(LOCK_PREFIX "incl %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "decl %0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "+m" (v->counter));
-+}
-+
-+/**
-+ * atomic_inc_unchecked - increment atomic variable
-+ * @v: pointer of type atomic_unchecked_t
-+ *
-+ * Atomically increments @v by 1.
-+ */
-+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "incl %0\n"
- : "+m" (v->counter));
- }
-
-@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
- */
- static inline void atomic_dec(atomic_t *v)
- {
-- asm volatile(LOCK_PREFIX "decl %0"
-+ asm volatile(LOCK_PREFIX "decl %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "incl %0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "+m" (v->counter));
-+}
-+
-+/**
-+ * atomic_dec_unchecked - decrement atomic variable
-+ * @v: pointer of type atomic_unchecked_t
-+ *
-+ * Atomically decrements @v by 1.
-+ */
-+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "decl %0\n"
- : "+m" (v->counter));
- }
-
-@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
- {
- unsigned char c;
-
-- asm volatile(LOCK_PREFIX "decl %0; sete %1"
-+ asm volatile(LOCK_PREFIX "decl %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "incl %0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ "sete %1\n"
- : "+m" (v->counter), "=qm" (c)
- : : "memory");
- return c != 0;
-@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
- {
- unsigned char c;
-
-- asm volatile(LOCK_PREFIX "incl %0; sete %1"
-+ asm volatile(LOCK_PREFIX "incl %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "decl %0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ "sete %1\n"
-+ : "+m" (v->counter), "=qm" (c)
-+ : : "memory");
-+ return c != 0;
-+}
-+
-+/**
-+ * atomic_inc_and_test_unchecked - increment and test
-+ * @v: pointer of type atomic_unchecked_t
-+ *
-+ * Atomically increments @v by 1
-+ * and returns true if the result is zero, or false for all
-+ * other cases.
-+ */
-+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
-+{
-+ unsigned char c;
-+
-+ asm volatile(LOCK_PREFIX "incl %0\n"
-+ "sete %1\n"
- : "+m" (v->counter), "=qm" (c)
- : : "memory");
- return c != 0;
-@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
- {
- unsigned char c;
-
-- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
-+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "subl %2,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ "sets %1\n"
- : "+m" (v->counter), "=qm" (c)
- : "ir" (i) : "memory");
- return c;
-@@ -170,7 +332,7 @@ static inline int atomic_add_negative(int i, atomic_t *v)
- *
- * Atomically adds @i to @v and returns @i + @v
- */
--static inline int atomic_add_return(int i, atomic_t *v)
-+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
- {
- #ifdef CONFIG_M386
- int __i;
-@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
- goto no_xadd;
- #endif
- /* Modern 486+ processor */
-- return i + xadd(&v->counter, i);
-+ return i + xadd_check_overflow(&v->counter, i);
-
- #ifdef CONFIG_M386
- no_xadd: /* Legacy 386 processor */
-@@ -192,21 +354,58 @@ no_xadd: /* Legacy 386 processor */
- }
-
- /**
-+ * atomic_add_return_unchecked - add integer and return
-+ * @i: integer value to add
-+ * @v: pointer of type atomic_unchecked_t
-+ *
-+ * Atomically adds @i to @v and returns @i + @v
-+ */
-+static inline int __intentional_overflow(-1) atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
-+{
-+#ifdef CONFIG_M386
-+ int __i;
-+ unsigned long flags;
-+ if (unlikely(boot_cpu_data.x86 <= 3))
-+ goto no_xadd;
-+#endif
-+ /* Modern 486+ processor */
-+ return i + xadd(&v->counter, i);
-+
-+#ifdef CONFIG_M386
-+no_xadd: /* Legacy 386 processor */
-+ raw_local_irq_save(flags);
-+ __i = atomic_read_unchecked(v);
-+ atomic_set_unchecked(v, i + __i);
-+ raw_local_irq_restore(flags);
-+ return i + __i;
-+#endif
-+}
-+
-+/**
- * atomic_sub_return - subtract integer and return
- * @v: pointer of type atomic_t
- * @i: integer value to subtract
- *
- * Atomically subtracts @i from @v and returns @v - @i
- */
--static inline int atomic_sub_return(int i, atomic_t *v)
-+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
- {
- return atomic_add_return(-i, v);
- }
-
- #define atomic_inc_return(v) (atomic_add_return(1, v))
-+static inline int __intentional_overflow(-1) atomic_inc_return_unchecked(atomic_unchecked_t *v)
-+{
-+ return atomic_add_return_unchecked(1, v);
-+}
- #define atomic_dec_return(v) (atomic_sub_return(1, v))
-
--static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
-+{
-+ return cmpxchg(&v->counter, old, new);
-+}
-+
-+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
- {
- return cmpxchg(&v->counter, old, new);
- }
-@@ -216,6 +415,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
- return xchg(&v->counter, new);
- }
-
-+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
-+{
-+ return xchg(&v->counter, new);
-+}
-+
- /**
- * __atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
-@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
- */
- static inline int __atomic_add_unless(atomic_t *v, int a, int u)
- {
-- int c, old;
-+ int c, old, new;
- c = atomic_read(v);
- for (;;) {
-- if (unlikely(c == (u)))
-+ if (unlikely(c == u))
- break;
-- old = atomic_cmpxchg((v), c, c + (a));
-+
-+ asm volatile("addl %2,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ "subl %2,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "=r" (new)
-+ : "0" (c), "ir" (a));
-+
-+ old = atomic_cmpxchg(v, c, new);
- if (likely(old == c))
- break;
- c = old;
-@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
- return c;
- }
-
-+/**
-+ * atomic_inc_not_zero_hint - increment if not null
-+ * @v: pointer of type atomic_t
-+ * @hint: probable value of the atomic before the increment
-+ *
-+ * This version of atomic_inc_not_zero() gives a hint of probable
-+ * value of the atomic. This helps processor to not read the memory
-+ * before doing the atomic read/modify/write cycle, lowering
-+ * number of bus transactions on some arches.
-+ *
-+ * Returns: 0 if increment was not done, 1 otherwise.
-+ */
-+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
-+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
-+{
-+ int val, c = hint, new;
-+
-+ /* sanity test, should be removed by compiler if hint is a constant */
-+ if (!hint)
-+ return __atomic_add_unless(v, 1, 0);
-+
-+ do {
-+ asm volatile("incl %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ "decl %0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "=r" (new)
-+ : "0" (c));
-+
-+ val = atomic_cmpxchg(v, c, new);
-+ if (val == c)
-+ return 1;
-+ c = val;
-+ } while (c);
-+
-+ return 0;
-+}
-
- /*
- * atomic_dec_if_positive - decrement by 1 if old value positive
-@@ -293,14 +552,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
- #endif
-
- /* These are x86-specific, used by some header files */
--#define atomic_clear_mask(mask, addr) \
-- asm volatile(LOCK_PREFIX "andl %0,%1" \
-- : : "r" (~(mask)), "m" (*(addr)) : "memory")
-+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "andl %1,%0"
-+ : "+m" (v->counter)
-+ : "r" (~(mask))
-+ : "memory");
-+}
-
--#define atomic_set_mask(mask, addr) \
-- asm volatile(LOCK_PREFIX "orl %0,%1" \
-- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
-- : "memory")
-+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "andl %1,%0"
-+ : "+m" (v->counter)
-+ : "r" (~(mask))
-+ : "memory");
-+}
-+
-+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "orl %1,%0"
-+ : "+m" (v->counter)
-+ : "r" (mask)
-+ : "memory");
-+}
-+
-+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "orl %1,%0"
-+ : "+m" (v->counter)
-+ : "r" (mask)
-+ : "memory");
-+}
-
- /* Atomic operations are already serializing on x86 */
- #define smp_mb__before_atomic_dec() barrier()
-diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
-index 24098aa..aabcac7 100644
---- a/arch/x86/include/asm/atomic64_32.h
-+++ b/arch/x86/include/asm/atomic64_32.h
-@@ -12,6 +12,14 @@ typedef struct {
- u64 __aligned(8) counter;
- } atomic64_t;
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+typedef struct {
-+ u64 __aligned(8) counter;
-+} atomic64_unchecked_t;
-+#else
-+typedef atomic64_t atomic64_unchecked_t;
-+#endif
-+
- #define ATOMIC64_INIT(val) { (val) }
-
- #ifdef CONFIG_X86_CMPXCHG64
-@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
- }
-
- /**
-+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
-+ * @p: pointer to type atomic64_unchecked_t
-+ * @o: expected value
-+ * @n: new value
-+ *
-+ * Atomically sets @v to @n if it was equal to @o and returns
-+ * the old value.
-+ */
-+
-+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
-+{
-+ return cmpxchg64(&v->counter, o, n);
-+}
-+
-+/**
- * atomic64_xchg - xchg atomic64 variable
- * @v: pointer to type atomic64_t
- * @n: value to assign
-@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
- }
-
- /**
-+ * atomic64_set_unchecked - set atomic64 variable
-+ * @v: pointer to type atomic64_unchecked_t
-+ * @n: value to assign
-+ *
-+ * Atomically sets the value of @v to @n.
-+ */
-+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
-+{
-+ unsigned high = (unsigned)(i >> 32);
-+ unsigned low = (unsigned)i;
-+ asm volatile(ATOMIC64_ALTERNATIVE(set)
-+ : "+b" (low), "+c" (high)
-+ : "S" (v)
-+ : "eax", "edx", "memory"
-+ );
-+}
-+
-+/**
- * atomic64_read - read atomic64 variable
- * @v: pointer to type atomic64_t
- *
-@@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
- }
-
- /**
-+ * atomic64_read_unchecked - read atomic64 variable
-+ * @v: pointer to type atomic64_unchecked_t
-+ *
-+ * Atomically reads the value of @v and returns it.
-+ */
-+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
-+{
-+ long long r;
-+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
-+ : "=A" (r), "+c" (v)
-+ : : "memory"
-+ );
-+ return r;
-+ }
-+
-+/**
- * atomic64_add_return - add and return
- * @i: integer value to add
- * @v: pointer to type atomic64_t
-@@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
- return i;
- }
-
-+/**
-+ * atomic64_add_return_unchecked - add and return
-+ * @i: integer value to add
-+ * @v: pointer to type atomic64_unchecked_t
-+ *
-+ * Atomically adds @i to @v and returns @i + *@v
-+ */
-+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
-+{
-+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
-+ : "+A" (i), "+c" (v)
-+ : : "memory"
-+ );
-+ return i;
-+}
-+
- /*
- * Other variants with different arithmetic operators:
- */
-@@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
- return a;
- }
-
-+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
-+{
-+ long long a;
-+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
-+ : "=A" (a)
-+ : "S" (v)
-+ : "memory", "ecx"
-+ );
-+ return a;
-+}
-+
- static inline long long atomic64_dec_return(atomic64_t *v)
- {
- long long a;
-@@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
- }
-
- /**
-+ * atomic64_add_unchecked - add integer to atomic64 variable
-+ * @i: integer value to add
-+ * @v: pointer to type atomic64_unchecked_t
-+ *
-+ * Atomically adds @i to @v.
-+ */
-+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
-+{
-+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
-+ : "+A" (i), "+c" (v)
-+ : : "memory"
-+ );
-+ return i;
-+}
-+
-+/**
- * atomic64_sub - subtract the atomic64 variable
- * @i: integer value to subtract
- * @v: pointer to type atomic64_t
-diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
-index 0e1cbfc..a891fc7 100644
---- a/arch/x86/include/asm/atomic64_64.h
-+++ b/arch/x86/include/asm/atomic64_64.h
-@@ -18,7 +18,19 @@
- */
- static inline long atomic64_read(const atomic64_t *v)
- {
-- return (*(volatile long *)&(v)->counter);
-+ return (*(volatile const long *)&(v)->counter);
-+}
-+
-+/**
-+ * atomic64_read_unchecked - read atomic64 variable
-+ * @v: pointer of type atomic64_unchecked_t
-+ *
-+ * Atomically reads the value of @v.
-+ * Doesn't imply a read memory barrier.
-+ */
-+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
-+{
-+ return (*(volatile const long *)&(v)->counter);
- }
-
- /**
-@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
- }
-
- /**
-+ * atomic64_set_unchecked - set atomic64 variable
-+ * @v: pointer to type atomic64_unchecked_t
-+ * @i: required value
-+ *
-+ * Atomically sets the value of @v to @i.
-+ */
-+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
-+{
-+ v->counter = i;
-+}
-+
-+/**
- * atomic64_add - add integer to atomic64 variable
- * @i: integer value to add
- * @v: pointer to type atomic64_t
-@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
- */
- static inline void atomic64_add(long i, atomic64_t *v)
- {
-+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "subq %1,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "=m" (v->counter)
-+ : "er" (i), "m" (v->counter));
-+}
-+
-+/**
-+ * atomic64_add_unchecked - add integer to atomic64 variable
-+ * @i: integer value to add
-+ * @v: pointer to type atomic64_unchecked_t
-+ *
-+ * Atomically adds @i to @v.
-+ */
-+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
-+{
- asm volatile(LOCK_PREFIX "addq %1,%0"
- : "=m" (v->counter)
- : "er" (i), "m" (v->counter));
-@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
- */
- static inline void atomic64_sub(long i, atomic64_t *v)
- {
-- asm volatile(LOCK_PREFIX "subq %1,%0"
-+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "addq %1,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "=m" (v->counter)
-+ : "er" (i), "m" (v->counter));
-+}
-+
-+/**
-+ * atomic64_sub_unchecked - subtract the atomic64 variable
-+ * @i: integer value to subtract
-+ * @v: pointer to type atomic64_unchecked_t
-+ *
-+ * Atomically subtracts @i from @v.
-+ */
-+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
- : "=m" (v->counter)
- : "er" (i), "m" (v->counter));
- }
-@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
- {
- unsigned char c;
-
-- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
-+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "addq %2,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ "sete %1\n"
- : "=m" (v->counter), "=qm" (c)
- : "er" (i), "m" (v->counter) : "memory");
- return c;
-@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
- */
- static inline void atomic64_inc(atomic64_t *v)
- {
-+ asm volatile(LOCK_PREFIX "incq %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "decq %0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "=m" (v->counter)
-+ : "m" (v->counter));
-+}
-+
-+/**
-+ * atomic64_inc_unchecked - increment atomic64 variable
-+ * @v: pointer to type atomic64_unchecked_t
-+ *
-+ * Atomically increments @v by 1.
-+ */
-+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
-+{
- asm volatile(LOCK_PREFIX "incq %0"
- : "=m" (v->counter)
- : "m" (v->counter));
-@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
- */
- static inline void atomic64_dec(atomic64_t *v)
- {
-- asm volatile(LOCK_PREFIX "decq %0"
-+ asm volatile(LOCK_PREFIX "decq %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "incq %0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "=m" (v->counter)
-+ : "m" (v->counter));
-+}
-+
-+/**
-+ * atomic64_dec_unchecked - decrement atomic64 variable
-+ * @v: pointer to type atomic64_unchecked_t
-+ *
-+ * Atomically decrements @v by 1.
-+ */
-+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "decq %0\n"
- : "=m" (v->counter)
- : "m" (v->counter));
- }
-@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
- {
- unsigned char c;
-
-- asm volatile(LOCK_PREFIX "decq %0; sete %1"
-+ asm volatile(LOCK_PREFIX "decq %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "incq %0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ "sete %1\n"
- : "=m" (v->counter), "=qm" (c)
- : "m" (v->counter) : "memory");
- return c != 0;
-@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
- {
- unsigned char c;
-
-- asm volatile(LOCK_PREFIX "incq %0; sete %1"
-+ asm volatile(LOCK_PREFIX "incq %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "decq %0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ "sete %1\n"
- : "=m" (v->counter), "=qm" (c)
- : "m" (v->counter) : "memory");
- return c != 0;
-@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
- {
- unsigned char c;
-
-- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
-+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "subq %2,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ "sets %1\n"
- : "=m" (v->counter), "=qm" (c)
- : "er" (i), "m" (v->counter) : "memory");
- return c;
-@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
- */
- static inline long atomic64_add_return(long i, atomic64_t *v)
- {
-+ return i + xadd_check_overflow(&v->counter, i);
-+}
-+
-+/**
-+ * atomic64_add_return_unchecked - add and return
-+ * @i: integer value to add
-+ * @v: pointer to type atomic64_unchecked_t
-+ *
-+ * Atomically adds @i to @v and returns @i + @v
-+ */
-+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
-+{
- return i + xadd(&v->counter, i);
- }
-
-@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
- }
-
- #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
-+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
-+{
-+ return atomic64_add_return_unchecked(1, v);
-+}
- #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
-
- static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
-@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
- return cmpxchg(&v->counter, old, new);
- }
-
-+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
-+{
-+ return cmpxchg(&v->counter, old, new);
-+}
-+
- static inline long atomic64_xchg(atomic64_t *v, long new)
- {
- return xchg(&v->counter, new);
-@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
- */
- static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
- {
-- long c, old;
-+ long c, old, new;
- c = atomic64_read(v);
- for (;;) {
-- if (unlikely(c == (u)))
-+ if (unlikely(c == u))
- break;
-- old = atomic64_cmpxchg((v), c, c + (a));
-+
-+ asm volatile("add %2,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ "sub %2,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "=r" (new)
-+ : "0" (c), "ir" (a));
-+
-+ old = atomic64_cmpxchg(v, c, new);
- if (likely(old == c))
- break;
- c = old;
- }
-- return c != (u);
-+ return c != u;
- }
-
- #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
-index 1775d6e..f84af0c 100644
---- a/arch/x86/include/asm/bitops.h
-+++ b/arch/x86/include/asm/bitops.h
-@@ -38,7 +38,7 @@
- * a mask operation on a byte.
- */
- #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
--#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
-+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
- #define CONST_MASK(nr) (1 << ((nr) & 7))
-
- /**
-@@ -344,7 +344,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
--static inline unsigned long __ffs(unsigned long word)
-+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
- {
- asm("bsf %1,%0"
- : "=r" (word)
-@@ -358,7 +358,7 @@ static inline unsigned long __ffs(unsigned long word)
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
--static inline unsigned long ffz(unsigned long word)
-+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
- {
- asm("bsf %1,%0"
- : "=r" (word)
-@@ -372,7 +372,7 @@ static inline unsigned long ffz(unsigned long word)
- *
- * Undefined if no set bit exists, so code should check against 0 first.
- */
--static inline unsigned long __fls(unsigned long word)
-+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
- {
- asm("bsr %1,%0"
- : "=r" (word)
-@@ -419,7 +419,7 @@ static inline int ffs(int x)
- * set bit if value is nonzero. The last (most significant) bit is
- * at position 32.
- */
--static inline int fls(int x)
-+static inline int __intentional_overflow(-1) fls(int x)
- {
- int r;
- #ifdef CONFIG_X86_CMOV
-diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
-index 5e1a2ee..c9f9533 100644
---- a/arch/x86/include/asm/boot.h
-+++ b/arch/x86/include/asm/boot.h
-@@ -11,10 +11,15 @@
- #include <asm/pgtable_types.h>
-
- /* Physical address where kernel should be loaded. */
--#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
-+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
- + (CONFIG_PHYSICAL_ALIGN - 1)) \
- & ~(CONFIG_PHYSICAL_ALIGN - 1))
-
-+#ifndef __ASSEMBLY__
-+extern unsigned char __LOAD_PHYSICAL_ADDR[];
-+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
-+#endif
-+
- /* Minimum kernel alignment, as a power of two */
- #ifdef CONFIG_X86_64
- #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
-diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
-index 48f99f1..d78ebf9 100644
---- a/arch/x86/include/asm/cache.h
-+++ b/arch/x86/include/asm/cache.h
-@@ -5,12 +5,13 @@
-
- /* L1 cache line size */
- #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #define __read_mostly __attribute__((__section__(".data..read_mostly")))
-+#define __read_only __attribute__((__section__(".data..read_only")))
-
- #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
--#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
-+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
-
- #ifdef CONFIG_X86_VSMP
- #ifdef CONFIG_SMP
-diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
-index 4e12668..501d239 100644
---- a/arch/x86/include/asm/cacheflush.h
-+++ b/arch/x86/include/asm/cacheflush.h
-@@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
- unsigned long pg_flags = pg->flags & _PGMT_MASK;
-
- if (pg_flags == _PGMT_DEFAULT)
-- return -1;
-+ return ~0UL;
- else if (pg_flags == _PGMT_WC)
- return _PAGE_CACHE_WC;
- else if (pg_flags == _PGMT_UC_MINUS)
-diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
-index a9e3a74..44966f3 100644
---- a/arch/x86/include/asm/calling.h
-+++ b/arch/x86/include/asm/calling.h
-@@ -82,103 +82,113 @@ For 32-bit we have the following conventions - kernel is built with
- #define RSP (152)
- #define SS (160)
-
--#define ARGOFFSET R11
--#define SWFRAME ORIG_RAX
-+#define ARGOFFSET R15
-
- .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
-- subq $9*8+\addskip, %rsp
-- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
-- movq_cfi rdi, 8*8
-- movq_cfi rsi, 7*8
-- movq_cfi rdx, 6*8
-+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
-+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
-+ movq_cfi rdi, RDI
-+ movq_cfi rsi, RSI
-+ movq_cfi rdx, RDX
-
- .if \save_rcx
-- movq_cfi rcx, 5*8
-+ movq_cfi rcx, RCX
- .endif
-
-- movq_cfi rax, 4*8
-+ movq_cfi rax, RAX
-
- .if \save_r891011
-- movq_cfi r8, 3*8
-- movq_cfi r9, 2*8
-- movq_cfi r10, 1*8
-- movq_cfi r11, 0*8
-+ movq_cfi r8, R8
-+ movq_cfi r9, R9
-+ movq_cfi r10, R10
-+ movq_cfi r11, R11
- .endif
-
-+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ movq_cfi r12, R12
-+#endif
-+
- .endm
-
--#define ARG_SKIP (9*8)
-+#define ARG_SKIP ORIG_RAX
-
- .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
- rstor_r8910=1, rstor_rdx=1
-+
-+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ movq_cfi_restore R12, r12
-+#endif
-+
- .if \rstor_r11
-- movq_cfi_restore 0*8, r11
-+ movq_cfi_restore R11, r11
- .endif
-
- .if \rstor_r8910
-- movq_cfi_restore 1*8, r10
-- movq_cfi_restore 2*8, r9
-- movq_cfi_restore 3*8, r8
-+ movq_cfi_restore R10, r10
-+ movq_cfi_restore R9, r9
-+ movq_cfi_restore R8, r8
- .endif
-
- .if \rstor_rax
-- movq_cfi_restore 4*8, rax
-+ movq_cfi_restore RAX, rax
- .endif
-
- .if \rstor_rcx
-- movq_cfi_restore 5*8, rcx
-+ movq_cfi_restore RCX, rcx
- .endif
-
- .if \rstor_rdx
-- movq_cfi_restore 6*8, rdx
-+ movq_cfi_restore RDX, rdx
- .endif
-
-- movq_cfi_restore 7*8, rsi
-- movq_cfi_restore 8*8, rdi
-+ movq_cfi_restore RSI, rsi
-+ movq_cfi_restore RDI, rdi
-
-- .if ARG_SKIP+\addskip > 0
-- addq $ARG_SKIP+\addskip, %rsp
-- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
-+ .if ORIG_RAX+\addskip > 0
-+ addq $ORIG_RAX+\addskip, %rsp
-+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
- .endif
- .endm
-
-- .macro LOAD_ARGS offset, skiprax=0
-- movq \offset(%rsp), %r11
-- movq \offset+8(%rsp), %r10
-- movq \offset+16(%rsp), %r9
-- movq \offset+24(%rsp), %r8
-- movq \offset+40(%rsp), %rcx
-- movq \offset+48(%rsp), %rdx
-- movq \offset+56(%rsp), %rsi
-- movq \offset+64(%rsp), %rdi
-+ .macro LOAD_ARGS skiprax=0
-+ movq R11(%rsp), %r11
-+ movq R10(%rsp), %r10
-+ movq R9(%rsp), %r9
-+ movq R8(%rsp), %r8
-+ movq RCX(%rsp), %rcx
-+ movq RDX(%rsp), %rdx
-+ movq RSI(%rsp), %rsi
-+ movq RDI(%rsp), %rdi
- .if \skiprax
- .else
-- movq \offset+72(%rsp), %rax
-+ movq RAX(%rsp), %rax
- .endif
- .endm
-
--#define REST_SKIP (6*8)
--
- .macro SAVE_REST
-- subq $REST_SKIP, %rsp
-- CFI_ADJUST_CFA_OFFSET REST_SKIP
-- movq_cfi rbx, 5*8
-- movq_cfi rbp, 4*8
-- movq_cfi r12, 3*8
-- movq_cfi r13, 2*8
-- movq_cfi r14, 1*8
-- movq_cfi r15, 0*8
-+ movq_cfi rbx, RBX
-+ movq_cfi rbp, RBP
-+
-+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ movq_cfi r12, R12
-+#endif
-+
-+ movq_cfi r13, R13
-+ movq_cfi r14, R14
-+ movq_cfi r15, R15
- .endm
-
- .macro RESTORE_REST
-- movq_cfi_restore 0*8, r15
-- movq_cfi_restore 1*8, r14
-- movq_cfi_restore 2*8, r13
-- movq_cfi_restore 3*8, r12
-- movq_cfi_restore 4*8, rbp
-- movq_cfi_restore 5*8, rbx
-- addq $REST_SKIP, %rsp
-- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
-+ movq_cfi_restore R15, r15
-+ movq_cfi_restore R14, r14
-+ movq_cfi_restore R13, r13
-+
-+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ movq_cfi_restore R12, r12
-+#endif
-+
-+ movq_cfi_restore RBP, rbp
-+ movq_cfi_restore RBX, rbx
- .endm
-
- .macro SAVE_ALL
-diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
-index 46fc474..b02b0f9 100644
---- a/arch/x86/include/asm/checksum_32.h
-+++ b/arch/x86/include/asm/checksum_32.h
-@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
- int len, __wsum sum,
- int *src_err_ptr, int *dst_err_ptr);
-
-+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
-+ int len, __wsum sum,
-+ int *src_err_ptr, int *dst_err_ptr);
-+
-+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
-+ int len, __wsum sum,
-+ int *src_err_ptr, int *dst_err_ptr);
-+
- /*
- * Note: when you get a NULL pointer exception here this means someone
- * passed in an incorrect kernel address to one of these functions.
-@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
- int *err_ptr)
- {
- might_sleep();
-- return csum_partial_copy_generic((__force void *)src, dst,
-+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
- len, sum, err_ptr, NULL);
- }
-
-@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
- {
- might_sleep();
- if (access_ok(VERIFY_WRITE, dst, len))
-- return csum_partial_copy_generic(src, (__force void *)dst,
-+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
- len, sum, NULL, err_ptr);
-
- if (len)
-diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
-index 5d3acdf..6447a02 100644
---- a/arch/x86/include/asm/cmpxchg.h
-+++ b/arch/x86/include/asm/cmpxchg.h
-@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
- __compiletime_error("Bad argument size for cmpxchg");
- extern void __xadd_wrong_size(void)
- __compiletime_error("Bad argument size for xadd");
-+extern void __xadd_check_overflow_wrong_size(void)
-+ __compiletime_error("Bad argument size for xadd_check_overflow");
-
- /*
- * Constants for operation sizes. On 32-bit, the 64-bit size it set to
-@@ -195,6 +197,34 @@ extern void __xadd_wrong_size(void)
- __ret; \
- })
-
-+#define __xadd_check_overflow(ptr, inc, lock) \
-+ ({ \
-+ __typeof__ (*(ptr)) __ret = (inc); \
-+ switch (sizeof(*(ptr))) { \
-+ case __X86_CASE_L: \
-+ asm volatile (lock "xaddl %0, %1\n" \
-+ "jno 0f\n" \
-+ "mov %0,%1\n" \
-+ "int $4\n0:\n" \
-+ _ASM_EXTABLE(0b, 0b) \
-+ : "+r" (__ret), "+m" (*(ptr)) \
-+ : : "memory", "cc"); \
-+ break; \
-+ case __X86_CASE_Q: \
-+ asm volatile (lock "xaddq %q0, %1\n" \
-+ "jno 0f\n" \
-+ "mov %0,%1\n" \
-+ "int $4\n0:\n" \
-+ _ASM_EXTABLE(0b, 0b) \
-+ : "+r" (__ret), "+m" (*(ptr)) \
-+ : : "memory", "cc"); \
-+ break; \
-+ default: \
-+ __xadd_check_overflow_wrong_size(); \
-+ } \
-+ __ret; \
-+ })
-+
- /*
- * xadd() adds "inc" to "*ptr" and atomically returns the previous
- * value of "*ptr".
-@@ -207,4 +237,6 @@ extern void __xadd_wrong_size(void)
- #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
- #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
-
-+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
-+
- #endif /* ASM_X86_CMPXCHG_H */
-diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
-index 30d737e..9830a9b 100644
---- a/arch/x86/include/asm/compat.h
-+++ b/arch/x86/include/asm/compat.h
-@@ -194,7 +194,7 @@ typedef struct user_regs_struct32 compat_elf_gregset_t;
- * as pointers because the syscall entry code will have
- * appropriately converted them already.
- */
--typedef u32 compat_uptr_t;
-+typedef u32 __user compat_uptr_t;
-
- static inline void __user *compat_ptr(compat_uptr_t uptr)
- {
-diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
-index b8a5fe5..fbbe2c2 100644
---- a/arch/x86/include/asm/cpufeature.h
-+++ b/arch/x86/include/asm/cpufeature.h
-@@ -198,8 +198,9 @@
-
- /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
- #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
--#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
-+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
- #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
-+#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */
-
- #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
-
-@@ -364,7 +365,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
- ".section .discard,\"aw\",@progbits\n"
- " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
- ".previous\n"
-- ".section .altinstr_replacement,\"ax\"\n"
-+ ".section .altinstr_replacement,\"a\"\n"
- "3: movb $1,%0\n"
- "4:\n"
- ".previous\n"
-diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
-index 3225868..e0fb1f6 100644
---- a/arch/x86/include/asm/desc.h
-+++ b/arch/x86/include/asm/desc.h
-@@ -4,6 +4,7 @@
- #include <asm/desc_defs.h>
- #include <asm/ldt.h>
- #include <asm/mmu.h>
-+#include <asm/pgtable.h>
-
- #include <linux/smp.h>
-
-@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
-
- desc->type = (info->read_exec_only ^ 1) << 1;
- desc->type |= info->contents << 2;
-+ desc->type |= info->seg_not_present ^ 1;
-
- desc->s = 1;
- desc->dpl = 0x3;
-@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
- }
-
- extern struct desc_ptr idt_descr;
--extern gate_desc idt_table[];
--
--struct gdt_page {
-- struct desc_struct gdt[GDT_ENTRIES];
--} __attribute__((aligned(PAGE_SIZE)));
--
--DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
-+extern gate_desc idt_table[256];
-
-+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
- static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
- {
-- return per_cpu(gdt_page, cpu).gdt;
-+ return cpu_gdt_table[cpu];
- }
-
- #ifdef CONFIG_X86_64
-@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
- unsigned long base, unsigned dpl, unsigned flags,
- unsigned short seg)
- {
-- gate->a = (seg << 16) | (base & 0xffff);
-- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
-+ gate->gate.offset_low = base;
-+ gate->gate.seg = seg;
-+ gate->gate.reserved = 0;
-+ gate->gate.type = type;
-+ gate->gate.s = 0;
-+ gate->gate.dpl = dpl;
-+ gate->gate.p = 1;
-+ gate->gate.offset_high = base >> 16;
- }
-
- #endif
-@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
-
- static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
- {
-+ pax_open_kernel();
- memcpy(&idt[entry], gate, sizeof(*gate));
-+ pax_close_kernel();
- }
-
- static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
- {
-+ pax_open_kernel();
- memcpy(&ldt[entry], desc, 8);
-+ pax_close_kernel();
- }
-
- static inline void
-@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
- default: size = sizeof(*gdt); break;
- }
-
-+ pax_open_kernel();
- memcpy(&gdt[entry], desc, size);
-+ pax_close_kernel();
- }
-
- static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
-@@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
-
- static inline void native_load_tr_desc(void)
- {
-+ pax_open_kernel();
- asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
-+ pax_close_kernel();
- }
-
- static inline void native_load_gdt(const struct desc_ptr *dtr)
-@@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
- struct desc_struct *gdt = get_cpu_gdt_table(cpu);
- unsigned int i;
-
-+ pax_open_kernel();
- for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
- gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
-+ pax_close_kernel();
- }
-
- /* This intentionally ignores lm, since 32-bit apps don't have that field. */
-@@ -292,7 +305,7 @@ static inline void load_LDT(mm_context_t *pc)
- preempt_enable();
- }
-
--static inline unsigned long get_desc_base(const struct desc_struct *desc)
-+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
- {
- return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
- }
-@@ -315,7 +328,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
- desc->limit = (limit >> 16) & 0xf;
- }
-
--static inline void _set_gate(int gate, unsigned type, void *addr,
-+static inline void _set_gate(int gate, unsigned type, const void *addr,
- unsigned dpl, unsigned ist, unsigned seg)
- {
- gate_desc s;
-@@ -334,7 +347,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
- * Pentium F0 0F bugfix can have resulted in the mapped
- * IDT being write-protected.
- */
--static inline void set_intr_gate(unsigned int n, void *addr)
-+static inline void set_intr_gate(unsigned int n, const void *addr)
- {
- BUG_ON((unsigned)n > 0xFF);
- _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
-@@ -364,19 +377,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
- /*
- * This routine sets up an interrupt gate at directory privilege level 3.
- */
--static inline void set_system_intr_gate(unsigned int n, void *addr)
-+static inline void set_system_intr_gate(unsigned int n, const void *addr)
- {
- BUG_ON((unsigned)n > 0xFF);
- _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
- }
-
--static inline void set_system_trap_gate(unsigned int n, void *addr)
-+static inline void set_system_trap_gate(unsigned int n, const void *addr)
- {
- BUG_ON((unsigned)n > 0xFF);
- _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
- }
-
--static inline void set_trap_gate(unsigned int n, void *addr)
-+static inline void set_trap_gate(unsigned int n, const void *addr)
- {
- BUG_ON((unsigned)n > 0xFF);
- _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
-@@ -385,19 +398,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
- static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
- {
- BUG_ON((unsigned)n > 0xFF);
-- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
-+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
- }
-
--static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
-+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
- {
- BUG_ON((unsigned)n > 0xFF);
- _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
- }
-
--static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
-+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
- {
- BUG_ON((unsigned)n > 0xFF);
- _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
- }
-
-+#ifdef CONFIG_X86_32
-+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
-+{
-+ struct desc_struct d;
-+
-+ if (likely(limit))
-+ limit = (limit - 1UL) >> PAGE_SHIFT;
-+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
-+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
-+}
-+#endif
-+
- #endif /* _ASM_X86_DESC_H */
-diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
-index 278441f..b95a174 100644
---- a/arch/x86/include/asm/desc_defs.h
-+++ b/arch/x86/include/asm/desc_defs.h
-@@ -31,6 +31,12 @@ struct desc_struct {
- unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
- unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
- };
-+ struct {
-+ u16 offset_low;
-+ u16 seg;
-+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
-+ unsigned offset_high: 16;
-+ } gate;
- };
- } __attribute__((packed));
-
-diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
-index 9a2d644..5abb141 100644
---- a/arch/x86/include/asm/div64.h
-+++ b/arch/x86/include/asm/div64.h
-@@ -33,7 +33,7 @@
- __mod; \
- })
-
--static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
-+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
- {
- union {
- u64 v64;
-diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
-index 908b969..a1f4eb4 100644
---- a/arch/x86/include/asm/e820.h
-+++ b/arch/x86/include/asm/e820.h
-@@ -69,7 +69,7 @@ struct e820map {
- #define ISA_START_ADDRESS 0xa0000
- #define ISA_END_ADDRESS 0x100000
-
--#define BIOS_BEGIN 0x000a0000
-+#define BIOS_BEGIN 0x000c0000
- #define BIOS_END 0x00100000
-
- #define BIOS_ROM_BASE 0xffe00000
-diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
-index 035cd81..db1e26d 100644
---- a/arch/x86/include/asm/efi.h
-+++ b/arch/x86/include/asm/efi.h
-@@ -95,6 +95,7 @@ extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
- extern void efi_memblock_x86_reserve_range(void);
- extern void efi_call_phys_prelog(void);
- extern void efi_call_phys_epilog(void);
-+extern void efi_setup_pgd(void);
-
- #ifndef CONFIG_EFI
- /*
-diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
-index 5f962df..7289f09 100644
---- a/arch/x86/include/asm/elf.h
-+++ b/arch/x86/include/asm/elf.h
-@@ -238,7 +238,25 @@ extern int force_personality32;
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
-+#else
- #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
-+#endif
-+
-+#ifdef CONFIG_PAX_ASLR
-+#ifdef CONFIG_X86_32
-+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
-+
-+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
-+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
-+#else
-+#define PAX_ELF_ET_DYN_BASE 0x400000UL
-+
-+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
-+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
-+#endif
-+#endif
-
- /* This yields a mask that user programs can use to figure out what
- instruction set this CPU supports. This could be done in user space,
-@@ -291,9 +309,7 @@ do { \
-
- #define ARCH_DLINFO \
- do { \
-- if (vdso_enabled) \
-- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
-- (unsigned long)current->mm->context.vdso); \
-+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
- } while (0)
-
- #define AT_SYSINFO 32
-@@ -304,7 +320,7 @@ do { \
-
- #endif /* !CONFIG_X86_32 */
-
--#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
-+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
-
- #define VDSO_ENTRY \
- ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
-@@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
- extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
- #define compat_arch_setup_additional_pages syscall32_setup_pages
-
--extern unsigned long arch_randomize_brk(struct mm_struct *mm);
--#define arch_randomize_brk arch_randomize_brk
--
- /*
- * True on X86_32 or when emulating IA32 on X86_64
- */
-diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
-index cc70c1c..d96d011 100644
---- a/arch/x86/include/asm/emergency-restart.h
-+++ b/arch/x86/include/asm/emergency-restart.h
-@@ -15,6 +15,6 @@ enum reboot_type {
-
- extern enum reboot_type reboot_type;
-
--extern void machine_emergency_restart(void);
-+extern void machine_emergency_restart(void) __noreturn;
-
- #endif /* _ASM_X86_EMERGENCY_RESTART_H */
-diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
-index dbe82a5..459eb0b 100644
---- a/arch/x86/include/asm/floppy.h
-+++ b/arch/x86/include/asm/floppy.h
-@@ -229,18 +229,18 @@ static struct fd_routine_l {
- int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
- } fd_routine[] = {
- {
-- request_dma,
-- free_dma,
-- get_dma_residue,
-- dma_mem_alloc,
-- hard_dma_setup
-+ ._request_dma = request_dma,
-+ ._free_dma = free_dma,
-+ ._get_dma_residue = get_dma_residue,
-+ ._dma_mem_alloc = dma_mem_alloc,
-+ ._dma_setup = hard_dma_setup
- },
- {
-- vdma_request_dma,
-- vdma_nop,
-- vdma_get_dma_residue,
-- vdma_mem_alloc,
-- vdma_dma_setup
-+ ._request_dma = vdma_request_dma,
-+ ._free_dma = vdma_nop,
-+ ._get_dma_residue = vdma_get_dma_residue,
-+ ._dma_mem_alloc = vdma_mem_alloc,
-+ ._dma_setup = vdma_dma_setup
- }
- };
-
-diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
-index d09bb03..0a3629b 100644
---- a/arch/x86/include/asm/futex.h
-+++ b/arch/x86/include/asm/futex.h
-@@ -12,20 +12,22 @@
- #include <asm/system.h>
-
- #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
-+ typecheck(u32 __user *, uaddr); \
- asm volatile("1:\t" insn "\n" \
- "2:\t.section .fixup,\"ax\"\n" \
- "3:\tmov\t%3, %1\n" \
- "\tjmp\t2b\n" \
- "\t.previous\n" \
- _ASM_EXTABLE(1b, 3b) \
-- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
-+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
- : "i" (-EFAULT), "0" (oparg), "1" (0))
-
- #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
-+ typecheck(u32 __user *, uaddr); \
- asm volatile("1:\tmovl %2, %0\n" \
- "\tmovl\t%0, %3\n" \
- "\t" insn "\n" \
-- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
-+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
- "\tjnz\t1b\n" \
- "3:\t.section .fixup,\"ax\"\n" \
- "4:\tmov\t%5, %1\n" \
-@@ -34,7 +36,7 @@
- _ASM_EXTABLE(1b, 4b) \
- _ASM_EXTABLE(2b, 4b) \
- : "=&a" (oldval), "=&r" (ret), \
-- "+m" (*uaddr), "=&r" (tem) \
-+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
- : "r" (oparg), "i" (-EFAULT), "1" (0))
-
- static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
-@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
-
- switch (op) {
- case FUTEX_OP_SET:
-- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
-+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
- break;
- case FUTEX_OP_ADD:
-- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
-+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
- uaddr, oparg);
- break;
- case FUTEX_OP_OR:
-@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
-- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
-+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
- "2:\t.section .fixup, \"ax\"\n"
- "3:\tmov %3, %0\n"
- "\tjmp 2b\n"
- "\t.previous\n"
- _ASM_EXTABLE(1b, 3b)
-- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
-+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
- : "i" (-EFAULT), "r" (newval), "1" (oldval)
- : "memory"
- );
-diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
-index eb92a6e..b98b2f4 100644
---- a/arch/x86/include/asm/hw_irq.h
-+++ b/arch/x86/include/asm/hw_irq.h
-@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
- extern void enable_IO_APIC(void);
-
- /* Statistics */
--extern atomic_t irq_err_count;
--extern atomic_t irq_mis_count;
-+extern atomic_unchecked_t irq_err_count;
-+extern atomic_unchecked_t irq_mis_count;
-
- /* EISA */
- extern void eisa_set_level_irq(unsigned int irq);
-diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
-index a850b4d..1d8dfb7 100644
---- a/arch/x86/include/asm/i387.h
-+++ b/arch/x86/include/asm/i387.h
-@@ -88,10 +88,12 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
- }
-
- #ifdef CONFIG_X86_64
--static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
-+static inline int fxrstor_checking(struct i387_fxsave_struct __user *fx)
- {
- int err;
-
-+ fx = (struct i387_fxsave_struct __user *)____m(fx);
-+
- /* See comment in fxsave() below. */
- #ifdef CONFIG_AS_FXSAVEQ
- asm volatile("1: fxrstorq %[fx]\n\t"
-@@ -121,6 +123,8 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
- {
- int err;
-
-+ fx = (struct i387_fxsave_struct __user *)____m(fx);
-+
- /*
- * Clear the bytes not touched by the fxsave and reserved
- * for the SW usage.
-@@ -189,15 +193,15 @@ static inline void fpu_fxsave(struct fpu *fpu)
- #else /* CONFIG_X86_32 */
-
- /* perform fxrstor iff the processor has extended states, otherwise frstor */
--static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
-+static inline int fxrstor_checking(struct i387_fxsave_struct __user *fx)
- {
- /*
- * The "nop" is needed to make the instructions the same
- * length.
- */
- alternative_input(
-- "nop ; frstor %1",
-- "fxrstor %1",
-+ __copyuser_seg" frstor %1; nop",
-+ __copyuser_seg" fxrstor %1",
- X86_FEATURE_FXSR,
- "m" (*fx));
-
-@@ -256,7 +260,14 @@ static inline int __save_init_fpu(struct task_struct *tsk)
-
- static inline int fpu_fxrstor_checking(struct fpu *fpu)
- {
-- return fxrstor_checking(&fpu->state->fxsave);
-+ int ret;
-+ mm_segment_t fs;
-+
-+ fs = get_fs();
-+ set_fs(KERNEL_DS);
-+ ret = fxrstor_checking(&fpu->state->fxsave);
-+ set_fs(fs);
-+ return ret;
- }
-
- static inline int fpu_restore_checking(struct fpu *fpu)
-@@ -424,7 +435,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
- static inline bool interrupted_user_mode(void)
- {
- struct pt_regs *regs = get_irq_regs();
-- return regs && user_mode_vm(regs);
-+ return regs && user_mode(regs);
- }
-
- /*
-diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
-index a203659..9889f1c 100644
---- a/arch/x86/include/asm/i8259.h
-+++ b/arch/x86/include/asm/i8259.h
-@@ -62,7 +62,7 @@ struct legacy_pic {
- void (*init)(int auto_eoi);
- int (*irq_pending)(unsigned int irq);
- void (*make_irq)(unsigned int irq);
--};
-+} __do_const;
-
- extern struct legacy_pic *legacy_pic;
- extern struct legacy_pic null_legacy_pic;
-diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
-index 1f7e625..541485f 100644
---- a/arch/x86/include/asm/ia32.h
-+++ b/arch/x86/include/asm/ia32.h
-@@ -126,6 +126,12 @@ typedef struct compat_siginfo {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
-+
-+ struct {
-+ unsigned int _call_addr; /* calling insn */
-+ int _syscall; /* triggering system call number */
-+ unsigned int _arch; /* AUDIT_ARCH_* of syscall */
-+ } _sigsys;
- } _sifields;
- } compat_siginfo_t;
-
-diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
-index d8e8eef..1765f78 100644
---- a/arch/x86/include/asm/io.h
-+++ b/arch/x86/include/asm/io.h
-@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
- "m" (*(volatile type __force *)addr) barrier); }
-
- build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
--build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
--build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
-+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
-+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
-
- build_mmio_read(__readb, "b", unsigned char, "=q", )
--build_mmio_read(__readw, "w", unsigned short, "=r", )
--build_mmio_read(__readl, "l", unsigned int, "=r", )
-+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
-+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
-
- build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
- build_mmio_write(writew, "w", unsigned short, "r", :"memory")
-@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
- return ioremap_nocache(offset, size);
- }
-
--extern void iounmap(volatile void __iomem *addr);
-+extern void iounmap(const volatile void __iomem *addr);
-
- extern void set_iounmap_nonlazy(void);
-
-@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
-
- #include <linux/vmalloc.h>
-
-+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
-+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
-+{
-+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
-+}
-+
-+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
-+{
-+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
-+}
-+
- /*
- * Convert a virtual cached pointer to an uncached pointer
- */
-diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
-index 0a8b519..80e7d5b 100644
---- a/arch/x86/include/asm/irqflags.h
-+++ b/arch/x86/include/asm/irqflags.h
-@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
- sti; \
- sysexit
-
-+#define GET_CR0_INTO_RDI mov %cr0, %rdi
-+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
-+#define GET_CR3_INTO_RDI mov %cr3, %rdi
-+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
-+
- #else
- #define INTERRUPT_RETURN iret
- #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
-diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
-index 5478825..839e88c 100644
---- a/arch/x86/include/asm/kprobes.h
-+++ b/arch/x86/include/asm/kprobes.h
-@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
- #define RELATIVEJUMP_SIZE 5
- #define RELATIVECALL_OPCODE 0xe8
- #define RELATIVE_ADDR_SIZE 4
--#define MAX_STACK_SIZE 64
--#define MIN_STACK_SIZE(ADDR) \
-- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
-- THREAD_SIZE - (unsigned long)(ADDR))) \
-- ? (MAX_STACK_SIZE) \
-- : (((unsigned long)current_thread_info()) + \
-- THREAD_SIZE - (unsigned long)(ADDR)))
-+#define MAX_STACK_SIZE 64UL
-+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
-
- #define flush_insn_slot(p) do { } while (0)
-
-diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
-index 9171618..fe2b1da 100644
---- a/arch/x86/include/asm/kvm_host.h
-+++ b/arch/x86/include/asm/kvm_host.h
-@@ -45,6 +45,7 @@
- #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
- #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
- 0xFFFFFF0000000000ULL)
-+#define CR3_PCID_INVD (1UL << 63)
- #define CR4_RESERVED_BITS \
- (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
- | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
-@@ -460,7 +461,7 @@ struct kvm_arch {
- unsigned int n_requested_mmu_pages;
- unsigned int n_max_mmu_pages;
- unsigned int indirect_shadow_pages;
-- atomic_t invlpg_counter;
-+ atomic_unchecked_t invlpg_counter;
- struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
- /*
- * Hash table of struct kvm_mmu_page.
-diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
-index 9cdae5d..3534f04 100644
---- a/arch/x86/include/asm/local.h
-+++ b/arch/x86/include/asm/local.h
-@@ -11,33 +11,97 @@ typedef struct {
- atomic_long_t a;
- } local_t;
-
-+typedef struct {
-+ atomic_long_unchecked_t a;
-+} local_unchecked_t;
-+
- #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
-
- #define local_read(l) atomic_long_read(&(l)->a)
-+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
- #define local_set(l, i) atomic_long_set(&(l)->a, (i))
-+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
-
- static inline void local_inc(local_t *l)
- {
-- asm volatile(_ASM_INC "%0"
-+ asm volatile(_ASM_INC "%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ _ASM_DEC "%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "+m" (l->a.counter));
-+}
-+
-+static inline void local_inc_unchecked(local_unchecked_t *l)
-+{
-+ asm volatile(_ASM_INC "%0\n"
- : "+m" (l->a.counter));
- }
-
- static inline void local_dec(local_t *l)
- {
-- asm volatile(_ASM_DEC "%0"
-+ asm volatile(_ASM_DEC "%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ _ASM_INC "%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "+m" (l->a.counter));
-+}
-+
-+static inline void local_dec_unchecked(local_unchecked_t *l)
-+{
-+ asm volatile(_ASM_DEC "%0\n"
- : "+m" (l->a.counter));
- }
-
- static inline void local_add(long i, local_t *l)
- {
-- asm volatile(_ASM_ADD "%1,%0"
-+ asm volatile(_ASM_ADD "%1,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ _ASM_SUB "%1,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "+m" (l->a.counter)
-+ : "ir" (i));
-+}
-+
-+static inline void local_add_unchecked(long i, local_unchecked_t *l)
-+{
-+ asm volatile(_ASM_ADD "%1,%0\n"
- : "+m" (l->a.counter)
- : "ir" (i));
- }
-
- static inline void local_sub(long i, local_t *l)
- {
-- asm volatile(_ASM_SUB "%1,%0"
-+ asm volatile(_ASM_SUB "%1,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ _ASM_ADD "%1,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "+m" (l->a.counter)
-+ : "ir" (i));
-+}
-+
-+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
-+{
-+ asm volatile(_ASM_SUB "%1,%0\n"
- : "+m" (l->a.counter)
- : "ir" (i));
- }
-@@ -55,7 +119,16 @@ static inline int local_sub_and_test(long i, local_t *l)
- {
- unsigned char c;
-
-- asm volatile(_ASM_SUB "%2,%0; sete %1"
-+ asm volatile(_ASM_SUB "%2,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ _ASM_ADD "%2,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ "sete %1\n"
- : "+m" (l->a.counter), "=qm" (c)
- : "ir" (i) : "memory");
- return c;
-@@ -73,7 +146,16 @@ static inline int local_dec_and_test(local_t *l)
- {
- unsigned char c;
-
-- asm volatile(_ASM_DEC "%0; sete %1"
-+ asm volatile(_ASM_DEC "%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ _ASM_INC "%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ "sete %1\n"
- : "+m" (l->a.counter), "=qm" (c)
- : : "memory");
- return c != 0;
-@@ -91,7 +173,16 @@ static inline int local_inc_and_test(local_t *l)
- {
- unsigned char c;
-
-- asm volatile(_ASM_INC "%0; sete %1"
-+ asm volatile(_ASM_INC "%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ _ASM_DEC "%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ "sete %1\n"
- : "+m" (l->a.counter), "=qm" (c)
- : : "memory");
- return c != 0;
-@@ -110,7 +201,16 @@ static inline int local_add_negative(long i, local_t *l)
- {
- unsigned char c;
-
-- asm volatile(_ASM_ADD "%2,%0; sets %1"
-+ asm volatile(_ASM_ADD "%2,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ _ASM_SUB "%2,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ "sets %1\n"
- : "+m" (l->a.counter), "=qm" (c)
- : "ir" (i) : "memory");
- return c;
-@@ -133,7 +233,15 @@ static inline long local_add_return(long i, local_t *l)
- #endif
- /* Modern 486+ processor */
- __i = i;
-- asm volatile(_ASM_XADD "%0, %1;"
-+ asm volatile(_ASM_XADD "%0, %1\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ _ASM_MOV "%0,%1\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
- : "+r" (i), "+m" (l->a.counter)
- : : "memory");
- return i + __i;
-@@ -148,6 +256,38 @@ no_xadd: /* Legacy 386 processor */
- #endif
- }
-
-+/**
-+ * local_add_return_unchecked - add and return
-+ * @i: integer value to add
-+ * @l: pointer to type local_unchecked_t
-+ *
-+ * Atomically adds @i to @l and returns @i + @l
-+ */
-+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
-+{
-+ long __i;
-+#ifdef CONFIG_M386
-+ unsigned long flags;
-+ if (unlikely(boot_cpu_data.x86 <= 3))
-+ goto no_xadd;
-+#endif
-+ /* Modern 486+ processor */
-+ __i = i;
-+ asm volatile(_ASM_XADD "%0, %1\n"
-+ : "+r" (i), "+m" (l->a.counter)
-+ : : "memory");
-+ return i + __i;
-+
-+#ifdef CONFIG_M386
-+no_xadd: /* Legacy 386 processor */
-+ local_irq_save(flags);
-+ __i = local_read_unchecked(l);
-+ local_set_unchecked(l, i + __i);
-+ local_irq_restore(flags);
-+ return i + __i;
-+#endif
-+}
-+
- static inline long local_sub_return(long i, local_t *l)
- {
- return local_add_return(-i, l);
-@@ -158,6 +298,8 @@ static inline long local_sub_return(long i, local_t *l)
-
- #define local_cmpxchg(l, o, n) \
- (cmpxchg_local(&((l)->a.counter), (o), (n)))
-+#define local_cmpxchg_unchecked(l, o, n) \
-+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
- /* Always has a lock prefix */
- #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
-
-diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
-index 593e51d..fa69c9a 100644
---- a/arch/x86/include/asm/mman.h
-+++ b/arch/x86/include/asm/mman.h
-@@ -5,4 +5,14 @@
-
- #include <asm-generic/mman.h>
-
-+#ifdef __KERNEL__
-+#ifndef __ASSEMBLY__
-+#ifdef CONFIG_X86_32
-+#define arch_mmap_check i386_mmap_check
-+int i386_mmap_check(unsigned long addr, unsigned long len,
-+ unsigned long flags);
-+#endif
-+#endif
-+#endif
-+
- #endif /* _ASM_X86_MMAN_H */
-diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
-index 5f55e69..e20bfb1 100644
---- a/arch/x86/include/asm/mmu.h
-+++ b/arch/x86/include/asm/mmu.h
-@@ -9,7 +9,7 @@
- * we put the segment information here.
- */
- typedef struct {
-- void *ldt;
-+ struct desc_struct *ldt;
- int size;
-
- #ifdef CONFIG_X86_64
-@@ -18,7 +18,19 @@ typedef struct {
- #endif
-
- struct mutex lock;
-- void *vdso;
-+ unsigned long vdso;
-+
-+#ifdef CONFIG_X86_32
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+ unsigned long user_cs_base;
-+ unsigned long user_cs_limit;
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
-+ cpumask_t cpu_user_cs_mask;
-+#endif
-+
-+#endif
-+#endif
- } mm_context_t;
-
- #ifdef CONFIG_SMP
-diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
-index 6902152..737f889 100644
---- a/arch/x86/include/asm/mmu_context.h
-+++ b/arch/x86/include/asm/mmu_context.h
-@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
-
- static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
- {
-+
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ unsigned int i;
-+ pgd_t *pgd;
-+
-+ pax_open_kernel();
-+ pgd = get_cpu_pgd(smp_processor_id());
-+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
-+ set_pgd_batched(pgd+i, native_make_pgd(0));
-+ pax_close_kernel();
-+#endif
-+
- #ifdef CONFIG_SMP
- if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
- percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
-@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
- {
- unsigned cpu = smp_processor_id();
-+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
-+ int tlbstate = TLBSTATE_OK;
-+#endif
-
- if (likely(prev != next)) {
- #ifdef CONFIG_SMP
-+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
-+ tlbstate = percpu_read(cpu_tlbstate.state);
-+#endif
- percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
- percpu_write(cpu_tlbstate.active_mm, next);
- #endif
- cpumask_set_cpu(cpu, mm_cpumask(next));
-
- /* Re-load page tables */
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ pax_open_kernel();
-+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
-+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
-+ pax_close_kernel();
-+ load_cr3(get_cpu_pgd(cpu));
-+#else
- load_cr3(next->pgd);
-+#endif
-
- /* stop flush ipis for the previous mm */
- cpumask_clear_cpu(cpu, mm_cpumask(prev));
-@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- */
- if (unlikely(prev->context.ldt != next->context.ldt))
- load_LDT_nolock(&next->context);
-- }
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
-+ if (!(__supported_pte_mask & _PAGE_NX)) {
-+ smp_mb__before_clear_bit();
-+ cpumask_clear_cpu(cpu, &prev->context.cpu_user_cs_mask);
-+ smp_mb__after_clear_bit();
-+ cpumask_set_cpu(cpu, &next->context.cpu_user_cs_mask);
-+ }
-+#endif
-+
-+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
-+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
-+ prev->context.user_cs_limit != next->context.user_cs_limit))
-+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
- #ifdef CONFIG_SMP
-+ else if (unlikely(tlbstate != TLBSTATE_OK))
-+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
-+#endif
-+#endif
-+
-+ }
- else {
-+
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ pax_open_kernel();
-+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
-+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
-+ pax_close_kernel();
-+ load_cr3(get_cpu_pgd(cpu));
-+#endif
-+
-+#ifdef CONFIG_SMP
- percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
- BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
-
-@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- * tlb flush IPI delivery. We must reload CR3
- * to make sure to use no freed page tables.
- */
-+
-+#ifndef CONFIG_PAX_PER_CPU_PGD
- load_cr3(next->pgd);
-+#endif
-+
- load_LDT_nolock(&next->context);
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
-+ if (!(__supported_pte_mask & _PAGE_NX))
-+ cpumask_set_cpu(cpu, &next->context.cpu_user_cs_mask);
-+#endif
-+
-+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
-+#endif
-+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
-+#endif
-+
- }
-+#endif
- }
--#endif
- }
-
- #define activate_mm(prev, next) \
-diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
-index 9eae775..c914fea 100644
---- a/arch/x86/include/asm/module.h
-+++ b/arch/x86/include/asm/module.h
-@@ -5,6 +5,7 @@
-
- #ifdef CONFIG_X86_64
- /* X86_64 does not define MODULE_PROC_FAMILY */
-+#define MODULE_PROC_FAMILY ""
- #elif defined CONFIG_M386
- #define MODULE_PROC_FAMILY "386 "
- #elif defined CONFIG_M486
-@@ -59,8 +60,20 @@
- #error unknown processor family
- #endif
-
--#ifdef CONFIG_X86_32
--# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
-+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
-+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
-+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
-+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
-+#else
-+#define MODULE_PAX_KERNEXEC ""
- #endif
-
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+#define MODULE_PAX_UDEREF "UDEREF "
-+#else
-+#define MODULE_PAX_UDEREF ""
-+#endif
-+
-+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
-+
- #endif /* _ASM_X86_MODULE_H */
-diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
-index 8ca8283..8dc71fa 100644
---- a/arch/x86/include/asm/page.h
-+++ b/arch/x86/include/asm/page.h
-@@ -55,11 +55,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
- * virt_to_page(kaddr) returns a valid pointer if and only if
- * virt_addr_valid(kaddr) returns true.
- */
--#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
- #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
- extern bool __virt_addr_valid(unsigned long kaddr);
- #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
-
-+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
-+#define virt_to_page(kaddr) \
-+ ({ \
-+ const void *__kaddr = (const void *)(kaddr); \
-+ BUG_ON(!virt_addr_valid(__kaddr)); \
-+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
-+ })
-+#else
-+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-+#endif
-+
- #endif /* __ASSEMBLY__ */
-
- #include <asm-generic/memory_model.h>
-diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h
-index da4e762..592f614 100644
---- a/arch/x86/include/asm/page_32.h
-+++ b/arch/x86/include/asm/page_32.h
-@@ -11,7 +11,7 @@
-
- #define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET)
- #ifdef CONFIG_DEBUG_VIRTUAL
--extern unsigned long __phys_addr(unsigned long);
-+extern unsigned long __intentional_overflow(-1) __phys_addr(unsigned long);
- #else
- #define __phys_addr(x) __phys_addr_nodebug(x)
- #endif
-diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
-index a9e9937..a398cb4 100644
---- a/arch/x86/include/asm/page_64_types.h
-+++ b/arch/x86/include/asm/page_64_types.h
-@@ -1,7 +1,7 @@
- #ifndef _ASM_X86_PAGE_64_DEFS_H
- #define _ASM_X86_PAGE_64_DEFS_H
-
--#define THREAD_ORDER 1
-+#define THREAD_ORDER 2
- #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
- #define CURRENT_MASK (~(THREAD_SIZE - 1))
-
-@@ -55,9 +55,9 @@ void copy_page(void *to, void *from);
-
- /* duplicated to the one in bootmem.h */
- extern unsigned long max_pfn;
--extern unsigned long phys_base;
-+extern const unsigned long phys_base;
-
--extern unsigned long __phys_addr(unsigned long);
-+extern unsigned long __intentional_overflow(-1) __phys_addr(unsigned long);
- #define __phys_reloc_hide(x) (x)
-
- #define vmemmap ((struct page *)VMEMMAP_START)
-diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
-index 91e758b..cac1cd6 100644
---- a/arch/x86/include/asm/paravirt.h
-+++ b/arch/x86/include/asm/paravirt.h
-@@ -601,7 +601,7 @@ static inline pmd_t __pmd(pmdval_t val)
- return (pmd_t) { ret };
- }
-
--static inline pmdval_t pmd_val(pmd_t pmd)
-+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
- {
- pmdval_t ret;
-
-@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
- val);
- }
-
-+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
-+{
-+ pgdval_t val = native_pgd_val(pgd);
-+
-+ if (sizeof(pgdval_t) > sizeof(long))
-+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
-+ val, (u64)val >> 32);
-+ else
-+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
-+ val);
-+}
-+
- static inline void pgd_clear(pgd_t *pgdp)
- {
- set_pgd(pgdp, __pgd(0));
-@@ -751,6 +763,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
- pv_mmu_ops.set_fixmap(idx, phys, flags);
- }
-
-+#ifdef CONFIG_PAX_KERNEXEC
-+static inline unsigned long pax_open_kernel(void)
-+{
-+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
-+}
-+
-+static inline unsigned long pax_close_kernel(void)
-+{
-+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
-+}
-+#else
-+static inline unsigned long pax_open_kernel(void) { return 0; }
-+static inline unsigned long pax_close_kernel(void) { return 0; }
-+#endif
-+
- #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
-
- static inline int arch_spin_is_locked(struct arch_spinlock *lock)
-@@ -967,7 +994,7 @@ extern void default_banner(void);
-
- #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
- #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
--#define PARA_INDIRECT(addr) *%cs:addr
-+#define PARA_INDIRECT(addr) *%ss:addr
- #endif
-
- #define INTERRUPT_RETURN \
-@@ -1044,6 +1071,21 @@ extern void default_banner(void);
- PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
- CLBR_NONE, \
- jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
-+
-+#define GET_CR0_INTO_RDI \
-+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
-+ mov %rax,%rdi
-+
-+#define SET_RDI_INTO_CR0 \
-+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
-+
-+#define GET_CR3_INTO_RDI \
-+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
-+ mov %rax,%rdi
-+
-+#define SET_RDI_INTO_CR3 \
-+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
-+
- #endif /* CONFIG_X86_32 */
-
- #endif /* __ASSEMBLY__ */
-diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
-index faf2c04..055c010 100644
---- a/arch/x86/include/asm/paravirt_types.h
-+++ b/arch/x86/include/asm/paravirt_types.h
-@@ -84,7 +84,7 @@ struct pv_init_ops {
- */
- unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
- unsigned long addr, unsigned len);
--};
-+} __no_const __no_randomize_layout;
-
-
- struct pv_lazy_ops {
-@@ -92,13 +92,13 @@ struct pv_lazy_ops {
- void (*enter)(void);
- void (*leave)(void);
- void (*flush)(void);
--};
-+} __no_randomize_layout;
-
- struct pv_time_ops {
- unsigned long long (*sched_clock)(void);
- unsigned long long (*steal_clock)(int cpu);
- unsigned long (*get_tsc_khz)(void);
--};
-+} __no_const __no_randomize_layout;
-
- struct pv_cpu_ops {
- /* hooks for various privileged instructions */
-@@ -194,7 +194,7 @@ struct pv_cpu_ops {
-
- void (*start_context_switch)(struct task_struct *prev);
- void (*end_context_switch)(struct task_struct *next);
--};
-+} __no_const __no_randomize_layout;
-
- struct pv_irq_ops {
- /*
-@@ -217,7 +217,7 @@ struct pv_irq_ops {
- #ifdef CONFIG_X86_64
- void (*adjust_exception_frame)(void);
- #endif
--};
-+} __no_randomize_layout;
-
- struct pv_apic_ops {
- #ifdef CONFIG_X86_LOCAL_APIC
-@@ -225,7 +225,7 @@ struct pv_apic_ops {
- unsigned long start_eip,
- unsigned long start_esp);
- #endif
--};
-+} __no_const __no_randomize_layout;
-
- struct pv_mmu_ops {
- unsigned long (*read_cr2)(void);
-@@ -314,6 +314,7 @@ struct pv_mmu_ops {
- struct paravirt_callee_save make_pud;
-
- void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
-+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
- #endif /* PAGETABLE_LEVELS == 4 */
- #endif /* PAGETABLE_LEVELS >= 3 */
-
-@@ -325,7 +326,13 @@ struct pv_mmu_ops {
- an mfn. We can tell which is which from the index. */
- void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
- phys_addr_t phys, pgprot_t flags);
--};
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ unsigned long (*pax_open_kernel)(void);
-+ unsigned long (*pax_close_kernel)(void);
-+#endif
-+
-+} __no_randomize_layout;
-
- struct arch_spinlock;
- struct pv_lock_ops {
-@@ -335,11 +342,14 @@ struct pv_lock_ops {
- void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
- int (*spin_trylock)(struct arch_spinlock *lock);
- void (*spin_unlock)(struct arch_spinlock *lock);
--};
-+} __no_const __no_randomize_layout;
-
- /* This contains all the paravirt structures: we get a convenient
- * number for each function using the offset which we use to indicate
-- * what to patch. */
-+ * what to patch.
-+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
-+ */
-+
- struct paravirt_patch_template {
- struct pv_init_ops pv_init_ops;
- struct pv_time_ops pv_time_ops;
-@@ -348,7 +358,7 @@ struct paravirt_patch_template {
- struct pv_apic_ops pv_apic_ops;
- struct pv_mmu_ops pv_mmu_ops;
- struct pv_lock_ops pv_lock_ops;
--};
-+} __no_randomize_layout;
-
- extern struct pv_info pv_info;
- extern struct pv_init_ops pv_init_ops;
-diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
-index b4389a4..7024269 100644
---- a/arch/x86/include/asm/pgalloc.h
-+++ b/arch/x86/include/asm/pgalloc.h
-@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
- pmd_t *pmd, pte_t *pte)
- {
- paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
-+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
-+}
-+
-+static inline void pmd_populate_user(struct mm_struct *mm,
-+ pmd_t *pmd, pte_t *pte)
-+{
-+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
- set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
- }
-
-@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
-
- #ifdef CONFIG_X86_PAE
- extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
-+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
-+{
-+ pud_populate(mm, pudp, pmd);
-+}
- #else /* !CONFIG_X86_PAE */
- static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
- {
- paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
- set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
- }
-+
-+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
-+{
-+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
-+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
-+}
- #endif /* CONFIG_X86_PAE */
-
- #if PAGETABLE_LEVELS > 3
-@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
- set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
- }
-
-+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
-+{
-+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
-+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
-+}
-+
- static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
- {
- return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
-index 98391db..8f6984e 100644
---- a/arch/x86/include/asm/pgtable-2level.h
-+++ b/arch/x86/include/asm/pgtable-2level.h
-@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
-
- static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
- {
-+ pax_open_kernel();
- *pmdp = pmd;
-+ pax_close_kernel();
- }
-
- static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
-diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
-index cb00ccc..17e9054 100644
---- a/arch/x86/include/asm/pgtable-3level.h
-+++ b/arch/x86/include/asm/pgtable-3level.h
-@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
-
- static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
- {
-+ pax_open_kernel();
- set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
-+ pax_close_kernel();
- }
-
- static inline void native_set_pud(pud_t *pudp, pud_t pud)
- {
-+ pax_open_kernel();
- set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
-+ pax_close_kernel();
- }
-
- /*
-diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
-index 6be9909..5c476fc 100644
---- a/arch/x86/include/asm/pgtable.h
-+++ b/arch/x86/include/asm/pgtable.h
-@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
-
- #ifndef __PAGETABLE_PUD_FOLDED
- #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
-+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
- #define pgd_clear(pgd) native_pgd_clear(pgd)
- #endif
-
-@@ -81,12 +82,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
-
- #define arch_end_context_switch(prev) do {} while(0)
-
-+#define pax_open_kernel() native_pax_open_kernel()
-+#define pax_close_kernel() native_pax_close_kernel()
- #endif /* CONFIG_PARAVIRT */
-
-+#define __HAVE_ARCH_PAX_OPEN_KERNEL
-+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+static inline unsigned long native_pax_open_kernel(void)
-+{
-+ unsigned long cr0;
-+
-+ preempt_disable();
-+ barrier();
-+ cr0 = read_cr0() ^ X86_CR0_WP;
-+ BUG_ON(cr0 & X86_CR0_WP);
-+ write_cr0(cr0);
-+ barrier();
-+ return cr0 ^ X86_CR0_WP;
-+}
-+
-+static inline unsigned long native_pax_close_kernel(void)
-+{
-+ unsigned long cr0;
-+
-+ barrier();
-+ cr0 = read_cr0() ^ X86_CR0_WP;
-+ BUG_ON(!(cr0 & X86_CR0_WP));
-+ write_cr0(cr0);
-+ barrier();
-+ preempt_enable_no_resched();
-+ return cr0 ^ X86_CR0_WP;
-+}
-+#else
-+static inline unsigned long native_pax_open_kernel(void) { return 0; }
-+static inline unsigned long native_pax_close_kernel(void) { return 0; }
-+#endif
-+
- /*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-+static inline int pte_user(pte_t pte)
-+{
-+ return pte_val(pte) & _PAGE_USER;
-+}
-+
- static inline int pte_dirty(pte_t pte)
- {
- return pte_flags(pte) & _PAGE_DIRTY;
-@@ -147,6 +189,11 @@ static inline unsigned long pud_pfn(pud_t pud)
- return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
- }
-
-+static inline unsigned long pgd_pfn(pgd_t pgd)
-+{
-+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
-+}
-+
- #define pte_page(pte) pfn_to_page(pte_pfn(pte))
-
- static inline int pmd_large(pmd_t pte)
-@@ -200,9 +247,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
- return pte_clear_flags(pte, _PAGE_RW);
- }
-
-+static inline pte_t pte_mkread(pte_t pte)
-+{
-+ return __pte(pte_val(pte) | _PAGE_USER);
-+}
-+
- static inline pte_t pte_mkexec(pte_t pte)
- {
-- return pte_clear_flags(pte, _PAGE_NX);
-+#ifdef CONFIG_X86_PAE
-+ if (__supported_pte_mask & _PAGE_NX)
-+ return pte_clear_flags(pte, _PAGE_NX);
-+ else
-+#endif
-+ return pte_set_flags(pte, _PAGE_USER);
-+}
-+
-+static inline pte_t pte_exprotect(pte_t pte)
-+{
-+#ifdef CONFIG_X86_PAE
-+ if (__supported_pte_mask & _PAGE_NX)
-+ return pte_set_flags(pte, _PAGE_NX);
-+ else
-+#endif
-+ return pte_clear_flags(pte, _PAGE_USER);
- }
-
- static inline pte_t pte_mkdirty(pte_t pte)
-@@ -394,6 +461,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
- #endif
-
- #ifndef __ASSEMBLY__
-+
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
-+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
-+{
-+ return cpu_pgd[cpu];
-+}
-+#endif
-+
- #include <linux/mm_types.h>
-
- static inline int pte_none(pte_t pte)
-@@ -515,7 +591,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
- * Currently stuck as a macro due to indirect forward reference to
- * linux/mmzone.h's __section_mem_map_addr() definition:
- */
--#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
-+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
-
- /* Find an entry in the second-level page table.. */
- static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
-@@ -555,7 +631,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
- * Currently stuck as a macro due to indirect forward reference to
- * linux/mmzone.h's __section_mem_map_addr() definition:
- */
--#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
-+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
-
- /* to find an entry in a page-table-directory. */
- static inline unsigned long pud_index(unsigned long address)
-@@ -570,7 +646,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
-
- static inline int pgd_bad(pgd_t pgd)
- {
-- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
-+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
- }
-
- static inline int pgd_none(pgd_t pgd)
-@@ -593,7 +669,12 @@ static inline int pgd_none(pgd_t pgd)
- * pgd_offset() returns a (pgd_t *)
- * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
- */
--#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
-+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-+
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
-+#endif
-+
- /*
- * a shortcut which implies the use of the kernel's pgd, instead
- * of a process's
-@@ -604,6 +685,22 @@ static inline int pgd_none(pgd_t pgd)
- #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
- #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
-
-+#ifdef CONFIG_X86_32
-+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
-+#else
-+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
-+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+#ifdef __ASSEMBLY__
-+#define pax_user_shadow_base pax_user_shadow_base(%rip)
-+#else
-+extern unsigned long pax_user_shadow_base;
-+#endif
-+#endif
-+
-+#endif
-+
- #ifndef __ASSEMBLY__
-
- extern int direct_gbpages;
-@@ -768,11 +865,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
- * dst and src can be on the same page, but the range must not overlap,
- * and must not cross a page boundary.
- */
--static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
-+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
- {
-- memcpy(dst, src, count * sizeof(pgd_t));
-+ pax_open_kernel();
-+ while (count--)
-+ *dst++ = *src++;
-+ pax_close_kernel();
- }
-
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
-+#endif
-+
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
-+#else
-+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
-+#endif
-
- #include <asm-generic/pgtable.h>
- #endif /* __ASSEMBLY__ */
-diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
-index 0c92113..34a77c6 100644
---- a/arch/x86/include/asm/pgtable_32.h
-+++ b/arch/x86/include/asm/pgtable_32.h
-@@ -25,9 +25,6 @@
- struct mm_struct;
- struct vm_area_struct;
-
--extern pgd_t swapper_pg_dir[1024];
--extern pgd_t initial_page_table[1024];
--
- static inline void pgtable_cache_init(void) { }
- static inline void check_pgt_cache(void) { }
- void paging_init(void);
-@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
- # include <asm/pgtable-2level.h>
- #endif
-
-+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-+extern pgd_t initial_page_table[PTRS_PER_PGD];
-+#ifdef CONFIG_X86_PAE
-+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
-+#endif
-+
- #if defined(CONFIG_HIGHPTE)
- #define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
-@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
- /* Clear a kernel PTE and flush it from the TLB */
- #define kpte_clear_flush(ptep, vaddr) \
- do { \
-+ pax_open_kernel(); \
- pte_clear(&init_mm, (vaddr), (ptep)); \
-+ pax_close_kernel(); \
- __flush_tlb_one((vaddr)); \
- } while (0)
-
-@@ -74,6 +79,9 @@ do { \
-
- #endif /* !__ASSEMBLY__ */
-
-+#define HAVE_ARCH_UNMAPPED_AREA
-+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-+
- /*
- * kern_addr_valid() is (1) for FLATMEM and (0) for
- * SPARSEMEM and DISCONTIGMEM
-diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
-index ed5903b..c7fe163 100644
---- a/arch/x86/include/asm/pgtable_32_types.h
-+++ b/arch/x86/include/asm/pgtable_32_types.h
-@@ -8,7 +8,7 @@
- */
- #ifdef CONFIG_X86_PAE
- # include <asm/pgtable-3level_types.h>
--# define PMD_SIZE (1UL << PMD_SHIFT)
-+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
- # define PMD_MASK (~(PMD_SIZE - 1))
- #else
- # include <asm/pgtable-2level_types.h>
-@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
- # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
- #endif
-
-+#ifdef CONFIG_PAX_KERNEXEC
-+#ifndef __ASSEMBLY__
-+extern unsigned char MODULES_EXEC_VADDR[];
-+extern unsigned char MODULES_EXEC_END[];
-+#endif
-+#include <asm/boot.h>
-+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
-+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
-+#else
-+#define ktla_ktva(addr) (addr)
-+#define ktva_ktla(addr) (addr)
-+#endif
-+
- #define MODULES_VADDR VMALLOC_START
- #define MODULES_END VMALLOC_END
- #define MODULES_LEN (MODULES_VADDR - MODULES_END)
-diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
-index 975f709..9f779c9 100644
---- a/arch/x86/include/asm/pgtable_64.h
-+++ b/arch/x86/include/asm/pgtable_64.h
-@@ -16,10 +16,14 @@
-
- extern pud_t level3_kernel_pgt[512];
- extern pud_t level3_ident_pgt[512];
-+extern pud_t level3_vmalloc_start_pgt[512];
-+extern pud_t level3_vmalloc_end_pgt[512];
-+extern pud_t level3_vmemmap_pgt[512];
-+extern pud_t level2_vmemmap_pgt[512];
- extern pmd_t level2_kernel_pgt[512];
- extern pmd_t level2_fixmap_pgt[512];
--extern pmd_t level2_ident_pgt[512];
--extern pgd_t init_level4_pgt[];
-+extern pmd_t level2_ident_pgt[512*2];
-+extern pgd_t init_level4_pgt[512];
-
- #define swapper_pg_dir init_level4_pgt
-
-@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
-
- static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
- {
-+ pax_open_kernel();
- *pmdp = pmd;
-+ pax_close_kernel();
- }
-
- static inline void native_pmd_clear(pmd_t *pmd)
-@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
-
- static inline void native_set_pud(pud_t *pudp, pud_t pud)
- {
-+ pax_open_kernel();
- *pudp = pud;
-+ pax_close_kernel();
- }
-
- static inline void native_pud_clear(pud_t *pud)
-@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
-
- static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
- {
-+ pax_open_kernel();
-+ *pgdp = pgd;
-+ pax_close_kernel();
-+}
-+
-+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
-+{
- *pgdp = pgd;
- }
-
-diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
-index 51817fa..19195fc 100644
---- a/arch/x86/include/asm/pgtable_64_types.h
-+++ b/arch/x86/include/asm/pgtable_64_types.h
-@@ -59,7 +59,12 @@ typedef struct { pteval_t pte; } pte_t;
- #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
- #define MODULES_END _AC(0xffffffffff000000, UL)
- #define MODULES_LEN (MODULES_END - MODULES_VADDR)
-+#define MODULES_EXEC_VADDR MODULES_VADDR
-+#define MODULES_EXEC_END MODULES_END
- #define ESPFIX_PGD_ENTRY _AC(-2, UL)
- #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
-
-+#define ktla_ktva(addr) (addr)
-+#define ktva_ktla(addr) (addr)
-+
- #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
-diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
-index 013286a..8b42f4f 100644
---- a/arch/x86/include/asm/pgtable_types.h
-+++ b/arch/x86/include/asm/pgtable_types.h
-@@ -16,13 +16,12 @@
- #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
- #define _PAGE_BIT_PAT 7 /* on 4KB pages */
- #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
--#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
-+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
- #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
- #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
- #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
--#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
--#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
--#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
-+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
-+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
- #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
-
- /* If _PAGE_BIT_PRESENT is clear, we use these: */
-@@ -40,7 +39,6 @@
- #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
- #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
- #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
--#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
- #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
- #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
- #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
-@@ -57,8 +55,10 @@
-
- #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
- #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
--#else
-+#elif defined(CONFIG_KMEMCHECK)
- #define _PAGE_NX (_AT(pteval_t, 0))
-+#else
-+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
- #endif
-
- #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
-@@ -96,6 +96,9 @@
- #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
- _PAGE_ACCESSED)
-
-+#define PAGE_READONLY_NOEXEC PAGE_READONLY
-+#define PAGE_SHARED_NOEXEC PAGE_SHARED
-+
- #define __PAGE_KERNEL_EXEC \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
- #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
-@@ -106,7 +109,7 @@
- #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
- #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
- #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
--#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
-+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
- #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
- #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
- #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
-@@ -168,8 +171,8 @@
- * bits are combined, this will alow user to access the high address mapped
- * VDSO in the presence of CONFIG_COMPAT_VDSO
- */
--#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
--#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
-+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
-+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
- #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
- #endif
-
-@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
- {
- return native_pgd_val(pgd) & PTE_FLAGS_MASK;
- }
-+#endif
-
-+#if PAGETABLE_LEVELS == 3
-+#include <asm-generic/pgtable-nopud.h>
-+#endif
-+
-+#if PAGETABLE_LEVELS == 2
-+#include <asm-generic/pgtable-nopmd.h>
-+#endif
-+
-+#ifndef __ASSEMBLY__
- #if PAGETABLE_LEVELS > 3
- typedef struct { pudval_t pud; } pud_t;
-
-@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
- return pud.pud;
- }
- #else
--#include <asm-generic/pgtable-nopud.h>
--
- static inline pudval_t native_pud_val(pud_t pud)
- {
- return native_pgd_val(pud.pgd);
-@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
- return pmd.pmd;
- }
- #else
--#include <asm-generic/pgtable-nopmd.h>
--
- static inline pmdval_t native_pmd_val(pmd_t pmd)
- {
- return native_pgd_val(pmd.pud.pgd);
-@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
-
- extern pteval_t __supported_pte_mask;
- extern void set_nx(void);
--extern int nx_enabled;
-
- #define pgprot_writecombine pgprot_writecombine
- extern pgprot_t pgprot_writecombine(pgprot_t prot);
-diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
-index 2dddb31..100c638 100644
---- a/arch/x86/include/asm/processor-flags.h
-+++ b/arch/x86/include/asm/processor-flags.h
-@@ -62,6 +62,7 @@
- #define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */
- #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
- #define X86_CR4_SMEP 0x00100000 /* enable SMEP support */
-+#define X86_CR4_SMAP 0x00200000 /* enable SMAP support */
-
- /*
- * x86-64 Task Priority Register, CR8
-diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
-index f7c89e2..553040d 100644
---- a/arch/x86/include/asm/processor.h
-+++ b/arch/x86/include/asm/processor.h
-@@ -110,7 +110,7 @@ struct cpuinfo_x86 {
- /* Index into per_cpu list: */
- u16 cpu_index;
- u32 microcode;
--} __attribute__((__aligned__(SMP_CACHE_BYTES)));
-+} __attribute__((__aligned__(SMP_CACHE_BYTES))) __randomize_layout;
-
- #define X86_VENDOR_INTEL 0
- #define X86_VENDOR_CYRIX 1
-@@ -266,7 +266,7 @@ struct tss_struct {
-
- } ____cacheline_aligned;
-
--DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
-+extern struct tss_struct init_tss[NR_CPUS];
-
- /*
- * Save the original ist values for checking stack pointers during debugging
-@@ -859,11 +859,18 @@ static inline void spin_lock_prefetch(const void *x)
- */
- #define TASK_SIZE PAGE_OFFSET
- #define TASK_SIZE_MAX TASK_SIZE
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
-+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
-+#else
- #define STACK_TOP TASK_SIZE
--#define STACK_TOP_MAX STACK_TOP
-+#endif
-+
-+#define STACK_TOP_MAX TASK_SIZE
-
- #define INIT_THREAD { \
-- .sp0 = sizeof(init_stack) + (long)&init_stack, \
-+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
- .vm86_info = NULL, \
- .sysenter_cs = __KERNEL_CS, \
- .io_bitmap_ptr = NULL, \
-@@ -877,7 +884,7 @@ static inline void spin_lock_prefetch(const void *x)
- */
- #define INIT_TSS { \
- .x86_tss = { \
-- .sp0 = sizeof(init_stack) + (long)&init_stack, \
-+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
- .ss0 = __KERNEL_DS, \
- .ss1 = __KERNEL_CS, \
- .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
-@@ -888,11 +895,7 @@ static inline void spin_lock_prefetch(const void *x)
- extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
- #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
--#define KSTK_TOP(info) \
--({ \
-- unsigned long *__ptr = (unsigned long *)(info); \
-- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
--})
-+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
-
- /*
- * The below -8 is to reserve 8 bytes on top of the ring0 stack.
-@@ -907,7 +910,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
- #define task_pt_regs(task) \
- ({ \
- struct pt_regs *__regs__; \
-- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
-+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
- __regs__ - 1; \
- })
-
-@@ -917,13 +920,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
- /*
- * User space process size. 47bits minus one guard page.
- */
--#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
-+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
-
- /* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
- #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
-- 0xc0000000 : 0xFFFFe000)
-+ 0xc0000000 : 0xFFFFf000)
-
- #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
- IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-@@ -934,11 +937,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
- #define STACK_TOP_MAX TASK_SIZE_MAX
-
- #define INIT_THREAD { \
-- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
-+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
- }
-
- #define INIT_TSS { \
-- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
-+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
- }
-
- /*
-@@ -960,6 +963,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
- */
- #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
-+#endif
-+
- #define KSTK_EIP(task) (task_pt_regs(task)->ip)
-
- /* Get/set a process' ability to use the timestamp counter instruction */
-@@ -972,7 +979,8 @@ extern int set_tsc_mode(unsigned int val);
- extern int amd_get_nb_id(int cpu);
-
- struct aperfmperf {
-- u64 aperf, mperf;
-+ u64 aperf __intentional_overflow(-1);
-+ u64 mperf __intentional_overflow(-1);
- };
-
- static inline void get_aperfmperf(struct aperfmperf *am)
-diff --git a/arch/x86/include/asm/ptrace-abi.h b/arch/x86/include/asm/ptrace-abi.h
-index 7b0a55a..ad115bf 100644
---- a/arch/x86/include/asm/ptrace-abi.h
-+++ b/arch/x86/include/asm/ptrace-abi.h
-@@ -49,7 +49,6 @@
- #define EFLAGS 144
- #define RSP 152
- #define SS 160
--#define ARGOFFSET R11
- #endif /* __ASSEMBLY__ */
-
- /* top of stack page */
-diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
-index 0581a85..d8c7f13 100644
---- a/arch/x86/include/asm/ptrace.h
-+++ b/arch/x86/include/asm/ptrace.h
-@@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
- }
-
- /*
-- * user_mode_vm(regs) determines whether a register set came from user mode.
-+ * user_mode(regs) determines whether a register set came from user mode.
- * This is true if V8086 mode was enabled OR if the register set was from
- * protected mode with RPL-3 CS value. This tricky test checks that with
- * one comparison. Many places in the kernel can bypass this full check
-- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
-+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
-+ * be used.
- */
--static inline int user_mode(struct pt_regs *regs)
-+static inline int user_mode_novm(struct pt_regs *regs)
- {
- #ifdef CONFIG_X86_32
- return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
- #else
-- return !!(regs->cs & 3);
-+ return !!(regs->cs & SEGMENT_RPL_MASK);
- #endif
- }
-
--static inline int user_mode_vm(struct pt_regs *regs)
-+static inline int user_mode(struct pt_regs *regs)
- {
- #ifdef CONFIG_X86_32
- return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
- USER_RPL;
- #else
-- return user_mode(regs);
-+ return user_mode_novm(regs);
- #endif
- }
-
-@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
- #ifdef CONFIG_X86_64
- static inline bool user_64bit_mode(struct pt_regs *regs)
- {
-+ unsigned long cs = regs->cs & 0xffff;
- #ifndef CONFIG_PARAVIRT
- /*
- * On non-paravirt systems, this is the only long mode CPL 3
- * selector. We do not allow long mode selectors in the LDT.
- */
-- return regs->cs == __USER_CS;
-+ return cs == __USER_CS;
- #else
- /* Headers are too twisted for this to go in paravirt.h. */
-- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
-+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
- #endif
- }
- #endif
-diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
-index 92f29706..d0a1a53 100644
---- a/arch/x86/include/asm/reboot.h
-+++ b/arch/x86/include/asm/reboot.h
-@@ -6,19 +6,19 @@
- struct pt_regs;
-
- struct machine_ops {
-- void (*restart)(char *cmd);
-- void (*halt)(void);
-- void (*power_off)(void);
-+ void (* __noreturn restart)(char *cmd);
-+ void (* __noreturn halt)(void);
-+ void (* __noreturn power_off)(void);
- void (*shutdown)(void);
- void (*crash_shutdown)(struct pt_regs *);
-- void (*emergency_restart)(void);
--};
-+ void (* __noreturn emergency_restart)(void);
-+} __no_const;
-
- extern struct machine_ops machine_ops;
-
- void native_machine_crash_shutdown(struct pt_regs *regs);
- void native_machine_shutdown(void);
--void machine_real_restart(unsigned int type);
-+void __noreturn machine_real_restart(unsigned int type);
- /* These must match dispatch_table in reboot_32.S */
- #define MRR_BIOS 0
- #define MRR_APM 1
-diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
-index 2dbe4a7..ce1db00 100644
---- a/arch/x86/include/asm/rwsem.h
-+++ b/arch/x86/include/asm/rwsem.h
-@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
- {
- asm volatile("# beginning down_read\n\t"
- LOCK_PREFIX _ASM_INC "(%1)\n\t"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX _ASM_DEC "(%1)\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
- /* adds 0x00000001 */
- " jns 1f\n"
- " call call_rwsem_down_read_failed\n"
-@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
- "1:\n\t"
- " mov %1,%2\n\t"
- " add %3,%2\n\t"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ "sub %3,%2\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
- " jle 2f\n\t"
- LOCK_PREFIX " cmpxchg %2,%0\n\t"
- " jnz 1b\n\t"
-@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
- long tmp;
- asm volatile("# beginning down_write\n\t"
- LOCK_PREFIX " xadd %1,(%2)\n\t"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ "mov %1,(%2)\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
- /* adds 0xffff0001, returns the old value */
- " test %1,%1\n\t"
- /* was the count 0 before? */
-@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
- long tmp;
- asm volatile("# beginning __up_read\n\t"
- LOCK_PREFIX " xadd %1,(%2)\n\t"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ "mov %1,(%2)\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
- /* subtracts 1, returns the old value */
- " jns 1f\n\t"
- " call call_rwsem_wake\n" /* expects old value in %edx */
-@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
- long tmp;
- asm volatile("# beginning __up_write\n\t"
- LOCK_PREFIX " xadd %1,(%2)\n\t"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ "mov %1,(%2)\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
- /* subtracts 0xffff0001, returns the old value */
- " jns 1f\n\t"
- " call call_rwsem_wake\n" /* expects old value in %edx */
-@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
- {
- asm volatile("# beginning __downgrade_write\n\t"
- LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
- /*
- * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
- * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
-@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
- */
- static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
- {
-- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
-+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
- : "+m" (sem->count)
- : "er" (delta));
- }
-@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
- */
- static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
- {
-- return delta + xadd(&sem->count, delta);
-+ return delta + xadd_check_overflow(&sem->count, delta);
- }
-
- #endif /* __KERNEL__ */
-diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
-index 5e64171..f58957e 100644
---- a/arch/x86/include/asm/segment.h
-+++ b/arch/x86/include/asm/segment.h
-@@ -64,10 +64,15 @@
- * 26 - ESPFIX small SS
- * 27 - per-cpu [ offset to per-cpu data area ]
- * 28 - stack_canary-20 [ for stack protector ]
-- * 29 - unused
-- * 30 - unused
-+ * 29 - PCI BIOS CS
-+ * 30 - PCI BIOS DS
- * 31 - TSS for double fault handler
- */
-+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
-+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
-+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
-+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
-+
- #define GDT_ENTRY_TLS_MIN 6
- #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
-
-@@ -79,6 +84,8 @@
-
- #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
-
-+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
-+
- #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
-
- #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
-@@ -104,6 +111,12 @@
- #define __KERNEL_STACK_CANARY 0
- #endif
-
-+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
-+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
-+
-+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
-+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
-+
- #define GDT_ENTRY_DOUBLEFAULT_TSS 31
-
- /*
-@@ -141,7 +154,7 @@
- */
-
- /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
--#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
-+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
-
-
- #else
-@@ -165,6 +178,8 @@
- #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
- #define __USER32_DS __USER_DS
-
-+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
-+
- #define GDT_ENTRY_TSS 8 /* needs two entries */
- #define GDT_ENTRY_LDT 10 /* needs two entries */
- #define GDT_ENTRY_TLS_MIN 12
-@@ -185,6 +200,7 @@
- #endif
-
- #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
-+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
- #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
- #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
- #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
-diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
-index 73b11bc..d4a3b63 100644
---- a/arch/x86/include/asm/smp.h
-+++ b/arch/x86/include/asm/smp.h
-@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
- /* cpus sharing the last level cache: */
- DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
- DECLARE_PER_CPU(u16, cpu_llc_id);
--DECLARE_PER_CPU(int, cpu_number);
-+DECLARE_PER_CPU(unsigned int, cpu_number);
-
- static inline struct cpumask *cpu_sibling_mask(int cpu)
- {
-@@ -77,7 +77,7 @@ struct smp_ops {
-
- void (*send_call_func_ipi)(const struct cpumask *mask);
- void (*send_call_func_single_ipi)(int cpu);
--};
-+} __no_const;
-
- /* Globals due to paravirt */
- extern void set_cpu_sibling_map(int cpu);
-@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
- extern int safe_smp_processor_id(void);
-
- #elif defined(CONFIG_X86_64_SMP)
--#define raw_smp_processor_id() (percpu_read(cpu_number))
--
--#define stack_smp_processor_id() \
--({ \
-- struct thread_info *ti; \
-- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
-- ti->cpu; \
--})
-+#define raw_smp_processor_id() (percpu_read(cpu_number))
-+#define stack_smp_processor_id() raw_smp_processor_id()
- #define safe_smp_processor_id() smp_processor_id()
-
- #endif
-diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
-index 972c260..43ab1fd 100644
---- a/arch/x86/include/asm/spinlock.h
-+++ b/arch/x86/include/asm/spinlock.h
-@@ -188,6 +188,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
- static inline void arch_read_lock(arch_rwlock_t *rw)
- {
- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
- "jns 1f\n"
- "call __read_lock_failed\n\t"
- "1:\n"
-@@ -197,6 +205,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
- static inline void arch_write_lock(arch_rwlock_t *rw)
- {
- asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
- "jz 1f\n"
- "call __write_lock_failed\n\t"
- "1:\n"
-@@ -226,13 +242,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
-
- static inline void arch_read_unlock(arch_rwlock_t *rw)
- {
-- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
-+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
- :"+m" (rw->lock) : : "memory");
- }
-
- static inline void arch_write_unlock(arch_rwlock_t *rw)
- {
-- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
-+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
- : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
- }
-
-diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
-index 1575177..cb23f52 100644
---- a/arch/x86/include/asm/stackprotector.h
-+++ b/arch/x86/include/asm/stackprotector.h
-@@ -48,7 +48,7 @@
- * head_32 for boot CPU and setup_per_cpu_areas() for others.
- */
- #define GDT_STACK_CANARY_INIT \
-- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
-+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
-
- /*
- * Initialize the stackprotector canary value.
-@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
-
- static inline void load_stack_canary_segment(void)
- {
--#ifdef CONFIG_X86_32
-+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
- asm volatile ("mov %0, %%gs" : : "r" (0));
- #endif
- }
-diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
-index 70bbe39..4ae2bd4 100644
---- a/arch/x86/include/asm/stacktrace.h
-+++ b/arch/x86/include/asm/stacktrace.h
-@@ -11,28 +11,20 @@
-
- extern int kstack_depth_to_print;
-
--struct thread_info;
-+struct task_struct;
- struct stacktrace_ops;
-
--typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
-- unsigned long *stack,
-- unsigned long bp,
-- const struct stacktrace_ops *ops,
-- void *data,
-- unsigned long *end,
-- int *graph);
-+typedef unsigned long walk_stack_t(struct task_struct *task,
-+ void *stack_start,
-+ unsigned long *stack,
-+ unsigned long bp,
-+ const struct stacktrace_ops *ops,
-+ void *data,
-+ unsigned long *end,
-+ int *graph);
-
--extern unsigned long
--print_context_stack(struct thread_info *tinfo,
-- unsigned long *stack, unsigned long bp,
-- const struct stacktrace_ops *ops, void *data,
-- unsigned long *end, int *graph);
--
--extern unsigned long
--print_context_stack_bp(struct thread_info *tinfo,
-- unsigned long *stack, unsigned long bp,
-- const struct stacktrace_ops *ops, void *data,
-- unsigned long *end, int *graph);
-+extern walk_stack_t print_context_stack;
-+extern walk_stack_t print_context_stack_bp;
-
- /* Generic stack tracer with callbacks */
-
-@@ -40,7 +32,7 @@ struct stacktrace_ops {
- void (*address)(void *data, unsigned long address, int reliable);
- /* On negative return stop dumping */
- int (*stack)(void *data, char *name);
-- walk_stack_t walk_stack;
-+ walk_stack_t *walk_stack;
- };
-
- void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
-diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
-index cb23852..2dde194 100644
---- a/arch/x86/include/asm/sys_ia32.h
-+++ b/arch/x86/include/asm/sys_ia32.h
-@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
- compat_sigset_t __user *, unsigned int);
- asmlinkage long sys32_alarm(unsigned int);
-
--asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
-+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
- asmlinkage long sys32_sysfs(int, u32, u32);
-
- asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
-diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
-index c4a348f..e2ad7ea 100644
---- a/arch/x86/include/asm/syscall.h
-+++ b/arch/x86/include/asm/syscall.h
-@@ -13,6 +13,7 @@
- #ifndef _ASM_X86_SYSCALL_H
- #define _ASM_X86_SYSCALL_H
-
-+#include <linux/audit.h>
- #include <linux/sched.h>
- #include <linux/err.h>
-
-@@ -86,6 +87,12 @@ static inline void syscall_set_arguments(struct task_struct *task,
- memcpy(&regs->bx + i, args, n * sizeof(args[0]));
- }
-
-+static inline int syscall_get_arch(struct task_struct *task,
-+ struct pt_regs *regs)
-+{
-+ return AUDIT_ARCH_I386;
-+}
-+
- #else /* CONFIG_X86_64 */
-
- static inline void syscall_get_arguments(struct task_struct *task,
-@@ -210,6 +217,22 @@ static inline void syscall_set_arguments(struct task_struct *task,
- }
- }
-
-+static inline int syscall_get_arch(struct task_struct *task,
-+ struct pt_regs *regs)
-+{
-+#ifdef CONFIG_IA32_EMULATION
-+ /*
-+ * TS_COMPAT is set for 32-bit syscall entries and then
-+ * remains set until we return to user mode.
-+ *
-+ * TIF_IA32 tasks should always have TS_COMPAT set at
-+ * system call time.
-+ */
-+ if (task_thread_info(task)->status & TS_COMPAT)
-+ return AUDIT_ARCH_I386;
-+#endif
-+ return AUDIT_ARCH_X86_64;
-+}
- #endif /* CONFIG_X86_32 */
-
- #endif /* _ASM_X86_SYSCALL_H */
-diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
-index d75adff..c0cc78b 100644
---- a/arch/x86/include/asm/system.h
-+++ b/arch/x86/include/asm/system.h
-@@ -125,7 +125,7 @@ do { \
- "call __switch_to\n\t" \
- "movq "__percpu_arg([current_task])",%%rsi\n\t" \
- __switch_canary \
-- "movq %P[thread_info](%%rsi),%%r8\n\t" \
-+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
- "movq %%rax,%%rdi\n\t" \
- "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
- "jnz ret_from_fork\n\t" \
-@@ -136,7 +136,7 @@ do { \
- [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
- [ti_flags] "i" (offsetof(struct thread_info, flags)), \
- [_tif_fork] "i" (_TIF_FORK), \
-- [thread_info] "i" (offsetof(struct task_struct, stack)), \
-+ [thread_info] "m" (current_tinfo), \
- [current_task] "m" (current_task) \
- __switch_canary_iparam \
- : "memory", "cc" __EXTRA_CLOBBER)
-@@ -196,7 +196,7 @@ static inline unsigned long get_limit(unsigned long segment)
- {
- unsigned long __limit;
- asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
-- return __limit + 1;
-+ return __limit;
- }
-
- static inline void native_clts(void)
-@@ -390,13 +390,13 @@ static inline void clflush(volatile void *__p)
-
- void cpu_idle_wait(void);
-
--extern unsigned long arch_align_stack(unsigned long sp);
-+#define arch_align_stack(x) ((x) & ~0xfUL)
- extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
-
- void default_idle(void);
- bool set_pm_idle_to_default(void);
-
--void stop_this_cpu(void *dummy);
-+void stop_this_cpu(void *dummy) __noreturn;
-
- /*
- * Force strict CPU ordering.
-diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
-index d7ef849..b1b009a 100644
---- a/arch/x86/include/asm/thread_info.h
-+++ b/arch/x86/include/asm/thread_info.h
-@@ -10,6 +10,7 @@
- #include <linux/compiler.h>
- #include <asm/page.h>
- #include <asm/types.h>
-+#include <asm/percpu.h>
-
- /*
- * low level task data that entry.S needs immediate access to
-@@ -24,7 +25,6 @@ struct exec_domain;
- #include <linux/atomic.h>
-
- struct thread_info {
-- struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
- __u32 flags; /* low level flags */
- __u32 status; /* thread synchronous flags */
-@@ -34,18 +34,12 @@ struct thread_info {
- mm_segment_t addr_limit;
- struct restart_block restart_block;
- void __user *sysenter_return;
--#ifdef CONFIG_X86_32
-- unsigned long previous_esp; /* ESP of the previous stack in
-- case of nested (IRQ) stacks
-- */
-- __u8 supervisor_stack[0];
--#endif
-+ unsigned long lowest_stack;
- int uaccess_err;
- };
-
--#define INIT_THREAD_INFO(tsk) \
-+#define INIT_THREAD_INFO \
- { \
-- .task = &tsk, \
- .exec_domain = &default_exec_domain, \
- .flags = 0, \
- .cpu = 0, \
-@@ -56,7 +50,7 @@ struct thread_info {
- }, \
- }
-
--#define init_thread_info (init_thread_union.thread_info)
-+#define init_thread_info (init_thread_union.stack)
- #define init_stack (init_thread_union.stack)
-
- #else /* !__ASSEMBLY__ */
-@@ -95,6 +89,7 @@ struct thread_info {
- #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
- #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
- #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
-+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
-
- #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
- #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
-@@ -117,16 +112,17 @@ struct thread_info {
- #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
- #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
- #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
-+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
-
- /* work to do in syscall_trace_enter() */
- #define _TIF_WORK_SYSCALL_ENTRY \
- (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
-- _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
-+ _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
-
- /* work to do in syscall_trace_leave() */
- #define _TIF_WORK_SYSCALL_EXIT \
- (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
-- _TIF_SYSCALL_TRACEPOINT)
-+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
-
- /* work to do on interrupt/exception return */
- #define _TIF_WORK_MASK \
-@@ -136,7 +132,8 @@ struct thread_info {
-
- /* work to do on any return to user space */
- #define _TIF_ALLWORK_MASK \
-- ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
-+ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
-+ _TIF_GRSEC_SETXID)
-
- /* Only used for 64 bit */
- #define _TIF_DO_NOTIFY_MASK \
-@@ -170,45 +167,40 @@ struct thread_info {
- ret; \
- })
-
--#ifdef CONFIG_X86_32
--
--#define STACK_WARN (THREAD_SIZE/8)
--/*
-- * macros/functions for gaining access to the thread information structure
-- *
-- * preempt_count needs to be 1 initially, until the scheduler is functional.
-- */
--#ifndef __ASSEMBLY__
--
--
--/* how to get the current stack pointer from C */
--register unsigned long current_stack_pointer asm("esp") __used;
--
--/* how to get the thread information struct from C */
--static inline struct thread_info *current_thread_info(void)
--{
-- return (struct thread_info *)
-- (current_stack_pointer & ~(THREAD_SIZE - 1));
--}
--
--#else /* !__ASSEMBLY__ */
--
-+#ifdef __ASSEMBLY__
- /* how to get the thread information struct from ASM */
- #define GET_THREAD_INFO(reg) \
-- movl $-THREAD_SIZE, reg; \
-- andl %esp, reg
-+ mov PER_CPU_VAR(current_tinfo), reg
-
- /* use this one if reg already contains %esp */
--#define GET_THREAD_INFO_WITH_ESP(reg) \
-- andl $-THREAD_SIZE, reg
-+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
-+#else
-+/* how to get the thread information struct from C */
-+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
-+
-+static __always_inline struct thread_info *current_thread_info(void)
-+{
-+ return percpu_read_stable(current_tinfo);
-+}
-+#endif
-+
-+#ifdef CONFIG_X86_32
-+
-+#define STACK_WARN (THREAD_SIZE/8)
-+/*
-+ * macros/functions for gaining access to the thread information structure
-+ *
-+ * preempt_count needs to be 1 initially, until the scheduler is functional.
-+ */
-+#ifndef __ASSEMBLY__
-+
-+/* how to get the current stack pointer from C */
-+register unsigned long current_stack_pointer asm("esp") __used;
-
- #endif
-
- #else /* X86_32 */
-
--#include <asm/percpu.h>
--#define KERNEL_STACK_OFFSET (5*8)
--
- /*
- * macros/functions for gaining access to the thread information structure
- * preempt_count needs to be 1 initially, until the scheduler is functional.
-@@ -216,21 +208,8 @@ static inline struct thread_info *current_thread_info(void)
- #ifndef __ASSEMBLY__
- DECLARE_PER_CPU(unsigned long, kernel_stack);
-
--static inline struct thread_info *current_thread_info(void)
--{
-- struct thread_info *ti;
-- ti = (void *)(percpu_read_stable(kernel_stack) +
-- KERNEL_STACK_OFFSET - THREAD_SIZE);
-- return ti;
--}
--
--#else /* !__ASSEMBLY__ */
--
--/* how to get the thread information struct from ASM */
--#define GET_THREAD_INFO(reg) \
-- movq PER_CPU_VAR(kernel_stack),reg ; \
-- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
--
-+/* how to get the current stack pointer from C */
-+register unsigned long current_stack_pointer asm("rsp") __used;
- #endif
-
- #endif /* !X86_32 */
-@@ -264,5 +243,16 @@ extern void arch_task_cache_init(void);
- extern void free_thread_info(struct thread_info *ti);
- extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
- #define arch_task_cache_init arch_task_cache_init
-+
-+#define __HAVE_THREAD_FUNCTIONS
-+#define task_thread_info(task) (&(task)->tinfo)
-+#define task_stack_page(task) ((task)->stack)
-+#define setup_thread_stack(p, org) do {} while (0)
-+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
-+
-+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
-+extern struct task_struct *alloc_task_struct_node(int node);
-+extern void free_task_struct(struct task_struct *);
-+
- #endif
- #endif /* _ASM_X86_THREAD_INFO_H */
-diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
-index 36361bf..916c53c 100644
---- a/arch/x86/include/asm/uaccess.h
-+++ b/arch/x86/include/asm/uaccess.h
-@@ -7,6 +7,7 @@
- #include <linux/compiler.h>
- #include <linux/thread_info.h>
- #include <linux/string.h>
-+#include <linux/sched.h>
- #include <asm/asm.h>
- #include <asm/page.h>
-
-@@ -28,7 +29,12 @@
-
- #define get_ds() (KERNEL_DS)
- #define get_fs() (current_thread_info()->addr_limit)
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+void __set_fs(mm_segment_t x);
-+void set_fs(mm_segment_t x);
-+#else
- #define set_fs(x) (current_thread_info()->addr_limit = (x))
-+#endif
-
- #define segment_eq(a, b) ((a).seg == (b).seg)
-
-@@ -52,7 +58,7 @@
- __chk_user_ptr(addr); \
- asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \
- : "=&r" (flag), "=r" (roksum) \
-- : "1" (addr), "g" ((long)(size)), \
-+ : "1" (addr), "rm" ((long)(size)), \
- "rm" (current_thread_info()->addr_limit.seg)); \
- flag; \
- })
-@@ -76,7 +82,35 @@
- * checks that the pointer is in the user space range - after calling
- * this function, memory access functions may still return -EFAULT.
- */
--#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
-+#define access_ok_noprefault(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
-+#define access_ok(type, addr, size) \
-+({ \
-+ unsigned long __size = size; \
-+ unsigned long __addr = (unsigned long)addr; \
-+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
-+ if (__ret_ao && __size) { \
-+ unsigned long __addr_ao = __addr & PAGE_MASK; \
-+ unsigned long __end_ao = __addr + __size - 1; \
-+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
-+ while (__addr_ao <= __end_ao) { \
-+ char __c_ao; \
-+ __addr_ao += PAGE_SIZE; \
-+ if (__size > PAGE_SIZE) \
-+ cond_resched(); \
-+ if (__get_user(__c_ao, (char __user *)__addr)) \
-+ break; \
-+ if (type != VERIFY_WRITE) { \
-+ __addr = __addr_ao; \
-+ continue; \
-+ } \
-+ if (__put_user(__c_ao, (char __user *)__addr)) \
-+ break; \
-+ __addr = __addr_ao; \
-+ } \
-+ } \
-+ } \
-+ __ret_ao; \
-+})
-
- /*
- * The exception table consists of pairs of addresses: the first is the
-@@ -126,6 +160,15 @@ extern int __get_user_bad(void);
- /* Careful: we have to cast the result to the type of the pointer
- * for sign reasons */
-
-+/*
-+ * This is a type: either (un)signed int, if the argument fits into
-+ * that type, or otherwise (un)signed long long.
-+ */
-+#define __inttype(x) \
-+__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0U), \
-+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0ULL, 0LL),\
-+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0U, 0)))
-+
- /**
- * get_user: - Get a simple variable from user space.
- * @x: Variable to store result.
-@@ -154,7 +197,7 @@ extern int __get_user_bad(void);
- #define get_user(x, ptr) \
- ({ \
- int __ret_gu; \
-- unsigned long __val_gu; \
-+ __inttype(*(ptr)) __val_gu; \
- __chk_user_ptr(ptr); \
- might_fault(); \
- switch (sizeof(*(ptr))) { \
-@@ -182,12 +225,20 @@ extern int __get_user_bad(void);
- asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
- : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
-
--
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+#define __copyuser_seg "gs;"
-+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
-+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
-+#else
-+#define __copyuser_seg
-+#define __COPYUSER_SET_ES
-+#define __COPYUSER_RESTORE_ES
-+#endif
-
- #ifdef CONFIG_X86_32
- #define __put_user_asm_u64(x, addr, err, errret) \
-- asm volatile("1: movl %%eax,0(%2)\n" \
-- "2: movl %%edx,4(%2)\n" \
-+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
-+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
- "3:\n" \
- ".section .fixup,\"ax\"\n" \
- "4: movl %3,%0\n" \
-@@ -199,8 +250,8 @@ extern int __get_user_bad(void);
- : "A" (x), "r" (addr), "i" (errret), "0" (err))
-
- #define __put_user_asm_ex_u64(x, addr) \
-- asm volatile("1: movl %%eax,0(%1)\n" \
-- "2: movl %%edx,4(%1)\n" \
-+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
-+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
- "3:\n" \
- _ASM_EXTABLE(1b, 2b - 1b) \
- _ASM_EXTABLE(2b, 3b - 2b) \
-@@ -252,7 +303,7 @@ extern void __put_user_8(void);
- __typeof__(*(ptr)) __pu_val; \
- __chk_user_ptr(ptr); \
- might_fault(); \
-- __pu_val = x; \
-+ __pu_val = (x); \
- switch (sizeof(*(ptr))) { \
- case 1: \
- __put_user_x(1, __pu_val, ptr, __ret_pu); \
-@@ -373,7 +424,7 @@ do { \
- } while (0)
-
- #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
-- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
-+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: mov %3,%0\n" \
-@@ -381,7 +432,7 @@ do { \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE(1b, 3b) \
-- : "=r" (err), ltype(x) \
-+ : "=r" (err), ltype (x) \
- : "m" (__m(addr)), "i" (errret), "0" (err))
-
- #define __get_user_size_ex(x, ptr, size) \
-@@ -406,7 +457,7 @@ do { \
- } while (0)
-
- #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
-- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
-+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
- "2:\n" \
- _ASM_EXTABLE(1b, 2b - 1b) \
- : ltype(x) : "m" (__m(addr)))
-@@ -423,13 +474,24 @@ do { \
- int __gu_err; \
- unsigned long __gu_val; \
- __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
-- (x) = (__force __typeof__(*(ptr)))__gu_val; \
-+ (x) = (__typeof__(*(ptr)))__gu_val; \
- __gu_err; \
- })
-
- /* FIXME: this hack is definitely wrong -AK */
- struct __large_struct { unsigned long buf[100]; };
--#define __m(x) (*(struct __large_struct __user *)(x))
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+#define ____m(x) \
-+({ \
-+ unsigned long ____x = (unsigned long)(x); \
-+ if (____x < pax_user_shadow_base) \
-+ ____x += pax_user_shadow_base; \
-+ (typeof(x))____x; \
-+})
-+#else
-+#define ____m(x) (x)
-+#endif
-+#define __m(x) (*(struct __large_struct __user *)____m(x))
-
- /*
- * Tell gcc we read from memory instead of writing: this is because
-@@ -437,7 +499,7 @@ struct __large_struct { unsigned long buf[100]; };
- * aliasing issues.
- */
- #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
-- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
-+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: mov %3,%0\n" \
-@@ -445,10 +507,10 @@ struct __large_struct { unsigned long buf[100]; };
- ".previous\n" \
- _ASM_EXTABLE(1b, 3b) \
- : "=r"(err) \
-- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
-+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
-
- #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
-- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
-+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
- "2:\n" \
- _ASM_EXTABLE(1b, 2b - 1b) \
- : : ltype(x), "m" (__m(addr)))
-@@ -487,8 +549,12 @@ struct __large_struct { unsigned long buf[100]; };
- * On error, the variable @x is set to zero.
- */
-
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+#define __get_user(x, ptr) get_user((x), (ptr))
-+#else
- #define __get_user(x, ptr) \
- __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
-+#endif
-
- /**
- * __put_user: - Write a simple value into user space, with less checking.
-@@ -510,8 +576,12 @@ struct __large_struct { unsigned long buf[100]; };
- * Returns zero on success, or -EFAULT on error.
- */
-
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+#define __put_user(x, ptr) put_user((x), (ptr))
-+#else
- #define __put_user(x, ptr) \
- __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-+#endif
-
- #define __get_user_unaligned __get_user
- #define __put_user_unaligned __put_user
-@@ -529,7 +599,7 @@ struct __large_struct { unsigned long buf[100]; };
- #define get_user_ex(x, ptr) do { \
- unsigned long __gue_val; \
- __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
-- (x) = (__force __typeof__(*(ptr)))__gue_val; \
-+ (x) = (__typeof__(*(ptr)))__gue_val; \
- } while (0)
-
- #ifdef CONFIG_X86_WP_WORKS_OK
-diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
-index 566e803..9540707 100644
---- a/arch/x86/include/asm/uaccess_32.h
-+++ b/arch/x86/include/asm/uaccess_32.h
-@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
- static __always_inline unsigned long __must_check
- __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
- {
-+ if ((long)n < 0)
-+ return n;
-+
-+ check_object_size(from, n, true);
-+
- if (__builtin_constant_p(n)) {
- unsigned long ret;
-
-@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
- __copy_to_user(void __user *to, const void *from, unsigned long n)
- {
- might_fault();
-+
- return __copy_to_user_inatomic(to, from, n);
- }
-
- static __always_inline unsigned long
- __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
- {
-+ if ((long)n < 0)
-+ return n;
-+
- /* Avoid zeroing the tail if the copy fails..
- * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
- * but as the zeroing behaviour is only significant when n is not
-@@ -137,6 +146,12 @@ static __always_inline unsigned long
- __copy_from_user(void *to, const void __user *from, unsigned long n)
- {
- might_fault();
-+
-+ if ((long)n < 0)
-+ return n;
-+
-+ check_object_size(to, n, false);
-+
- if (__builtin_constant_p(n)) {
- unsigned long ret;
-
-@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
- const void __user *from, unsigned long n)
- {
- might_fault();
-+
-+ if ((long)n < 0)
-+ return n;
-+
- if (__builtin_constant_p(n)) {
- unsigned long ret;
-
-@@ -181,15 +200,19 @@ static __always_inline unsigned long
- __copy_from_user_inatomic_nocache(void *to, const void __user *from,
- unsigned long n)
- {
-- return __copy_from_user_ll_nocache_nozero(to, from, n);
-+ if ((long)n < 0)
-+ return n;
-+
-+ return __copy_from_user_ll_nocache_nozero(to, from, n);
- }
-
--unsigned long __must_check copy_to_user(void __user *to,
-- const void *from, unsigned long n);
--unsigned long __must_check _copy_from_user(void *to,
-- const void __user *from,
-- unsigned long n);
--
-+extern void copy_to_user_overflow(void)
-+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-+ __compiletime_error("copy_to_user() buffer size is not provably correct")
-+#else
-+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
-+#endif
-+;
-
- extern void copy_from_user_overflow(void)
- #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
- #endif
- ;
-
--static inline unsigned long __must_check copy_from_user(void *to,
-- const void __user *from,
-- unsigned long n)
-+/**
-+ * copy_to_user: - Copy a block of data into user space.
-+ * @to: Destination address, in user space.
-+ * @from: Source address, in kernel space.
-+ * @n: Number of bytes to copy.
-+ *
-+ * Context: User context only. This function may sleep.
-+ *
-+ * Copy data from kernel space to user space.
-+ *
-+ * Returns number of bytes that could not be copied.
-+ * On success, this will be zero.
-+ */
-+static inline unsigned long __must_check
-+copy_to_user(void __user *to, const void *from, unsigned long n)
- {
-- int sz = __compiletime_object_size(to);
-+ size_t sz = __compiletime_object_size(from);
-
-- if (likely(sz == -1 || sz >= n))
-- n = _copy_from_user(to, from, n);
-- else
-+ if (unlikely(sz != (size_t)-1 && sz < n))
-+ copy_to_user_overflow();
-+ else if (access_ok(VERIFY_WRITE, to, n))
-+ n = __copy_to_user(to, from, n);
-+ return n;
-+}
-+
-+/**
-+ * copy_from_user: - Copy a block of data from user space.
-+ * @to: Destination address, in kernel space.
-+ * @from: Source address, in user space.
-+ * @n: Number of bytes to copy.
-+ *
-+ * Context: User context only. This function may sleep.
-+ *
-+ * Copy data from user space to kernel space.
-+ *
-+ * Returns number of bytes that could not be copied.
-+ * On success, this will be zero.
-+ *
-+ * If some data could not be copied, this function will pad the copied
-+ * data to the requested size using zero bytes.
-+ */
-+static inline unsigned long __must_check
-+copy_from_user(void *to, const void __user *from, unsigned long n)
-+{
-+ size_t sz = __compiletime_object_size(to);
-+
-+ check_object_size(to, n, false);
-+
-+ if (unlikely(sz != (size_t)-1 && sz < n))
- copy_from_user_overflow();
--
-+ else if (access_ok(VERIFY_READ, from, n))
-+ n = __copy_from_user(to, from, n);
-+ else if ((long)n > 0)
-+ memset(to, 0, n);
- return n;
- }
-
-diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
-index 1c66d30..6f1d97a 100644
---- a/arch/x86/include/asm/uaccess_64.h
-+++ b/arch/x86/include/asm/uaccess_64.h
-@@ -10,6 +10,9 @@
- #include <asm/alternative.h>
- #include <asm/cpufeature.h>
- #include <asm/page.h>
-+#include <asm/pgtable.h>
-+
-+#define set_fs(x) (current_thread_info()->addr_limit = (x))
-
- /*
- * Copy To/From Userspace
-@@ -17,12 +20,12 @@
-
- /* Handles exceptions in both to and from, but doesn't do access_ok */
- __must_check unsigned long
--copy_user_generic_string(void *to, const void *from, unsigned len);
-+copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
- __must_check unsigned long
--copy_user_generic_unrolled(void *to, const void *from, unsigned len);
-+copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
-
- static __always_inline __must_check unsigned long
--copy_user_generic(void *to, const void *from, unsigned len)
-+copy_user_generic(void *to, const void *from, unsigned long len)
- {
- unsigned ret;
-
-@@ -36,138 +39,200 @@ copy_user_generic(void *to, const void *from, unsigned len)
- return ret;
- }
-
-+static __always_inline __must_check unsigned long
-+__copy_to_user(void __user *to, const void *from, unsigned long len);
-+static __always_inline __must_check unsigned long
-+__copy_from_user(void *to, const void __user *from, unsigned long len);
- __must_check unsigned long
--_copy_to_user(void __user *to, const void *from, unsigned len);
--__must_check unsigned long
--_copy_from_user(void *to, const void __user *from, unsigned len);
--__must_check unsigned long
--copy_in_user(void __user *to, const void __user *from, unsigned len);
-+copy_in_user(void __user *to, const void __user *from, unsigned long len);
-+
-+extern void copy_to_user_overflow(void)
-+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-+ __compiletime_error("copy_to_user() buffer size is not provably correct")
-+#else
-+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
-+#endif
-+;
-+
-+extern void copy_from_user_overflow(void)
-+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-+ __compiletime_error("copy_from_user() buffer size is not provably correct")
-+#else
-+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
-+#endif
-+;
-
- static inline unsigned long __must_check copy_from_user(void *to,
- const void __user *from,
- unsigned long n)
- {
-- int sz = __compiletime_object_size(to);
--
- might_fault();
-- if (likely(sz == -1 || sz >= n))
-- n = _copy_from_user(to, from, n);
--#ifdef CONFIG_DEBUG_VM
-- else
-- WARN(1, "Buffer overflow detected!\n");
--#endif
-+
-+ check_object_size(to, n, false);
-+
-+ if (access_ok(VERIFY_READ, from, n))
-+ n = __copy_from_user(to, from, n);
-+ else if (n < INT_MAX)
-+ memset(to, 0, n);
- return n;
- }
-
- static __always_inline __must_check
--int copy_to_user(void __user *dst, const void *src, unsigned size)
-+int copy_to_user(void __user *dst, const void *src, unsigned long size)
- {
- might_fault();
-
-- return _copy_to_user(dst, src, size);
-+ if (access_ok(VERIFY_WRITE, dst, size))
-+ size = __copy_to_user(dst, src, size);
-+ return size;
- }
-
- static __always_inline __must_check
--int __copy_from_user(void *dst, const void __user *src, unsigned size)
-+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
- {
-- int ret = 0;
-+ size_t sz = __compiletime_object_size(dst);
-+ unsigned ret = 0;
-
- might_fault();
-+
-+ if (size > INT_MAX)
-+ return size;
-+
-+ check_object_size(dst, size, false);
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ if (!access_ok_noprefault(VERIFY_READ, src, size))
-+ return size;
-+#endif
-+
-+ if (unlikely(sz != (size_t)-1 && sz < size)) {
-+ copy_from_user_overflow();
-+ return size;
-+ }
-+
- if (!__builtin_constant_p(size))
-- return copy_user_generic(dst, (__force void *)src, size);
-+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
- switch (size) {
-- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
-+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
- ret, "b", "b", "=q", 1);
- return ret;
-- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
-+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
- ret, "w", "w", "=r", 2);
- return ret;
-- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
-+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
- ret, "l", "k", "=r", 4);
- return ret;
-- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
-+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
- ret, "q", "", "=r", 8);
- return ret;
- case 10:
-- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
-+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
- ret, "q", "", "=r", 10);
- if (unlikely(ret))
- return ret;
- __get_user_asm(*(u16 *)(8 + (char *)dst),
-- (u16 __user *)(8 + (char __user *)src),
-+ (const u16 __user *)(8 + (const char __user *)src),
- ret, "w", "w", "=r", 2);
- return ret;
- case 16:
-- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
-+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
- ret, "q", "", "=r", 16);
- if (unlikely(ret))
- return ret;
- __get_user_asm(*(u64 *)(8 + (char *)dst),
-- (u64 __user *)(8 + (char __user *)src),
-+ (const u64 __user *)(8 + (const char __user *)src),
- ret, "q", "", "=r", 8);
- return ret;
- default:
-- return copy_user_generic(dst, (__force void *)src, size);
-+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
- }
- }
-
- static __always_inline __must_check
--int __copy_to_user(void __user *dst, const void *src, unsigned size)
-+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
- {
-- int ret = 0;
-+ size_t sz = __compiletime_object_size(src);
-+ unsigned ret = 0;
-
- might_fault();
-+
-+ if (size > INT_MAX)
-+ return size;
-+
-+ check_object_size(src, size, true);
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
-+ return size;
-+#endif
-+
-+ if (unlikely(sz != (size_t)-1 && sz < size)) {
-+ copy_to_user_overflow();
-+ return size;
-+ }
-+
- if (!__builtin_constant_p(size))
-- return copy_user_generic((__force void *)dst, src, size);
-+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
- switch (size) {
-- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
-+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
- ret, "b", "b", "iq", 1);
- return ret;
-- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
-+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
- ret, "w", "w", "ir", 2);
- return ret;
-- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
-+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
- ret, "l", "k", "ir", 4);
- return ret;
-- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
-+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
- ret, "q", "", "er", 8);
- return ret;
- case 10:
-- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
-+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
- ret, "q", "", "er", 10);
- if (unlikely(ret))
- return ret;
- asm("":::"memory");
-- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
-+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
- ret, "w", "w", "ir", 2);
- return ret;
- case 16:
-- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
-+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
- ret, "q", "", "er", 16);
- if (unlikely(ret))
- return ret;
- asm("":::"memory");
-- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
-+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
- ret, "q", "", "er", 8);
- return ret;
- default:
-- return copy_user_generic((__force void *)dst, src, size);
-+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
- }
- }
-
- static __always_inline __must_check
--int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
-+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
- {
-- int ret = 0;
-+ unsigned ret = 0;
-
- might_fault();
-+
-+ if (size > INT_MAX)
-+ return size;
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ if (!access_ok_noprefault(VERIFY_READ, src, size))
-+ return size;
-+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
-+ return size;
-+#endif
-+
- if (!__builtin_constant_p(size))
-- return copy_user_generic((__force void *)dst,
-- (__force void *)src, size);
-+ return copy_user_generic((__force_kernel void *)____m(dst),
-+ (__force_kernel const void *)____m(src), size);
- switch (size) {
- case 1: {
- u8 tmp;
-- __get_user_asm(tmp, (u8 __user *)src,
-+ __get_user_asm(tmp, (const u8 __user *)src,
- ret, "b", "b", "=q", 1);
- if (likely(!ret))
- __put_user_asm(tmp, (u8 __user *)dst,
-@@ -176,7 +241,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
- }
- case 2: {
- u16 tmp;
-- __get_user_asm(tmp, (u16 __user *)src,
-+ __get_user_asm(tmp, (const u16 __user *)src,
- ret, "w", "w", "=r", 2);
- if (likely(!ret))
- __put_user_asm(tmp, (u16 __user *)dst,
-@@ -186,7 +251,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
-
- case 4: {
- u32 tmp;
-- __get_user_asm(tmp, (u32 __user *)src,
-+ __get_user_asm(tmp, (const u32 __user *)src,
- ret, "l", "k", "=r", 4);
- if (likely(!ret))
- __put_user_asm(tmp, (u32 __user *)dst,
-@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
- }
- case 8: {
- u64 tmp;
-- __get_user_asm(tmp, (u64 __user *)src,
-+ __get_user_asm(tmp, (const u64 __user *)src,
- ret, "q", "", "=r", 8);
- if (likely(!ret))
- __put_user_asm(tmp, (u64 __user *)dst,
-@@ -203,8 +268,8 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
- return ret;
- }
- default:
-- return copy_user_generic((__force void *)dst,
-- (__force void *)src, size);
-+ return copy_user_generic((__force_kernel void *)____m(dst),
-+ (__force_kernel const void *)____m(src), size);
- }
- }
-
-@@ -218,36 +283,57 @@ __must_check long strlen_user(const char __user *str);
- __must_check unsigned long clear_user(void __user *mem, unsigned long len);
- __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
-
--static __must_check __always_inline int
--__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
-+static __must_check __always_inline unsigned long
-+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
- {
-- return copy_user_generic(dst, (__force const void *)src, size);
-+ if (size > INT_MAX)
-+ return size;
-+
-+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
- }
-
--static __must_check __always_inline int
--__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
-+static __must_check __always_inline unsigned long
-+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
- {
-- return copy_user_generic((__force void *)dst, src, size);
-+ if (size > INT_MAX)
-+ return size;
-+
-+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
- }
-
--extern long __copy_user_nocache(void *dst, const void __user *src,
-- unsigned size, int zerorest);
-+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
-+ unsigned long size, int zerorest);
-
--static inline int
--__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
-+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
- {
- might_sleep();
-+
-+ if (size > INT_MAX)
-+ return size;
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ if (!access_ok_noprefault(VERIFY_READ, src, size))
-+ return size;
-+#endif
-+
- return __copy_user_nocache(dst, src, size, 1);
- }
-
--static inline int
--__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
-- unsigned size)
-+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
-+ unsigned long size)
- {
-+ if (size > INT_MAX)
-+ return size;
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ if (!access_ok_noprefault(VERIFY_READ, src, size))
-+ return size;
-+#endif
-+
- return __copy_user_nocache(dst, src, size, 0);
- }
-
--unsigned long
--copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
-+extern unsigned long
-+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
-
- #endif /* _ASM_X86_UACCESS_64_H */
-diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
-index bb05228..d763d5b 100644
---- a/arch/x86/include/asm/vdso.h
-+++ b/arch/x86/include/asm/vdso.h
-@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
- #define VDSO32_SYMBOL(base, name) \
- ({ \
- extern const char VDSO32_##name[]; \
-- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
-+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
- })
- #endif
-
-diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
-index 1971e65..1e07354 100644
---- a/arch/x86/include/asm/x86_init.h
-+++ b/arch/x86/include/asm/x86_init.h
-@@ -139,7 +139,7 @@ struct x86_init_ops {
- struct x86_init_timers timers;
- struct x86_init_iommu iommu;
- struct x86_init_pci pci;
--};
-+} __no_const;
-
- /**
- * struct x86_cpuinit_ops - platform specific cpu hotplug setups
-@@ -147,7 +147,7 @@ struct x86_init_ops {
- */
- struct x86_cpuinit_ops {
- void (*setup_percpu_clockev)(void);
--};
-+} __no_const;
-
- /**
- * struct x86_platform_ops - platform specific runtime functions
-@@ -169,7 +169,7 @@ struct x86_platform_ops {
- void (*nmi_init)(void);
- unsigned char (*get_nmi_reason)(void);
- int (*i8042_detect)(void);
--};
-+} __no_const;
-
- struct pci_dev;
-
-@@ -177,7 +177,7 @@ struct x86_msi_ops {
- int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
- void (*teardown_msi_irq)(unsigned int irq);
- void (*teardown_msi_irqs)(struct pci_dev *dev);
--};
-+} __no_const;
-
- extern struct x86_init_ops x86_init;
- extern struct x86_cpuinit_ops x86_cpuinit;
-diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
-index c34f96c..e26f052 100644
---- a/arch/x86/include/asm/xen/page.h
-+++ b/arch/x86/include/asm/xen/page.h
-@@ -54,7 +54,7 @@ extern int m2p_remove_override(struct page *page, bool clear_pte);
- extern struct page *m2p_find_override(unsigned long mfn);
- extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
-
--static inline unsigned long pfn_to_mfn(unsigned long pfn)
-+static inline unsigned long __intentional_overflow(-1) pfn_to_mfn(unsigned long pfn)
- {
- unsigned long mfn;
-
-diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
-index c6ce245..aab6adb 100644
---- a/arch/x86/include/asm/xsave.h
-+++ b/arch/x86/include/asm/xsave.h
-@@ -65,6 +65,8 @@ static inline int xsave_user(struct xsave_struct __user *buf)
- {
- int err;
-
-+ buf = (struct xsave_struct __user *)____m(buf);
-+
- /*
- * Clear the xsave header first, so that reserved fields are
- * initialized to zero.
-@@ -74,7 +76,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
- if (unlikely(err))
- return -EFAULT;
-
-- __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
-+ __asm__ __volatile__("1:"
-+ __copyuser_seg
-+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- "3: movl $-1,%[err]\n"
-@@ -96,11 +100,13 @@ static inline int xsave_user(struct xsave_struct __user *buf)
- static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
- {
- int err;
-- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
-+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)____m(buf));
- u32 lmask = mask;
- u32 hmask = mask >> 32;
-
-- __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
-+ __asm__ __volatile__("1:"
-+ __copyuser_seg
-+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- "3: movl $-1,%[err]\n"
-diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
-index 479d03c..2450277 100644
---- a/arch/x86/kernel/acpi/boot.c
-+++ b/arch/x86/kernel/acpi/boot.c
-@@ -1345,7 +1345,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
- * If your system is blacklisted here, but you find that acpi=force
- * works for you, please contact linux-acpi@vger.kernel.org
- */
--static struct dmi_system_id __initdata acpi_dmi_table[] = {
-+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
- /*
- * Boxes that need ACPI disabled
- */
-@@ -1420,7 +1420,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
- };
-
- /* second table for DMI checks that should run after early-quirks */
--static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
-+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
- /*
- * HP laptops which use a DSDT reporting as HP/SB400/10000,
- * which includes some code which overrides all temperature
-diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
-index 6a564ac..3f3a3d7 100644
---- a/arch/x86/kernel/acpi/realmode/Makefile
-+++ b/arch/x86/kernel/acpi/realmode/Makefile
-@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
- $(call cc-option, -fno-stack-protector) \
- $(call cc-option, -mpreferred-stack-boundary=2)
- KBUILD_CFLAGS += $(call cc-option, -m32)
-+ifdef CONSTIFY_PLUGIN
-+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
-+endif
- KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
- GCOV_PROFILE := n
-
-diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
-index b4fd836..4358fe3 100644
---- a/arch/x86/kernel/acpi/realmode/wakeup.S
-+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
-@@ -108,6 +108,9 @@ wakeup_code:
- /* Do any other stuff... */
-
- #ifndef CONFIG_64BIT
-+ /* Recheck NX bit overrides (64bit path does this in trampoline */
-+ call verify_cpu
-+
- /* This could also be done in C code... */
- movl pmode_cr3, %eax
- movl %eax, %cr3
-@@ -131,6 +134,7 @@ wakeup_code:
- movl pmode_cr0, %eax
- movl %eax, %cr0
- jmp pmode_return
-+# include "../../verify_cpu.S"
- #else
- pushw $0
- pushw trampoline_segment
-diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
-index 103b6ab..2004d0a 100644
---- a/arch/x86/kernel/acpi/sleep.c
-+++ b/arch/x86/kernel/acpi/sleep.c
-@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
- header->trampoline_segment = trampoline_address() >> 4;
- #ifdef CONFIG_SMP
- stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
-+
-+ pax_open_kernel();
- early_gdt_descr.address =
- (unsigned long)get_cpu_gdt_table(smp_processor_id());
-+ pax_close_kernel();
-+
- initial_gs = per_cpu_offset(smp_processor_id());
- #endif
- initial_code = (unsigned long)wakeup_long64;
-diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
-index 13ab7205..95d5442 100644
---- a/arch/x86/kernel/acpi/wakeup_32.S
-+++ b/arch/x86/kernel/acpi/wakeup_32.S
-@@ -30,13 +30,11 @@ wakeup_pmode_return:
- # and restore the stack ... but you need gdt for this to work
- movl saved_context_esp, %esp
-
-- movl %cs:saved_magic, %eax
-- cmpl $0x12345678, %eax
-+ cmpl $0x12345678, saved_magic
- jne bogus_magic
-
- # jump to place where we left off
-- movl saved_eip, %eax
-- jmp *%eax
-+ jmp *(saved_eip)
-
- bogus_magic:
- jmp bogus_magic
-diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
-index bda833c..a9bdd97 100644
---- a/arch/x86/kernel/alternative.c
-+++ b/arch/x86/kernel/alternative.c
-@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
- */
- for (a = start; a < end; a++) {
- instr = (u8 *)&a->instr_offset + a->instr_offset;
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
-+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
-+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
-+#endif
-+
- replacement = (u8 *)&a->repl_offset + a->repl_offset;
- BUG_ON(a->replacementlen > a->instrlen);
- BUG_ON(a->instrlen > sizeof(insnbuf));
-@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
- for (poff = start; poff < end; poff++) {
- u8 *ptr = (u8 *)poff + *poff;
-
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
-+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
-+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
-+#endif
-+
- if (!*poff || ptr < text || ptr >= text_end)
- continue;
- /* turn DS segment override prefix into lock prefix */
-- if (*ptr == 0x3e)
-+ if (*ktla_ktva(ptr) == 0x3e)
- text_poke(ptr, ((unsigned char []){0xf0}), 1);
- };
- mutex_unlock(&text_mutex);
-@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
- for (poff = start; poff < end; poff++) {
- u8 *ptr = (u8 *)poff + *poff;
-
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
-+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
-+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
-+#endif
-+
- if (!*poff || ptr < text || ptr >= text_end)
- continue;
- /* turn lock prefix into DS segment override prefix */
-- if (*ptr == 0xf0)
-+ if (*ktla_ktva(ptr) == 0xf0)
- text_poke(ptr, ((unsigned char []){0x3E}), 1);
- };
- mutex_unlock(&text_mutex);
-@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
-
- BUG_ON(p->len > MAX_PATCH_LEN);
- /* prep the buffer with the original instructions */
-- memcpy(insnbuf, p->instr, p->len);
-+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
- used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
- (unsigned long)p->instr, p->len);
-
-@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
- if (smp_alt_once)
- free_init_pages("SMP alternatives",
- (unsigned long)__smp_locks,
-- (unsigned long)__smp_locks_end);
-+ PAGE_ALIGN((unsigned long)__smp_locks_end));
-
- restart_nmi();
- }
-@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
- * instructions. And on the local CPU you need to be protected again NMI or MCE
- * handlers seeing an inconsistent instruction while you patch.
- */
--void *__init_or_module text_poke_early(void *addr, const void *opcode,
-+void *__kprobes text_poke_early(void *addr, const void *opcode,
- size_t len)
- {
- unsigned long flags;
- local_irq_save(flags);
-- memcpy(addr, opcode, len);
-+
-+ pax_open_kernel();
-+ memcpy(ktla_ktva(addr), opcode, len);
- sync_core();
-+ pax_close_kernel();
-+
- local_irq_restore(flags);
- /* Could also do a CLFLUSH here to speed up CPU recovery; but
- that causes hangs on some VIA CPUs. */
-@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
- */
- void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
- {
-- unsigned long flags;
-- char *vaddr;
-+ unsigned char *vaddr = ktla_ktva(addr);
- struct page *pages[2];
-- int i;
-+ size_t i;
-
- if (!core_kernel_text((unsigned long)addr)) {
-- pages[0] = vmalloc_to_page(addr);
-- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
-+ pages[0] = vmalloc_to_page(vaddr);
-+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
- } else {
-- pages[0] = virt_to_page(addr);
-+ pages[0] = virt_to_page(vaddr);
- WARN_ON(!PageReserved(pages[0]));
-- pages[1] = virt_to_page(addr + PAGE_SIZE);
-+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
- }
- BUG_ON(!pages[0]);
-- local_irq_save(flags);
-- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
-- if (pages[1])
-- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
-- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
-- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
-- clear_fixmap(FIX_TEXT_POKE0);
-- if (pages[1])
-- clear_fixmap(FIX_TEXT_POKE1);
-- local_flush_tlb();
-- sync_core();
-- /* Could also do a CLFLUSH here to speed up CPU recovery; but
-- that causes hangs on some VIA CPUs. */
-+ text_poke_early(addr, opcode, len);
- for (i = 0; i < len; i++)
-- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
-- local_irq_restore(flags);
-+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
- return addr;
- }
-
-diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
-index 838a3b4..71de0f5 100644
---- a/arch/x86/kernel/apic/apic.c
-+++ b/arch/x86/kernel/apic/apic.c
-@@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
- /*
- * Debug level, exported for io_apic.c
- */
--unsigned int apic_verbosity;
-+int apic_verbosity;
-
- int pic_mode;
-
-@@ -1859,7 +1859,7 @@ void smp_error_interrupt(struct pt_regs *regs)
- apic_write(APIC_ESR, 0);
- v1 = apic_read(APIC_ESR);
- ack_APIC_irq();
-- atomic_inc(&irq_err_count);
-+ atomic_inc_unchecked(&irq_err_count);
-
- apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
- smp_processor_id(), v0 , v1);
-diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
-index f7a41e4..be25d88 100644
---- a/arch/x86/kernel/apic/apic_flat_64.c
-+++ b/arch/x86/kernel/apic/apic_flat_64.c
-@@ -171,7 +171,7 @@ static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
- return initial_apic_id >> index_msb;
- }
-
--static struct apic apic_flat = {
-+static struct apic apic_flat __read_only = {
- .name = "flat",
- .probe = NULL,
- .acpi_madt_oem_check = flat_acpi_madt_oem_check,
-@@ -327,7 +327,7 @@ static int physflat_probe(void)
- return 0;
- }
-
--static struct apic apic_physflat = {
-+static struct apic apic_physflat __read_only = {
-
- .name = "physical flat",
- .probe = physflat_probe,
-diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
-index 775b82b..841f78b 100644
---- a/arch/x86/kernel/apic/apic_noop.c
-+++ b/arch/x86/kernel/apic/apic_noop.c
-@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
- WARN_ON_ONCE(cpu_has_apic && !disable_apic);
- }
-
--struct apic apic_noop = {
-+struct apic apic_noop __read_only = {
- .name = "noop",
- .probe = noop_probe,
- .acpi_madt_oem_check = NULL,
-diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
-index 521bead..a724871 100644
---- a/arch/x86/kernel/apic/bigsmp_32.c
-+++ b/arch/x86/kernel/apic/bigsmp_32.c
-@@ -193,7 +193,7 @@ static int probe_bigsmp(void)
- return dmi_bigsmp;
- }
-
--static struct apic apic_bigsmp = {
-+static struct apic apic_bigsmp __read_only = {
-
- .name = "bigsmp",
- .probe = probe_bigsmp,
-diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
-index 5d513bc..6a51935 100644
---- a/arch/x86/kernel/apic/es7000_32.c
-+++ b/arch/x86/kernel/apic/es7000_32.c
-@@ -619,8 +619,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
- return ret && es7000_apic_is_cluster();
- }
-
--/* We've been warned by a false positive warning.Use __refdata to keep calm. */
--static struct apic __refdata apic_es7000_cluster = {
-+static struct apic apic_es7000_cluster __read_only = {
-
- .name = "es7000",
- .probe = probe_es7000,
-@@ -685,7 +684,7 @@ static struct apic __refdata apic_es7000_cluster = {
- .x86_32_early_logical_apicid = es7000_early_logical_apicid,
- };
-
--static struct apic __refdata apic_es7000 = {
-+static struct apic apic_es7000 __read_only = {
-
- .name = "es7000",
- .probe = probe_es7000,
-diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
-index 6d939d7..a93a87e 100644
---- a/arch/x86/kernel/apic/io_apic.c
-+++ b/arch/x86/kernel/apic/io_apic.c
-@@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
- }
- EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
-
--void lock_vector_lock(void)
-+void lock_vector_lock(void) __acquires(vector_lock)
- {
- /* Used to the online set of cpus does not change
- * during assign_irq_vector.
-@@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
- raw_spin_lock(&vector_lock);
- }
-
--void unlock_vector_lock(void)
-+void unlock_vector_lock(void) __releases(vector_lock)
- {
- raw_spin_unlock(&vector_lock);
- }
-@@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
- ack_APIC_irq();
- }
-
--atomic_t irq_mis_count;
-+atomic_unchecked_t irq_mis_count;
-
- static void ack_apic_level(struct irq_data *data)
- {
-@@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
- * at the cpu.
- */
- if (!(v & (1 << (i & 0x1f)))) {
-- atomic_inc(&irq_mis_count);
-+ atomic_inc_unchecked(&irq_mis_count);
-
- eoi_ioapic_irq(irq, cfg);
- }
-@@ -2634,17 +2634,20 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
-
- static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
- {
-- chip->irq_print_chip = ir_print_prefix;
-- chip->irq_ack = ir_ack_apic_edge;
-- chip->irq_eoi = ir_ack_apic_level;
-+ pax_open_kernel();
-+ *(void **)&chip->irq_print_chip = ir_print_prefix;
-+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
-+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
-
- #ifdef CONFIG_SMP
-- chip->irq_set_affinity = ir_ioapic_set_affinity;
-+ *(void **)&chip->irq_set_affinity = ir_ioapic_set_affinity;
- #endif
-+
-+ pax_close_kernel();
- }
- #endif /* CONFIG_IRQ_REMAP */
-
--static struct irq_chip ioapic_chip __read_mostly = {
-+static struct irq_chip ioapic_chip = {
- .name = "IO-APIC",
- .irq_startup = startup_ioapic_irq,
- .irq_mask = mask_ioapic_irq,
-@@ -2715,7 +2718,7 @@ static void ack_lapic_irq(struct irq_data *data)
- ack_APIC_irq();
- }
-
--static struct irq_chip lapic_chip __read_mostly = {
-+static struct irq_chip lapic_chip = {
- .name = "local-APIC",
- .irq_mask = mask_lapic_irq,
- .irq_unmask = unmask_lapic_irq,
-diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
-index c4a61ca..4c63d32 100644
---- a/arch/x86/kernel/apic/numaq_32.c
-+++ b/arch/x86/kernel/apic/numaq_32.c
-@@ -472,8 +472,7 @@ static void numaq_setup_portio_remap(void)
- (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
- }
-
--/* Use __refdata to keep false positive warning calm. */
--static struct apic __refdata apic_numaq = {
-+static struct apic apic_numaq __read_only = {
-
- .name = "NUMAQ",
- .probe = probe_numaq,
-diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
-index 0787bb3..e222a80 100644
---- a/arch/x86/kernel/apic/probe_32.c
-+++ b/arch/x86/kernel/apic/probe_32.c
-@@ -87,7 +87,7 @@ static int probe_default(void)
- return 1;
- }
-
--static struct apic apic_default = {
-+static struct apic apic_default __read_only = {
-
- .name = "default",
- .probe = probe_default,
-diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
-index 1911442..2424a83 100644
---- a/arch/x86/kernel/apic/summit_32.c
-+++ b/arch/x86/kernel/apic/summit_32.c
-@@ -491,7 +491,7 @@ void setup_summit(void)
- }
- #endif
-
--static struct apic apic_summit = {
-+static struct apic apic_summit __read_only = {
-
- .name = "summit",
- .probe = probe_summit,
-diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
-index 5007958..2eba140 100644
---- a/arch/x86/kernel/apic/x2apic_cluster.c
-+++ b/arch/x86/kernel/apic/x2apic_cluster.c
-@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
- return notifier_from_errno(err);
- }
-
--static struct notifier_block __refdata x2apic_cpu_notifier = {
-+static struct notifier_block x2apic_cpu_notifier = {
- .notifier_call = update_clusterinfo,
- };
-
-@@ -208,7 +208,7 @@ static int x2apic_cluster_probe(void)
- return 0;
- }
-
--static struct apic apic_x2apic_cluster = {
-+static struct apic apic_x2apic_cluster __read_only = {
-
- .name = "cluster x2apic",
- .probe = x2apic_cluster_probe,
-diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
-index db4f704..2d4f409 100644
---- a/arch/x86/kernel/apic/x2apic_phys.c
-+++ b/arch/x86/kernel/apic/x2apic_phys.c
-@@ -121,7 +121,7 @@ static int x2apic_phys_probe(void)
- return apic == &apic_x2apic_phys;
- }
-
--static struct apic apic_x2apic_phys = {
-+static struct apic apic_x2apic_phys __read_only = {
-
- .name = "physical x2apic",
- .probe = x2apic_phys_probe,
-diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
-index 79b05b8..bc1d972 100644
---- a/arch/x86/kernel/apic/x2apic_uv_x.c
-+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
-@@ -346,7 +346,7 @@ static int uv_probe(void)
- return apic == &apic_x2apic_uv_x;
- }
-
--static struct apic __refdata apic_x2apic_uv_x = {
-+static struct apic apic_x2apic_uv_x __read_only = {
-
- .name = "UV large system",
- .probe = uv_probe,
-diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
-index a46bd38..92f6c9c 100644
---- a/arch/x86/kernel/apm_32.c
-+++ b/arch/x86/kernel/apm_32.c
-@@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
- * This is for buggy BIOS's that refer to (real mode) segment 0x40
- * even though they are called in protected mode.
- */
--static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
-+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
- (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
-
- static const char driver_version[] = "1.16ac"; /* no spaces */
-@@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
- BUG_ON(cpu != 0);
- gdt = get_cpu_gdt_table(cpu);
- save_desc_40 = gdt[0x40 / 8];
-+
-+ pax_open_kernel();
- gdt[0x40 / 8] = bad_bios_desc;
-+ pax_close_kernel();
-
- apm_irq_save(flags);
- APM_DO_SAVE_SEGS;
-@@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
- &call->esi);
- APM_DO_RESTORE_SEGS;
- apm_irq_restore(flags);
-+
-+ pax_open_kernel();
- gdt[0x40 / 8] = save_desc_40;
-+ pax_close_kernel();
-+
- put_cpu();
-
- return call->eax & 0xff;
-@@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
- BUG_ON(cpu != 0);
- gdt = get_cpu_gdt_table(cpu);
- save_desc_40 = gdt[0x40 / 8];
-+
-+ pax_open_kernel();
- gdt[0x40 / 8] = bad_bios_desc;
-+ pax_close_kernel();
-
- apm_irq_save(flags);
- APM_DO_SAVE_SEGS;
-@@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
- &call->eax);
- APM_DO_RESTORE_SEGS;
- apm_irq_restore(flags);
-+
-+ pax_open_kernel();
- gdt[0x40 / 8] = save_desc_40;
-+ pax_close_kernel();
-+
- put_cpu();
- return error;
- }
-@@ -2037,7 +2051,7 @@ static int __init swab_apm_power_in_minutes(const struct dmi_system_id *d)
- return 0;
- }
-
--static struct dmi_system_id __initdata apm_dmi_table[] = {
-+static const struct dmi_system_id __initconst apm_dmi_table[] = {
- {
- print_if_true,
- KERN_WARNING "IBM T23 - BIOS 1.03b+ and controller firmware 1.02+ may be needed for Linux APM.",
-@@ -2347,12 +2361,15 @@ static int __init apm_init(void)
- * code to that CPU.
- */
- gdt = get_cpu_gdt_table(0);
-+
-+ pax_open_kernel();
- set_desc_base(&gdt[APM_CS >> 3],
- (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
- set_desc_base(&gdt[APM_CS_16 >> 3],
- (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
- set_desc_base(&gdt[APM_DS >> 3],
- (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
-+ pax_close_kernel();
-
- proc_create("apm", 0, NULL, &apm_file_ops);
-
-diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
-index 4f13faf..87db5d2 100644
---- a/arch/x86/kernel/asm-offsets.c
-+++ b/arch/x86/kernel/asm-offsets.c
-@@ -33,6 +33,8 @@ void common(void) {
- OFFSET(TI_status, thread_info, status);
- OFFSET(TI_addr_limit, thread_info, addr_limit);
- OFFSET(TI_preempt_count, thread_info, preempt_count);
-+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
-+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
-
- BLANK();
- OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
-@@ -53,8 +55,26 @@ void common(void) {
- OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
- OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
- OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
- #endif
-
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
-+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
-+#ifdef CONFIG_X86_64
-+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
-+#endif
-+#endif
-+
-+#endif
-+
-+ BLANK();
-+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
-+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
-+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
-+
- #ifdef CONFIG_XEN
- BLANK();
- OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
-diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
-index e72a119..6e2955d 100644
---- a/arch/x86/kernel/asm-offsets_64.c
-+++ b/arch/x86/kernel/asm-offsets_64.c
-@@ -69,6 +69,7 @@ int main(void)
- BLANK();
- #undef ENTRY
-
-+ DEFINE(TSS_size, sizeof(struct tss_struct));
- OFFSET(TSS_ist, tss_struct, x86_tss.ist);
- BLANK();
-
-diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
-index 25f24dc..4094a7f 100644
---- a/arch/x86/kernel/cpu/Makefile
-+++ b/arch/x86/kernel/cpu/Makefile
-@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
- CFLAGS_REMOVE_perf_event.o = -pg
- endif
-
--# Make sure load_percpu_segment has no stackprotector
--nostackp := $(call cc-option, -fno-stack-protector)
--CFLAGS_common.o := $(nostackp)
--
- obj-y := intel_cacheinfo.o scattered.o topology.o
- obj-y += proc.o capflags.o powerflags.o common.o
- obj-y += vmware.o hypervisor.o sched.o mshyperv.o
-diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
-index 60d4c33..3f51857 100644
---- a/arch/x86/kernel/cpu/amd.c
-+++ b/arch/x86/kernel/cpu/amd.c
-@@ -711,7 +711,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
- unsigned int size)
- {
- /* AMD errata T13 (order #21922) */
-- if ((c->x86 == 6)) {
-+ if (c->x86 == 6) {
- /* Duron Rev A0 */
- if (c->x86_model == 3 && c->x86_mask == 0)
- size = 64;
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 6284d6d..60561cb 100644
---- a/arch/x86/kernel/cpu/common.c
-+++ b/arch/x86/kernel/cpu/common.c
-@@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
-
- static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
-
--DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
--#ifdef CONFIG_X86_64
-- /*
-- * We need valid kernel segments for data and code in long mode too
-- * IRET will check the segment types kkeil 2000/10/28
-- * Also sysret mandates a special GDT layout
-- *
-- * TLS descriptors are currently at a different place compared to i386.
-- * Hopefully nobody expects them at a fixed place (Wine?)
-- */
-- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
-- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
-- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
-- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
-- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
-- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
--#else
-- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
-- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
-- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
-- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
-- /*
-- * Segments used for calling PnP BIOS have byte granularity.
-- * They code segments and data segments have fixed 64k limits,
-- * the transfer segment sizes are set at run time.
-- */
-- /* 32-bit code */
-- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
-- /* 16-bit code */
-- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
-- /* 16-bit data */
-- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
-- /* 16-bit data */
-- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
-- /* 16-bit data */
-- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
-- /*
-- * The APM segments have byte granularity and their bases
-- * are set at run time. All have 64k limits.
-- */
-- /* 32-bit code */
-- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
-- /* 16-bit code */
-- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
-- /* data */
-- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
--
-- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
-- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
-- GDT_STACK_CANARY_INIT
--#endif
--} };
--EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
--
- static int __init x86_xsave_setup(char *s)
- {
- if (strlen(s))
-@@ -374,7 +320,7 @@ void switch_to_new_gdt(int cpu)
- {
- struct desc_ptr gdt_descr;
-
-- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
-+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
- gdt_descr.size = GDT_SIZE - 1;
- load_gdt(&gdt_descr);
- /* Reload the per-cpu base */
-@@ -769,6 +715,16 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
-
- setup_smep(c);
-
-+#ifdef CONFIG_X86_32
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (!(__supported_pte_mask & _PAGE_NX))
-+ clear_cpu_cap(c, X86_FEATURE_PSE);
-+#endif
-+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+ clear_cpu_cap(c, X86_FEATURE_SEP);
-+#endif
-+#endif
-+
- get_model_name(c); /* Default name */
-
- detect_nopl(c);
-@@ -1021,6 +977,9 @@ static __init int setup_disablecpuid(char *arg)
- }
- __setup("clearcpuid=", setup_disablecpuid);
-
-+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
-+EXPORT_PER_CPU_SYMBOL(current_tinfo);
-+
- #ifdef CONFIG_X86_64
- struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
-
-@@ -1036,7 +995,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
- EXPORT_PER_CPU_SYMBOL(current_task);
-
- DEFINE_PER_CPU(unsigned long, kernel_stack) =
-- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
-+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
- EXPORT_PER_CPU_SYMBOL(kernel_stack);
-
- DEFINE_PER_CPU(char *, irq_stack_ptr) =
-@@ -1101,7 +1060,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
- {
- memset(regs, 0, sizeof(struct pt_regs));
- regs->fs = __KERNEL_PERCPU;
-- regs->gs = __KERNEL_STACK_CANARY;
-+ savesegment(gs, regs->gs);
-
- return regs;
- }
-@@ -1156,7 +1115,7 @@ void __cpuinit cpu_init(void)
- int i;
-
- cpu = stack_smp_processor_id();
-- t = &per_cpu(init_tss, cpu);
-+ t = init_tss + cpu;
- oist = &per_cpu(orig_ist, cpu);
-
- #ifdef CONFIG_NUMA
-@@ -1182,7 +1141,7 @@ void __cpuinit cpu_init(void)
- switch_to_new_gdt(cpu);
- loadsegment(fs, 0);
-
-- load_idt((const struct desc_ptr *)&idt_descr);
-+ load_idt(&idt_descr);
-
- memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
- syscall_init();
-@@ -1191,7 +1150,6 @@ void __cpuinit cpu_init(void)
- wrmsrl(MSR_KERNEL_GS_BASE, 0);
- barrier();
-
-- x86_configure_nx();
- if (cpu != 0)
- enable_x2apic();
-
-@@ -1245,7 +1203,7 @@ void __cpuinit cpu_init(void)
- {
- int cpu = smp_processor_id();
- struct task_struct *curr = current;
-- struct tss_struct *t = &per_cpu(init_tss, cpu);
-+ struct tss_struct *t = init_tss + cpu;
- struct thread_struct *thread = &curr->thread;
-
- if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
-diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
-index e7a64dd..6a192f6 100644
---- a/arch/x86/kernel/cpu/intel.c
-+++ b/arch/x86/kernel/cpu/intel.c
-@@ -189,7 +189,7 @@ static void __cpuinit trap_init_f00f_bug(void)
- * Update the IDT descriptor and reload the IDT so that
- * it uses the read-only mapped virtual address.
- */
-- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
-+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
- load_idt(&idt_descr);
- }
- #endif
-diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
-index 0e89635..f0a7525 100644
---- a/arch/x86/kernel/cpu/intel_cacheinfo.c
-+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
-@@ -984,6 +984,22 @@ static struct attribute *default_attrs[] = {
- };
-
- #ifdef CONFIG_AMD_NB
-+static struct attribute *default_attrs_amd_nb[] = {
-+ &type.attr,
-+ &level.attr,
-+ &coherency_line_size.attr,
-+ &physical_line_partition.attr,
-+ &ways_of_associativity.attr,
-+ &number_of_sets.attr,
-+ &size.attr,
-+ &shared_cpu_map.attr,
-+ &shared_cpu_list.attr,
-+ NULL,
-+ NULL,
-+ NULL,
-+ NULL
-+};
-+
- static struct attribute ** __cpuinit amd_l3_attrs(void)
- {
- static struct attribute **attrs;
-@@ -994,18 +1010,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
-
- n = sizeof (default_attrs) / sizeof (struct attribute *);
-
-- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
-- n += 2;
--
-- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
-- n += 1;
--
-- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
-- if (attrs == NULL)
-- return attrs = default_attrs;
--
-- for (n = 0; default_attrs[n]; n++)
-- attrs[n] = default_attrs[n];
-+ attrs = default_attrs_amd_nb;
-
- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
- attrs[n++] = &cache_disable_0.attr;
-@@ -1056,6 +1061,13 @@ static struct kobj_type ktype_cache = {
- .default_attrs = default_attrs,
- };
-
-+#ifdef CONFIG_AMD_NB
-+static struct kobj_type ktype_cache_amd_nb = {
-+ .sysfs_ops = &sysfs_ops,
-+ .default_attrs = default_attrs_amd_nb,
-+};
-+#endif
-+
- static struct kobj_type ktype_percpu_entry = {
- .sysfs_ops = &sysfs_ops,
- };
-@@ -1121,20 +1133,26 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
- return retval;
- }
-
-+#ifdef CONFIG_AMD_NB
-+ amd_l3_attrs();
-+#endif
-+
- for (i = 0; i < num_cache_leaves; i++) {
-+ struct kobj_type *ktype;
-+
- this_object = INDEX_KOBJECT_PTR(cpu, i);
- this_object->cpu = cpu;
- this_object->index = i;
-
- this_leaf = CPUID4_INFO_IDX(cpu, i);
-
-- ktype_cache.default_attrs = default_attrs;
-+ ktype = &ktype_cache;
- #ifdef CONFIG_AMD_NB
- if (this_leaf->base.nb)
-- ktype_cache.default_attrs = amd_l3_attrs();
-+ ktype = &ktype_cache_amd_nb;
- #endif
- retval = kobject_init_and_add(&(this_object->kobj),
-- &ktype_cache,
-+ ktype,
- per_cpu(ici_cache_kobject, cpu),
- "index%1lu", i);
- if (unlikely(retval)) {
-@@ -1189,7 +1207,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
-+static struct notifier_block cacheinfo_cpu_notifier = {
- .notifier_call = cacheinfo_cpu_callback,
- };
-
-diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
-index 3b67877..77e760c 100644
---- a/arch/x86/kernel/cpu/mcheck/mce.c
-+++ b/arch/x86/kernel/cpu/mcheck/mce.c
-@@ -42,6 +42,7 @@
- #include <asm/processor.h>
- #include <asm/mce.h>
- #include <asm/msr.h>
-+#include <asm/local.h>
-
- #include "mce-internal.h"
-
-@@ -200,7 +201,7 @@ static void print_mce(struct mce *m)
- !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
- m->cs, m->ip);
-
-- if (m->cs == __KERNEL_CS)
-+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
- print_symbol("{%s}", m->ip);
- pr_cont("\n");
- }
-@@ -233,10 +234,10 @@ static void print_mce(struct mce *m)
-
- #define PANIC_TIMEOUT 5 /* 5 seconds */
-
--static atomic_t mce_paniced;
-+static atomic_unchecked_t mce_paniced;
-
- static int fake_panic;
--static atomic_t mce_fake_paniced;
-+static atomic_unchecked_t mce_fake_paniced;
-
- /* Panic in progress. Enable interrupts and wait for final IPI */
- static void wait_for_panic(void)
-@@ -260,7 +261,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
- /*
- * Make sure only one CPU runs in machine check panic
- */
-- if (atomic_inc_return(&mce_paniced) > 1)
-+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
- wait_for_panic();
- barrier();
-
-@@ -268,7 +269,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
- console_verbose();
- } else {
- /* Don't log too much for fake panic */
-- if (atomic_inc_return(&mce_fake_paniced) > 1)
-+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
- return;
- }
- /* First print corrected ones that are still unlogged */
-@@ -307,7 +308,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
- if (!fake_panic) {
- if (panic_timeout == 0)
- panic_timeout = mce_panic_timeout;
-- panic(msg);
-+ panic("%s", msg);
- } else
- pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
- }
-@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
- * might have been modified by someone else.
- */
- rmb();
-- if (atomic_read(&mce_paniced))
-+ if (atomic_read_unchecked(&mce_paniced))
- wait_for_panic();
- if (!monarch_timeout)
- goto out;
-@@ -1404,7 +1405,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
- }
-
- /* Call the installed machine check handler for this CPU setup. */
--void (*machine_check_vector)(struct pt_regs *, long error_code) =
-+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
- unexpected_machine_check;
-
- /*
-@@ -1427,7 +1428,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
- return;
- }
-
-+ pax_open_kernel();
- machine_check_vector = do_machine_check;
-+ pax_close_kernel();
-
- __mcheck_cpu_init_generic();
- __mcheck_cpu_init_vendor(c);
-@@ -1441,7 +1444,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
- */
-
- static DEFINE_SPINLOCK(mce_chrdev_state_lock);
--static int mce_chrdev_open_count; /* #times opened */
-+static local_t mce_chrdev_open_count; /* #times opened */
- static int mce_chrdev_open_exclu; /* already open exclusive? */
-
- static int mce_chrdev_open(struct inode *inode, struct file *file)
-@@ -1449,7 +1452,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
- spin_lock(&mce_chrdev_state_lock);
-
- if (mce_chrdev_open_exclu ||
-- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
-+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
- spin_unlock(&mce_chrdev_state_lock);
-
- return -EBUSY;
-@@ -1457,7 +1460,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
-
- if (file->f_flags & O_EXCL)
- mce_chrdev_open_exclu = 1;
-- mce_chrdev_open_count++;
-+ local_inc(&mce_chrdev_open_count);
-
- spin_unlock(&mce_chrdev_state_lock);
-
-@@ -1468,7 +1471,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
- {
- spin_lock(&mce_chrdev_state_lock);
-
-- mce_chrdev_open_count--;
-+ local_dec(&mce_chrdev_open_count);
- mce_chrdev_open_exclu = 0;
-
- spin_unlock(&mce_chrdev_state_lock);
-@@ -2099,7 +2102,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
- return NOTIFY_OK;
- }
-
--static struct notifier_block mce_cpu_notifier __cpuinitdata = {
-+static struct notifier_block mce_cpu_notifier = {
- .notifier_call = mce_cpu_callback,
- };
-
-@@ -2109,7 +2112,7 @@ static __init void mce_init_banks(void)
-
- for (i = 0; i < banks; i++) {
- struct mce_bank *b = &mce_banks[i];
-- struct sysdev_attribute *a = &b->attr;
-+ sysdev_attribute_no_const *a = &b->attr;
-
- sysfs_attr_init(&a->attr);
- a->attr.name = b->attrname;
-@@ -2177,7 +2180,7 @@ struct dentry *mce_get_debugfs_dir(void)
- static void mce_reset(void)
- {
- cpu_missing = 0;
-- atomic_set(&mce_fake_paniced, 0);
-+ atomic_set_unchecked(&mce_fake_paniced, 0);
- atomic_set(&mce_executing, 0);
- atomic_set(&mce_callin, 0);
- atomic_set(&global_nwo, 0);
-diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
-index 5c0e653..0882b0a 100644
---- a/arch/x86/kernel/cpu/mcheck/p5.c
-+++ b/arch/x86/kernel/cpu/mcheck/p5.c
-@@ -12,6 +12,7 @@
- #include <asm/system.h>
- #include <asm/mce.h>
- #include <asm/msr.h>
-+#include <asm/pgtable.h>
-
- /* By default disabled */
- int mce_p5_enabled __read_mostly;
-@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
- if (!cpu_has(c, X86_FEATURE_MCE))
- return;
-
-+ pax_open_kernel();
- machine_check_vector = pentium_machine_check;
-+ pax_close_kernel();
- /* Make sure the vector pointer is visible before we enable MCEs: */
- wmb();
-
-diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
-index ce04b58..b84acbd 100644
---- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
-+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
-@@ -290,7 +290,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
- return notifier_from_errno(err);
- }
-
--static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
-+static struct notifier_block thermal_throttle_cpu_notifier =
- {
- .notifier_call = thermal_throttle_cpu_callback,
- };
-diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
-index 54060f5..c1a7577 100644
---- a/arch/x86/kernel/cpu/mcheck/winchip.c
-+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
-@@ -11,6 +11,7 @@
- #include <asm/system.h>
- #include <asm/mce.h>
- #include <asm/msr.h>
-+#include <asm/pgtable.h>
-
- /* Machine check handler for WinChip C6: */
- static void winchip_machine_check(struct pt_regs *regs, long error_code)
-@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
- {
- u32 lo, hi;
-
-+ pax_open_kernel();
- machine_check_vector = winchip_machine_check;
-+ pax_close_kernel();
- /* Make sure the vector pointer is visible before we enable MCEs: */
- wmb();
-
-diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
-index 6b96110..0da73eb 100644
---- a/arch/x86/kernel/cpu/mtrr/main.c
-+++ b/arch/x86/kernel/cpu/mtrr/main.c
-@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
- u64 size_or_mask, size_and_mask;
- static bool mtrr_aps_delayed_init;
-
--static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
-+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
-
- const struct mtrr_ops *mtrr_if;
-
-diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
-index df5e41f..816c719 100644
---- a/arch/x86/kernel/cpu/mtrr/mtrr.h
-+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
-@@ -25,7 +25,7 @@ struct mtrr_ops {
- int (*validate_add_page)(unsigned long base, unsigned long size,
- unsigned int type);
- int (*have_wrcomb)(void);
--};
-+} __do_const;
-
- extern int generic_get_free_region(unsigned long base, unsigned long size,
- int replace_reg);
-diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
-index 1c041e0..ec625d2 100644
---- a/arch/x86/kernel/cpu/perf_event.c
-+++ b/arch/x86/kernel/cpu/perf_event.c
-@@ -1532,7 +1532,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
- break;
-
- perf_callchain_store(entry, frame.return_address);
-- fp = frame.next_frame;
-+ fp = (const void __force_user *)frame.next_frame;
- }
- }
-
-diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
-index 212a6a4..322f5d9 100644
---- a/arch/x86/kernel/cpuid.c
-+++ b/arch/x86/kernel/cpuid.c
-@@ -172,7 +172,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
- return notifier_from_errno(err);
- }
-
--static struct notifier_block __refdata cpuid_class_cpu_notifier =
-+static struct notifier_block cpuid_class_cpu_notifier =
- {
- .notifier_call = cpuid_class_cpu_callback,
- };
-diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
-index 69e231b..8b4e1c6 100644
---- a/arch/x86/kernel/crash.c
-+++ b/arch/x86/kernel/crash.c
-@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
- {
- #ifdef CONFIG_X86_32
- struct pt_regs fixed_regs;
--#endif
-
--#ifdef CONFIG_X86_32
-- if (!user_mode_vm(regs)) {
-+ if (!user_mode(regs)) {
- crash_fixup_ss_esp(&fixed_regs, regs);
- regs = &fixed_regs;
- }
-diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
-index afa64ad..dce67dd 100644
---- a/arch/x86/kernel/crash_dump_64.c
-+++ b/arch/x86/kernel/crash_dump_64.c
-@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
- return -ENOMEM;
-
- if (userbuf) {
-- if (copy_to_user(buf, vaddr + offset, csize)) {
-+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
- iounmap(vaddr);
- return -EFAULT;
- }
-diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
-index 37250fe..bf2ec74 100644
---- a/arch/x86/kernel/doublefault_32.c
-+++ b/arch/x86/kernel/doublefault_32.c
-@@ -11,7 +11,7 @@
-
- #define DOUBLEFAULT_STACKSIZE (1024)
- static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
--#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
-+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
-
- #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
-
-@@ -21,7 +21,7 @@ static void doublefault_fn(void)
- unsigned long gdt, tss;
-
- store_gdt(&gdt_desc);
-- gdt = gdt_desc.address;
-+ gdt = (unsigned long)gdt_desc.address;
-
- printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
-
-@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
- /* 0x2 bit is always set */
- .flags = X86_EFLAGS_SF | 0x2,
- .sp = STACK_START,
-- .es = __USER_DS,
-+ .es = __KERNEL_DS,
- .cs = __KERNEL_CS,
- .ss = __KERNEL_DS,
-- .ds = __USER_DS,
-+ .ds = __KERNEL_DS,
- .fs = __KERNEL_PERCPU,
-
- .__cr3 = __pa_nodebug(swapper_pg_dir),
-diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
-index 1aae78f..138ca1b 100644
---- a/arch/x86/kernel/dumpstack.c
-+++ b/arch/x86/kernel/dumpstack.c
-@@ -2,6 +2,9 @@
- * Copyright (C) 1991, 1992 Linus Torvalds
- * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
- */
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+#define __INCLUDED_BY_HIDESYM 1
-+#endif
- #include <linux/kallsyms.h>
- #include <linux/kprobes.h>
- #include <linux/uaccess.h>
-@@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
- static void
- print_ftrace_graph_addr(unsigned long addr, void *data,
- const struct stacktrace_ops *ops,
-- struct thread_info *tinfo, int *graph)
-+ struct task_struct *task, int *graph)
- {
-- struct task_struct *task = tinfo->task;
- unsigned long ret_addr;
- int index = task->curr_ret_stack;
-
-@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
- static inline void
- print_ftrace_graph_addr(unsigned long addr, void *data,
- const struct stacktrace_ops *ops,
-- struct thread_info *tinfo, int *graph)
-+ struct task_struct *task, int *graph)
- { }
- #endif
-
-@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
- * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
- */
-
--static inline int valid_stack_ptr(struct thread_info *tinfo,
-- void *p, unsigned int size, void *end)
-+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
- {
-- void *t = tinfo;
- if (end) {
- if (p < end && p >= (end-THREAD_SIZE))
- return 1;
-@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
- }
-
- unsigned long
--print_context_stack(struct thread_info *tinfo,
-+print_context_stack(struct task_struct *task, void *stack_start,
- unsigned long *stack, unsigned long bp,
- const struct stacktrace_ops *ops, void *data,
- unsigned long *end, int *graph)
- {
- struct stack_frame *frame = (struct stack_frame *)bp;
-
-- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
-+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
- unsigned long addr;
-
- addr = *stack;
-@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
- } else {
- ops->address(data, addr, 0);
- }
-- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
-+ print_ftrace_graph_addr(addr, data, ops, task, graph);
- }
- stack++;
- }
-@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
- EXPORT_SYMBOL_GPL(print_context_stack);
-
- unsigned long
--print_context_stack_bp(struct thread_info *tinfo,
-+print_context_stack_bp(struct task_struct *task, void *stack_start,
- unsigned long *stack, unsigned long bp,
- const struct stacktrace_ops *ops, void *data,
- unsigned long *end, int *graph)
-@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
- struct stack_frame *frame = (struct stack_frame *)bp;
- unsigned long *ret_addr = &frame->return_address;
-
-- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
-+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
- unsigned long addr = *ret_addr;
-
- if (!__kernel_text_address(addr))
-@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
- ops->address(data, addr, 1);
- frame = frame->next_frame;
- ret_addr = &frame->return_address;
-- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
-+ print_ftrace_graph_addr(addr, data, ops, task, graph);
- }
-
- return (unsigned long)frame;
-@@ -147,7 +147,7 @@ static int print_trace_stack(void *data, char *name)
- static void print_trace_address(void *data, unsigned long addr, int reliable)
- {
- touch_nmi_watchdog();
-- printk(data);
-+ printk("%s", (char *)data);
- printk_address(addr, reliable);
- }
-
-@@ -186,7 +186,7 @@ void dump_stack(void)
-
- bp = stack_frame(current, NULL);
- printk("Pid: %d, comm: %.20s %s %s %.*s\n",
-- current->pid, current->comm, print_tainted(),
-+ task_pid_nr(current), current->comm, print_tainted(),
- init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
-@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
- }
- EXPORT_SYMBOL_GPL(oops_begin);
-
-+extern void gr_handle_kernel_exploit(void);
-+
- void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
- {
- if (regs && kexec_should_crash(current))
-@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
- panic("Fatal exception in interrupt");
- if (panic_on_oops)
- panic("Fatal exception");
-- do_exit(signr);
-+
-+ gr_handle_kernel_exploit();
-+
-+ do_group_exit(signr);
- }
-
- int __kprobes __die(const char *str, struct pt_regs *regs, long err)
-@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
-
- show_registers(regs);
- #ifdef CONFIG_X86_32
-- if (user_mode_vm(regs)) {
-+ if (user_mode(regs)) {
- sp = regs->sp;
- ss = regs->ss & 0xffff;
- } else {
-@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
- unsigned long flags = oops_begin();
- int sig = SIGSEGV;
-
-- if (!user_mode_vm(regs))
-+ if (!user_mode(regs))
- report_bug(regs->ip, regs);
-
- if (__die(str, regs, err))
-diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
-index c99f9ed..76cf602 100644
---- a/arch/x86/kernel/dumpstack_32.c
-+++ b/arch/x86/kernel/dumpstack_32.c
-@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
- bp = stack_frame(task, regs);
-
- for (;;) {
-- struct thread_info *context;
-+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
-
-- context = (struct thread_info *)
-- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
-- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
-+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
-
-- stack = (unsigned long *)context->previous_esp;
-- if (!stack)
-+ if (stack_start == task_stack_page(task))
- break;
-+ stack = *(unsigned long **)stack_start;
- if (ops->stack(data, "IRQ") < 0)
- break;
- touch_nmi_watchdog();
-@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
- * When in-kernel, we also print out the stack and code at the
- * time of the fault..
- */
-- if (!user_mode_vm(regs)) {
-+ if (!user_mode(regs)) {
- unsigned int code_prologue = code_bytes * 43 / 64;
- unsigned int code_len = code_bytes;
- unsigned char c;
- u8 *ip;
-+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
-
- printk(KERN_EMERG "Stack:\n");
- show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
-
- printk(KERN_EMERG "Code: ");
-
-- ip = (u8 *)regs->ip - code_prologue;
-+ ip = (u8 *)regs->ip - code_prologue + cs_base;
- if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
- /* try starting at IP */
-- ip = (u8 *)regs->ip;
-+ ip = (u8 *)regs->ip + cs_base;
- code_len = code_len - code_prologue + 1;
- }
- for (i = 0; i < code_len; i++, ip++) {
-@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
- printk(KERN_CONT " Bad EIP value.");
- break;
- }
-- if (ip == (u8 *)regs->ip)
-+ if (ip == (u8 *)regs->ip + cs_base)
- printk(KERN_CONT "<%02x> ", c);
- else
- printk(KERN_CONT "%02x ", c);
-@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
- {
- unsigned short ud2;
-
-+ ip = ktla_ktva(ip);
- if (ip < PAGE_OFFSET)
- return 0;
- if (probe_kernel_address((unsigned short *)ip, ud2))
-@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
-
- return ud2 == 0x0b0f;
- }
-+
-+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
-+void pax_check_alloca(unsigned long size)
-+{
-+ unsigned long sp = (unsigned long)&sp, stack_left;
-+
-+ /* all kernel stacks are of the same size */
-+ stack_left = sp & (THREAD_SIZE - 1);
-+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
-+}
-+EXPORT_SYMBOL(pax_check_alloca);
-+#endif
-diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
-index 5e890cc..66ec71b 100644
---- a/arch/x86/kernel/dumpstack_64.c
-+++ b/arch/x86/kernel/dumpstack_64.c
-@@ -118,9 +118,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
- unsigned long *irq_stack_end =
- (unsigned long *)per_cpu(irq_stack_ptr, cpu);
- unsigned used = 0;
-- struct thread_info *tinfo;
- int graph = 0;
- unsigned long dummy;
-+ void *stack_start;
-
- if (!task)
- task = current;
-@@ -141,10 +141,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
- * current stack address. If the stacks consist of nested
- * exceptions
- */
-- tinfo = task_thread_info(task);
- for (;;) {
- char *id;
- unsigned long *estack_end;
-+
- estack_end = in_exception_stack(cpu, (unsigned long)stack,
- &used, &id);
-
-@@ -152,7 +152,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
- if (ops->stack(data, id) < 0)
- break;
-
-- bp = ops->walk_stack(tinfo, stack, bp, ops,
-+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
- data, estack_end, &graph);
- ops->stack(data, "<EOE>");
- /*
-@@ -160,6 +160,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
- * second-to-last pointer (index -2 to end) in the
- * exception stack:
- */
-+ if ((u16)estack_end[-1] != __KERNEL_DS)
-+ goto out;
- stack = (unsigned long *) estack_end[-2];
- continue;
- }
-@@ -171,7 +173,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
- if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
- if (ops->stack(data, "IRQ") < 0)
- break;
-- bp = ops->walk_stack(tinfo, stack, bp,
-+ bp = ops->walk_stack(task, irq_stack, stack, bp,
- ops, data, irq_stack_end, &graph);
- /*
- * We link to the next stack (which would be
-@@ -190,7 +192,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
- /*
- * This handles the process stack:
- */
-- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
-+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
-+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
-+out:
- put_cpu();
- }
- EXPORT_SYMBOL(dump_trace);
-@@ -248,7 +252,7 @@ void show_registers(struct pt_regs *regs)
- {
- int i;
- unsigned long sp;
-- const int cpu = smp_processor_id();
-+ const int cpu = raw_smp_processor_id();
- struct task_struct *cur = current;
-
- sp = regs->sp;
-@@ -299,8 +303,55 @@ int is_valid_bugaddr(unsigned long ip)
- {
- unsigned short ud2;
-
-- if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
-+ if (probe_kernel_address((unsigned short *)ip, ud2))
- return 0;
-
- return ud2 == 0x0b0f;
- }
-+
-+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
-+void pax_check_alloca(unsigned long size)
-+{
-+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
-+ unsigned cpu, used;
-+ char *id;
-+
-+ /* check the process stack first */
-+ stack_start = (unsigned long)task_stack_page(current);
-+ stack_end = stack_start + THREAD_SIZE;
-+ if (likely(stack_start <= sp && sp < stack_end)) {
-+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
-+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
-+ return;
-+ }
-+
-+ cpu = get_cpu();
-+
-+ /* check the irq stacks */
-+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
-+ stack_start = stack_end - IRQ_STACK_SIZE;
-+ if (stack_start <= sp && sp < stack_end) {
-+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
-+ put_cpu();
-+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
-+ return;
-+ }
-+
-+ /* check the exception stacks */
-+ used = 0;
-+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
-+ stack_start = stack_end - EXCEPTION_STKSZ;
-+ if (stack_end && stack_start <= sp && sp < stack_end) {
-+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
-+ put_cpu();
-+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
-+ return;
-+ }
-+
-+ put_cpu();
-+
-+ /* unknown stack */
-+ BUG();
-+}
-+EXPORT_SYMBOL(pax_check_alloca);
-+#endif
-diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
-index 303a0e4..0aad351 100644
---- a/arch/x86/kernel/e820.c
-+++ b/arch/x86/kernel/e820.c
-@@ -829,8 +829,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
-
- static void early_panic(char *msg)
- {
-- early_printk(msg);
-- panic(msg);
-+ early_printk("%s", msg);
-+ panic("%s", msg);
- }
-
- static int userdef __initdata;
-diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
-index cd28a35..c72ed9a 100644
---- a/arch/x86/kernel/early_printk.c
-+++ b/arch/x86/kernel/early_printk.c
-@@ -7,6 +7,7 @@
- #include <linux/pci_regs.h>
- #include <linux/pci_ids.h>
- #include <linux/errno.h>
-+#include <linux/sched.h>
- #include <asm/io.h>
- #include <asm/processor.h>
- #include <asm/fcntl.h>
-diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
-index 0fa4f89..dbbfa58 100644
---- a/arch/x86/kernel/entry_32.S
-+++ b/arch/x86/kernel/entry_32.S
-@@ -180,13 +180,154 @@
- /*CFI_REL_OFFSET gs, PT_GS*/
- .endm
- .macro SET_KERNEL_GS reg
-+
-+#ifdef CONFIG_CC_STACKPROTECTOR
- movl $(__KERNEL_STACK_CANARY), \reg
-+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
-+ movl $(__USER_DS), \reg
-+#else
-+ xorl \reg, \reg
-+#endif
-+
- movl \reg, %gs
- .endm
-
- #endif /* CONFIG_X86_32_LAZY_GS */
-
--.macro SAVE_ALL
-+.macro pax_enter_kernel
-+#ifdef CONFIG_PAX_KERNEXEC
-+ call pax_enter_kernel
-+#endif
-+.endm
-+
-+.macro pax_exit_kernel
-+#ifdef CONFIG_PAX_KERNEXEC
-+ call pax_exit_kernel
-+#endif
-+.endm
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ENTRY(pax_enter_kernel)
-+#ifdef CONFIG_PARAVIRT
-+ pushl %eax
-+ pushl %ecx
-+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
-+ mov %eax, %esi
-+#else
-+ mov %cr0, %esi
-+#endif
-+ bts $16, %esi
-+ jnc 1f
-+ mov %cs, %esi
-+ cmp $__KERNEL_CS, %esi
-+ jz 3f
-+ ljmp $__KERNEL_CS, $3f
-+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
-+2:
-+#ifdef CONFIG_PARAVIRT
-+ mov %esi, %eax
-+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
-+#else
-+ mov %esi, %cr0
-+#endif
-+3:
-+#ifdef CONFIG_PARAVIRT
-+ popl %ecx
-+ popl %eax
-+#endif
-+ ret
-+ENDPROC(pax_enter_kernel)
-+
-+ENTRY(pax_exit_kernel)
-+#ifdef CONFIG_PARAVIRT
-+ pushl %eax
-+ pushl %ecx
-+#endif
-+ mov %cs, %esi
-+ cmp $__KERNEXEC_KERNEL_CS, %esi
-+ jnz 2f
-+#ifdef CONFIG_PARAVIRT
-+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
-+ mov %eax, %esi
-+#else
-+ mov %cr0, %esi
-+#endif
-+ btr $16, %esi
-+ ljmp $__KERNEL_CS, $1f
-+1:
-+#ifdef CONFIG_PARAVIRT
-+ mov %esi, %eax
-+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
-+#else
-+ mov %esi, %cr0
-+#endif
-+2:
-+#ifdef CONFIG_PARAVIRT
-+ popl %ecx
-+ popl %eax
-+#endif
-+ ret
-+ENDPROC(pax_exit_kernel)
-+#endif
-+
-+.macro pax_erase_kstack
-+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+ call pax_erase_kstack
-+#endif
-+.endm
-+
-+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+/*
-+ * ebp: thread_info
-+ */
-+ENTRY(pax_erase_kstack)
-+ pushl %edi
-+ pushl %ecx
-+ pushl %eax
-+
-+ mov TI_lowest_stack(%ebp), %edi
-+ mov $-0xBEEF, %eax
-+ std
-+
-+1: mov %edi, %ecx
-+ and $THREAD_SIZE_asm - 1, %ecx
-+ shr $2, %ecx
-+ repne scasl
-+ jecxz 2f
-+
-+ cmp $2*16, %ecx
-+ jc 2f
-+
-+ mov $2*16, %ecx
-+ repe scasl
-+ jecxz 2f
-+ jne 1b
-+
-+2: cld
-+ or $2*4, %edi
-+ mov %esp, %ecx
-+ sub %edi, %ecx
-+
-+ cmp $THREAD_SIZE_asm, %ecx
-+ jb 3f
-+ ud2
-+3:
-+
-+ shr $2, %ecx
-+ rep stosl
-+
-+ mov TI_task_thread_sp0(%ebp), %edi
-+ sub $128, %edi
-+ mov %edi, TI_lowest_stack(%ebp)
-+
-+ popl %eax
-+ popl %ecx
-+ popl %edi
-+ ret
-+ENDPROC(pax_erase_kstack)
-+#endif
-+
-+.macro __SAVE_ALL _DS
- cld
- PUSH_GS
- pushl_cfi %fs
-@@ -209,7 +350,7 @@
- CFI_REL_OFFSET ecx, 0
- pushl_cfi %ebx
- CFI_REL_OFFSET ebx, 0
-- movl $(__USER_DS), %edx
-+ movl $\_DS, %edx
- movl %edx, %ds
- movl %edx, %es
- movl $(__KERNEL_PERCPU), %edx
-@@ -217,6 +358,15 @@
- SET_KERNEL_GS %edx
- .endm
-
-+.macro SAVE_ALL
-+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+ __SAVE_ALL __KERNEL_DS
-+ pax_enter_kernel
-+#else
-+ __SAVE_ALL __USER_DS
-+#endif
-+.endm
-+
- .macro RESTORE_INT_REGS
- popl_cfi %ebx
- CFI_RESTORE ebx
-@@ -302,7 +452,7 @@ ENTRY(ret_from_fork)
- popfl_cfi
- jmp syscall_exit
- CFI_ENDPROC
--END(ret_from_fork)
-+ENDPROC(ret_from_fork)
-
- /*
- * Interrupt exit functions should be protected against kprobes
-@@ -336,7 +486,15 @@ resume_userspace_sig:
- andl $SEGMENT_RPL_MASK, %eax
- #endif
- cmpl $USER_RPL, %eax
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ jae resume_userspace
-+
-+ pax_exit_kernel
-+ jmp resume_kernel
-+#else
- jb resume_kernel # not returning to v8086 or userspace
-+#endif
-
- ENTRY(resume_userspace)
- LOCKDEP_SYS_EXIT
-@@ -348,8 +506,8 @@ ENTRY(resume_userspace)
- andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
- # int/exception return?
- jne work_pending
-- jmp restore_all
--END(ret_from_exception)
-+ jmp restore_all_pax
-+ENDPROC(ret_from_exception)
-
- #ifdef CONFIG_PREEMPT
- ENTRY(resume_kernel)
-@@ -364,7 +522,7 @@ need_resched:
- jz restore_all
- call preempt_schedule_irq
- jmp need_resched
--END(resume_kernel)
-+ENDPROC(resume_kernel)
- #endif
- CFI_ENDPROC
- /*
-@@ -398,23 +556,34 @@ sysenter_past_esp:
- /*CFI_REL_OFFSET cs, 0*/
- /*
- * Push current_thread_info()->sysenter_return to the stack.
-- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
-- * pushed above; +8 corresponds to copy_thread's esp0 setting.
- */
-- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
-+ pushl_cfi $0
- CFI_REL_OFFSET eip, 0
-
- pushl_cfi %eax
- SAVE_ALL
-+ GET_THREAD_INFO(%ebp)
-+ movl TI_sysenter_return(%ebp),%ebp
-+ movl %ebp,PT_EIP(%esp)
- ENABLE_INTERRUPTS(CLBR_NONE)
-
- /*
- * Load the potential sixth argument from user stack.
- * Careful about security.
- */
-+ movl PT_OLDESP(%esp),%ebp
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ mov PT_OLDSS(%esp),%ds
-+1: movl %ds:(%ebp),%ebp
-+ push %ss
-+ pop %ds
-+#else
- cmpl $__PAGE_OFFSET-3,%ebp
- jae syscall_fault
- 1: movl (%ebp),%ebp
-+#endif
-+
- movl %ebp,PT_EBP(%esp)
- .section __ex_table,"a"
- .align 4
-@@ -423,6 +592,10 @@ sysenter_past_esp:
-
- GET_THREAD_INFO(%ebp)
-
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ pax_erase_kstack
-+#endif
-+
- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
- jnz sysenter_audit
- sysenter_do_call:
-@@ -438,12 +611,24 @@ sysenter_after_call:
- testl $_TIF_ALLWORK_MASK, %ecx
- jne sysexit_audit
- sysenter_exit:
-+
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ pushl_cfi %eax
-+ movl %esp, %eax
-+ call pax_randomize_kstack
-+ popl_cfi %eax
-+#endif
-+
-+ pax_erase_kstack
-+
- /* if something modifies registers it must also disable sysexit */
- movl PT_EIP(%esp), %edx
- movl PT_OLDESP(%esp), %ecx
- xorl %ebp,%ebp
- TRACE_IRQS_ON
- 1: mov PT_FS(%esp), %fs
-+2: mov PT_DS(%esp), %ds
-+3: mov PT_ES(%esp), %es
- PTGS_TO_GS
- ENABLE_INTERRUPTS_SYSEXIT
-
-@@ -460,6 +645,9 @@ sysenter_audit:
- movl %eax,%edx /* 2nd arg: syscall number */
- movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
- call audit_syscall_entry
-+
-+ pax_erase_kstack
-+
- pushl_cfi %ebx
- movl PT_EAX(%esp),%eax /* reload syscall number */
- jmp sysenter_do_call
-@@ -486,11 +674,17 @@ sysexit_audit:
-
- CFI_ENDPROC
- .pushsection .fixup,"ax"
--2: movl $0,PT_FS(%esp)
-+4: movl $0,PT_FS(%esp)
-+ jmp 1b
-+5: movl $0,PT_DS(%esp)
-+ jmp 1b
-+6: movl $0,PT_ES(%esp)
- jmp 1b
- .section __ex_table,"a"
- .align 4
-- .long 1b,2b
-+ .long 1b,4b
-+ .long 2b,5b
-+ .long 3b,6b
- .popsection
- PTGS_TO_GS_EX
- ENDPROC(ia32_sysenter_target)
-@@ -505,6 +699,11 @@ ENTRY(system_call)
- pushl_cfi %eax # save orig_eax
- SAVE_ALL
- GET_THREAD_INFO(%ebp)
-+
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ pax_erase_kstack
-+#endif
-+
- # system call tracing in operation / emulation
- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
- jnz syscall_trace_entry
-@@ -524,6 +723,15 @@ syscall_exit:
- testl $_TIF_ALLWORK_MASK, %ecx # current->work
- jne syscall_exit_work
-
-+restore_all_pax:
-+
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ movl %esp, %eax
-+ call pax_randomize_kstack
-+#endif
-+
-+ pax_erase_kstack
-+
- restore_all:
- TRACE_IRQS_IRET
- restore_all_notrace:
-@@ -581,14 +789,34 @@ ldt_ss:
- * compensating for the offset by changing to the ESPFIX segment with
- * a base address that matches for the difference.
- */
--#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
-+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
- mov %esp, %edx /* load kernel esp */
- mov PT_OLDESP(%esp), %eax /* load userspace esp */
- mov %dx, %ax /* eax: new kernel esp */
- sub %eax, %edx /* offset (low word is 0) */
-+#ifdef CONFIG_SMP
-+ movl PER_CPU_VAR(cpu_number), %ebx
-+ shll $PAGE_SHIFT_asm, %ebx
-+ addl $cpu_gdt_table, %ebx
-+#else
-+ movl $cpu_gdt_table, %ebx
-+#endif
- shr $16, %edx
-- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
-- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ mov %cr0, %esi
-+ btr $16, %esi
-+ mov %esi, %cr0
-+#endif
-+
-+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
-+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ bts $16, %esi
-+ mov %esi, %cr0
-+#endif
-+
- pushl_cfi $__ESPFIX_SS
- pushl_cfi %eax /* new kernel esp */
- /* Disable interrupts, but do not irqtrace this section: we
-@@ -618,34 +846,28 @@ work_resched:
- movl TI_flags(%ebp), %ecx
- andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
- # than syscall tracing?
-- jz restore_all
-+ jz restore_all_pax
- testb $_TIF_NEED_RESCHED, %cl
- jnz work_resched
-
- work_notifysig: # deal with pending signals and
- # notify-resume requests
-+ movl %esp, %eax
- #ifdef CONFIG_VM86
- testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
-- movl %esp, %eax
-- jne work_notifysig_v86 # returning to kernel-space or
-+ jz 1f # returning to kernel-space or
- # vm86-space
-- xorl %edx, %edx
-- call do_notify_resume
-- jmp resume_userspace_sig
-
-- ALIGN
--work_notifysig_v86:
- pushl_cfi %ecx # save ti_flags for do_notify_resume
- call save_v86_state # %eax contains pt_regs pointer
- popl_cfi %ecx
- movl %eax, %esp
--#else
-- movl %esp, %eax
-+1:
- #endif
- xorl %edx, %edx
- call do_notify_resume
- jmp resume_userspace_sig
--END(work_pending)
-+ENDPROC(work_pending)
-
- # perform syscall exit tracing
- ALIGN
-@@ -653,11 +875,14 @@ syscall_trace_entry:
- movl $-ENOSYS,PT_EAX(%esp)
- movl %esp, %eax
- call syscall_trace_enter
-+
-+ pax_erase_kstack
-+
- /* What it returned is what we'll actually use. */
- cmpl $(nr_syscalls), %eax
- jnae syscall_call
- jmp syscall_exit
--END(syscall_trace_entry)
-+ENDPROC(syscall_trace_entry)
-
- # perform syscall exit tracing
- ALIGN
-@@ -670,25 +895,29 @@ syscall_exit_work:
- movl %esp, %eax
- call syscall_trace_leave
- jmp resume_userspace
--END(syscall_exit_work)
-+ENDPROC(syscall_exit_work)
- CFI_ENDPROC
-
- RING0_INT_FRAME # can't unwind into user space anyway
- syscall_fault:
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ push %ss
-+ pop %ds
-+#endif
- GET_THREAD_INFO(%ebp)
- movl $-EFAULT,PT_EAX(%esp)
- jmp resume_userspace
--END(syscall_fault)
-+ENDPROC(syscall_fault)
-
- syscall_badsys:
- movl $-ENOSYS,%eax
- jmp syscall_after_call
--END(syscall_badsys)
-+ENDPROC(syscall_badsys)
-
- sysenter_badsys:
- movl $-ENOSYS,%eax
- jmp sysenter_after_call
--END(syscall_badsys)
-+ENDPROC(sysenter_badsys)
- CFI_ENDPROC
- /*
- * End of kprobes section
-@@ -762,6 +991,36 @@ ptregs_clone:
- CFI_ENDPROC
- ENDPROC(ptregs_clone)
-
-+ ALIGN;
-+ENTRY(kernel_execve)
-+ CFI_STARTPROC
-+ pushl_cfi %ebp
-+ sub $PT_OLDSS+4,%esp
-+ pushl_cfi %edi
-+ pushl_cfi %ecx
-+ pushl_cfi %eax
-+ lea 3*4(%esp),%edi
-+ mov $PT_OLDSS/4+1,%ecx
-+ xorl %eax,%eax
-+ rep stosl
-+ popl_cfi %eax
-+ popl_cfi %ecx
-+ popl_cfi %edi
-+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
-+ pushl_cfi %esp
-+ call sys_execve
-+ add $4,%esp
-+ CFI_ADJUST_CFA_OFFSET -4
-+ GET_THREAD_INFO(%ebp)
-+ test %eax,%eax
-+ jz syscall_exit
-+ add $PT_OLDSS+4,%esp
-+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
-+ popl_cfi %ebp
-+ ret
-+ CFI_ENDPROC
-+ENDPROC(kernel_execve)
-+
- .macro FIXUP_ESPFIX_STACK
- /*
- * Switch back for ESPFIX stack to the normal zerobased stack
-@@ -772,8 +1031,15 @@ ENDPROC(ptregs_clone)
- */
- #ifdef CONFIG_X86_ESPFIX32
- /* fixup the stack */
-- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
-- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
-+#ifdef CONFIG_SMP
-+ movl PER_CPU_VAR(cpu_number), %ebx
-+ shll $PAGE_SHIFT_asm, %ebx
-+ addl $cpu_gdt_table, %ebx
-+#else
-+ movl $cpu_gdt_table, %ebx
-+#endif
-+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
-+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
- shl $16, %eax
- addl %esp, %eax /* the adjusted stack pointer */
- pushl_cfi $__KERNEL_DS
-@@ -829,7 +1095,7 @@ vector=vector+1
- .endr
- 2: jmp common_interrupt
- .endr
--END(irq_entries_start)
-+ENDPROC(irq_entries_start)
-
- .previous
- END(interrupt)
-@@ -877,7 +1143,7 @@ ENTRY(coprocessor_error)
- pushl_cfi $do_coprocessor_error
- jmp error_code
- CFI_ENDPROC
--END(coprocessor_error)
-+ENDPROC(coprocessor_error)
-
- ENTRY(simd_coprocessor_error)
- RING0_INT_FRAME
-@@ -898,7 +1164,7 @@ ENTRY(simd_coprocessor_error)
- #endif
- jmp error_code
- CFI_ENDPROC
--END(simd_coprocessor_error)
-+ENDPROC(simd_coprocessor_error)
-
- ENTRY(device_not_available)
- RING0_INT_FRAME
-@@ -906,7 +1172,7 @@ ENTRY(device_not_available)
- pushl_cfi $do_device_not_available
- jmp error_code
- CFI_ENDPROC
--END(device_not_available)
-+ENDPROC(device_not_available)
-
- #ifdef CONFIG_PARAVIRT
- ENTRY(native_iret)
-@@ -915,12 +1181,12 @@ ENTRY(native_iret)
- .align 4
- .long native_iret, iret_exc
- .previous
--END(native_iret)
-+ENDPROC(native_iret)
-
- ENTRY(native_irq_enable_sysexit)
- sti
- sysexit
--END(native_irq_enable_sysexit)
-+ENDPROC(native_irq_enable_sysexit)
- #endif
-
- ENTRY(overflow)
-@@ -929,7 +1195,7 @@ ENTRY(overflow)
- pushl_cfi $do_overflow
- jmp error_code
- CFI_ENDPROC
--END(overflow)
-+ENDPROC(overflow)
-
- ENTRY(bounds)
- RING0_INT_FRAME
-@@ -937,7 +1203,7 @@ ENTRY(bounds)
- pushl_cfi $do_bounds
- jmp error_code
- CFI_ENDPROC
--END(bounds)
-+ENDPROC(bounds)
-
- ENTRY(invalid_op)
- RING0_INT_FRAME
-@@ -945,7 +1211,7 @@ ENTRY(invalid_op)
- pushl_cfi $do_invalid_op
- jmp error_code
- CFI_ENDPROC
--END(invalid_op)
-+ENDPROC(invalid_op)
-
- ENTRY(coprocessor_segment_overrun)
- RING0_INT_FRAME
-@@ -953,35 +1219,35 @@ ENTRY(coprocessor_segment_overrun)
- pushl_cfi $do_coprocessor_segment_overrun
- jmp error_code
- CFI_ENDPROC
--END(coprocessor_segment_overrun)
-+ENDPROC(coprocessor_segment_overrun)
-
- ENTRY(invalid_TSS)
- RING0_EC_FRAME
- pushl_cfi $do_invalid_TSS
- jmp error_code
- CFI_ENDPROC
--END(invalid_TSS)
-+ENDPROC(invalid_TSS)
-
- ENTRY(segment_not_present)
- RING0_EC_FRAME
- pushl_cfi $do_segment_not_present
- jmp error_code
- CFI_ENDPROC
--END(segment_not_present)
-+ENDPROC(segment_not_present)
-
- ENTRY(stack_segment)
- RING0_EC_FRAME
- pushl_cfi $do_stack_segment
- jmp error_code
- CFI_ENDPROC
--END(stack_segment)
-+ENDPROC(stack_segment)
-
- ENTRY(alignment_check)
- RING0_EC_FRAME
- pushl_cfi $do_alignment_check
- jmp error_code
- CFI_ENDPROC
--END(alignment_check)
-+ENDPROC(alignment_check)
-
- ENTRY(divide_error)
- RING0_INT_FRAME
-@@ -989,7 +1255,7 @@ ENTRY(divide_error)
- pushl_cfi $do_divide_error
- jmp error_code
- CFI_ENDPROC
--END(divide_error)
-+ENDPROC(divide_error)
-
- #ifdef CONFIG_X86_MCE
- ENTRY(machine_check)
-@@ -998,7 +1264,7 @@ ENTRY(machine_check)
- pushl_cfi machine_check_vector
- jmp error_code
- CFI_ENDPROC
--END(machine_check)
-+ENDPROC(machine_check)
- #endif
-
- ENTRY(spurious_interrupt_bug)
-@@ -1007,7 +1273,7 @@ ENTRY(spurious_interrupt_bug)
- pushl_cfi $do_spurious_interrupt_bug
- jmp error_code
- CFI_ENDPROC
--END(spurious_interrupt_bug)
-+ENDPROC(spurious_interrupt_bug)
- /*
- * End of kprobes section
- */
-@@ -1123,7 +1389,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
-
- ENTRY(mcount)
- ret
--END(mcount)
-+ENDPROC(mcount)
-
- ENTRY(ftrace_caller)
- cmpl $0, function_trace_stop
-@@ -1152,7 +1418,7 @@ ftrace_graph_call:
- .globl ftrace_stub
- ftrace_stub:
- ret
--END(ftrace_caller)
-+ENDPROC(ftrace_caller)
-
- #else /* ! CONFIG_DYNAMIC_FTRACE */
-
-@@ -1188,7 +1454,7 @@ trace:
- popl %ecx
- popl %eax
- jmp ftrace_stub
--END(mcount)
-+ENDPROC(mcount)
- #endif /* CONFIG_DYNAMIC_FTRACE */
- #endif /* CONFIG_FUNCTION_TRACER */
-
-@@ -1209,7 +1475,7 @@ ENTRY(ftrace_graph_caller)
- popl %ecx
- popl %eax
- ret
--END(ftrace_graph_caller)
-+ENDPROC(ftrace_graph_caller)
-
- .globl return_to_handler
- return_to_handler:
-@@ -1223,7 +1489,6 @@ return_to_handler:
- jmp *%ecx
- #endif
-
--.section .rodata,"a"
- #include "syscall_table_32.S"
-
- syscall_table_size=(.-sys_call_table)
-@@ -1269,15 +1534,18 @@ error_code:
- movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
- REG_TO_PTGS %ecx
- SET_KERNEL_GS %ecx
-- movl $(__USER_DS), %ecx
-+ movl $(__KERNEL_DS), %ecx
- movl %ecx, %ds
- movl %ecx, %es
-+
-+ pax_enter_kernel
-+
- TRACE_IRQS_OFF
- movl %esp,%eax # pt_regs pointer
- call *%edi
- jmp ret_from_exception
- CFI_ENDPROC
--END(page_fault)
-+ENDPROC(page_fault)
-
- /*
- * Debug traps and NMI can happen at the one SYSENTER instruction
-@@ -1319,7 +1587,7 @@ debug_stack_correct:
- call do_debug
- jmp ret_from_exception
- CFI_ENDPROC
--END(debug)
-+ENDPROC(debug)
-
- /*
- * NMI is doubly nasty. It can happen _while_ we're handling
-@@ -1358,6 +1626,9 @@ nmi_stack_correct:
- xorl %edx,%edx # zero error code
- movl %esp,%eax # pt_regs pointer
- call do_nmi
-+
-+ pax_exit_kernel
-+
- jmp restore_all_notrace
- CFI_ENDPROC
-
-@@ -1395,13 +1666,16 @@ nmi_espfix_stack:
- FIXUP_ESPFIX_STACK # %eax == %esp
- xorl %edx,%edx # zero error code
- call do_nmi
-+
-+ pax_exit_kernel
-+
- RESTORE_REGS
- lss 12+4(%esp), %esp # back to espfix stack
- CFI_ADJUST_CFA_OFFSET -24
- jmp irq_return
- #endif
- CFI_ENDPROC
--END(nmi)
-+ENDPROC(nmi)
-
- ENTRY(int3)
- RING0_INT_FRAME
-@@ -1413,14 +1687,14 @@ ENTRY(int3)
- call do_int3
- jmp ret_from_exception
- CFI_ENDPROC
--END(int3)
-+ENDPROC(int3)
-
- ENTRY(general_protection)
- RING0_EC_FRAME
- pushl_cfi $do_general_protection
- jmp error_code
- CFI_ENDPROC
--END(general_protection)
-+ENDPROC(general_protection)
-
- #ifdef CONFIG_KVM_GUEST
- ENTRY(async_page_fault)
-@@ -1428,7 +1702,7 @@ ENTRY(async_page_fault)
- pushl_cfi $do_async_page_fault
- jmp error_code
- CFI_ENDPROC
--END(async_page_fault)
-+ENDPROC(async_page_fault)
- #endif
-
- /*
-diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index 8d15c69..30e8c80 100644
---- a/arch/x86/kernel/entry_64.S
-+++ b/arch/x86/kernel/entry_64.S
-@@ -56,6 +56,8 @@
- #include <asm/ftrace.h>
- #include <asm/percpu.h>
- #include <asm/pgtable_types.h>
-+#include <asm/pgtable.h>
-+#include <asm/alternative-asm.h>
-
- /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
- #include <linux/elf-em.h>
-@@ -69,8 +71,9 @@
- #ifdef CONFIG_FUNCTION_TRACER
- #ifdef CONFIG_DYNAMIC_FTRACE
- ENTRY(mcount)
-+ pax_force_retaddr
- retq
--END(mcount)
-+ENDPROC(mcount)
-
- ENTRY(ftrace_caller)
- cmpl $0, function_trace_stop
-@@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
- #endif
-
- GLOBAL(ftrace_stub)
-+ pax_force_retaddr
- retq
--END(ftrace_caller)
-+ENDPROC(ftrace_caller)
-
- #else /* ! CONFIG_DYNAMIC_FTRACE */
- ENTRY(mcount)
-@@ -113,6 +117,7 @@ ENTRY(mcount)
- #endif
-
- GLOBAL(ftrace_stub)
-+ pax_force_retaddr
- retq
-
- trace:
-@@ -122,12 +127,13 @@ trace:
- movq 8(%rbp), %rsi
- subq $MCOUNT_INSN_SIZE, %rdi
-
-+ pax_force_fptr ftrace_trace_function
- call *ftrace_trace_function
-
- MCOUNT_RESTORE_FRAME
-
- jmp ftrace_stub
--END(mcount)
-+ENDPROC(mcount)
- #endif /* CONFIG_DYNAMIC_FTRACE */
- #endif /* CONFIG_FUNCTION_TRACER */
-
-@@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
-
- MCOUNT_RESTORE_FRAME
-
-+ pax_force_retaddr
- retq
--END(ftrace_graph_caller)
-+ENDPROC(ftrace_graph_caller)
-
- GLOBAL(return_to_handler)
- subq $24, %rsp
-@@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
- movq 8(%rsp), %rdx
- movq (%rsp), %rax
- addq $24, %rsp
-+ pax_force_fptr %rdi
- jmp *%rdi
- #endif
-
-@@ -179,6 +187,286 @@ ENTRY(native_usergs_sysret64)
- ENDPROC(native_usergs_sysret64)
- #endif /* CONFIG_PARAVIRT */
-
-+ .macro ljmpq sel, off
-+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
-+ .byte 0x48; ljmp *1234f(%rip)
-+ .pushsection .rodata
-+ .align 16
-+ 1234: .quad \off; .word \sel
-+ .popsection
-+#else
-+ pushq $\sel
-+ pushq $\off
-+ lretq
-+#endif
-+ .endm
-+
-+ .macro pax_enter_kernel
-+ pax_set_fptr_mask
-+#ifdef CONFIG_PAX_KERNEXEC
-+ call pax_enter_kernel
-+#endif
-+ .endm
-+
-+ .macro pax_exit_kernel
-+#ifdef CONFIG_PAX_KERNEXEC
-+ call pax_exit_kernel
-+#endif
-+ .endm
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ENTRY(pax_enter_kernel)
-+ pushq %rdi
-+
-+#ifdef CONFIG_PARAVIRT
-+ PV_SAVE_REGS(CLBR_RDI)
-+#endif
-+
-+ GET_CR0_INTO_RDI
-+ bts $16,%rdi
-+ jnc 3f
-+ mov %cs,%edi
-+ cmp $__KERNEL_CS,%edi
-+ jnz 2f
-+1:
-+
-+#ifdef CONFIG_PARAVIRT
-+ PV_RESTORE_REGS(CLBR_RDI)
-+#endif
-+
-+ popq %rdi
-+ pax_force_retaddr
-+ retq
-+
-+2: ljmpq __KERNEL_CS,1b
-+3: ljmpq __KERNEXEC_KERNEL_CS,4f
-+4: SET_RDI_INTO_CR0
-+ jmp 1b
-+ENDPROC(pax_enter_kernel)
-+
-+ENTRY(pax_exit_kernel)
-+ pushq %rdi
-+
-+#ifdef CONFIG_PARAVIRT
-+ PV_SAVE_REGS(CLBR_RDI)
-+#endif
-+
-+ mov %cs,%rdi
-+ cmp $__KERNEXEC_KERNEL_CS,%edi
-+ jz 2f
-+ GET_CR0_INTO_RDI
-+ bts $16,%rdi
-+ jnc 4f
-+1:
-+
-+#ifdef CONFIG_PARAVIRT
-+ PV_RESTORE_REGS(CLBR_RDI);
-+#endif
-+
-+ popq %rdi
-+ pax_force_retaddr
-+ retq
-+
-+2: GET_CR0_INTO_RDI
-+ btr $16,%rdi
-+ jnc 4f
-+ ljmpq __KERNEL_CS,3f
-+3: SET_RDI_INTO_CR0
-+ jmp 1b
-+4: ud2
-+ jmp 4b
-+ENDPROC(pax_exit_kernel)
-+#endif
-+
-+ .macro pax_enter_kernel_user
-+ pax_set_fptr_mask
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ call pax_enter_kernel_user
-+#endif
-+ .endm
-+
-+ .macro pax_exit_kernel_user
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ call pax_exit_kernel_user
-+#endif
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ pushq %rax
-+ pushq %r11
-+ call pax_randomize_kstack
-+ popq %r11
-+ popq %rax
-+#endif
-+ .endm
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ENTRY(pax_enter_kernel_user)
-+ pushq %rdi
-+ pushq %rbx
-+
-+#ifdef CONFIG_PARAVIRT
-+ PV_SAVE_REGS(CLBR_RDI)
-+#endif
-+
-+ GET_CR3_INTO_RDI
-+ mov %rdi,%rbx
-+ add $__START_KERNEL_map,%rbx
-+ sub phys_base(%rip),%rbx
-+
-+#ifdef CONFIG_PARAVIRT
-+ cmpl $0, pv_info+PARAVIRT_enabled
-+ jz 1f
-+ pushq %rdi
-+ i = 0
-+ .rept USER_PGD_PTRS
-+ mov i*8(%rbx),%rsi
-+ mov $0,%sil
-+ lea i*8(%rbx),%rdi
-+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
-+ i = i + 1
-+ .endr
-+ popq %rdi
-+ jmp 2f
-+1:
-+#endif
-+
-+ i = 0
-+ .rept USER_PGD_PTRS
-+ movb $0,i*8(%rbx)
-+ i = i + 1
-+ .endr
-+
-+#ifdef CONFIG_PARAVIRT
-+2:
-+#endif
-+ SET_RDI_INTO_CR3
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ GET_CR0_INTO_RDI
-+ bts $16,%rdi
-+ SET_RDI_INTO_CR0
-+#endif
-+
-+#ifdef CONFIG_PARAVIRT
-+ PV_RESTORE_REGS(CLBR_RDI)
-+#endif
-+
-+ popq %rbx
-+ popq %rdi
-+ pax_force_retaddr
-+ retq
-+ENDPROC(pax_enter_kernel_user)
-+
-+ENTRY(pax_exit_kernel_user)
-+ pushq %rdi
-+ pushq %rbx
-+
-+#ifdef CONFIG_PARAVIRT
-+ PV_SAVE_REGS(CLBR_RDI)
-+#endif
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ GET_CR0_INTO_RDI
-+ btr $16,%rdi
-+ jnc 3f
-+ SET_RDI_INTO_CR0
-+#endif
-+
-+ GET_CR3_INTO_RDI
-+ mov %rdi,%rbx
-+ add $__START_KERNEL_map,%rbx
-+ sub phys_base(%rip),%rbx
-+
-+#ifdef CONFIG_PARAVIRT
-+ cmpl $0, pv_info+PARAVIRT_enabled
-+ jz 1f
-+ i = 0
-+ .rept USER_PGD_PTRS
-+ mov i*8(%rbx),%rsi
-+ mov $0x67,%sil
-+ lea i*8(%rbx),%rdi
-+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
-+ i = i + 1
-+ .endr
-+ jmp 2f
-+1:
-+#endif
-+
-+ i = 0
-+ .rept USER_PGD_PTRS
-+ movb $0x67,i*8(%rbx)
-+ i = i + 1
-+ .endr
-+
-+#ifdef CONFIG_PARAVIRT
-+2: PV_RESTORE_REGS(CLBR_RDI)
-+#endif
-+
-+ popq %rbx
-+ popq %rdi
-+ pax_force_retaddr
-+ retq
-+3: ud2
-+ jmp 3b
-+ENDPROC(pax_exit_kernel_user)
-+#endif
-+
-+.macro pax_erase_kstack
-+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+ call pax_erase_kstack
-+#endif
-+.endm
-+
-+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+ENTRY(pax_erase_kstack)
-+ pushq %rdi
-+ pushq %rcx
-+ pushq %rax
-+ pushq %r11
-+
-+ GET_THREAD_INFO(%r11)
-+ mov TI_lowest_stack(%r11), %rdi
-+ mov $-0xBEEF, %rax
-+ std
-+
-+1: mov %edi, %ecx
-+ and $THREAD_SIZE_asm - 1, %ecx
-+ shr $3, %ecx
-+ repne scasq
-+ jecxz 2f
-+
-+ cmp $2*8, %ecx
-+ jc 2f
-+
-+ mov $2*8, %ecx
-+ repe scasq
-+ jecxz 2f
-+ jne 1b
-+
-+2: cld
-+ or $2*8, %rdi
-+ mov %esp, %ecx
-+ sub %edi, %ecx
-+
-+ cmp $THREAD_SIZE_asm, %rcx
-+ jb 3f
-+ ud2
-+3:
-+
-+ shr $3, %ecx
-+ rep stosq
-+
-+ mov TI_task_thread_sp0(%r11), %rdi
-+ sub $256, %rdi
-+ mov %rdi, TI_lowest_stack(%r11)
-+
-+ popq %r11
-+ popq %rax
-+ popq %rcx
-+ popq %rdi
-+ pax_force_retaddr
-+ ret
-+ENDPROC(pax_erase_kstack)
-+#endif
-
- .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
- #ifdef CONFIG_TRACE_IRQFLAGS
-@@ -232,8 +520,8 @@ ENDPROC(native_usergs_sysret64)
- .endm
-
- .macro UNFAKE_STACK_FRAME
-- addq $8*6, %rsp
-- CFI_ADJUST_CFA_OFFSET -(6*8)
-+ addq $8*6 + ARG_SKIP, %rsp
-+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
- .endm
-
- /*
-@@ -302,25 +590,26 @@ ENDPROC(native_usergs_sysret64)
- /* save partial stack frame */
- .macro SAVE_ARGS_IRQ
- cld
-- /* start from rbp in pt_regs and jump over */
-- movq_cfi rdi, RDI-RBP
-- movq_cfi rsi, RSI-RBP
-- movq_cfi rdx, RDX-RBP
-- movq_cfi rcx, RCX-RBP
-- movq_cfi rax, RAX-RBP
-- movq_cfi r8, R8-RBP
-- movq_cfi r9, R9-RBP
-- movq_cfi r10, R10-RBP
-- movq_cfi r11, R11-RBP
-+ /* start from r15 in pt_regs and jump over */
-+ movq_cfi rdi, RDI
-+ movq_cfi rsi, RSI
-+ movq_cfi rdx, RDX
-+ movq_cfi rcx, RCX
-+ movq_cfi rax, RAX
-+ movq_cfi r8, R8
-+ movq_cfi r9, R9
-+ movq_cfi r10, R10
-+ movq_cfi r11, R11
-+ movq_cfi r12, R12
-
- /* Save rbp so that we can unwind from get_irq_regs() */
-- movq_cfi rbp, 0
-+ movq_cfi rbp, RBP
-
- /* Save previous stack value */
- movq %rsp, %rsi
-
-- leaq -RBP(%rsp),%rdi /* arg1 for handler */
-- testl $3, CS(%rdi)
-+ movq %rsp,%rdi /* arg1 for handler */
-+ testb $3, CS(%rsi)
- je 1f
- SWAPGS
- /*
-@@ -341,24 +630,39 @@ ENDPROC(native_usergs_sysret64)
- 0x06 /* DW_OP_deref */, \
- 0x08 /* DW_OP_const1u */, SS+8-RBP, \
- 0x22 /* DW_OP_plus */
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ testb $3, CS(%rdi)
-+ jnz 1f
-+ pax_enter_kernel
-+ jmp 2f
-+1: pax_enter_kernel_user
-+2:
-+#else
-+ pax_enter_kernel
-+#endif
-+
- /* We entered an interrupt context - irqs are off: */
- TRACE_IRQS_OFF
- .endm
-
- ENTRY(save_rest)
-- PARTIAL_FRAME 1 REST_SKIP+8
-- movq 5*8+16(%rsp), %r11 /* save return address */
-+ PARTIAL_FRAME 1 8
- movq_cfi rbx, RBX+16
- movq_cfi rbp, RBP+16
-+
-+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
- movq_cfi r12, R12+16
-+#endif
-+
- movq_cfi r13, R13+16
- movq_cfi r14, R14+16
- movq_cfi r15, R15+16
-- movq %r11, 8(%rsp) /* return address */
- FIXUP_TOP_OF_STACK %r11, 16
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
--END(save_rest)
-+ENDPROC(save_rest)
-
- /* save complete stack frame */
- .pushsection .kprobes.text, "ax"
-@@ -387,10 +691,21 @@ ENTRY(save_paranoid)
- js 1f /* negative -> in kernel */
- SWAPGS
- xorl %ebx,%ebx
--1: ret
-+1:
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ testb $3, CS+8(%rsp)
-+ jnz 1f
-+ pax_enter_kernel
-+ jmp 2f
-+1: pax_enter_kernel_user
-+2:
-+#else
-+ pax_enter_kernel
-+#endif
-+ pax_force_retaddr
-+ ret
- CFI_ENDPROC
--END(save_paranoid)
-- .popsection
-+ENDPROC(save_paranoid)
-
- /*
- * A newly forked process directly context switches into this address.
-@@ -411,7 +726,7 @@ ENTRY(ret_from_fork)
-
- RESTORE_REST
-
-- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
-+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
- je int_ret_from_sys_call
-
- /*
-@@ -424,7 +739,7 @@ ENTRY(ret_from_fork)
- jmp int_ret_from_sys_call
-
- CFI_ENDPROC
--END(ret_from_fork)
-+ENDPROC(ret_from_fork)
-
- /*
- * System call entry. Up to 6 arguments in registers are supported.
-@@ -460,7 +775,7 @@ END(ret_from_fork)
- ENTRY(system_call)
- CFI_STARTPROC simple
- CFI_SIGNAL_FRAME
-- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
-+ CFI_DEF_CFA rsp,0
- CFI_REGISTER rip,rcx
- /*CFI_REGISTER rflags,r11*/
- SWAPGS_UNSAFE_STACK
-@@ -473,12 +788,18 @@ ENTRY(system_call_after_swapgs)
-
- movq %rsp,PER_CPU_VAR(old_rsp)
- movq PER_CPU_VAR(kernel_stack),%rsp
-+ SAVE_ARGS 8*6,0
-+ pax_enter_kernel_user
-+
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ pax_erase_kstack
-+#endif
-+
- /*
- * No need to follow this irqs off/on section - it's straight
- * and short:
- */
- ENABLE_INTERRUPTS(CLBR_NONE)
-- SAVE_ARGS 8,0
- movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
- movq %rcx,RIP-ARGOFFSET(%rsp)
- CFI_REL_OFFSET rip,RIP-ARGOFFSET
-@@ -507,6 +828,8 @@ sysret_check:
- andl %edi,%edx
- jnz sysret_careful
- CFI_REMEMBER_STATE
-+ pax_exit_kernel_user
-+ pax_erase_kstack
- /*
- * sysretq will re-enable interrupts:
- */
-@@ -565,6 +888,9 @@ auditsys:
- movq %rax,%rsi /* 2nd arg: syscall number */
- movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
- call audit_syscall_entry
-+
-+ pax_erase_kstack
-+
- LOAD_ARGS 0 /* reload call-clobbered registers */
- jmp system_call_fastpath
-
-@@ -595,12 +921,15 @@ tracesys:
- FIXUP_TOP_OF_STACK %rdi
- movq %rsp,%rdi
- call syscall_trace_enter
-+
-+ pax_erase_kstack
-+
- /*
- * Reload arg registers from stack in case ptrace changed them.
- * We don't reload %rax because syscall_trace_enter() returned
- * the value it wants us to use in the table lookup.
- */
-- LOAD_ARGS ARGOFFSET, 1
-+ LOAD_ARGS 1
- RESTORE_REST
- cmpq $__NR_syscall_max,%rax
- ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
-@@ -616,7 +945,7 @@ tracesys:
- GLOBAL(int_ret_from_sys_call)
- DISABLE_INTERRUPTS(CLBR_NONE)
- TRACE_IRQS_OFF
-- testl $3,CS-ARGOFFSET(%rsp)
-+ testb $3,CS-ARGOFFSET(%rsp)
- je retint_restore_args
- movl $_TIF_ALLWORK_MASK,%edi
- /* edi: mask to check */
-@@ -627,7 +956,9 @@ GLOBAL(int_with_check)
- andl %edi,%edx
- jnz int_careful
- andl $~TS_COMPAT,TI_status(%rcx)
-- jmp retint_swapgs
-+ pax_exit_kernel_user
-+ pax_erase_kstack
-+ jmp retint_swapgs_pax
-
- /* Either reschedule or signal or syscall exit tracking needed. */
- /* First do a reschedule test. */
-@@ -673,7 +1004,7 @@ int_restore_rest:
- TRACE_IRQS_OFF
- jmp int_with_check
- CFI_ENDPROC
--END(system_call)
-+ENDPROC(system_call)
-
- /*
- * Certain special system calls that need to save a complete full stack frame.
-@@ -681,15 +1012,13 @@ END(system_call)
- .macro PTREGSCALL label,func,arg
- ENTRY(\label)
- PARTIAL_FRAME 1 8 /* offset 8: return address */
-- subq $REST_SKIP, %rsp
-- CFI_ADJUST_CFA_OFFSET REST_SKIP
- call save_rest
- DEFAULT_FRAME 0 8 /* offset 8: return address */
- leaq 8(%rsp), \arg /* pt_regs pointer */
- call \func
- jmp ptregscall_common
- CFI_ENDPROC
--END(\label)
-+ENDPROC(\label)
- .endm
-
- PTREGSCALL stub_clone, sys_clone, %r8
-@@ -704,12 +1033,17 @@ ENTRY(ptregscall_common)
- movq_cfi_restore R15+8, r15
- movq_cfi_restore R14+8, r14
- movq_cfi_restore R13+8, r13
-+
-+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
- movq_cfi_restore R12+8, r12
-+#endif
-+
- movq_cfi_restore RBP+8, rbp
- movq_cfi_restore RBX+8, rbx
-- ret $REST_SKIP /* pop extended registers */
-+ pax_force_retaddr
-+ ret
- CFI_ENDPROC
--END(ptregscall_common)
-+ENDPROC(ptregscall_common)
-
- ENTRY(stub_execve)
- CFI_STARTPROC
-@@ -724,7 +1058,7 @@ ENTRY(stub_execve)
- RESTORE_REST
- jmp int_ret_from_sys_call
- CFI_ENDPROC
--END(stub_execve)
-+ENDPROC(stub_execve)
-
- /*
- * sigreturn is special because it needs to restore all registers on return.
-@@ -742,7 +1076,7 @@ ENTRY(stub_rt_sigreturn)
- RESTORE_REST
- jmp int_ret_from_sys_call
- CFI_ENDPROC
--END(stub_rt_sigreturn)
-+ENDPROC(stub_rt_sigreturn)
-
- /*
- * Build the entry stubs and pointer table with some assembler magic.
-@@ -777,7 +1111,7 @@ vector=vector+1
- 2: jmp common_interrupt
- .endr
- CFI_ENDPROC
--END(irq_entries_start)
-+ENDPROC(irq_entries_start)
-
- .previous
- END(interrupt)
-@@ -794,8 +1128,8 @@ END(interrupt)
- /* 0(%rsp): ~(interrupt number) */
- .macro interrupt func
- /* reserve pt_regs for scratch regs and rbp */
-- subq $ORIG_RAX-RBP, %rsp
-- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
-+ subq $ORIG_RAX, %rsp
-+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
- SAVE_ARGS_IRQ
- call \func
- .endm
-@@ -822,13 +1156,13 @@ ret_from_intr:
- /* Restore saved previous stack */
- popq %rsi
- CFI_DEF_CFA_REGISTER rsi
-- leaq ARGOFFSET-RBP(%rsi), %rsp
-+ movq %rsi, %rsp
- CFI_DEF_CFA_REGISTER rsp
-- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
-+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
-
- exit_intr:
- GET_THREAD_INFO(%rcx)
-- testl $3,CS-ARGOFFSET(%rsp)
-+ testb $3,CS-ARGOFFSET(%rsp)
- je retint_kernel
-
- /* Interrupt came from user space */
-@@ -850,12 +1184,16 @@ retint_swapgs: /* return to user-space */
- * The iretq could re-enable interrupts:
- */
- DISABLE_INTERRUPTS(CLBR_ANY)
-+ pax_exit_kernel_user
-+retint_swapgs_pax:
- TRACE_IRQS_IRETQ
- SWAPGS
- jmp restore_args
-
- retint_restore_args: /* return to kernel space */
- DISABLE_INTERRUPTS(CLBR_ANY)
-+ pax_exit_kernel
-+ pax_force_retaddr (RIP-ARGOFFSET)
- /*
- * The iretq could re-enable interrupts:
- */
-@@ -893,15 +1231,15 @@ native_irq_return_ldt:
- SWAPGS
- movq PER_CPU_VAR(espfix_waddr),%rdi
- movq %rax,(0*8)(%rdi) /* RAX */
-- movq (2*8)(%rsp),%rax /* RIP */
-+ movq (2*8 + RIP-RIP)(%rsp),%rax /* RIP */
- movq %rax,(1*8)(%rdi)
-- movq (3*8)(%rsp),%rax /* CS */
-+ movq (2*8 + CS-RIP)(%rsp),%rax /* CS */
- movq %rax,(2*8)(%rdi)
-- movq (4*8)(%rsp),%rax /* RFLAGS */
-+ movq (2*8 + EFLAGS-RIP)(%rsp),%rax /* RFLAGS */
- movq %rax,(3*8)(%rdi)
-- movq (6*8)(%rsp),%rax /* SS */
-+ movq (2*8 + SS-RIP)(%rsp),%rax /* SS */
- movq %rax,(5*8)(%rdi)
-- movq (5*8)(%rsp),%rax /* RSP */
-+ movq (2*8 + RSP-RIP)(%rsp),%rax /* RSP */
- movq %rax,(4*8)(%rdi)
- andl $0xffff0000,%eax
- popq_cfi %rdi
-@@ -957,7 +1295,7 @@ ENTRY(retint_kernel)
- jmp exit_intr
- #endif
- CFI_ENDPROC
--END(common_interrupt)
-+ENDPROC(common_interrupt)
-
- /*
- * End of kprobes section
-@@ -974,7 +1312,7 @@ ENTRY(\sym)
- interrupt \do_sym
- jmp ret_from_intr
- CFI_ENDPROC
--END(\sym)
-+ENDPROC(\sym)
- .endm
-
- #ifdef CONFIG_SMP
-@@ -1044,7 +1382,7 @@ ENTRY(\sym)
- call \do_sym
- jmp error_exit /* %ebx: no swapgs flag */
- CFI_ENDPROC
--END(\sym)
-+ENDPROC(\sym)
- .endm
-
- .macro paranoidzeroentry sym do_sym
-@@ -1061,10 +1399,10 @@ ENTRY(\sym)
- call \do_sym
- jmp paranoid_exit /* %ebx: no swapgs flag */
- CFI_ENDPROC
--END(\sym)
-+ENDPROC(\sym)
- .endm
-
--#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
-+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
- .macro paranoidzeroentry_ist sym do_sym ist
- ENTRY(\sym)
- INTR_FRAME
-@@ -1076,12 +1414,18 @@ ENTRY(\sym)
- TRACE_IRQS_OFF
- movq %rsp,%rdi /* pt_regs pointer */
- xorl %esi,%esi /* no error code */
-+#ifdef CONFIG_SMP
-+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
-+ lea init_tss(%r13), %r13
-+#else
-+ lea init_tss(%rip), %r13
-+#endif
- subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
- call \do_sym
- addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
- jmp paranoid_exit /* %ebx: no swapgs flag */
- CFI_ENDPROC
--END(\sym)
-+ENDPROC(\sym)
- .endm
-
- .macro errorentry sym do_sym
-@@ -1098,7 +1442,7 @@ ENTRY(\sym)
- call \do_sym
- jmp error_exit /* %ebx: no swapgs flag */
- CFI_ENDPROC
--END(\sym)
-+ENDPROC(\sym)
- .endm
-
- /* error code is on the stack already */
-@@ -1117,7 +1461,7 @@ ENTRY(\sym)
- call \do_sym
- jmp paranoid_exit /* %ebx: no swapgs flag */
- CFI_ENDPROC
--END(\sym)
-+ENDPROC(\sym)
- .endm
-
- zeroentry divide_error do_divide_error
-@@ -1147,9 +1491,10 @@ gs_change:
- 2: mfence /* workaround */
- SWAPGS
- popfq_cfi
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
--END(native_load_gs_index)
-+ENDPROC(native_load_gs_index)
-
- .section __ex_table,"a"
- .align 8
-@@ -1171,13 +1516,14 @@ ENTRY(kernel_thread_helper)
- * Here we are in the child and the registers are set as they were
- * at kernel_thread() invocation in the parent.
- */
-+ pax_force_fptr %rsi
- call *%rsi
- # exit
- mov %eax, %edi
- call do_exit
- ud2 # padding for call trace
- CFI_ENDPROC
--END(kernel_thread_helper)
-+ENDPROC(kernel_thread_helper)
-
- /*
- * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
-@@ -1204,11 +1550,11 @@ ENTRY(kernel_execve)
- RESTORE_REST
- testq %rax,%rax
- je int_ret_from_sys_call
-- RESTORE_ARGS
- UNFAKE_STACK_FRAME
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
--END(kernel_execve)
-+ENDPROC(kernel_execve)
-
- /* Call softirq on interrupt stack. Interrupts are off. */
- ENTRY(call_softirq)
-@@ -1226,9 +1572,10 @@ ENTRY(call_softirq)
- CFI_DEF_CFA_REGISTER rsp
- CFI_ADJUST_CFA_OFFSET -8
- decl PER_CPU_VAR(irq_count)
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
--END(call_softirq)
-+ENDPROC(call_softirq)
-
- #ifdef CONFIG_XEN
- zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
-@@ -1266,7 +1613,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
- decl PER_CPU_VAR(irq_count)
- jmp error_exit
- CFI_ENDPROC
--END(xen_do_hypervisor_callback)
-+ENDPROC(xen_do_hypervisor_callback)
-
- /*
- * Hypervisor uses this for application faults while it executes.
-@@ -1325,7 +1672,7 @@ ENTRY(xen_failsafe_callback)
- SAVE_ALL
- jmp error_exit
- CFI_ENDPROC
--END(xen_failsafe_callback)
-+ENDPROC(xen_failsafe_callback)
-
- apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
- xen_hvm_callback_vector xen_evtchn_do_upcall
-@@ -1374,16 +1721,31 @@ ENTRY(paranoid_exit)
- TRACE_IRQS_OFF
- testl %ebx,%ebx /* swapgs needed? */
- jnz paranoid_restore
-- testl $3,CS(%rsp)
-+ testb $3,CS(%rsp)
- jnz paranoid_userspace
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ pax_exit_kernel
-+ TRACE_IRQS_IRETQ 0
-+ SWAPGS_UNSAFE_STACK
-+ RESTORE_ALL 8
-+ pax_force_retaddr_bts
-+ jmp irq_return
-+#endif
- paranoid_swapgs:
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ pax_exit_kernel_user
-+#else
-+ pax_exit_kernel
-+#endif
- TRACE_IRQS_IRETQ 0
- SWAPGS_UNSAFE_STACK
- RESTORE_ALL 8
- jmp irq_return
- paranoid_restore:
-+ pax_exit_kernel
- TRACE_IRQS_IRETQ 0
- RESTORE_ALL 8
-+ pax_force_retaddr_bts
- jmp irq_return
- paranoid_userspace:
- GET_THREAD_INFO(%rcx)
-@@ -1412,7 +1774,7 @@ paranoid_schedule:
- TRACE_IRQS_OFF
- jmp paranoid_userspace
- CFI_ENDPROC
--END(paranoid_exit)
-+ENDPROC(paranoid_exit)
-
- /*
- * Exception entry point. This expects an error code/orig_rax on the stack.
-@@ -1439,12 +1801,23 @@ ENTRY(error_entry)
- movq_cfi r14, R14+8
- movq_cfi r15, R15+8
- xorl %ebx,%ebx
-- testl $3,CS+8(%rsp)
-+ testb $3,CS+8(%rsp)
- je error_kernelspace
- error_swapgs:
- SWAPGS
- error_sti:
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ testb $3, CS+8(%rsp)
-+ jnz 1f
-+ pax_enter_kernel
-+ jmp 2f
-+1: pax_enter_kernel_user
-+2:
-+#else
-+ pax_enter_kernel
-+#endif
- TRACE_IRQS_OFF
-+ pax_force_retaddr
- ret
-
- /*
-@@ -1478,7 +1851,7 @@ error_bad_iret:
- decl %ebx /* Return to usergs */
- jmp error_sti
- CFI_ENDPROC
--END(error_entry)
-+ENDPROC(error_entry)
-
-
- /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
-@@ -1498,7 +1871,7 @@ ENTRY(error_exit)
- jnz retint_careful
- jmp retint_swapgs
- CFI_ENDPROC
--END(error_exit)
-+ENDPROC(error_exit)
-
-
- /* runs on exception stack */
-@@ -1510,6 +1883,7 @@ ENTRY(nmi)
- CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
- call save_paranoid
- DEFAULT_FRAME 0
-+
- /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
- movq %rsp,%rdi
- movq $-1,%rsi
-@@ -1520,12 +1894,28 @@ ENTRY(nmi)
- DISABLE_INTERRUPTS(CLBR_NONE)
- testl %ebx,%ebx /* swapgs needed? */
- jnz nmi_restore
-- testl $3,CS(%rsp)
-+ testb $3,CS(%rsp)
- jnz nmi_userspace
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ pax_exit_kernel
-+ SWAPGS_UNSAFE_STACK
-+ RESTORE_ALL 8
-+ pax_force_retaddr_bts
-+ jmp irq_return
-+#endif
- nmi_swapgs:
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ pax_exit_kernel_user
-+#else
-+ pax_exit_kernel
-+#endif
- SWAPGS_UNSAFE_STACK
-+ RESTORE_ALL 8
-+ jmp irq_return
- nmi_restore:
-+ pax_exit_kernel
- RESTORE_ALL 8
-+ pax_force_retaddr_bts
- jmp irq_return
- nmi_userspace:
- GET_THREAD_INFO(%rcx)
-@@ -1554,14 +1944,14 @@ nmi_schedule:
- jmp paranoid_exit
- CFI_ENDPROC
- #endif
--END(nmi)
-+ENDPROC(nmi)
-
- ENTRY(ignore_sysret)
- CFI_STARTPROC
- mov $-ENOSYS,%eax
- sysret
- CFI_ENDPROC
--END(ignore_sysret)
-+ENDPROC(ignore_sysret)
-
- /*
- * End of kprobes section
-diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
-index 94d857f..6042d8a 100644
---- a/arch/x86/kernel/espfix_64.c
-+++ b/arch/x86/kernel/espfix_64.c
-@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
- #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
- static void *espfix_pages[ESPFIX_MAX_PAGES];
-
--static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
-- __aligned(PAGE_SIZE);
-+static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
-
- static unsigned int page_random, slot_random;
-
-@@ -122,14 +121,16 @@ static void init_espfix_random(void)
- void __init init_espfix_bsp(void)
- {
- pgd_t *pgd_p;
-- pteval_t ptemask;
--
-- ptemask = __supported_pte_mask;
-+ unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
-
- /* Install the espfix pud into the kernel page directory */
-- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
-+ pgd_p = &init_level4_pgt[index];
- pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
-
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ clone_pgd_range(get_cpu_pgd(0) + index, swapper_pg_dir + index, 1);
-+#endif
-+
- /* Randomize the locations */
- init_espfix_random();
-
-@@ -197,7 +198,7 @@ void init_espfix_ap(void)
- set_pte(&pte_p[n*PTE_STRIDE], pte);
-
- /* Job is done for this CPU and any CPU which shares this page */
-- ACCESS_ONCE(espfix_pages[page]) = stack_page;
-+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
-
- unlock_done:
- mutex_unlock(&espfix_init_mutex);
-diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
-index c9a281f..3645760 100644
---- a/arch/x86/kernel/ftrace.c
-+++ b/arch/x86/kernel/ftrace.c
-@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
- static const void *mod_code_newcode; /* holds the text to write to the IP */
-
- static unsigned nmi_wait_count;
--static atomic_t nmi_update_count = ATOMIC_INIT(0);
-+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
-
- int ftrace_arch_read_dyn_info(char *buf, int size)
- {
-@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
-
- r = snprintf(buf, size, "%u %u",
- nmi_wait_count,
-- atomic_read(&nmi_update_count));
-+ atomic_read_unchecked(&nmi_update_count));
- return r;
- }
-
-@@ -178,7 +178,7 @@ void ftrace_nmi_enter(void)
- if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
- smp_rmb();
- ftrace_mod_code();
-- atomic_inc(&nmi_update_count);
-+ atomic_inc_unchecked(&nmi_update_count);
- }
- /* Must have previous changes seen before executions */
- smp_mb();
-@@ -271,6 +271,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
- {
- unsigned char replaced[MCOUNT_INSN_SIZE];
-
-+ ip = ktla_ktva(ip);
-+
- /*
- * Note: Due to modules and __init, code can
- * disappear and change, we need to protect against faulting
-@@ -327,7 +329,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
- unsigned char old[MCOUNT_INSN_SIZE], *new;
- int ret;
-
-- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
-+ memcpy(old, ktla_ktva((void *)ftrace_call), MCOUNT_INSN_SIZE);
- new = ftrace_call_replace(ip, (unsigned long)func);
- ret = ftrace_modify_code(ip, old, new);
-
-@@ -353,6 +355,8 @@ static int ftrace_mod_jmp(unsigned long ip,
- {
- unsigned char code[MCOUNT_INSN_SIZE];
-
-+ ip = ktla_ktva(ip);
-+
- if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
- return -EFAULT;
-
-diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
-index 3bb0850..55a56f4 100644
---- a/arch/x86/kernel/head32.c
-+++ b/arch/x86/kernel/head32.c
-@@ -19,6 +19,7 @@
- #include <asm/io_apic.h>
- #include <asm/bios_ebda.h>
- #include <asm/tlbflush.h>
-+#include <asm/boot.h>
-
- static void __init i386_default_early_setup(void)
- {
-@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
- {
- memblock_init();
-
-- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
-+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
-
- #ifdef CONFIG_BLK_DEV_INITRD
- /* Reserve INITRD */
-diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
-index ce0be7c..a61dc21 100644
---- a/arch/x86/kernel/head_32.S
-+++ b/arch/x86/kernel/head_32.S
-@@ -25,6 +25,12 @@
- /* Physical address */
- #define pa(X) ((X) - __PAGE_OFFSET)
-
-+#ifdef CONFIG_PAX_KERNEXEC
-+#define ta(X) (X)
-+#else
-+#define ta(X) ((X) - __PAGE_OFFSET)
-+#endif
-+
- /*
- * References to members of the new_cpu_data structure.
- */
-@@ -54,11 +60,7 @@
- * and small than max_low_pfn, otherwise will waste some page table entries
- */
-
--#if PTRS_PER_PMD > 1
--#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
--#else
--#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
--#endif
-+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
-
- /* Number of possible pages in the lowmem region */
- LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
-@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
- RESERVE_BRK(pagetables, INIT_MAP_SIZE)
-
- /*
-+ * Real beginning of normal "text" segment
-+ */
-+ENTRY(stext)
-+ENTRY(_stext)
-+
-+/*
- * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
- * %esi points to the real-mode code as a 32-bit pointer.
- * CS and DS must be 4 GB flat segments, but we don't depend on
-@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
- * can.
- */
- __HEAD
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ jmp startup_32
-+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
-+.fill PAGE_SIZE-5,1,0xcc
-+#endif
-+
- ENTRY(startup_32)
- movl pa(stack_start),%ecx
-
-@@ -105,6 +120,59 @@ ENTRY(startup_32)
- 2:
- leal -__PAGE_OFFSET(%ecx),%esp
-
-+#ifdef CONFIG_SMP
-+ movl $pa(cpu_gdt_table),%edi
-+ movl $__per_cpu_load,%eax
-+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
-+ rorl $16,%eax
-+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
-+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
-+ movl $__per_cpu_end - 1,%eax
-+ subl $__per_cpu_start,%eax
-+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
-+#endif
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ movl $NR_CPUS,%ecx
-+ movl $pa(cpu_gdt_table),%edi
-+1:
-+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
-+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
-+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
-+ addl $PAGE_SIZE_asm,%edi
-+ loop 1b
-+#endif
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ movl $pa(boot_gdt),%edi
-+ movl $__LOAD_PHYSICAL_ADDR,%eax
-+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
-+ rorl $16,%eax
-+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
-+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
-+ rorl $16,%eax
-+
-+ ljmp $(__BOOT_CS),$1f
-+1:
-+
-+ movl $NR_CPUS,%ecx
-+ movl $pa(cpu_gdt_table),%edi
-+ addl $__PAGE_OFFSET,%eax
-+1:
-+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
-+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
-+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
-+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
-+ rorl $16,%eax
-+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
-+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
-+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
-+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
-+ rorl $16,%eax
-+ addl $PAGE_SIZE_asm,%edi
-+ loop 1b
-+#endif
-+
- /*
- * Clear BSS first so that there are no surprises...
- */
-@@ -195,8 +263,11 @@ ENTRY(startup_32)
- movl %eax, pa(max_pfn_mapped)
-
- /* Do early initialization of the fixmap area */
-- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
-- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
-+#ifdef CONFIG_COMPAT_VDSO
-+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
-+#else
-+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
-+#endif
- #else /* Not PAE */
-
- page_pde_offset = (__PAGE_OFFSET >> 20);
-@@ -226,8 +297,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
- movl %eax, pa(max_pfn_mapped)
-
- /* Do early initialization of the fixmap area */
-- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
-- movl %eax,pa(initial_page_table+0xffc)
-+#ifdef CONFIG_COMPAT_VDSO
-+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
-+#else
-+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
-+#endif
- #endif
-
- #ifdef CONFIG_PARAVIRT
-@@ -241,9 +315,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
- cmpl $num_subarch_entries, %eax
- jae bad_subarch
-
-- movl pa(subarch_entries)(,%eax,4), %eax
-- subl $__PAGE_OFFSET, %eax
-- jmp *%eax
-+ jmp *pa(subarch_entries)(,%eax,4)
-
- bad_subarch:
- WEAK(lguest_entry)
-@@ -255,10 +327,10 @@ WEAK(xen_entry)
- __INITDATA
-
- subarch_entries:
-- .long default_entry /* normal x86/PC */
-- .long lguest_entry /* lguest hypervisor */
-- .long xen_entry /* Xen hypervisor */
-- .long default_entry /* Moorestown MID */
-+ .long ta(default_entry) /* normal x86/PC */
-+ .long ta(lguest_entry) /* lguest hypervisor */
-+ .long ta(xen_entry) /* Xen hypervisor */
-+ .long ta(default_entry) /* Moorestown MID */
- num_subarch_entries = (. - subarch_entries) / 4
- .previous
- #else
-@@ -312,6 +384,7 @@ default_entry:
- orl %edx,%eax
- movl %eax,%cr4
-
-+#ifdef CONFIG_X86_PAE
- testb $X86_CR4_PAE, %al # check if PAE is enabled
- jz 6f
-
-@@ -340,6 +413,9 @@ default_entry:
- /* Make changes effective */
- wrmsr
-
-+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
-+#endif
-+
- 6:
-
- /*
-@@ -443,7 +519,7 @@ is386: movl $2,%ecx # set MP
- 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
- movl %eax,%ss # after changing gdt.
-
-- movl $(__USER_DS),%eax # DS/ES contains default USER segment
-+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
- movl %eax,%ds
- movl %eax,%es
-
-@@ -457,15 +533,22 @@ is386: movl $2,%ecx # set MP
- */
- cmpb $0,ready
- jne 1f
-- movl $gdt_page,%eax
-+ movl $cpu_gdt_table,%eax
- movl $stack_canary,%ecx
-+#ifdef CONFIG_SMP
-+ addl $__per_cpu_load,%ecx
-+#endif
- movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
- shrl $16, %ecx
- movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
- movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
- 1:
--#endif
- movl $(__KERNEL_STACK_CANARY),%eax
-+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
-+ movl $(__USER_DS),%eax
-+#else
-+ xorl %eax,%eax
-+#endif
- movl %eax,%gs
-
- xorl %eax,%eax # Clear LDT
-@@ -558,22 +641,22 @@ early_page_fault:
- jmp early_fault
-
- early_fault:
-- cld
- #ifdef CONFIG_PRINTK
-+ cmpl $1,%ss:early_recursion_flag
-+ je hlt_loop
-+ incl %ss:early_recursion_flag
-+ cld
- pusha
- movl $(__KERNEL_DS),%eax
- movl %eax,%ds
- movl %eax,%es
-- cmpl $2,early_recursion_flag
-- je hlt_loop
-- incl early_recursion_flag
- movl %cr2,%eax
- pushl %eax
- pushl %edx /* trapno */
- pushl $fault_msg
- call printk
-+; call dump_stack
- #endif
-- call dump_stack
- hlt_loop:
- hlt
- jmp hlt_loop
-@@ -581,8 +664,11 @@ hlt_loop:
- /* This is the default interrupt "handler" :-) */
- ALIGN
- ignore_int:
-- cld
- #ifdef CONFIG_PRINTK
-+ cmpl $2,%ss:early_recursion_flag
-+ je hlt_loop
-+ incl %ss:early_recursion_flag
-+ cld
- pushl %eax
- pushl %ecx
- pushl %edx
-@@ -591,9 +677,6 @@ ignore_int:
- movl $(__KERNEL_DS),%eax
- movl %eax,%ds
- movl %eax,%es
-- cmpl $2,early_recursion_flag
-- je hlt_loop
-- incl early_recursion_flag
- pushl 16(%esp)
- pushl 24(%esp)
- pushl 32(%esp)
-@@ -622,29 +705,43 @@ ENTRY(initial_code)
- /*
- * BSS section
- */
--__PAGE_ALIGNED_BSS
-- .align PAGE_SIZE
- #ifdef CONFIG_X86_PAE
-+.section .initial_pg_pmd,"a",@progbits
- initial_pg_pmd:
- .fill 1024*KPMDS,4,0
- #else
-+.section .initial_page_table,"a",@progbits
- ENTRY(initial_page_table)
- .fill 1024,4,0
- #endif
-+.section .initial_pg_fixmap,"a",@progbits
- initial_pg_fixmap:
- .fill 1024,4,0
-+.section .empty_zero_page,"a",@progbits
- ENTRY(empty_zero_page)
- .fill 4096,1,0
-+.section .swapper_pg_dir,"a",@progbits
- ENTRY(swapper_pg_dir)
-+#ifdef CONFIG_X86_PAE
-+ .fill 4,8,0
-+#else
- .fill 1024,4,0
-+#endif
-+
-+/*
-+ * The IDT has to be page-aligned to simplify the Pentium
-+ * F0 0F bug workaround.. We have a special link segment
-+ * for this.
-+ */
-+.section .idt,"a",@progbits
-+ENTRY(idt_table)
-+ .fill 256,8,0
-
- /*
- * This starts the data section.
- */
- #ifdef CONFIG_X86_PAE
--__PAGE_ALIGNED_DATA
-- /* Page-aligned for the benefit of paravirt? */
-- .align PAGE_SIZE
-+.section .initial_page_table,"a",@progbits
- ENTRY(initial_page_table)
- .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
- # if KPMDS == 3
-@@ -663,18 +760,27 @@ ENTRY(initial_page_table)
- # error "Kernel PMDs should be 1, 2 or 3"
- # endif
- .align PAGE_SIZE /* needs to be page-sized too */
-+
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ENTRY(cpu_pgd)
-+ .rept NR_CPUS
-+ .fill 4,8,0
-+ .endr
-+#endif
-+
- #endif
-
- .data
- .balign 4
- ENTRY(stack_start)
-- .long init_thread_union+THREAD_SIZE
-+ .long init_thread_union+THREAD_SIZE-8
-
-+ready: .byte 0
-+
-+.section .rodata,"a",@progbits
- early_recursion_flag:
- .long 0
-
--ready: .byte 0
--
- int_msg:
- .asciz "Unknown interrupt or fault at: %p %p %p\n"
-
-@@ -707,7 +813,7 @@ fault_msg:
- .word 0 # 32 bit align gdt_desc.address
- boot_gdt_descr:
- .word __BOOT_DS+7
-- .long boot_gdt - __PAGE_OFFSET
-+ .long pa(boot_gdt)
-
- .word 0 # 32-bit align idt_desc.address
- idt_descr:
-@@ -718,7 +824,7 @@ idt_descr:
- .word 0 # 32 bit align gdt_desc.address
- ENTRY(early_gdt_descr)
- .word GDT_ENTRIES*8-1
-- .long gdt_page /* Overwritten for secondary CPUs */
-+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
-
- /*
- * The boot_gdt must mirror the equivalent in setup.S and is
-@@ -727,5 +833,65 @@ ENTRY(early_gdt_descr)
- .align L1_CACHE_BYTES
- ENTRY(boot_gdt)
- .fill GDT_ENTRY_BOOT_CS,8,0
-- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
-- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
-+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
-+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
-+
-+ .align PAGE_SIZE_asm
-+ENTRY(cpu_gdt_table)
-+ .rept NR_CPUS
-+ .quad 0x0000000000000000 /* NULL descriptor */
-+ .quad 0x0000000000000000 /* 0x0b reserved */
-+ .quad 0x0000000000000000 /* 0x13 reserved */
-+ .quad 0x0000000000000000 /* 0x1b reserved */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
-+#else
-+ .quad 0x0000000000000000 /* 0x20 unused */
-+#endif
-+
-+ .quad 0x0000000000000000 /* 0x28 unused */
-+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
-+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
-+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
-+ .quad 0x0000000000000000 /* 0x4b reserved */
-+ .quad 0x0000000000000000 /* 0x53 reserved */
-+ .quad 0x0000000000000000 /* 0x5b reserved */
-+
-+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
-+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
-+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
-+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
-+
-+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
-+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
-+
-+ /*
-+ * Segments used for calling PnP BIOS have byte granularity.
-+ * The code segments and data segments have fixed 64k limits,
-+ * the transfer segment sizes are set at run time.
-+ */
-+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
-+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
-+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
-+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
-+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
-+
-+ /*
-+ * The APM segments have byte granularity and their bases
-+ * are set at run time. All have 64k limits.
-+ */
-+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
-+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
-+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
-+
-+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
-+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
-+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
-+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
-+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
-+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
-+
-+ /* Be sure this is zeroed to avoid false validations in Xen */
-+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
-+ .endr
-diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
-index e11e394..3306b50 100644
---- a/arch/x86/kernel/head_64.S
-+++ b/arch/x86/kernel/head_64.S
-@@ -19,6 +19,8 @@
- #include <asm/cache.h>
- #include <asm/processor-flags.h>
- #include <asm/percpu.h>
-+#include <asm/cpufeature.h>
-+#include <asm/alternative-asm.h>
-
- #ifdef CONFIG_PARAVIRT
- #include <asm/asm-offsets.h>
-@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
- L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
- L4_START_KERNEL = pgd_index(__START_KERNEL_map)
- L3_START_KERNEL = pud_index(__START_KERNEL_map)
-+L4_VMALLOC_START = pgd_index(VMALLOC_START)
-+L3_VMALLOC_START = pud_index(VMALLOC_START)
-+L4_VMALLOC_END = pgd_index(VMALLOC_END)
-+L3_VMALLOC_END = pud_index(VMALLOC_END)
-+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
-+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
-
- .text
- __HEAD
-@@ -85,35 +93,25 @@ startup_64:
- */
- addq %rbp, init_level4_pgt + 0(%rip)
- addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
-+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
-+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
-+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
- addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
-
- addq %rbp, level3_ident_pgt + 0(%rip)
-+#ifndef CONFIG_XEN
-+ addq %rbp, level3_ident_pgt + 8(%rip)
-+#endif
-
-- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
-- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
-+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
-
-+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
-+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
-+
-+ addq %rbp, level2_fixmap_pgt + (504*8)(%rip)
-+ addq %rbp, level2_fixmap_pgt + (505*8)(%rip)
- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
--
-- /* Add an Identity mapping if I am above 1G */
-- leaq _text(%rip), %rdi
-- andq $PMD_PAGE_MASK, %rdi
--
-- movq %rdi, %rax
-- shrq $PUD_SHIFT, %rax
-- andq $(PTRS_PER_PUD - 1), %rax
-- jz ident_complete
--
-- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
-- leaq level3_ident_pgt(%rip), %rbx
-- movq %rdx, 0(%rbx, %rax, 8)
--
-- movq %rdi, %rax
-- shrq $PMD_SHIFT, %rax
-- andq $(PTRS_PER_PMD - 1), %rax
-- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
-- leaq level2_spare_pgt(%rip), %rbx
-- movq %rdx, 0(%rbx, %rax, 8)
--ident_complete:
-+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
-
- /*
- * Fixup the kernel text+data virtual addresses. Note that
-@@ -160,8 +158,8 @@ ENTRY(secondary_startup_64)
- * after the boot processor executes this code.
- */
-
-- /* Enable PAE mode and PGE */
-- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
-+ /* Enable PAE mode and PSE/PGE */
-+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
- movq %rax, %cr4
-
- /* Setup early boot stage 4 level pagetables. */
-@@ -183,9 +181,21 @@ ENTRY(secondary_startup_64)
- movl $MSR_EFER, %ecx
- rdmsr
- btsl $_EFER_SCE, %eax /* Enable System Call */
-- btl $20,%edi /* No Execute supported? */
-+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
- jnc 1f
- btsl $_EFER_NX, %eax
-+ cmpb $0, init_level4_pgt_initialized(%rip)
-+ jne 1f
-+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
-+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
-+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
-+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
-+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*504(%rip)
-+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*505(%rip)
-+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
-+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
-+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
-+ movb $1, init_level4_pgt_initialized(%rip)
- 1: wrmsr /* Make changes effective */
-
- /* Setup cr0 */
-@@ -247,6 +257,7 @@ ENTRY(secondary_startup_64)
- * jump. In addition we need to ensure %cs is set so we make this
- * a far return.
- */
-+ pax_set_fptr_mask
- movq initial_code(%rip),%rax
- pushq $0 # fake return address to stop unwinder
- pushq $__KERNEL_CS # set correct cs
-@@ -262,14 +273,14 @@ ENTRY(secondary_startup_64)
- .quad INIT_PER_CPU_VAR(irq_stack_union)
-
- ENTRY(stack_start)
-- .quad init_thread_union+THREAD_SIZE-8
-+ .quad init_thread_union+THREAD_SIZE-16
- .word 0
- __FINITDATA
-
- bad_address:
- jmp bad_address
-
-- .section ".init.text","ax"
-+ __INIT
- #ifdef CONFIG_EARLY_PRINTK
- .globl early_idt_handlers
- early_idt_handlers:
-@@ -314,18 +325,23 @@ ENTRY(early_idt_handler)
- #endif /* EARLY_PRINTK */
- 1: hlt
- jmp 1b
-+ .previous
-
- #ifdef CONFIG_EARLY_PRINTK
-+ __INITDATA
- early_recursion_flag:
- .long 0
-+ .previous
-
-+ .section .rodata,"a",@progbits
- early_idt_msg:
- .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
- early_idt_ripmsg:
- .asciz "RIP %s\n"
-+ .previous
- #endif /* CONFIG_EARLY_PRINTK */
-- .previous
-
-+ .section .rodata,"a",@progbits
- #define NEXT_PAGE(name) \
- .balign PAGE_SIZE; \
- ENTRY(name)
-@@ -338,7 +354,6 @@ ENTRY(name)
- i = i + 1 ; \
- .endr
-
-- .data
- /*
- * This default setting generates an ident mapping at address 0x100000
- * and a mapping for the kernel that precisely maps virtual address
-@@ -349,13 +364,41 @@ NEXT_PAGE(init_level4_pgt)
- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
- .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
-+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
-+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
-+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
-+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
-+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
-+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
- .org init_level4_pgt + L4_START_KERNEL*8, 0
- /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
- .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
-
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+NEXT_PAGE(cpu_pgd)
-+ .rept NR_CPUS
-+ .fill 512,8,0
-+ .endr
-+#endif
-+
- NEXT_PAGE(level3_ident_pgt)
- .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
-+#ifdef CONFIG_XEN
- .fill 511,8,0
-+#else
-+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
-+ .fill 510,8,0
-+#endif
-+
-+NEXT_PAGE(level3_vmalloc_start_pgt)
-+ .fill 512,8,0
-+
-+NEXT_PAGE(level3_vmalloc_end_pgt)
-+ .fill 512,8,0
-+
-+NEXT_PAGE(level3_vmemmap_pgt)
-+ .fill L3_VMEMMAP_START,8,0
-+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
-
- NEXT_PAGE(level3_kernel_pgt)
- .fill L3_START_KERNEL,8,0
-@@ -363,20 +406,29 @@ NEXT_PAGE(level3_kernel_pgt)
- .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
- .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
-
-+NEXT_PAGE(level2_vmemmap_pgt)
-+ .fill 512,8,0
-+
- NEXT_PAGE(level2_fixmap_pgt)
-- .fill 506,8,0
-- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
-- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
-- .fill 5,8,0
-+ .fill 504,8,0
-+ .quad level1_fixmap_pgt - __START_KERNEL_map + 0 * PAGE_SIZE + _PAGE_TABLE
-+ .quad level1_fixmap_pgt - __START_KERNEL_map + 1 * PAGE_SIZE + _PAGE_TABLE
-+ .quad level1_fixmap_pgt - __START_KERNEL_map + 2 * PAGE_SIZE + _PAGE_TABLE
-+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
-+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
-+ .fill 4,8,0
-
- NEXT_PAGE(level1_fixmap_pgt)
-+ .fill 3*512,8,0
-+
-+NEXT_PAGE(level1_vsyscall_pgt)
- .fill 512,8,0
-
--NEXT_PAGE(level2_ident_pgt)
-- /* Since I easily can, map the first 1G.
-+ /* Since I easily can, map the first 2G.
- * Don't set NX because code runs from these pages.
- */
-- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
-+NEXT_PAGE(level2_ident_pgt)
-+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
-
- NEXT_PAGE(level2_kernel_pgt)
- /*
-@@ -389,35 +441,59 @@ NEXT_PAGE(level2_kernel_pgt)
- * If you want to increase this then increase MODULES_VADDR
- * too.)
- */
-- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
-- KERNEL_IMAGE_SIZE/PMD_SIZE)
--
--NEXT_PAGE(level2_spare_pgt)
-- .fill 512, 8, 0
-+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
-
- #undef PMDS
- #undef NEXT_PAGE
-
-- .data
-+ .align PAGE_SIZE
-+ENTRY(cpu_gdt_table)
-+ .rept NR_CPUS
-+ .quad 0x0000000000000000 /* NULL descriptor */
-+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
-+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
-+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
-+ .quad 0x00cffb000000ffff /* __USER32_CS */
-+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
-+ .quad 0x00affb000000ffff /* __USER_CS */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
-+#else
-+ .quad 0x0 /* unused */
-+#endif
-+
-+ .quad 0,0 /* TSS */
-+ .quad 0,0 /* LDT */
-+ .quad 0,0,0 /* three TLS descriptors */
-+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
-+ /* asm/segment.h:GDT_ENTRIES must match this */
-+
-+ /* zero the remaining page */
-+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
-+ .endr
-+
- .align 16
- .globl early_gdt_descr
- early_gdt_descr:
- .word GDT_ENTRIES*8-1
- early_gdt_descr_base:
-- .quad INIT_PER_CPU_VAR(gdt_page)
-+ .quad cpu_gdt_table
-
- ENTRY(phys_base)
- /* This must match the first entry in level2_kernel_pgt */
- .quad 0x0000000000000000
-
-+init_level4_pgt_initialized:
-+ .byte 0x00
-+
- #include "../../x86/xen/xen-head.S"
--
-- .section .bss, "aw", @nobits
-+
-+ .section .rodata,"a",@progbits
- .align L1_CACHE_BYTES
- ENTRY(idt_table)
-- .skip IDT_ENTRIES * 16
-+ .fill 512,8,0
-
-- __PAGE_ALIGNED_BSS
- .align PAGE_SIZE
- ENTRY(empty_zero_page)
- .skip PAGE_SIZE
-diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
-index 9c3bd4a..e1d9b35 100644
---- a/arch/x86/kernel/i386_ksyms_32.c
-+++ b/arch/x86/kernel/i386_ksyms_32.c
-@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
- EXPORT_SYMBOL(cmpxchg8b_emu);
- #endif
-
-+EXPORT_SYMBOL_GPL(cpu_gdt_table);
-+
- /* Networking helper routines. */
- EXPORT_SYMBOL(csum_partial_copy_generic);
-+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
-+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
-
- EXPORT_SYMBOL(__get_user_1);
- EXPORT_SYMBOL(__get_user_2);
-@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
-
- EXPORT_SYMBOL(csum_partial);
- EXPORT_SYMBOL(empty_zero_page);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
-+#endif
-diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
-index 6104852..47826ae 100644
---- a/arch/x86/kernel/i8259.c
-+++ b/arch/x86/kernel/i8259.c
-@@ -111,7 +111,7 @@ static int i8259A_irq_pending(unsigned int irq)
- static void make_8259A_irq(unsigned int irq)
- {
- disable_irq_nosync(irq);
-- io_apic_irqs &= ~(1<<irq);
-+ io_apic_irqs &= ~(1UL<<irq);
- irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
- i8259A_chip.name);
- enable_irq(irq);
-@@ -210,7 +210,7 @@ spurious_8259A_irq:
- "spurious 8259A interrupt: IRQ%d.\n", irq);
- spurious_irq_mask |= irqmask;
- }
-- atomic_inc(&irq_err_count);
-+ atomic_inc_unchecked(&irq_err_count);
- /*
- * Theoretically we do not have to handle this IRQ,
- * but in Linux this does not cause problems and is
-@@ -334,14 +334,16 @@ static void init_8259A(int auto_eoi)
- /* (slave's support for AEOI in flat mode is to be investigated) */
- outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
-
-+ pax_open_kernel();
- if (auto_eoi)
- /*
- * In AEOI mode we just have to mask the interrupt
- * when acking.
- */
-- i8259A_chip.irq_mask_ack = disable_8259A_irq;
-+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
- else
-- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
-+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
-+ pax_close_kernel();
-
- udelay(100); /* wait for 8259A to initialize */
-
-diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
-index 43e9ccf..44ccf6f 100644
---- a/arch/x86/kernel/init_task.c
-+++ b/arch/x86/kernel/init_task.c
-@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
- * way process stacks are handled. This is done by having a special
- * "init_task" linker map entry..
- */
--union thread_union init_thread_union __init_task_data =
-- { INIT_THREAD_INFO(init_task) };
-+union thread_union init_thread_union __init_task_data;
-
- /*
- * Initial task structure.
-@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
- * section. Since TSS's are completely CPU-local, we want them
- * on exact cacheline boundaries, to eliminate cacheline ping-pong.
- */
--DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
--
-+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
-+EXPORT_SYMBOL(init_tss);
-diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
-index a979b5b..1d6db75 100644
---- a/arch/x86/kernel/io_delay.c
-+++ b/arch/x86/kernel/io_delay.c
-@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
- * Quirk table for systems that misbehave (lock up, etc.) if port
- * 0x80 is used:
- */
--static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
-+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
- {
- .callback = dmi_io_delay_0xed_port,
- .ident = "Compaq Presario V6000",
-diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
-index 8c96897..060ff2d 100644
---- a/arch/x86/kernel/ioport.c
-+++ b/arch/x86/kernel/ioport.c
-@@ -6,6 +6,7 @@
- #include <linux/sched.h>
- #include <linux/kernel.h>
- #include <linux/capability.h>
-+#include <linux/security.h>
- #include <linux/errno.h>
- #include <linux/types.h>
- #include <linux/ioport.h>
-@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
- return -EINVAL;
- if (turn_on && !capable(CAP_SYS_RAWIO))
- return -EPERM;
-+#ifdef CONFIG_GRKERNSEC_IO
-+ if (turn_on && grsec_disable_privio) {
-+ gr_handle_ioperm();
-+ return -ENODEV;
-+ }
-+#endif
-
- /*
- * If it's the first ioperm() call in this thread's lifetime, set the
-@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
- * because the ->io_bitmap_max value must match the bitmap
- * contents:
- */
-- tss = &per_cpu(init_tss, get_cpu());
-+ tss = init_tss + get_cpu();
-
- if (turn_on)
- bitmap_clear(t->io_bitmap_ptr, from, num);
-@@ -104,6 +111,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
- if (level > old) {
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-+#ifdef CONFIG_GRKERNSEC_IO
-+ if (grsec_disable_privio) {
-+ gr_handle_iopl();
-+ return -ENODEV;
-+ }
-+#endif
- }
- regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
- t->iopl = level << 12;
-diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
-index 687637b..3e626d9 100644
---- a/arch/x86/kernel/irq.c
-+++ b/arch/x86/kernel/irq.c
-@@ -18,7 +18,7 @@
- #include <asm/mce.h>
- #include <asm/hw_irq.h>
-
--atomic_t irq_err_count;
-+atomic_unchecked_t irq_err_count;
-
- /* Function pointer for generic interrupt vector handling */
- void (*x86_platform_ipi_callback)(void) = NULL;
-@@ -117,9 +117,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
- seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
- seq_printf(p, " Machine check polls\n");
- #endif
-- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
-+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
- #if defined(CONFIG_X86_IO_APIC)
-- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
-+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
- #endif
- return 0;
- }
-@@ -159,7 +159,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
-
- u64 arch_irq_stat(void)
- {
-- u64 sum = atomic_read(&irq_err_count);
-+ u64 sum = atomic_read_unchecked(&irq_err_count);
- return sum;
- }
-
-diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
-index 7209070..ada4d63 100644
---- a/arch/x86/kernel/irq_32.c
-+++ b/arch/x86/kernel/irq_32.c
-@@ -28,6 +28,9 @@ DEFINE_PER_CPU(struct pt_regs *, irq_regs);
- EXPORT_PER_CPU_SYMBOL(irq_regs);
-
- #ifdef CONFIG_DEBUG_STACKOVERFLOW
-+
-+extern void gr_handle_kernel_exploit(void);
-+
- /* Debugging check for stack overflow: is there less than 1KB free? */
- static int check_stack_overflow(void)
- {
-@@ -36,13 +39,14 @@ static int check_stack_overflow(void)
- __asm__ __volatile__("andl %%esp,%0" :
- "=r" (sp) : "0" (THREAD_SIZE - 1));
-
-- return sp < (sizeof(struct thread_info) + STACK_WARN);
-+ return sp < STACK_WARN;
- }
-
- static void print_stack_overflow(void)
- {
- printk(KERN_WARNING "low stack detected by irq handler\n");
- dump_stack();
-+ gr_handle_kernel_exploit();
- }
-
- #else
-@@ -54,8 +58,8 @@ static inline void print_stack_overflow(void) { }
- * per-CPU IRQ handling contexts (thread information and stack)
- */
- union irq_ctx {
-- struct thread_info tinfo;
-- u32 stack[THREAD_SIZE/sizeof(u32)];
-+ unsigned long previous_esp;
-+ u32 stack[THREAD_SIZE/sizeof(u32)];
- } __attribute__((aligned(THREAD_SIZE)));
-
- static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
-@@ -75,10 +79,9 @@ static void call_on_stack(void *func, void *stack)
- static inline int
- execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
- {
-- union irq_ctx *curctx, *irqctx;
-+ union irq_ctx *irqctx;
- u32 *isp, arg1, arg2;
-
-- curctx = (union irq_ctx *) current_thread_info();
- irqctx = __this_cpu_read(hardirq_ctx);
-
- /*
-@@ -87,21 +90,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
- * handler) we can't do that and just have to keep using the
- * current stack (which is the irq stack already after all)
- */
-- if (unlikely(curctx == irqctx))
-+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
- return 0;
-
- /* build the stack frame on the IRQ stack */
-- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
-- irqctx->tinfo.task = curctx->tinfo.task;
-- irqctx->tinfo.previous_esp = current_stack_pointer;
-+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
-+ irqctx->previous_esp = current_stack_pointer;
-
-- /*
-- * Copy the softirq bits in preempt_count so that the
-- * softirq checks work in the hardirq context.
-- */
-- irqctx->tinfo.preempt_count =
-- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
-- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ __set_fs(MAKE_MM_SEG(0));
-+#endif
-
- if (unlikely(overflow))
- call_on_stack(print_stack_overflow, isp);
-@@ -113,6 +111,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
- : "0" (irq), "1" (desc), "2" (isp),
- "D" (desc->handle_irq)
- : "memory", "cc", "ecx");
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ __set_fs(current_thread_info()->addr_limit);
-+#endif
-+
- return 1;
- }
-
-@@ -121,29 +124,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
- */
- void __cpuinit irq_ctx_init(int cpu)
- {
-- union irq_ctx *irqctx;
--
- if (per_cpu(hardirq_ctx, cpu))
- return;
-
-- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
-- THREAD_FLAGS,
-- THREAD_ORDER));
-- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
-- irqctx->tinfo.cpu = cpu;
-- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
-- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
--
-- per_cpu(hardirq_ctx, cpu) = irqctx;
--
-- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
-- THREAD_FLAGS,
-- THREAD_ORDER));
-- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
-- irqctx->tinfo.cpu = cpu;
-- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
--
-- per_cpu(softirq_ctx, cpu) = irqctx;
-+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
-+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
-
- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
- cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
-@@ -152,7 +137,6 @@ void __cpuinit irq_ctx_init(int cpu)
- asmlinkage void do_softirq(void)
- {
- unsigned long flags;
-- struct thread_info *curctx;
- union irq_ctx *irqctx;
- u32 *isp;
-
-@@ -162,15 +146,22 @@ asmlinkage void do_softirq(void)
- local_irq_save(flags);
-
- if (local_softirq_pending()) {
-- curctx = current_thread_info();
- irqctx = __this_cpu_read(softirq_ctx);
-- irqctx->tinfo.task = curctx->task;
-- irqctx->tinfo.previous_esp = current_stack_pointer;
-+ irqctx->previous_esp = current_stack_pointer;
-
- /* build the stack frame on the softirq stack */
-- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
-+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ __set_fs(MAKE_MM_SEG(0));
-+#endif
-
- call_on_stack(__do_softirq, isp);
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ __set_fs(current_thread_info()->addr_limit);
-+#endif
-+
- /*
- * Shouldn't happen, we returned above if in_interrupt():
- */
-diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
-index 69bca46..e38f147 100644
---- a/arch/x86/kernel/irq_64.c
-+++ b/arch/x86/kernel/irq_64.c
-@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
- DEFINE_PER_CPU(struct pt_regs *, irq_regs);
- EXPORT_PER_CPU_SYMBOL(irq_regs);
-
-+extern void gr_handle_kernel_exploit(void);
-+
- /*
- * Probabilistic stack overflow check:
- *
-@@ -38,16 +40,17 @@ static inline void stack_overflow_check(struct pt_regs *regs)
- #ifdef CONFIG_DEBUG_STACKOVERFLOW
- u64 curbase = (u64)task_stack_page(current);
-
-- if (user_mode_vm(regs))
-+ if (user_mode(regs))
- return;
-
-- WARN_ONCE(regs->sp >= curbase &&
-- regs->sp <= curbase + THREAD_SIZE &&
-- regs->sp < curbase + sizeof(struct thread_info) +
-- sizeof(struct pt_regs) + 128,
--
-- "do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
-+ if (regs->sp >= curbase &&
-+ regs->sp <= curbase + THREAD_SIZE &&
-+ regs->sp < curbase + sizeof(struct thread_info) +
-+ sizeof(struct pt_regs) + 128) {
-+ WARN_ONCE(1, "do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
- current->comm, curbase, regs->sp);
-+ gr_handle_kernel_exploit();
-+ }
- #endif
- }
-
-diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
-index 2f45c4c..3f51a0c 100644
---- a/arch/x86/kernel/kgdb.c
-+++ b/arch/x86/kernel/kgdb.c
-@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
- #ifdef CONFIG_X86_32
- switch (regno) {
- case GDB_SS:
-- if (!user_mode_vm(regs))
-+ if (!user_mode(regs))
- *(unsigned long *)mem = __KERNEL_DS;
- break;
- case GDB_SP:
-- if (!user_mode_vm(regs))
-+ if (!user_mode(regs))
- *(unsigned long *)mem = kernel_stack_pointer(regs);
- break;
- case GDB_GS:
-@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
- bp->attr.bp_addr = breakinfo[breakno].addr;
- bp->attr.bp_len = breakinfo[breakno].len;
- bp->attr.bp_type = breakinfo[breakno].type;
-- info->address = breakinfo[breakno].addr;
-+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
-+ info->address = ktla_ktva(breakinfo[breakno].addr);
-+ else
-+ info->address = breakinfo[breakno].addr;
- info->len = breakinfo[breakno].len;
- info->type = breakinfo[breakno].type;
- val = arch_install_hw_breakpoint(bp);
-@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
- case 'k':
- /* clear the trace bit */
- linux_regs->flags &= ~X86_EFLAGS_TF;
-- atomic_set(&kgdb_cpu_doing_single_step, -1);
-+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
-
- /* set the trace bit if we're stepping */
- if (remcomInBuffer[0] == 's') {
- linux_regs->flags |= X86_EFLAGS_TF;
-- atomic_set(&kgdb_cpu_doing_single_step,
-+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
- raw_smp_processor_id());
- }
-
-@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
-
- switch (cmd) {
- case DIE_DEBUG:
-- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
-+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
- if (user_mode(regs))
- return single_step_cont(regs, args);
- break;
-@@ -748,11 +751,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
- char opc[BREAK_INSTR_SIZE];
-
- bpt->type = BP_BREAKPOINT;
-- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
-+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
- BREAK_INSTR_SIZE);
- if (err)
- return err;
-- err = probe_kernel_write((char *)bpt->bpt_addr,
-+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
- arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
- #ifdef CONFIG_DEBUG_RODATA
- if (!err)
-@@ -765,7 +768,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
- return -EBUSY;
- text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
- BREAK_INSTR_SIZE);
-- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
-+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
- if (err)
- return err;
- if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
-@@ -790,13 +793,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
- if (mutex_is_locked(&text_mutex))
- goto knl_write;
- text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
-- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
-+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
- if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
- goto knl_write;
- return err;
- knl_write:
- #endif /* CONFIG_DEBUG_RODATA */
-- return probe_kernel_write((char *)bpt->bpt_addr,
-+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
- (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
- }
-
-diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
-index 083848f..69321f0 100644
---- a/arch/x86/kernel/kprobes.c
-+++ b/arch/x86/kernel/kprobes.c
-@@ -117,9 +117,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
- s32 raddr;
- } __attribute__((packed)) *insn;
-
-- insn = (struct __arch_relative_insn *)from;
-+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
-+
-+ pax_open_kernel();
- insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
- insn->op = op;
-+ pax_close_kernel();
- }
-
- /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
-@@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
- kprobe_opcode_t opcode;
- kprobe_opcode_t *orig_opcodes = opcodes;
-
-- if (search_exception_tables((unsigned long)opcodes))
-+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
- return 0; /* Page fault may occur on this address. */
-
- retry:
-@@ -228,7 +231,7 @@ static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
- * for the first byte, we can recover the original instruction
- * from it and kp->opcode.
- */
-- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
- buf[0] = kp->opcode;
- return 0;
- }
-@@ -264,7 +267,7 @@ static int __kprobes can_probe(unsigned long paddr)
- * recover it.
- */
- return 0;
-- kernel_insn_init(&insn, buf);
-+ kernel_insn_init(&insn, ktva_ktla(buf));
- }
- insn_get_length(&insn);
- addr += insn.length;
-@@ -313,11 +316,13 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
- (unsigned long)src);
- if (ret)
- return 0;
-- kernel_insn_init(&insn, buf);
-+ kernel_insn_init(&insn, ktva_ktla(buf));
- }
- }
- insn_get_length(&insn);
-+ pax_open_kernel();
- memcpy(dest, insn.kaddr, insn.length);
-+ pax_close_kernel();
-
- #ifdef CONFIG_X86_64
- if (insn_rip_relative(&insn)) {
-@@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
- (u8 *) dest;
- BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
- disp = (u8 *) dest + insn_offset_displacement(&insn);
-+ pax_open_kernel();
- *(s32 *) disp = (s32) newdisp;
-+ pax_close_kernel();
- }
- #endif
- return insn.length;
-@@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
- */
- __copy_instruction(p->ainsn.insn, p->addr, 0);
-
-- if (can_boost(p->addr))
-+ if (can_boost(ktla_ktva(p->addr)))
- p->ainsn.boostable = 0;
- else
- p->ainsn.boostable = -1;
-
-- p->opcode = *p->addr;
-+ p->opcode = *(ktla_ktva(p->addr));
- }
-
- int __kprobes arch_prepare_kprobe(struct kprobe *p)
-@@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
- * nor set current_kprobe, because it doesn't use single
- * stepping.
- */
-- regs->ip = (unsigned long)p->ainsn.insn;
-+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
- preempt_enable_no_resched();
- return;
- }
-@@ -494,9 +501,9 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
- regs->flags &= ~X86_EFLAGS_IF;
- /* single step inline if the instruction is an int3 */
- if (p->opcode == BREAKPOINT_INSTRUCTION)
-- regs->ip = (unsigned long)p->addr;
-+ regs->ip = ktla_ktva((unsigned long)p->addr);
- else
-- regs->ip = (unsigned long)p->ainsn.insn;
-+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
- }
-
- /*
-@@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
- setup_singlestep(p, regs, kcb, 0);
- return 1;
- }
-- } else if (*addr != BREAKPOINT_INSTRUCTION) {
-+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
- /*
- * The breakpoint instruction was removed right
- * after we hit it. Another cpu has removed
-@@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
- " movq %rax, 152(%rsp)\n"
- RESTORE_REGS_STRING
- " popfq\n"
-+#ifdef KERNEXEC_PLUGIN
-+ " btsq $63,(%rsp)\n"
-+#endif
- #else
- " pushf\n"
- SAVE_REGS_STRING
-@@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
- struct pt_regs *regs, struct kprobe_ctlblk *kcb)
- {
- unsigned long *tos = stack_addr(regs);
-- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
-+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
- unsigned long orig_ip = (unsigned long)p->addr;
- kprobe_opcode_t *insn = p->ainsn.insn;
-
-@@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
- struct die_args *args = data;
- int ret = NOTIFY_DONE;
-
-- if (args->regs && user_mode_vm(args->regs))
-+ if (args->regs && user_mode(args->regs))
- return ret;
-
- switch (val) {
-@@ -1130,6 +1140,7 @@ static void __kprobes synthesize_relcall(void *from, void *to)
- static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr,
- unsigned long val)
- {
-+ pax_open_kernel();
- #ifdef CONFIG_X86_64
- *addr++ = 0x48;
- *addr++ = 0xbf;
-@@ -1137,6 +1148,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr,
- *addr++ = 0xb8;
- #endif
- *(unsigned long *)addr = val;
-+ pax_close_kernel();
- }
-
- static void __used __kprobes kprobes_optinsn_template_holder(void)
-@@ -1317,7 +1329,7 @@ static int __kprobes can_optimize(unsigned long paddr)
- ret = recover_probed_instruction(buf, addr);
- if (ret)
- return 0;
-- kernel_insn_init(&insn, buf);
-+ kernel_insn_init(&insn, ktva_ktla(buf));
- }
- insn_get_length(&insn);
- /* Recover address */
-@@ -1394,7 +1406,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
- * Verify if the address gap is in 2GB range, because this uses
- * a relative jump.
- */
-- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
-+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
- if (abs(rel) > 0x7fffffff)
- return -ERANGE;
-
-@@ -1409,16 +1421,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
- op->optinsn.size = ret;
-
- /* Copy arch-dep-instance from template */
-- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
-+ pax_open_kernel();
-+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
-+ pax_close_kernel();
-
- /* Set probe information */
- synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
-
- /* Set probe function call */
-- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
-+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
-
- /* Set returning jmp instruction at the tail of out-of-line buffer */
-- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
-+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
- (u8 *)op->kp.addr + op->optinsn.size);
-
- flush_icache_range((unsigned long) buf,
-@@ -1441,7 +1455,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
- ((long)op->kp.addr + RELATIVEJUMP_SIZE));
-
- /* Backup instructions which will be replaced by jump address */
-- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
-+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
- RELATIVE_ADDR_SIZE);
-
- insn_buf[0] = RELATIVEJUMP_OPCODE;
-@@ -1540,7 +1554,7 @@ static int __kprobes setup_detour_execution(struct kprobe *p,
- /* This kprobe is really able to run optimized path. */
- op = container_of(p, struct optimized_kprobe, kp);
- /* Detour through copied instructions */
-- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
-+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
- if (!reenter)
- reset_current_kprobe();
- preempt_enable_no_resched();
-diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
-index 4b6701e..1a3dcdb 100644
---- a/arch/x86/kernel/kvm.c
-+++ b/arch/x86/kernel/kvm.c
-@@ -444,6 +444,7 @@ static void __init paravirt_ops_setup(void)
- pv_mmu_ops.set_pud = kvm_set_pud;
- #if PAGETABLE_LEVELS == 4
- pv_mmu_ops.set_pgd = kvm_set_pgd;
-+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
- #endif
- #endif
- pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
-@@ -586,7 +587,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
-+static struct notifier_block kvm_cpu_notifier = {
- .notifier_call = kvm_cpu_notify,
- };
- #endif
-diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
-index 0a8e65e..563640b 100644
---- a/arch/x86/kernel/ldt.c
-+++ b/arch/x86/kernel/ldt.c
-@@ -11,6 +11,7 @@
- #include <linux/sched.h>
- #include <linux/string.h>
- #include <linux/mm.h>
-+#include <linux/ratelimit.h>
- #include <linux/smp.h>
- #include <linux/vmalloc.h>
- #include <linux/uaccess.h>
-@@ -21,6 +22,14 @@
- #include <asm/mmu_context.h>
- #include <asm/syscalls.h>
-
-+#ifdef CONFIG_GRKERNSEC
-+int sysctl_modify_ldt __read_only = 0;
-+#elif defined(CONFIG_DEFAULT_MODIFY_LDT_SYSCALL)
-+int sysctl_modify_ldt __read_only = 1;
-+#else
-+int sysctl_modify_ldt __read_only = 0;
-+#endif
-+
- #ifdef CONFIG_SMP
- static void flush_ldt(void *current_mm)
- {
-@@ -67,13 +76,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
- if (reload) {
- #ifdef CONFIG_SMP
- preempt_disable();
-- load_LDT(pc);
-+ load_LDT_nolock(pc);
- if (!cpumask_equal(mm_cpumask(current->mm),
- cpumask_of(smp_processor_id())))
- smp_call_function(flush_ldt, current->mm, 1);
- preempt_enable();
- #else
-- load_LDT(pc);
-+ load_LDT_nolock(pc);
- #endif
- }
- if (oldsize) {
-@@ -95,7 +104,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
- return err;
-
- for (i = 0; i < old->size; i++)
-- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
-+ write_ldt_entry(new->ldt, i, old->ldt + i);
- return 0;
- }
-
-@@ -116,6 +125,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
- retval = copy_ldt(&mm->context, &old_mm->context);
- mutex_unlock(&old_mm->context.lock);
- }
-+
-+ if (tsk == current) {
-+ mm->context.vdso = 0;
-+
-+#ifdef CONFIG_X86_32
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+ mm->context.user_cs_base = 0UL;
-+ mm->context.user_cs_limit = ~0UL;
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
-+ cpumask_clear(&mm->context.cpu_user_cs_mask);
-+#endif
-+
-+#endif
-+#endif
-+
-+ }
-+
- return retval;
- }
-
-@@ -230,6 +257,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
- }
- }
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
-+ error = -EINVAL;
-+ goto out_unlock;
-+ }
-+#endif
-+
- if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
- error = -EINVAL;
- goto out_unlock;
-@@ -255,6 +289,14 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr,
- {
- int ret = -ENOSYS;
-
-+ if (!sysctl_modify_ldt) {
-+ printk_ratelimited(KERN_INFO
-+ "Denied a call to modify_ldt() from %s[%d] (uid: %d)."
-+ " Adjust sysctl if this was not an exploit attempt.\n",
-+ current->comm, task_pid_nr(current), current_uid());
-+ return ret;
-+ }
-+
- switch (func) {
- case 0:
- ret = read_ldt(ptr, bytecount);
-diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
-index a3fa43b..8966f4c 100644
---- a/arch/x86/kernel/machine_kexec_32.c
-+++ b/arch/x86/kernel/machine_kexec_32.c
-@@ -27,7 +27,7 @@
- #include <asm/cacheflush.h>
- #include <asm/debugreg.h>
-
--static void set_idt(void *newidt, __u16 limit)
-+static void set_idt(struct desc_struct *newidt, __u16 limit)
- {
- struct desc_ptr curidt;
-
-@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
- }
-
-
--static void set_gdt(void *newgdt, __u16 limit)
-+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
- {
- struct desc_ptr curgdt;
-
-@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
- }
-
- control_page = page_address(image->control_code_page);
-- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
-+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
-
- relocate_kernel_ptr = control_page;
- page_list[PA_CONTROL_PAGE] = __pa(control_page);
-diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
-index 29c95d7..97b7b1b 100644
---- a/arch/x86/kernel/microcode_core.c
-+++ b/arch/x86/kernel/microcode_core.c
-@@ -507,7 +507,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
- return NOTIFY_OK;
- }
-
--static struct notifier_block __refdata mc_cpu_notifier = {
-+static struct notifier_block mc_cpu_notifier = {
- .notifier_call = mc_cpu_callback,
- };
-
-diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
-index 3ca42d0..7cff8cc 100644
---- a/arch/x86/kernel/microcode_intel.c
-+++ b/arch/x86/kernel/microcode_intel.c
-@@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
-
- static int get_ucode_user(void *to, const void *from, size_t n)
- {
-- return copy_from_user(to, from, n);
-+ return copy_from_user(to, (const void __force_user *)from, n);
- }
-
- static enum ucode_state
- request_microcode_user(int cpu, const void __user *buf, size_t size)
- {
-- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
-+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
- }
-
- static void microcode_fini_cpu(int cpu)
-diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
-index 925179f..b151b74 100644
---- a/arch/x86/kernel/module.c
-+++ b/arch/x86/kernel/module.c
-@@ -36,15 +36,62 @@
- #define DEBUGP(fmt...)
- #endif
-
--void *module_alloc(unsigned long size)
-+static inline void *__module_alloc(unsigned long size, pgprot_t prot) __size_overflow(1);
-+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
- {
-- if (PAGE_ALIGN(size) > MODULES_LEN)
-+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
- return NULL;
- return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
-+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
- -1, __builtin_return_address(0));
- }
-
-+void *module_alloc(unsigned long size)
-+{
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ return __module_alloc(size, PAGE_KERNEL);
-+#else
-+ return __module_alloc(size, PAGE_KERNEL_EXEC);
-+#endif
-+
-+}
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+#ifdef CONFIG_X86_32
-+void *module_alloc_exec(unsigned long size) __size_overflow(1);
-+void *module_alloc_exec(unsigned long size)
-+{
-+ struct vm_struct *area;
-+
-+ if (size == 0)
-+ return NULL;
-+
-+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
-+ return area ? area->addr : NULL;
-+}
-+EXPORT_SYMBOL(module_alloc_exec);
-+
-+void module_free_exec(struct module *mod, void *module_region)
-+{
-+ vunmap(module_region);
-+}
-+EXPORT_SYMBOL(module_free_exec);
-+#else
-+void module_free_exec(struct module *mod, void *module_region)
-+{
-+ module_free(mod, module_region);
-+}
-+EXPORT_SYMBOL(module_free_exec);
-+
-+void *module_alloc_exec(unsigned long size)
-+{
-+ return __module_alloc(size, PAGE_KERNEL_RX);
-+}
-+EXPORT_SYMBOL(module_alloc_exec);
-+#endif
-+#endif
-+
- #ifdef CONFIG_X86_32
- int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
-@@ -55,14 +102,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
- unsigned int i;
- Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
- Elf32_Sym *sym;
-- uint32_t *location;
-+ uint32_t *plocation, location;
-
- DEBUGP("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
- for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
- /* This is where to make the change */
-- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
-- + rel[i].r_offset;
-+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
-+ location = (uint32_t)plocation;
-+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
-+ plocation = ktla_ktva((void *)plocation);
- /* This is the symbol it is referring to. Note that all
- undefined symbols have been resolved. */
- sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
-@@ -71,11 +120,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
- switch (ELF32_R_TYPE(rel[i].r_info)) {
- case R_386_32:
- /* We add the value into the location given */
-- *location += sym->st_value;
-+ pax_open_kernel();
-+ *plocation += sym->st_value;
-+ pax_close_kernel();
- break;
- case R_386_PC32:
- /* Add the value, subtract its postition */
-- *location += sym->st_value - (uint32_t)location;
-+ pax_open_kernel();
-+ *plocation += sym->st_value - location;
-+ pax_close_kernel();
- break;
- default:
- printk(KERN_ERR "module %s: Unknown relocation: %u\n",
-@@ -120,21 +173,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
- case R_X86_64_NONE:
- break;
- case R_X86_64_64:
-+ pax_open_kernel();
- *(u64 *)loc = val;
-+ pax_close_kernel();
- break;
- case R_X86_64_32:
-+ pax_open_kernel();
- *(u32 *)loc = val;
-+ pax_close_kernel();
- if (val != *(u32 *)loc)
- goto overflow;
- break;
- case R_X86_64_32S:
-+ pax_open_kernel();
- *(s32 *)loc = val;
-+ pax_close_kernel();
- if ((s64)val != *(s32 *)loc)
- goto overflow;
- break;
- case R_X86_64_PC32:
- val -= (u64)loc;
-+ pax_open_kernel();
- *(u32 *)loc = val;
-+ pax_close_kernel();
-+
- #if 0
- if ((s64)val != *(s32 *)loc)
- goto overflow;
-diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
-index f7d1a64..28afc4a 100644
---- a/arch/x86/kernel/msr.c
-+++ b/arch/x86/kernel/msr.c
-@@ -37,6 +37,7 @@
- #include <linux/notifier.h>
- #include <linux/uaccess.h>
- #include <linux/gfp.h>
-+#include <linux/grsecurity.h>
-
- #include <asm/processor.h>
- #include <asm/msr.h>
-@@ -104,6 +105,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
- int err = 0;
- ssize_t bytes = 0;
-
-+#ifdef CONFIG_GRKERNSEC_KMEM
-+ gr_handle_msr_write();
-+ return -EPERM;
-+#endif
-+
- if (count % 8)
- return -EINVAL; /* Invalid chunk size */
-
-@@ -151,6 +157,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
- err = -EBADF;
- break;
- }
-+#ifdef CONFIG_GRKERNSEC_KMEM
-+ gr_handle_msr_write();
-+ return -EPERM;
-+#endif
- if (copy_from_user(&regs, uregs, sizeof regs)) {
- err = -EFAULT;
- break;
-@@ -235,7 +245,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
- return notifier_from_errno(err);
- }
-
--static struct notifier_block __refdata msr_class_cpu_notifier = {
-+static struct notifier_block msr_class_cpu_notifier = {
- .notifier_call = msr_class_cpu_callback,
- };
-
-diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
-index e88f37b..45bb4ff 100644
---- a/arch/x86/kernel/nmi.c
-+++ b/arch/x86/kernel/nmi.c
-@@ -126,9 +126,9 @@ static int __setup_nmi(unsigned int type, struct nmiaction *action)
- * event confuses some handlers (kdump uses this flag)
- */
- if (action->flags & NMI_FLAG_FIRST)
-- list_add_rcu(&action->list, &desc->head);
-+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
- else
-- list_add_tail_rcu(&action->list, &desc->head);
-+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
-
- spin_unlock_irqrestore(&desc->lock, flags);
- return 0;
-@@ -150,7 +150,7 @@ static struct nmiaction *__free_nmi(unsigned int type, const char *name)
- if (!strcmp(n->name, name)) {
- WARN(in_nmi(),
- "Trying to free NMI (%s) from NMI context!\n", n->name);
-- list_del_rcu(&n->list);
-+ pax_list_del_rcu((struct list_head *)&n->list);
- break;
- }
- }
-@@ -408,6 +408,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
- dotraplinkage notrace __kprobes void
- do_nmi(struct pt_regs *regs, long error_code)
- {
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ if (!user_mode(regs)) {
-+ unsigned long cs = regs->cs & 0xFFFF;
-+ unsigned long ip = ktva_ktla(regs->ip);
-+
-+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
-+ regs->ip = ip;
-+ }
-+#endif
-+
- nmi_enter();
-
- inc_irq_stat(__nmi_count);
-diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
-index 676b8c7..870ba04 100644
---- a/arch/x86/kernel/paravirt-spinlocks.c
-+++ b/arch/x86/kernel/paravirt-spinlocks.c
-@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
- arch_spin_lock(lock);
- }
-
--struct pv_lock_ops pv_lock_ops = {
-+struct pv_lock_ops pv_lock_ops __read_only = {
- #ifdef CONFIG_SMP
- .spin_is_locked = __ticket_spin_is_locked,
- .spin_is_contended = __ticket_spin_is_contended,
-diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
-index 84c938f..fa25421 100644
---- a/arch/x86/kernel/paravirt.c
-+++ b/arch/x86/kernel/paravirt.c
-@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
- {
- return x;
- }
-+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
-+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
-+#endif
-
- void __init default_banner(void)
- {
-@@ -144,16 +147,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
-
- if (opfunc == NULL)
- /* If there's no function, patch it with a ud2a (BUG) */
-- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
-- else if (opfunc == _paravirt_nop)
-+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
-+ else if (opfunc == (void *)_paravirt_nop)
- /* If the operation is a nop, then nop the callsite */
- ret = paravirt_patch_nop();
-
- /* identity functions just return their single argument */
-- else if (opfunc == _paravirt_ident_32)
-+ else if (opfunc == (void *)_paravirt_ident_32)
- ret = paravirt_patch_ident_32(insnbuf, len);
-- else if (opfunc == _paravirt_ident_64)
-+ else if (opfunc == (void *)_paravirt_ident_64)
- ret = paravirt_patch_ident_64(insnbuf, len);
-+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
-+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
-+ ret = paravirt_patch_ident_64(insnbuf, len);
-+#endif
-
- else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
- type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
-@@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
- if (insn_len > len || start == NULL)
- insn_len = len;
- else
-- memcpy(insnbuf, start, insn_len);
-+ memcpy(insnbuf, ktla_ktva(start), insn_len);
-
- return insn_len;
- }
-@@ -302,7 +309,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
- return percpu_read(paravirt_lazy_mode);
- }
-
--struct pv_info pv_info = {
-+struct pv_info pv_info __read_only = {
- .name = "bare hardware",
- .paravirt_enabled = 0,
- .kernel_rpl = 0,
-@@ -313,16 +320,16 @@ struct pv_info pv_info = {
- #endif
- };
-
--struct pv_init_ops pv_init_ops = {
-+struct pv_init_ops pv_init_ops __read_only = {
- .patch = native_patch,
- };
-
--struct pv_time_ops pv_time_ops = {
-+struct pv_time_ops pv_time_ops __read_only = {
- .sched_clock = native_sched_clock,
- .steal_clock = native_steal_clock,
- };
-
--struct pv_irq_ops pv_irq_ops = {
-+struct pv_irq_ops pv_irq_ops __read_only = {
- .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
- .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
- .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
-@@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
- #endif
- };
-
--struct pv_cpu_ops pv_cpu_ops = {
-+struct pv_cpu_ops pv_cpu_ops __read_only = {
- .cpuid = native_cpuid,
- .get_debugreg = native_get_debugreg,
- .set_debugreg = native_set_debugreg,
-@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
- .end_context_switch = paravirt_nop,
- };
-
--struct pv_apic_ops pv_apic_ops = {
-+struct pv_apic_ops pv_apic_ops __read_only= {
- #ifdef CONFIG_X86_LOCAL_APIC
- .startup_ipi_hook = paravirt_nop,
- #endif
- };
-
--#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
-+#ifdef CONFIG_X86_32
-+#ifdef CONFIG_X86_PAE
-+/* 64-bit pagetable entries */
-+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
-+#else
- /* 32-bit pagetable entries */
- #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
-+#endif
- #else
- /* 64-bit pagetable entries */
- #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
- #endif
-
--struct pv_mmu_ops pv_mmu_ops = {
-+struct pv_mmu_ops pv_mmu_ops __read_only = {
-
- .read_cr2 = native_read_cr2,
- .write_cr2 = native_write_cr2,
-@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
- .make_pud = PTE_IDENT,
-
- .set_pgd = native_set_pgd,
-+ .set_pgd_batched = native_set_pgd_batched,
- #endif
- #endif /* PAGETABLE_LEVELS >= 3 */
-
-@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
- },
-
- .set_fixmap = native_set_fixmap,
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ .pax_open_kernel = native_pax_open_kernel,
-+ .pax_close_kernel = native_pax_close_kernel,
-+#endif
-+
- };
-
- EXPORT_SYMBOL_GPL(pv_time_ops);
-diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
-index a1da673..2c72d5b 100644
---- a/arch/x86/kernel/paravirt_patch_64.c
-+++ b/arch/x86/kernel/paravirt_patch_64.c
-@@ -9,7 +9,9 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
- DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
- DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
- DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
-+#ifndef CONFIG_PAX_MEMORY_UDEREF
- DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
-+#endif
- DEF_NATIVE(pv_cpu_ops, clts, "clts");
- DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
-
-@@ -57,7 +59,9 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
- PATCH_SITE(pv_mmu_ops, read_cr3);
- PATCH_SITE(pv_mmu_ops, write_cr3);
- PATCH_SITE(pv_cpu_ops, clts);
-+#ifndef CONFIG_PAX_MEMORY_UDEREF
- PATCH_SITE(pv_mmu_ops, flush_tlb_single);
-+#endif
- PATCH_SITE(pv_cpu_ops, wbinvd);
-
- patch_site:
-diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
-index 726494b..5d942a3 100644
---- a/arch/x86/kernel/pci-calgary_64.c
-+++ b/arch/x86/kernel/pci-calgary_64.c
-@@ -1341,7 +1341,7 @@ static void __init get_tce_space_from_tar(void)
- tce_space = be64_to_cpu(readq(target));
- tce_space = tce_space & TAR_SW_BITS;
-
-- tce_space = tce_space & (~specified_table_size);
-+ tce_space = tce_space & (~(unsigned long)specified_table_size);
- info->tce_space = (u64 *)__va(tce_space);
- }
- }
-diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
-index 35ccf75..7a15747 100644
---- a/arch/x86/kernel/pci-iommu_table.c
-+++ b/arch/x86/kernel/pci-iommu_table.c
-@@ -2,7 +2,7 @@
- #include <asm/iommu_table.h>
- #include <linux/string.h>
- #include <linux/kallsyms.h>
--
-+#include <linux/sched.h>
-
- #define DEBUG 1
-
-diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
-index 59b9b37..f02ee42 100644
---- a/arch/x86/kernel/process.c
-+++ b/arch/x86/kernel/process.c
-@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
-
- void free_thread_info(struct thread_info *ti)
- {
-- free_thread_xstate(ti->task);
- free_pages((unsigned long)ti, THREAD_ORDER);
- }
-
-+static struct kmem_cache *task_struct_cachep;
-+
- void arch_task_cache_init(void)
- {
-- task_xstate_cachep =
-- kmem_cache_create("task_xstate", xstate_size,
-+ /* create a slab on which task_structs can be allocated */
-+ task_struct_cachep =
-+ kmem_cache_create("task_struct", sizeof(struct task_struct),
-+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
-+
-+ task_xstate_cachep =
-+ kmem_cache_create("task_xstate", xstate_size,
- __alignof__(union thread_xstate),
-- SLAB_PANIC | SLAB_NOTRACK, NULL);
-+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
-+}
-+
-+struct task_struct *alloc_task_struct_node(int node)
-+{
-+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
-+}
-+
-+void free_task_struct(struct task_struct *task)
-+{
-+ free_thread_xstate(task);
-+ kmem_cache_free(task_struct_cachep, task);
- }
-
- /*
-@@ -70,7 +87,7 @@ void exit_thread(void)
- unsigned long *bp = t->io_bitmap_ptr;
-
- if (bp) {
-- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
-+ struct tss_struct *tss = init_tss + get_cpu();
-
- t->io_bitmap_ptr = NULL;
- clear_thread_flag(TIF_IO_BITMAP);
-@@ -106,7 +123,7 @@ void show_regs_common(void)
-
- printk(KERN_CONT "\n");
- printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
-- current->pid, current->comm, print_tainted(),
-+ task_pid_nr(current), current->comm, print_tainted(),
- init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
-@@ -120,6 +137,9 @@ void flush_thread(void)
- {
- struct task_struct *tsk = current;
-
-+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
-+ loadsegment(gs, 0);
-+#endif
- flush_ptrace_hw_breakpoint(tsk);
- memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
- /*
-@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
- regs.di = (unsigned long) arg;
-
- #ifdef CONFIG_X86_32
-- regs.ds = __USER_DS;
-- regs.es = __USER_DS;
-+ regs.ds = __KERNEL_DS;
-+ regs.es = __KERNEL_DS;
- regs.fs = __KERNEL_PERCPU;
-- regs.gs = __KERNEL_STACK_CANARY;
-+ savesegment(gs, regs.gs);
- #else
- regs.ss = __KERNEL_DS;
- #endif
-@@ -387,7 +407,7 @@ bool set_pm_idle_to_default(void)
-
- return ret;
- }
--void stop_this_cpu(void *dummy)
-+__noreturn void stop_this_cpu(void *dummy)
- {
- local_irq_disable();
- /*
-@@ -629,16 +649,37 @@ static int __init idle_setup(char *str)
- }
- early_param("idle", idle_setup);
-
--unsigned long arch_align_stack(unsigned long sp)
-+#ifdef CONFIG_PAX_RANDKSTACK
-+void pax_randomize_kstack(struct pt_regs *regs)
- {
-- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-- sp -= get_random_int() % 8192;
-- return sp & ~0xf;
--}
-+ struct thread_struct *thread = &current->thread;
-+ unsigned long time;
-
--unsigned long arch_randomize_brk(struct mm_struct *mm)
--{
-- unsigned long range_end = mm->brk + 0x02000000;
-- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
--}
-+ if (!randomize_va_space)
-+ return;
-+
-+ if (v8086_mode(regs))
-+ return;
-
-+ rdtscl(time);
-+
-+ /* P4 seems to return a 0 LSB, ignore it */
-+#ifdef CONFIG_MPENTIUM4
-+ time &= 0x3EUL;
-+ time <<= 2;
-+#elif defined(CONFIG_X86_64)
-+ time &= 0xFUL;
-+ time <<= 4;
-+#else
-+ time &= 0x1FUL;
-+ time <<= 3;
-+#endif
-+
-+ thread->sp0 ^= time;
-+ load_sp0(init_tss + smp_processor_id(), thread);
-+
-+#ifdef CONFIG_X86_64
-+ percpu_write(kernel_stack, thread->sp0);
-+#endif
-+}
-+#endif
-diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
-index 8598296..3fd3443 100644
---- a/arch/x86/kernel/process_32.c
-+++ b/arch/x86/kernel/process_32.c
-@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
- unsigned long thread_saved_pc(struct task_struct *tsk)
- {
- return ((unsigned long *)tsk->thread.sp)[3];
-+//XXX return tsk->thread.eip;
- }
-
- #ifndef CONFIG_SMP
-@@ -130,21 +131,20 @@ void __show_regs(struct pt_regs *regs, int all)
- unsigned long sp;
- unsigned short ss, gs;
-
-- if (user_mode_vm(regs)) {
-+ if (user_mode(regs)) {
- sp = regs->sp;
- ss = regs->ss & 0xffff;
-- gs = get_user_gs(regs);
- } else {
- sp = kernel_stack_pointer(regs);
- savesegment(ss, ss);
-- savesegment(gs, gs);
- }
-+ gs = get_user_gs(regs);
-
- show_regs_common();
-
- printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
- (u16)regs->cs, regs->ip, regs->flags,
-- smp_processor_id());
-+ raw_smp_processor_id());
- print_symbol("EIP is at %s\n", regs->ip);
-
- printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
-@@ -200,13 +200,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
- struct task_struct *tsk;
- int err;
-
-- childregs = task_pt_regs(p);
-+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
- *childregs = *regs;
- childregs->ax = 0;
- childregs->sp = sp;
-
- p->thread.sp = (unsigned long) childregs;
- p->thread.sp0 = (unsigned long) (childregs+1);
-+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
-
- p->thread.ip = (unsigned long) ret_from_fork;
-
-@@ -296,7 +297,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- struct thread_struct *prev = &prev_p->thread,
- *next = &next_p->thread;
- int cpu = smp_processor_id();
-- struct tss_struct *tss = &per_cpu(init_tss, cpu);
-+ struct tss_struct *tss = init_tss + cpu;
- fpu_switch_t fpu;
-
- /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
-@@ -320,6 +321,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- */
- lazy_save_gs(prev->gs);
-
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ __set_fs(task_thread_info(next_p)->addr_limit);
-+#endif
-+
- /*
- * Load the per-thread Thread-Local Storage descriptor.
- */
-@@ -350,6 +355,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- */
- arch_end_context_switch(next_p);
-
-+ percpu_write(current_task, next_p);
-+ percpu_write(current_tinfo, &next_p->tinfo);
-+
- /*
- * Restore %gs if needed (which is common)
- */
-@@ -358,8 +366,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
-
- switch_fpu_finish(next_p, fpu);
-
-- percpu_write(current_task, next_p);
--
- return prev_p;
- }
-
-@@ -389,4 +395,3 @@ unsigned long get_wchan(struct task_struct *p)
- } while (count++ < 16);
- return 0;
- }
--
-diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
-index e361095..4882b55 100644
---- a/arch/x86/kernel/process_64.c
-+++ b/arch/x86/kernel/process_64.c
-@@ -89,7 +89,7 @@ static void __exit_idle(void)
- void exit_idle(void)
- {
- /* idle loop has pid 0 */
-- if (current->pid)
-+ if (task_pid_nr(current))
- return;
- __exit_idle();
- }
-@@ -264,8 +264,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
- struct pt_regs *childregs;
- struct task_struct *me = current;
-
-- childregs = ((struct pt_regs *)
-- (THREAD_SIZE + task_stack_page(p))) - 1;
-+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
- *childregs = *regs;
-
- childregs->ax = 0;
-@@ -277,6 +276,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
- p->thread.sp = (unsigned long) childregs;
- p->thread.sp0 = (unsigned long) (childregs+1);
- p->thread.usersp = me->thread.usersp;
-+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
-
- set_tsk_thread_flag(p, TIF_FORK);
-
-@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- struct thread_struct *prev = &prev_p->thread;
- struct thread_struct *next = &next_p->thread;
- int cpu = smp_processor_id();
-- struct tss_struct *tss = &per_cpu(init_tss, cpu);
-+ struct tss_struct *tss = init_tss + cpu;
- unsigned fsindex, gsindex;
- fpu_switch_t fpu;
-
-@@ -506,10 +506,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- prev->usersp = percpu_read(old_rsp);
- percpu_write(old_rsp, next->usersp);
- percpu_write(current_task, next_p);
-+ percpu_write(current_tinfo, &next_p->tinfo);
-
-- percpu_write(kernel_stack,
-- (unsigned long)task_stack_page(next_p) +
-- THREAD_SIZE - KERNEL_STACK_OFFSET);
-+ percpu_write(kernel_stack, next->sp0);
-
- /*
- * Now maybe reload the debug registers and handle I/O bitmaps
-@@ -564,12 +563,11 @@ unsigned long get_wchan(struct task_struct *p)
- if (!p || p == current || p->state == TASK_RUNNING)
- return 0;
- stack = (unsigned long)task_stack_page(p);
-- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
-+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
- return 0;
- fp = *(u64 *)(p->thread.sp);
- do {
-- if (fp < (unsigned long)stack ||
-- fp >= (unsigned long)stack+THREAD_SIZE)
-+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
- return 0;
- ip = *(u64 *)(fp+8);
- if (!in_sched_functions(ip))
-diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
-index 2dc4121..c7c8aac 100644
---- a/arch/x86/kernel/ptrace.c
-+++ b/arch/x86/kernel/ptrace.c
-@@ -181,14 +181,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
- {
- unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
- unsigned long sp = (unsigned long)&regs->sp;
-- struct thread_info *tinfo;
-
-- if (context == (sp & ~(THREAD_SIZE - 1)))
-+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
- return sp;
-
-- tinfo = (struct thread_info *)context;
-- if (tinfo->previous_esp)
-- return tinfo->previous_esp;
-+ sp = *(unsigned long *)context;
-+ if (sp)
-+ return sp;
-
- return (unsigned long)regs;
- }
-@@ -449,6 +448,20 @@ static int putreg(struct task_struct *child,
- if (child->thread.gs != value)
- return do_arch_prctl(child, ARCH_SET_GS, value);
- return 0;
-+
-+ case offsetof(struct user_regs_struct,ip):
-+ /*
-+ * Protect against any attempt to set ip to an
-+ * impossible address. There are dragons lurking if the
-+ * address is noncanonical. (This explicitly allows
-+ * setting ip to TASK_SIZE_MAX, because user code can do
-+ * that all by itself by running off the end of its
-+ * address space.
-+ */
-+ if (value > TASK_SIZE_MAX)
-+ return -EIO;
-+ break;
-+
- #endif
- }
-
-@@ -585,7 +598,7 @@ static void ptrace_triggered(struct perf_event *bp,
- static unsigned long ptrace_get_dr7(struct perf_event *bp[])
- {
- int i;
-- int dr7 = 0;
-+ unsigned long dr7 = 0;
- struct arch_hw_breakpoint *info;
-
- for (i = 0; i < HBP_NUM; i++) {
-@@ -852,7 +865,7 @@ long arch_ptrace(struct task_struct *child, long request,
- unsigned long addr, unsigned long data)
- {
- int ret;
-- unsigned long __user *datap = (unsigned long __user *)data;
-+ unsigned long __user *datap = (__force unsigned long __user *)data;
-
- switch (request) {
- /* read the word at location addr in the USER area. */
-@@ -937,14 +950,14 @@ long arch_ptrace(struct task_struct *child, long request,
- if ((int) addr < 0)
- return -EIO;
- ret = do_get_thread_area(child, addr,
-- (struct user_desc __user *)data);
-+ (__force struct user_desc __user *) data);
- break;
-
- case PTRACE_SET_THREAD_AREA:
- if ((int) addr < 0)
- return -EIO;
- ret = do_set_thread_area(child, addr,
-- (struct user_desc __user *)data, 0);
-+ (__force struct user_desc __user *) data, 0);
- break;
- #endif
-
-@@ -1229,7 +1242,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
-
- #ifdef CONFIG_X86_64
-
--static struct user_regset x86_64_regsets[] __read_mostly = {
-+static user_regset_no_const x86_64_regsets[] __read_only = {
- [REGSET_GENERAL] = {
- .core_note_type = NT_PRSTATUS,
- .n = sizeof(struct user_regs_struct) / sizeof(long),
-@@ -1273,7 +1286,7 @@ static const struct user_regset_view user_x86_64_view = {
- #endif /* CONFIG_X86_64 */
-
- #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
--static struct user_regset x86_32_regsets[] __read_mostly = {
-+static user_regset_no_const x86_32_regsets[] __read_only = {
- [REGSET_GENERAL] = {
- .core_note_type = NT_PRSTATUS,
- .n = sizeof(struct user_regs_struct32) / sizeof(u32),
-@@ -1326,7 +1339,7 @@ static const struct user_regset_view user_x86_32_view = {
- */
- u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
-
--void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
-+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
- {
- #ifdef CONFIG_X86_64
- x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
-@@ -1361,7 +1374,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
- memset(info, 0, sizeof(*info));
- info->si_signo = SIGTRAP;
- info->si_code = si_code;
-- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
-+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
- }
-
- void user_single_step_siginfo(struct task_struct *tsk,
-@@ -1390,6 +1403,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
- # define IS_IA32 0
- #endif
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+extern void gr_delayed_cred_worker(void);
-+#endif
-+
- /*
- * We must return the syscall number to actually look up in the table.
- * This can be -1L to skip running any syscall at all.
-@@ -1398,6 +1415,11 @@ long syscall_trace_enter(struct pt_regs *regs)
- {
- long ret = 0;
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
-+ gr_delayed_cred_worker();
-+#endif
-+
- /*
- * If we stepped into a sysenter/syscall insn, it trapped in
- * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
-@@ -1409,7 +1431,11 @@ long syscall_trace_enter(struct pt_regs *regs)
- regs->flags |= X86_EFLAGS_TF;
-
- /* do the secure computing check first */
-- secure_computing(regs->orig_ax);
-+ if (secure_computing(regs->orig_ax)) {
-+ /* seccomp failures shouldn't expose any additional code. */
-+ ret = -1L;
-+ goto out;
-+ }
-
- if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
- ret = -1L;
-@@ -1436,6 +1462,7 @@ long syscall_trace_enter(struct pt_regs *regs)
- #endif
- }
-
-+out:
- return ret ?: regs->orig_ax;
- }
-
-@@ -1443,6 +1470,11 @@ void syscall_trace_leave(struct pt_regs *regs)
- {
- bool step;
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
-+ gr_delayed_cred_worker();
-+#endif
-+
- if (unlikely(current->audit_context))
- audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
-
-diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
-index 42eb330..139955c 100644
---- a/arch/x86/kernel/pvclock.c
-+++ b/arch/x86/kernel/pvclock.c
-@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
- return pv_tsc_khz;
- }
-
--static atomic64_t last_value = ATOMIC64_INIT(0);
-+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
-
- void pvclock_resume(void)
- {
-- atomic64_set(&last_value, 0);
-+ atomic64_set_unchecked(&last_value, 0);
- }
-
- cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
-@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
- * updating at the same time, and one of them could be slightly behind,
- * making the assumption that last_value always go forward fail to hold.
- */
-- last = atomic64_read(&last_value);
-+ last = atomic64_read_unchecked(&last_value);
- do {
- if (ret < last)
- return last;
-- last = atomic64_cmpxchg(&last_value, last, ret);
-+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
- } while (unlikely(last != ret));
-
- return ret;
-diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
-index 41b2f57..9dd7145 100644
---- a/arch/x86/kernel/reboot.c
-+++ b/arch/x86/kernel/reboot.c
-@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
- EXPORT_SYMBOL(pm_power_off);
-
- static const struct desc_ptr no_idt = {};
--static int reboot_mode;
-+static unsigned short reboot_mode;
- enum reboot_type reboot_type = BOOT_ACPI;
- int reboot_force;
-
-@@ -145,7 +145,7 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d)
- return 0;
- }
-
--static struct dmi_system_id __initdata reboot_dmi_table[] = {
-+static const struct dmi_system_id __initconst reboot_dmi_table[] = {
- { /* Handle problems with rebooting on Dell E520's */
- .callback = set_bios_reboot,
- .ident = "Dell E520",
-@@ -308,13 +308,17 @@ core_initcall(reboot_init);
- extern const unsigned char machine_real_restart_asm[];
- extern const u64 machine_real_restart_gdt[3];
-
--void machine_real_restart(unsigned int type)
-+__noreturn void machine_real_restart(unsigned int type)
- {
- void *restart_va;
- unsigned long restart_pa;
-- void (*restart_lowmem)(unsigned int);
-+ void (* __noreturn restart_lowmem)(unsigned int);
- u64 *lowmem_gdt;
-
-+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
-+ struct desc_struct *gdt;
-+#endif
-+
- local_irq_disable();
-
- /* Write zero to CMOS register number 0x0f, which the BIOS POST
-@@ -340,14 +344,14 @@ void machine_real_restart(unsigned int type)
- boot)". This seems like a fairly standard thing that gets set by
- REBOOT.COM programs, and the previous reset routine did this
- too. */
-- *((unsigned short *)0x472) = reboot_mode;
-+ *(unsigned short *)(__va(0x472)) = reboot_mode;
-
- /* Patch the GDT in the low memory trampoline */
- lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
-
- restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
- restart_pa = virt_to_phys(restart_va);
-- restart_lowmem = (void (*)(unsigned int))restart_pa;
-+ restart_lowmem = (void *)restart_pa;
-
- /* GDT[0]: GDT self-pointer */
- lowmem_gdt[0] =
-@@ -358,7 +362,35 @@ void machine_real_restart(unsigned int type)
- GDT_ENTRY(0x009b, restart_pa, 0xffff);
-
- /* Jump to the identity-mapped low memory code */
-+
-+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
-+ gdt = get_cpu_gdt_table(smp_processor_id());
-+ pax_open_kernel();
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
-+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
-+ loadsegment(ds, __KERNEL_DS);
-+ loadsegment(es, __KERNEL_DS);
-+ loadsegment(ss, __KERNEL_DS);
-+#endif
-+#ifdef CONFIG_PAX_KERNEXEC
-+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
-+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
-+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
-+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
-+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
-+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
-+#endif
-+ pax_close_kernel();
-+#endif
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
-+ unreachable();
-+#else
- restart_lowmem(type);
-+#endif
-+
- }
- #ifdef CONFIG_APM_MODULE
- EXPORT_SYMBOL(machine_real_restart);
-@@ -590,7 +622,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
- * try to force a triple fault and then cycle between hitting the keyboard
- * controller and doing that
- */
--static void native_machine_emergency_restart(void)
-+static void __noreturn native_machine_emergency_restart(void)
- {
- int i;
- int attempt = 0;
-@@ -717,13 +749,13 @@ void native_machine_shutdown(void)
- #endif
- }
-
--static void __machine_emergency_restart(int emergency)
-+static __noreturn void __machine_emergency_restart(int emergency)
- {
- reboot_emergency = emergency;
- machine_ops.emergency_restart();
- }
-
--static void native_machine_restart(char *__unused)
-+static void __noreturn native_machine_restart(char *__unused)
- {
- printk("machine restart\n");
-
-@@ -732,7 +764,7 @@ static void native_machine_restart(char *__unused)
- __machine_emergency_restart(0);
- }
-
--static void native_machine_halt(void)
-+static void __noreturn native_machine_halt(void)
- {
- /* stop other cpus and apics */
- machine_shutdown();
-@@ -743,7 +775,7 @@ static void native_machine_halt(void)
- stop_this_cpu(NULL);
- }
-
--static void native_machine_power_off(void)
-+static void __noreturn native_machine_power_off(void)
- {
- if (pm_power_off) {
- if (!reboot_force)
-@@ -752,9 +784,10 @@ static void native_machine_power_off(void)
- }
- /* a fallback in case there is no PM info available */
- tboot_shutdown(TB_SHUTDOWN_HALT);
-+ unreachable();
- }
-
--struct machine_ops machine_ops = {
-+struct machine_ops machine_ops __read_only = {
- .power_off = native_machine_power_off,
- .shutdown = native_machine_shutdown,
- .emergency_restart = native_machine_emergency_restart,
-diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
-index c8e41e9..64049ef 100644
---- a/arch/x86/kernel/reboot_fixups_32.c
-+++ b/arch/x86/kernel/reboot_fixups_32.c
-@@ -57,7 +57,7 @@ struct device_fixup {
- unsigned int vendor;
- unsigned int device;
- void (*reboot_fixup)(struct pci_dev *);
--};
-+} __do_const;
-
- /*
- * PCI ids solely used for fixups_table go here
-diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
-index b506f41..c954434 100644
---- a/arch/x86/kernel/setup.c
-+++ b/arch/x86/kernel/setup.c
-@@ -176,9 +176,17 @@ static struct resource bss_resource = {
-
- #ifdef CONFIG_X86_32
- /* cpu data as detected by the assembly code in head.S */
--struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1};
-+struct cpuinfo_x86 new_cpu_data __cpuinitdata = {
-+ .wp_works_ok = -1,
-+ .hlt_works_ok = 1,
-+ .fdiv_bug = -1,
-+};
- /* common cpu data for all cpus */
--struct cpuinfo_x86 boot_cpu_data __read_mostly = {0, 0, 0, 0, -1, 1, 0, 0, -1};
-+struct cpuinfo_x86 boot_cpu_data __read_mostly = {
-+ .wp_works_ok = -1,
-+ .hlt_works_ok = 1,
-+ .fdiv_bug = -1,
-+};
- EXPORT_SYMBOL(boot_cpu_data);
- static void set_mca_bus(int x)
- {
-@@ -447,7 +455,7 @@ static void __init parse_setup_data(void)
-
- switch (data->type) {
- case SETUP_E820_EXT:
-- parse_e820_ext(data);
-+ parse_e820_ext((struct setup_data __force_kernel *)data);
- break;
- case SETUP_DTB:
- add_dtb(pa_data);
-@@ -727,7 +735,7 @@ static void __init trim_bios_range(void)
- * area (640->1Mb) as ram even though it is not.
- * take them out.
- */
-- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
-+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
-
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
- }
-@@ -852,14 +860,14 @@ void __init setup_arch(char **cmdline_p)
-
- if (!boot_params.hdr.root_flags)
- root_mountflags &= ~MS_RDONLY;
-- init_mm.start_code = (unsigned long) _text;
-- init_mm.end_code = (unsigned long) _etext;
-+ init_mm.start_code = ktla_ktva((unsigned long) _text);
-+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = _brk_end;
-
-- code_resource.start = virt_to_phys(_text);
-- code_resource.end = virt_to_phys(_etext)-1;
-- data_resource.start = virt_to_phys(_etext);
-+ code_resource.start = virt_to_phys(ktla_ktva(_text));
-+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
-+ data_resource.start = virt_to_phys(_sdata);
- data_resource.end = virt_to_phys(_edata)-1;
- bss_resource.start = virt_to_phys(&__bss_start);
- bss_resource.end = virt_to_phys(&__bss_stop)-1;
-diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
-index 5a98aa2..5aa4ffc 100644
---- a/arch/x86/kernel/setup_percpu.c
-+++ b/arch/x86/kernel/setup_percpu.c
-@@ -21,19 +21,17 @@
- #include <asm/cpu.h>
- #include <asm/stackprotector.h>
-
--DEFINE_PER_CPU(int, cpu_number);
-+#ifdef CONFIG_SMP
-+DEFINE_PER_CPU(unsigned int, cpu_number);
- EXPORT_PER_CPU_SYMBOL(cpu_number);
-+#endif
-
--#ifdef CONFIG_X86_64
- #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
--#else
--#define BOOT_PERCPU_OFFSET 0
--#endif
-
- DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
- EXPORT_PER_CPU_SYMBOL(this_cpu_off);
-
--unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
-+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
- [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
- };
- EXPORT_SYMBOL(__per_cpu_offset);
-@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
- {
- #ifdef CONFIG_NEED_MULTIPLE_NODES
- pg_data_t *last = NULL;
-- unsigned int cpu;
-+ int cpu;
-
- for_each_possible_cpu(cpu) {
- int node = early_cpu_to_node(cpu);
-@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
- {
- #ifdef CONFIG_X86_32
- struct desc_struct gdt;
-+ unsigned long base = per_cpu_offset(cpu);
-
-- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
-- 0x2 | DESCTYPE_S, 0x8);
-- gdt.s = 1;
-+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
-+ 0x83 | DESCTYPE_S, 0xC);
- write_gdt_entry(get_cpu_gdt_table(cpu),
- GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
- #endif
-@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
- /* alrighty, percpu areas up and running */
- delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
- for_each_possible_cpu(cpu) {
-+#ifdef CONFIG_CC_STACKPROTECTOR
-+#ifdef CONFIG_X86_32
-+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
-+#endif
-+#endif
- per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
- per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
- per_cpu(cpu_number, cpu) = cpu;
-@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
- */
- set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
- #endif
-+#ifdef CONFIG_CC_STACKPROTECTOR
-+#ifdef CONFIG_X86_32
-+ if (!cpu)
-+ per_cpu(stack_canary.canary, cpu) = canary;
-+#endif
-+#endif
- /*
- * Up to this point, the boot CPU has been using .init.data
- * area. Reload any changed state for the boot CPU.
-diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
-index 54ddaeb2..158e022 100644
---- a/arch/x86/kernel/signal.c
-+++ b/arch/x86/kernel/signal.c
-@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
- * Align the stack pointer according to the i386 ABI,
- * i.e. so that on function entry ((sp + 4) & 15) == 0.
- */
-- sp = ((sp + 4) & -16ul) - 4;
-+ sp = ((sp - 12) & -16ul) - 4;
- #else /* !CONFIG_X86_32 */
- sp = round_down(sp, 16) - 8;
- #endif
-@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
- * Return an always-bogus address instead so we will die with SIGSEGV.
- */
- if (onsigstack && !likely(on_sig_stack(sp)))
-- return (void __user *)-1L;
-+ return (__force void __user *)-1L;
-
- /* save i387 state */
- if (used_math() && save_i387_xstate(*fpstate) < 0)
-- return (void __user *)-1L;
-+ return (__force void __user *)-1L;
-
- return (void __user *)sp;
- }
-@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
- }
-
- if (current->mm->context.vdso)
-- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
-+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
- else
-- restorer = &frame->retcode;
-+ restorer = (void __user *)&frame->retcode;
- if (ka->sa.sa_flags & SA_RESTORER)
- restorer = ka->sa.sa_restorer;
-
-@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
- * reasons and because gdb uses it as a signature to notice
- * signal handler stack frames.
- */
-- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
-+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
-
- if (err)
- return -EFAULT;
-@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-
- /* Set up to return from userspace. */
-- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
-+ if (current->mm->context.vdso)
-+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
-+ else
-+ restorer = (void __user *)&frame->retcode;
- if (ka->sa.sa_flags & SA_RESTORER)
- restorer = ka->sa.sa_restorer;
- put_user_ex(restorer, &frame->pretcode);
-@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- * reasons and because gdb uses it as a signature to notice
- * signal handler stack frames.
- */
-- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
-+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
- } put_user_catch(err);
-
- if (err)
-@@ -655,19 +658,22 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- {
- int usig = signr_convert(sig);
- sigset_t *set = &current->blocked;
-+ sigset_t sigcopy;
- int ret;
-
- if (current_thread_info()->status & TS_RESTORE_SIGMASK)
- set = &current->saved_sigmask;
-
-+ sigcopy = *set;
-+
- /* Set up the stack frame */
- if (is_ia32) {
- if (ka->sa.sa_flags & SA_SIGINFO)
-- ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
-+ ret = ia32_setup_rt_frame(usig, ka, info, &sigcopy, regs);
- else
-- ret = ia32_setup_frame(usig, ka, set, regs);
-+ ret = ia32_setup_frame(usig, ka, &sigcopy, regs);
- } else
-- ret = __setup_rt_frame(sig, ka, info, set, regs);
-+ ret = __setup_rt_frame(sig, ka, info, &sigcopy, regs);
-
- if (ret) {
- force_sigsegv(sig, current);
-@@ -769,7 +775,7 @@ static void do_signal(struct pt_regs *regs)
- * X86_32: vm86 regs switched out by assembly code before reaching
- * here, so testing against kernel CS suffices.
- */
-- if (!user_mode(regs))
-+ if (!user_mode_novm(regs))
- return;
-
- signr = get_signal_to_deliver(&info, &ka, regs, NULL);
-diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
-index 16204dc..0e7d4b7 100644
---- a/arch/x86/kernel/smp.c
-+++ b/arch/x86/kernel/smp.c
-@@ -225,7 +225,7 @@ void smp_call_function_single_interrupt(struct pt_regs *regs)
- irq_exit();
- }
-
--struct smp_ops smp_ops = {
-+struct smp_ops smp_ops __read_only = {
- .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
- .smp_prepare_cpus = native_smp_prepare_cpus,
- .smp_cpus_done = native_smp_cpus_done,
-diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
-index bb28f2ca..e377b54 100644
---- a/arch/x86/kernel/smpboot.c
-+++ b/arch/x86/kernel/smpboot.c
-@@ -252,11 +252,13 @@ notrace static void __cpuinit start_secondary(void *unused)
- preempt_disable();
- smp_callin();
-
--#ifdef CONFIG_X86_32
- /* switch away from the initial page table */
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ load_cr3(get_cpu_pgd(smp_processor_id()));
-+#else
- load_cr3(swapper_pg_dir);
-+#endif
- __flush_tlb_all();
--#endif
-
- /* otherwise gcc will move up smp_processor_id before the cpu_init */
- barrier();
-@@ -699,7 +701,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
- */
- if (c_idle.idle) {
- c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
-- (THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
-+ (THREAD_SIZE - 16 + task_stack_page(c_idle.idle))) - 1);
- init_idle(c_idle.idle, cpu);
- goto do_rest;
- }
-@@ -716,17 +718,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
- set_idle_for_cpu(cpu, c_idle.idle);
- do_rest:
- per_cpu(current_task, cpu) = c_idle.idle;
-+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
- #ifdef CONFIG_X86_32
- /* Stack for startup_32 can be just as for start_secondary onwards */
- irq_ctx_init(cpu);
- #else
- clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
- initial_gs = per_cpu_offset(cpu);
-- per_cpu(kernel_stack, cpu) =
-- (unsigned long)task_stack_page(c_idle.idle) -
-- KERNEL_STACK_OFFSET + THREAD_SIZE;
-+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
- #endif
-+
-+ pax_open_kernel();
- early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
-+ pax_close_kernel();
-+
- initial_code = (unsigned long)start_secondary;
- stack_start = c_idle.idle->thread.sp;
-
-@@ -868,6 +873,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
-
- per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
-+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-+ KERNEL_PGD_PTRS);
-+#endif
-+
- err = do_boot_cpu(apicid, cpu);
- if (err) {
- pr_debug("do_boot_cpu failed %d\n", err);
-diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
-index d4f278e..86c58c0 100644
---- a/arch/x86/kernel/step.c
-+++ b/arch/x86/kernel/step.c
-@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
- struct desc_struct *desc;
- unsigned long base;
-
-- seg &= ~7UL;
-+ seg >>= 3;
-
- mutex_lock(&child->mm->context.lock);
-- if (unlikely((seg >> 3) >= child->mm->context.size))
-+ if (unlikely(seg >= child->mm->context.size))
- addr = -1L; /* bogus selector, access would fault */
- else {
- desc = child->mm->context.ldt + seg;
-@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
- addr += base;
- }
- mutex_unlock(&child->mm->context.lock);
-- }
-+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
-+ addr = ktla_ktva(addr);
-
- return addr;
- }
-@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
- unsigned char opcode[15];
- unsigned long addr = convert_ip_to_linear(child, regs);
-
-+ if (addr == -EINVAL)
-+ return 0;
-+
- copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
- for (i = 0; i < copied; i++) {
- switch (opcode[i]) {
-diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
-index 0b0cb5f..560d0df 100644
---- a/arch/x86/kernel/sys_i386_32.c
-+++ b/arch/x86/kernel/sys_i386_32.c
-@@ -24,17 +24,228 @@
-
- #include <asm/syscalls.h>
-
--/*
-- * Do a system call from kernel instead of calling sys_execve so we
-- * end up with proper pt_regs.
-- */
--int kernel_execve(const char *filename,
-- const char *const argv[],
-- const char *const envp[])
-+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
- {
-- long __res;
-- asm volatile ("int $0x80"
-- : "=a" (__res)
-- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
-- return __res;
-+ unsigned long pax_task_size = TASK_SIZE;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
-+ pax_task_size = SEGMEXEC_TASK_SIZE;
-+#endif
-+
-+ if (flags & MAP_FIXED)
-+ if (len > pax_task_size || addr > pax_task_size - len)
-+ return -EINVAL;
-+
-+ return 0;
-+}
-+
-+unsigned long
-+arch_get_unmapped_area(struct file *filp, unsigned long addr,
-+ unsigned long len, unsigned long pgoff, unsigned long flags)
-+{
-+ struct mm_struct *mm = current->mm;
-+ struct vm_area_struct *vma;
-+ unsigned long start_addr, pax_task_size = TASK_SIZE;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
-+ pax_task_size = SEGMEXEC_TASK_SIZE;
-+#endif
-+
-+ pax_task_size -= PAGE_SIZE;
-+
-+ if (len > pax_task_size)
-+ return -ENOMEM;
-+
-+ if (flags & MAP_FIXED)
-+ return addr;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
-+ if (addr) {
-+ addr = PAGE_ALIGN(addr);
-+ if (pax_task_size - len >= addr) {
-+ vma = find_vma(mm, addr);
-+ if (check_heap_stack_gap(vma, &addr, len, offset))
-+ return addr;
-+ }
-+ }
-+ if (len > mm->cached_hole_size) {
-+ start_addr = addr = mm->free_area_cache;
-+ } else {
-+ start_addr = addr = mm->mmap_base;
-+ mm->cached_hole_size = 0;
-+ }
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
-+ start_addr = 0x00110000UL;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ start_addr += mm->delta_mmap & 0x03FFF000UL;
-+#endif
-+
-+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
-+ start_addr = addr = mm->mmap_base;
-+ else
-+ addr = start_addr;
-+ }
-+#endif
-+
-+full_search:
-+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-+ /* At this point: (!vma || addr < vma->vm_end). */
-+ if (pax_task_size - len < addr) {
-+ /*
-+ * Start a new search - just in case we missed
-+ * some holes.
-+ */
-+ if (start_addr != mm->mmap_base) {
-+ start_addr = addr = mm->mmap_base;
-+ mm->cached_hole_size = 0;
-+ goto full_search;
-+ }
-+ return -ENOMEM;
-+ }
-+ if (check_heap_stack_gap(vma, &addr, len, offset))
-+ break;
-+ if (addr + mm->cached_hole_size < vma->vm_start)
-+ mm->cached_hole_size = vma->vm_start - addr;
-+ addr = vma->vm_end;
-+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
-+ start_addr = addr = mm->mmap_base;
-+ mm->cached_hole_size = 0;
-+ goto full_search;
-+ }
-+ }
-+
-+ /*
-+ * Remember the place where we stopped the search:
-+ */
-+ mm->free_area_cache = addr + len;
-+ return addr;
-+}
-+
-+unsigned long
-+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
-+ const unsigned long len, const unsigned long pgoff,
-+ const unsigned long flags)
-+{
-+ struct vm_area_struct *vma;
-+ struct mm_struct *mm = current->mm;
-+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
-+ pax_task_size = SEGMEXEC_TASK_SIZE;
-+#endif
-+
-+ pax_task_size -= PAGE_SIZE;
-+
-+ /* requested length too big for entire address space */
-+ if (len > pax_task_size)
-+ return -ENOMEM;
-+
-+ if (flags & MAP_FIXED)
-+ return addr;
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
-+ goto bottomup;
-+#endif
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
-+ /* requesting a specific address */
-+ if (addr) {
-+ addr = PAGE_ALIGN(addr);
-+ if (pax_task_size - len >= addr) {
-+ vma = find_vma(mm, addr);
-+ if (check_heap_stack_gap(vma, &addr, len, offset))
-+ return addr;
-+ }
-+ }
-+
-+ /* check if free_area_cache is useful for us */
-+ if (len <= mm->cached_hole_size) {
-+ mm->cached_hole_size = 0;
-+ mm->free_area_cache = mm->mmap_base;
-+ }
-+
-+ /* either no address requested or can't fit in requested address hole */
-+ addr = mm->free_area_cache;
-+
-+ /* make sure it can fit in the remaining address space */
-+ if (addr > len) {
-+ addr -= len;
-+ vma = find_vma(mm, addr);
-+ if (check_heap_stack_gap(vma, &addr, len, offset))
-+ /* remember the address as a hint for next time */
-+ return (mm->free_area_cache = addr);
-+ }
-+
-+ if (mm->mmap_base < len)
-+ goto bottomup;
-+
-+ addr = mm->mmap_base-len;
-+
-+ do {
-+ /*
-+ * Lookup failure means no vma is above this address,
-+ * else if new region fits below vma->vm_start,
-+ * return with success:
-+ */
-+ vma = find_vma(mm, addr);
-+ if (check_heap_stack_gap(vma, &addr, len, offset))
-+ /* remember the address as a hint for next time */
-+ return (mm->free_area_cache = addr);
-+
-+ /* remember the largest hole we saw so far */
-+ if (addr + mm->cached_hole_size < vma->vm_start)
-+ mm->cached_hole_size = vma->vm_start - addr;
-+
-+ /* try just below the current vma->vm_start */
-+ addr = skip_heap_stack_gap(vma, len, offset);
-+ } while (!IS_ERR_VALUE(addr));
-+
-+bottomup:
-+ /*
-+ * A failed mmap() very likely causes application failure,
-+ * so fall back to the bottom-up function here. This scenario
-+ * can happen with large stack limits and large mmap()
-+ * allocations.
-+ */
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
-+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
-+ else
-+#endif
-+
-+ mm->mmap_base = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
-+ mm->free_area_cache = mm->mmap_base;
-+ mm->cached_hole_size = ~0UL;
-+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-+ /*
-+ * Restore the topdown base:
-+ */
-+ mm->mmap_base = base;
-+ mm->free_area_cache = base;
-+ mm->cached_hole_size = ~0UL;
-+
-+ return addr;
- }
-diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
-index cdb2fc9..a7264e0 100644
---- a/arch/x86/kernel/sys_x86_64.c
-+++ b/arch/x86/kernel/sys_x86_64.c
-@@ -95,8 +95,8 @@ out:
- return error;
- }
-
--static void find_start_end(unsigned long flags, unsigned long *begin,
-- unsigned long *end)
-+static void find_start_end(struct mm_struct *mm, unsigned long flags,
-+ unsigned long *begin, unsigned long *end)
- {
- if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
- unsigned long new_begin;
-@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
- *begin = new_begin;
- }
- } else {
-- *begin = current->mm->mmap_legacy_base;
-+ *begin = mm->mmap_legacy_base;
- *end = TASK_SIZE;
- }
- }
-@@ -128,20 +128,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
- struct vm_area_struct *vma;
- unsigned long start_addr;
- unsigned long begin, end;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
-
- if (flags & MAP_FIXED)
- return addr;
-
-- find_start_end(flags, &begin, &end);
-+ find_start_end(mm, flags, &begin, &end);
-
- if (len > end)
- return -ENOMEM;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
-- if (end - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (end - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
- return addr;
- }
- if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
-@@ -172,7 +176,7 @@ full_search:
- }
- return -ENOMEM;
- }
-- if (!vma || addr + len <= vma->vm_start) {
-+ if (check_heap_stack_gap(vma, &addr, len, offset)) {
- /*
- * Remember the place where we stopped the search:
- */
-@@ -195,7 +199,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- {
- struct vm_area_struct *vma;
- struct mm_struct *mm = current->mm;
-- unsigned long addr = addr0;
-+ unsigned long base = mm->mmap_base, addr = addr0;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
-
- /* requested length too big for entire address space */
- if (len > TASK_SIZE)
-@@ -208,13 +213,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
- goto bottomup;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- /* requesting a specific address */
- if (addr) {
- addr = PAGE_ALIGN(addr);
-- vma = find_vma(mm, addr);
-- if (TASK_SIZE - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-- return addr;
-+ if (TASK_SIZE - len >= addr) {
-+ vma = find_vma(mm, addr);
-+ if (check_heap_stack_gap(vma, &addr, len, offset))
-+ return addr;
-+ }
- }
-
- /* check if free_area_cache is useful for us */
-@@ -232,7 +242,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- ALIGN_TOPDOWN);
-
- vma = find_vma(mm, tmp_addr);
-- if (!vma || tmp_addr + len <= vma->vm_start)
-+ if (check_heap_stack_gap(vma, &tmp_addr, len, offset))
- /* remember the address as a hint for next time */
- return mm->free_area_cache = tmp_addr;
- }
-@@ -251,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- * return with success:
- */
- vma = find_vma(mm, addr);
-- if (!vma || addr+len <= vma->vm_start)
-+ if (check_heap_stack_gap(vma, &addr, len, offset))
- /* remember the address as a hint for next time */
- return mm->free_area_cache = addr;
-
-@@ -260,8 +270,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- mm->cached_hole_size = vma->vm_start - addr;
-
- /* try just below the current vma->vm_start */
-- addr = vma->vm_start-len;
-- } while (len < vma->vm_start);
-+ addr = skip_heap_stack_gap(vma, len, offset);
-+ } while (!IS_ERR_VALUE(addr));
-
- bottomup:
- /*
-@@ -270,13 +280,21 @@ bottomup:
- * can happen with large stack limits and large mmap()
- * allocations.
- */
-+ mm->mmap_base = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
-+ mm->free_area_cache = mm->mmap_base;
- mm->cached_hole_size = ~0UL;
-- mm->free_area_cache = TASK_UNMAPPED_BASE;
- addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
- /*
- * Restore the topdown base:
- */
-- mm->free_area_cache = mm->mmap_base;
-+ mm->mmap_base = base;
-+ mm->free_area_cache = base;
- mm->cached_hole_size = ~0UL;
-
- return addr;
-diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
-index 9a0e312..e6f66f2 100644
---- a/arch/x86/kernel/syscall_table_32.S
-+++ b/arch/x86/kernel/syscall_table_32.S
-@@ -1,3 +1,4 @@
-+.section .rodata,"a",@progbits
- ENTRY(sys_call_table)
- .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
- .long sys_exit
-diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
-index e2410e2..5e8d841 100644
---- a/arch/x86/kernel/tboot.c
-+++ b/arch/x86/kernel/tboot.c
-@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
-
- void tboot_shutdown(u32 shutdown_type)
- {
-- void (*shutdown)(void);
-+ void (* __noreturn shutdown)(void);
-
- if (!tboot_enabled())
- return;
-@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
-
- switch_to_tboot_pt();
-
-- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
-+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
- shutdown();
-
- /* should not reach here */
-@@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
- tboot_shutdown(acpi_shutdown_map[sleep_state]);
- }
-
--static atomic_t ap_wfs_count;
-+static atomic_unchecked_t ap_wfs_count;
-
- static int tboot_wait_for_aps(int num_aps)
- {
-@@ -322,16 +322,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
- {
- switch (action) {
- case CPU_DYING:
-- atomic_inc(&ap_wfs_count);
-+ atomic_inc_unchecked(&ap_wfs_count);
- if (num_online_cpus() == 1)
-- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
-+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
- return NOTIFY_BAD;
- break;
- }
- return NOTIFY_OK;
- }
-
--static struct notifier_block tboot_cpu_notifier __cpuinitdata =
-+static struct notifier_block tboot_cpu_notifier =
- {
- .notifier_call = tboot_cpu_callback,
- };
-@@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
-
- tboot_create_trampoline();
-
-- atomic_set(&ap_wfs_count, 0);
-+ atomic_set_unchecked(&ap_wfs_count, 0);
- register_hotcpu_notifier(&tboot_cpu_notifier);
- return 0;
- }
-diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
-index dd5fbf4..b7f2232 100644
---- a/arch/x86/kernel/time.c
-+++ b/arch/x86/kernel/time.c
-@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
- {
- unsigned long pc = instruction_pointer(regs);
-
-- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
-+ if (!user_mode(regs) && in_lock_functions(pc)) {
- #ifdef CONFIG_FRAME_POINTER
-- return *(unsigned long *)(regs->bp + sizeof(long));
-+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
- #else
- unsigned long *sp =
- (unsigned long *)kernel_stack_pointer(regs);
-@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
- * or above a saved flags. Eflags has bits 22-31 zero,
- * kernel addresses don't.
- */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ return ktla_ktva(sp[0]);
-+#else
- if (sp[0] >> 22)
- return sp[0];
- if (sp[1] >> 22)
- return sp[1];
- #endif
-+
-+#endif
- }
- return pc;
- }
-diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
-index 0c38d06..79ea0e3 100644
---- a/arch/x86/kernel/tls.c
-+++ b/arch/x86/kernel/tls.c
-@@ -140,6 +140,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
-+ return -EINVAL;
-+#endif
-+
- set_tls_desc(p, idx, &info, 1);
-
- return 0;
-@@ -261,7 +266,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
-
- if (kbuf)
- info = kbuf;
-- else if (__copy_from_user(infobuf, ubuf, count))
-+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
- return -EFAULT;
- else
- info = infobuf;
-diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
-index 451c0a7..e57f551 100644
---- a/arch/x86/kernel/trampoline_32.S
-+++ b/arch/x86/kernel/trampoline_32.S
-@@ -32,6 +32,12 @@
- #include <asm/segment.h>
- #include <asm/page_types.h>
-
-+#ifdef CONFIG_PAX_KERNEXEC
-+#define ta(X) (X)
-+#else
-+#define ta(X) ((X) - __PAGE_OFFSET)
-+#endif
-+
- #ifdef CONFIG_SMP
-
- .section ".x86_trampoline","a"
-@@ -62,7 +68,7 @@ r_base = .
- inc %ax # protected mode (PE) bit
- lmsw %ax # into protected mode
- # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
-- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
-+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
-
- # These need to be in the same 64K segment as the above;
- # hence we don't use the boot_gdt_descr defined in head.S
-diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
-index 09ff517..df19fbff 100644
---- a/arch/x86/kernel/trampoline_64.S
-+++ b/arch/x86/kernel/trampoline_64.S
-@@ -90,7 +90,7 @@ startup_32:
- movl $__KERNEL_DS, %eax # Initialize the %ds segment register
- movl %eax, %ds
-
-- movl $X86_CR4_PAE, %eax
-+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
- movl %eax, %cr4 # Enable PAE mode
-
- # Setup trampoline 4 level pagetables
-@@ -138,7 +138,7 @@ tidt:
- # so the kernel can live anywhere
- .balign 4
- tgdt:
-- .short tgdt_end - tgdt # gdt limit
-+ .short tgdt_end - tgdt - 1 # gdt limit
- .long tgdt - r_base
- .short 0
- .quad 0x00cf9b000000ffff # __KERNEL32_CS
-diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
-index edbd30a..d83c9a9 100644
---- a/arch/x86/kernel/traps.c
-+++ b/arch/x86/kernel/traps.c
-@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
-
- /* Do we ignore FPU interrupts ? */
- char ignore_fpu_irq;
--
--/*
-- * The IDT has to be page-aligned to simplify the Pentium
-- * F0 0F bug workaround.
-- */
--gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
- #endif
-
- DECLARE_BITMAP(used_vectors, NR_VECTORS);
-@@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
- }
-
- static void __kprobes
--do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
-+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
- long error_code, siginfo_t *info)
- {
- struct task_struct *tsk = current;
-
- #ifdef CONFIG_X86_32
-- if (regs->flags & X86_VM_MASK) {
-+ if (v8086_mode(regs)) {
- /*
- * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
- * On nmi (interrupt 2), do_trap should not be called.
-@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
- }
- #endif
-
-- if (!user_mode(regs))
-+ if (!user_mode_novm(regs))
- goto kernel_trap;
-
- #ifdef CONFIG_X86_32
-@@ -148,7 +142,7 @@ trap_signal:
- printk_ratelimit()) {
- printk(KERN_INFO
- "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
-- tsk->comm, tsk->pid, str,
-+ tsk->comm, task_pid_nr(tsk), str,
- regs->ip, regs->sp, error_code);
- print_vma_addr(" in ", regs->ip);
- printk("\n");
-@@ -165,8 +159,20 @@ kernel_trap:
- if (!fixup_exception(regs)) {
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = trapnr;
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
-+ str = "PAX: suspicious stack segment fault";
-+#endif
-+
- die(str, regs, error_code);
- }
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ if (trapnr == X86_TRAP_OF)
-+ pax_report_refcount_overflow(regs);
-+#endif
-+
- return;
-
- #ifdef CONFIG_X86_32
-@@ -254,6 +260,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = X86_TRAP_DF;
-
-+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
-+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
-+ die("grsec: kernel stack overflow detected", regs, error_code);
-+#endif
-+
- /*
- * This is always a kernel trap and never fixable (and thus must
- * never return).
-@@ -271,14 +282,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
- conditional_sti(regs);
-
- #ifdef CONFIG_X86_32
-- if (regs->flags & X86_VM_MASK)
-+ if (v8086_mode(regs))
- goto gp_in_vm86;
- #endif
-
- tsk = current;
-- if (!user_mode(regs))
-+ if (!user_mode_novm(regs))
- goto gp_in_kernel;
-
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
-+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
-+ struct mm_struct *mm = tsk->mm;
-+ unsigned long limit;
-+
-+ down_write(&mm->mmap_sem);
-+ limit = mm->context.user_cs_limit;
-+ if (limit < TASK_SIZE) {
-+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
-+ up_write(&mm->mmap_sem);
-+ return;
-+ }
-+ up_write(&mm->mmap_sem);
-+ }
-+#endif
-+
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = X86_TRAP_GP;
-
-@@ -311,6 +338,13 @@ gp_in_kernel:
- if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
- X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
- return;
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
-+ die("PAX: suspicious general protection fault", regs, error_code);
-+ else
-+#endif
-+
- die("general protection fault", regs, error_code);
- }
-
-@@ -383,13 +417,16 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
- container_of(task_pt_regs(current),
- struct bad_iret_stack, regs);
-
-+ if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE)
-+ new_stack = s;
-+
- /* Copy the IRET target to the new stack. */
- memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
-
- /* Copy the remainder of the stack from the current stack. */
- memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
-
-- BUG_ON(!user_mode_vm(&new_stack->regs));
-+ BUG_ON(!user_mode(&new_stack->regs));
- return new_stack;
- }
- #endif
-@@ -435,7 +472,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
- * then it's very likely the result of an icebp/int01 trap.
- * User wants a sigtrap for that.
- */
-- if (!dr6 && user_mode_vm(regs))
-+ if (!dr6 && user_mode(regs))
- user_icebp = 1;
-
- /* Catch kmemcheck conditions first of all! */
-@@ -460,7 +497,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
- /* It's safe to allow irq's after DR6 has been saved */
- preempt_conditional_sti(regs);
-
-- if (regs->flags & X86_VM_MASK) {
-+ if (v8086_mode(regs)) {
- handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
- X86_TRAP_DB);
- preempt_conditional_cli(regs);
-@@ -474,7 +511,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
- * We already checked v86 mode above, so we can check for kernel mode
- * by just checking the CPL of CS.
- */
-- if ((dr6 & DR_STEP) && !user_mode(regs)) {
-+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
- tsk->thread.debugreg6 &= ~DR_STEP;
- set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
- regs->flags &= ~X86_EFLAGS_TF;
-@@ -504,7 +541,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
- return;
- conditional_sti(regs);
-
-- if (!user_mode_vm(regs))
-+ if (!user_mode(regs))
- {
- if (!fixup_exception(regs)) {
- task->thread.error_code = error_code;
-@@ -617,8 +654,8 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
- void __math_state_restore(struct task_struct *tsk)
- {
- /* We need a safe address that is cheap to find and that is already
-- in L1. We've just brought in "tsk->thread.has_fpu", so use that */
--#define safe_address (tsk->thread.has_fpu)
-+ in L1. */
-+#define safe_address (init_tss[raw_smp_processor_id()].x86_tss.sp0)
-
- /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
- is pending. Clear the x87 state here by setting it to fixed
-diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
-index b9242ba..50c5edd 100644
---- a/arch/x86/kernel/verify_cpu.S
-+++ b/arch/x86/kernel/verify_cpu.S
-@@ -20,6 +20,7 @@
- * arch/x86/boot/compressed/head_64.S: Boot cpu verification
- * arch/x86/kernel/trampoline_64.S: secondary processor verification
- * arch/x86/kernel/head_32.S: processor startup
-+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
- *
- * verify_cpu, returns the status of longmode and SSE in register %eax.
- * 0: Success 1: Failure
-diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
-index 04b8726..0c35b29 100644
---- a/arch/x86/kernel/vm86_32.c
-+++ b/arch/x86/kernel/vm86_32.c
-@@ -41,6 +41,7 @@
- #include <linux/ptrace.h>
- #include <linux/audit.h>
- #include <linux/stddef.h>
-+#include <linux/grsecurity.h>
-
- #include <asm/uaccess.h>
- #include <asm/io.h>
-@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
- do_exit(SIGSEGV);
- }
-
-- tss = &per_cpu(init_tss, get_cpu());
-+ tss = init_tss + get_cpu();
- current->thread.sp0 = current->thread.saved_sp0;
- current->thread.sysenter_cs = __KERNEL_CS;
- load_sp0(tss, &current->thread);
-@@ -210,6 +211,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
- struct task_struct *tsk;
- int tmp, ret = -EPERM;
-
-+#ifdef CONFIG_GRKERNSEC_VM86
-+ if (!capable(CAP_SYS_RAWIO)) {
-+ gr_handle_vm86();
-+ goto out;
-+ }
-+#endif
-+
- tsk = current;
- if (tsk->thread.saved_sp0)
- goto out;
-@@ -240,6 +248,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
- int tmp, ret;
- struct vm86plus_struct __user *v86;
-
-+#ifdef CONFIG_GRKERNSEC_VM86
-+ if (!capable(CAP_SYS_RAWIO)) {
-+ gr_handle_vm86();
-+ ret = -EPERM;
-+ goto out;
-+ }
-+#endif
-+
- tsk = current;
- switch (cmd) {
- case VM86_REQUEST_IRQ:
-@@ -326,7 +342,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
- tsk->thread.saved_fs = info->regs32->fs;
- tsk->thread.saved_gs = get_user_gs(info->regs32);
-
-- tss = &per_cpu(init_tss, get_cpu());
-+ tss = init_tss + get_cpu();
- tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
- if (cpu_has_sep)
- tsk->thread.sysenter_cs = 0;
-@@ -531,7 +547,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
- goto cannot_handle;
- if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
- goto cannot_handle;
-- intr_ptr = (unsigned long __user *) (i << 2);
-+ intr_ptr = (__force unsigned long __user *) (i << 2);
- if (get_user(segoffs, intr_ptr))
- goto cannot_handle;
- if ((segoffs >> 16) == BIOSSEG)
-diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
-index 0f703f1..045a8f1 100644
---- a/arch/x86/kernel/vmlinux.lds.S
-+++ b/arch/x86/kernel/vmlinux.lds.S
-@@ -26,6 +26,13 @@
- #include <asm/page_types.h>
- #include <asm/cache.h>
- #include <asm/boot.h>
-+#include <asm/segment.h>
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
-+#else
-+#define __KERNEL_TEXT_OFFSET 0
-+#endif
-
- #undef i386 /* in case the preprocessor is a 32bit one */
-
-@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
-
- PHDRS {
- text PT_LOAD FLAGS(5); /* R_E */
-+#ifdef CONFIG_X86_32
-+ module PT_LOAD FLAGS(5); /* R_E */
-+#endif
-+#ifdef CONFIG_XEN
-+ rodata PT_LOAD FLAGS(5); /* R_E */
-+#else
-+ rodata PT_LOAD FLAGS(4); /* R__ */
-+#endif
- data PT_LOAD FLAGS(6); /* RW_ */
--#ifdef CONFIG_X86_64
-+ init.begin PT_LOAD FLAGS(6); /* RW_ */
- #ifdef CONFIG_SMP
- percpu PT_LOAD FLAGS(6); /* RW_ */
- #endif
-+ text.init PT_LOAD FLAGS(5); /* R_E */
-+ text.exit PT_LOAD FLAGS(5); /* R_E */
- init PT_LOAD FLAGS(7); /* RWE */
--#endif
- note PT_NOTE FLAGS(0); /* ___ */
- }
-
- SECTIONS
- {
- #ifdef CONFIG_X86_32
-- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
-- phys_startup_32 = startup_32 - LOAD_OFFSET;
-+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
- #else
-- . = __START_KERNEL;
-- phys_startup_64 = startup_64 - LOAD_OFFSET;
-+ . = __START_KERNEL;
- #endif
-
- /* Text and read-only data */
-- .text : AT(ADDR(.text) - LOAD_OFFSET) {
-- _text = .;
-+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
- /* bootstrapping code */
-+#ifdef CONFIG_X86_32
-+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
-+#else
-+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
-+#endif
-+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
-+ _text = .;
- HEAD_TEXT
- #ifdef CONFIG_X86_32
- . = ALIGN(PAGE_SIZE);
-@@ -108,13 +128,48 @@ SECTIONS
- IRQENTRY_TEXT
- *(.fixup)
- *(.gnu.warning)
-- /* End of text section */
-- _etext = .;
- } :text = 0x9090
-
-- NOTES :text :note
-+ . += __KERNEL_TEXT_OFFSET;
-
-- EXCEPTION_TABLE(16) :text = 0x9090
-+#ifdef CONFIG_X86_32
-+ . = ALIGN(PAGE_SIZE);
-+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ MODULES_EXEC_VADDR = .;
-+ BYTE(0)
-+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
-+ . = ALIGN(HPAGE_SIZE) - 1;
-+ MODULES_EXEC_END = .;
-+#endif
-+
-+ } :module
-+#endif
-+
-+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
-+ /* End of text section */
-+ BYTE(0)
-+ _etext = . - __KERNEL_TEXT_OFFSET;
-+ }
-+
-+#ifdef CONFIG_X86_32
-+ . = ALIGN(PAGE_SIZE);
-+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
-+ *(.idt)
-+ . = ALIGN(PAGE_SIZE);
-+ *(.empty_zero_page)
-+ *(.initial_pg_fixmap)
-+ *(.initial_pg_pmd)
-+ *(.initial_page_table)
-+ *(.swapper_pg_dir)
-+ } :rodata
-+#endif
-+
-+ . = ALIGN(PAGE_SIZE);
-+ NOTES :rodata :note
-+
-+ EXCEPTION_TABLE(16) :rodata
-
- #if defined(CONFIG_DEBUG_RODATA)
- /* .text should occupy whole number of pages */
-@@ -126,16 +181,20 @@ SECTIONS
-
- /* Data */
- .data : AT(ADDR(.data) - LOAD_OFFSET) {
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ . = ALIGN(HPAGE_SIZE);
-+#else
-+ . = ALIGN(PAGE_SIZE);
-+#endif
-+
- /* Start of data section */
- _sdata = .;
-
- /* init_task */
- INIT_TASK_DATA(THREAD_SIZE)
-
--#ifdef CONFIG_X86_32
-- /* 32 bit has nosave before _edata */
- NOSAVE_DATA
--#endif
-
- PAGE_ALIGNED_DATA(PAGE_SIZE)
-
-@@ -176,12 +235,19 @@ SECTIONS
- #endif /* CONFIG_X86_64 */
-
- /* Init code and data - will be freed after init */
-- . = ALIGN(PAGE_SIZE);
- .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
-+ BYTE(0)
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ . = ALIGN(HPAGE_SIZE);
-+#else
-+ . = ALIGN(PAGE_SIZE);
-+#endif
-+
- __init_begin = .; /* paired with __init_end */
-- }
-+ } :init.begin
-
--#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
-+#ifdef CONFIG_SMP
- /*
- * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
- * output PHDR, so the next output section - .init.text - should
-@@ -190,12 +256,27 @@ SECTIONS
- PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
- #endif
-
-- INIT_TEXT_SECTION(PAGE_SIZE)
--#ifdef CONFIG_X86_64
-- :init
--#endif
-+ . = ALIGN(PAGE_SIZE);
-+ init_begin = .;
-+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
-+ VMLINUX_SYMBOL(_sinittext) = .;
-+ INIT_TEXT
-+ . = ALIGN(PAGE_SIZE);
-+ } :text.init
-
-- INIT_DATA_SECTION(16)
-+ /*
-+ * .exit.text is discard at runtime, not link time, to deal with
-+ * references from .altinstructions and .eh_frame
-+ */
-+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
-+ EXIT_TEXT
-+ VMLINUX_SYMBOL(_einittext) = .;
-+ . = ALIGN(16);
-+ } :text.exit
-+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
-+
-+ . = ALIGN(PAGE_SIZE);
-+ INIT_DATA_SECTION(16) :init
-
- /*
- * Code and data for a variety of lowlevel trampolines, to be
-@@ -269,19 +350,12 @@ SECTIONS
- }
-
- . = ALIGN(8);
-- /*
-- * .exit.text is discard at runtime, not link time, to deal with
-- * references from .altinstructions and .eh_frame
-- */
-- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
-- EXIT_TEXT
-- }
-
- .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
- EXIT_DATA
- }
-
--#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
-+#ifndef CONFIG_SMP
- PERCPU_SECTION(INTERNODE_CACHE_BYTES)
- #endif
-
-@@ -300,16 +374,10 @@ SECTIONS
- .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
- __smp_locks = .;
- *(.smp_locks)
-- . = ALIGN(PAGE_SIZE);
- __smp_locks_end = .;
-+ . = ALIGN(PAGE_SIZE);
- }
-
--#ifdef CONFIG_X86_64
-- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
-- NOSAVE_DATA
-- }
--#endif
--
- /* BSS */
- . = ALIGN(PAGE_SIZE);
- .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
-@@ -325,6 +393,7 @@ SECTIONS
- __brk_base = .;
- . += 64 * 1024; /* 64k alignment slop space */
- *(.brk_reservation) /* areas brk users have reserved */
-+ . = ALIGN(HPAGE_SIZE);
- __brk_limit = .;
- }
-
-@@ -351,13 +420,12 @@ SECTIONS
- * for the boot processor.
- */
- #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
--INIT_PER_CPU(gdt_page);
- INIT_PER_CPU(irq_stack_union);
-
- /*
- * Build-time check on the image size:
- */
--. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
-+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
- "kernel image bigger than KERNEL_IMAGE_SIZE");
-
- #ifdef CONFIG_SMP
-diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
-index e4d4a22..47ee71f 100644
---- a/arch/x86/kernel/vsyscall_64.c
-+++ b/arch/x86/kernel/vsyscall_64.c
-@@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
- .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
- };
-
--static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
-+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
-
- static int __init vsyscall_setup(char *str)
- {
- if (str) {
- if (!strcmp("emulate", str))
- vsyscall_mode = EMULATE;
-- else if (!strcmp("native", str))
-- vsyscall_mode = NATIVE;
- else if (!strcmp("none", str))
- vsyscall_mode = NONE;
- else
-@@ -178,7 +176,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
-
- tsk = current;
- if (seccomp_mode(&tsk->seccomp))
-- do_exit(SIGKILL);
-+ do_group_exit(SIGKILL);
-
- switch (vsyscall_nr) {
- case 0:
-@@ -220,8 +218,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
- return true;
-
- sigsegv:
-- force_sig(SIGSEGV, current);
-- return true;
-+ do_group_exit(SIGKILL);
- }
-
- /*
-@@ -274,10 +271,7 @@ void __init map_vsyscall(void)
- extern char __vvar_page;
- unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
-
-- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
-- vsyscall_mode == NATIVE
-- ? PAGE_KERNEL_VSYSCALL
-- : PAGE_KERNEL_VVAR);
-+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
- BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
- (unsigned long)VSYSCALL_START);
-
-diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
-index 9796c2f..f686fbf 100644
---- a/arch/x86/kernel/x8664_ksyms_64.c
-+++ b/arch/x86/kernel/x8664_ksyms_64.c
-@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
- EXPORT_SYMBOL(copy_user_generic_string);
- EXPORT_SYMBOL(copy_user_generic_unrolled);
- EXPORT_SYMBOL(__copy_user_nocache);
--EXPORT_SYMBOL(_copy_from_user);
--EXPORT_SYMBOL(_copy_to_user);
-
- EXPORT_SYMBOL(copy_page);
- EXPORT_SYMBOL(clear_page);
-diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
-index c1d6cd5..204ac00 100644
---- a/arch/x86/kernel/x86_init.c
-+++ b/arch/x86/kernel/x86_init.c
-@@ -90,14 +90,14 @@ struct x86_init_ops x86_init __initdata = {
- },
- };
-
--struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
-+struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
- .setup_percpu_clockev = setup_secondary_APIC_clock,
- };
-
- static void default_nmi_init(void) { };
- static int default_i8042_detect(void) { return 1; };
-
--struct x86_platform_ops x86_platform = {
-+struct x86_platform_ops x86_platform __read_only = {
- .calibrate_tsc = native_calibrate_tsc,
- .wallclock_init = wallclock_init_noop,
- .get_wallclock = mach_get_cmos_time,
-@@ -110,7 +110,7 @@ struct x86_platform_ops x86_platform = {
- };
-
- EXPORT_SYMBOL_GPL(x86_platform);
--struct x86_msi_ops x86_msi = {
-+struct x86_msi_ops x86_msi __read_only = {
- .setup_msi_irqs = native_setup_msi_irqs,
- .teardown_msi_irq = native_teardown_msi_irq,
- .teardown_msi_irqs = default_teardown_msi_irqs,
-diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
-index 7110911..069da9c 100644
---- a/arch/x86/kernel/xsave.c
-+++ b/arch/x86/kernel/xsave.c
-@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
- fx_sw_user->xstate_size > fx_sw_user->extended_size)
- return -EINVAL;
-
-- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
-+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
- fx_sw_user->extended_size -
- FP_XSTATE_MAGIC2_SIZE));
- if (err)
-@@ -266,7 +266,7 @@ fx_only:
- * the other extended state.
- */
- xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
-- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
-+ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
- }
-
- /*
-@@ -295,8 +295,7 @@ int restore_i387_xstate(void __user *buf)
- if (use_xsave())
- err = restore_user_xstate(buf);
- else
-- err = fxrstor_checking((__force struct i387_fxsave_struct *)
-- buf);
-+ err = fxrstor_checking((struct i387_fxsave_struct __user *)buf);
- if (unlikely(err)) {
- /*
- * Encountered an error while doing the restore from the
-diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index 9af0b82..086874c 100644
---- a/arch/x86/kvm/emulate.c
-+++ b/arch/x86/kvm/emulate.c
-@@ -249,6 +249,7 @@ struct gprefix {
-
- #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
- do { \
-+ unsigned long _tmp; \
- __asm__ __volatile__ ( \
- _PRE_EFLAGS("0", "4", "2") \
- _op _suffix " %"_x"3,%1; " \
-@@ -263,8 +264,6 @@ struct gprefix {
- /* Raw emulation: instruction has two explicit operands. */
- #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
- do { \
-- unsigned long _tmp; \
-- \
- switch ((ctxt)->dst.bytes) { \
- case 2: \
- ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
-@@ -280,7 +279,6 @@ struct gprefix {
-
- #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
- do { \
-- unsigned long _tmp; \
- switch ((ctxt)->dst.bytes) { \
- case 1: \
- ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
-@@ -383,8 +381,7 @@ struct gprefix {
- _ASM_EXTABLE(1b, 3b) \
- : "=m" ((ctxt)->eflags), "=&r" (_tmp), \
- "+a" (*rax), "+d" (*rdx), "+qm"(_ex) \
-- : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val), \
-- "a" (*rax), "d" (*rdx)); \
-+ : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val)); \
- } while (0)
-
- /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
-@@ -3013,7 +3010,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
- int cr = ctxt->modrm_reg;
- u64 efer = 0;
-
-- static u64 cr_reserved_bits[] = {
-+ static const u64 cr_reserved_bits[] = {
- 0xffffffff00000000ULL,
- 0, 0, 0, /* CR3 checked later */
- CR4_RESERVED_BITS,
-@@ -3048,7 +3045,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
-
- ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
- if (efer & EFER_LMA)
-- rsvd = CR3_L_MODE_RESERVED_BITS;
-+ rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
- else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
- rsvd = CR3_PAE_RESERVED_BITS;
- else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
-diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
-index 176205a..920cd58 100644
---- a/arch/x86/kvm/lapic.c
-+++ b/arch/x86/kvm/lapic.c
-@@ -53,7 +53,7 @@
- #define APIC_BUS_CYCLE_NS 1
-
- /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
--#define apic_debug(fmt, arg...)
-+#define apic_debug(fmt, arg...) do {} while (0)
-
- #define APIC_LVT_NUM 6
- /* 14 is the version for Xeon and Pentium 8.4.8*/
-diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
-index bfc9507..bf85b38 100644
---- a/arch/x86/kvm/mmu.c
-+++ b/arch/x86/kvm/mmu.c
-@@ -3558,7 +3558,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-
- pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
-
-- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
-+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
-
- /*
- * Assume that the pte write on a page table of the same type
-@@ -3590,7 +3590,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- }
-
- spin_lock(&vcpu->kvm->mmu_lock);
-- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
-+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
- gentry = 0;
- kvm_mmu_free_some_pages(vcpu);
- ++vcpu->kvm->stat.mmu_pte_write;
-diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
-index 9299410..ade2f9b 100644
---- a/arch/x86/kvm/paging_tmpl.h
-+++ b/arch/x86/kvm/paging_tmpl.h
-@@ -197,7 +197,7 @@ retry_walk:
- if (unlikely(kvm_is_error_hva(host_addr)))
- goto error;
-
-- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
-+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
- if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
- goto error;
-
-@@ -705,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
- if (need_flush)
- kvm_flush_remote_tlbs(vcpu->kvm);
-
-- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
-+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
-
- spin_unlock(&vcpu->kvm->mmu_lock);
-
-diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
-index 7a2d9d6..0e8286c 100644
---- a/arch/x86/kvm/svm.c
-+++ b/arch/x86/kvm/svm.c
-@@ -3403,7 +3403,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
- int cpu = raw_smp_processor_id();
-
- struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
-+
-+ pax_open_kernel();
- sd->tss_desc->type = 9; /* available 32/64-bit TSS */
-+ pax_close_kernel();
-+
- load_TR_desc();
- }
-
-@@ -3783,6 +3787,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
- #endif
- #endif
-
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ __set_fs(current_thread_info()->addr_limit);
-+#endif
-+
- reload_tss(vcpu);
-
- local_irq_disable();
-diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index 8831c43..98f1a3e 100644
---- a/arch/x86/kvm/vmx.c
-+++ b/arch/x86/kvm/vmx.c
-@@ -1100,12 +1100,12 @@ static void vmcs_write64(unsigned long field, u64 value)
- #endif
- }
-
--static void vmcs_clear_bits(unsigned long field, u32 mask)
-+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
- {
- vmcs_writel(field, vmcs_readl(field) & ~mask);
- }
-
--static void vmcs_set_bits(unsigned long field, u32 mask)
-+static void vmcs_set_bits(unsigned long field, unsigned long mask)
- {
- vmcs_writel(field, vmcs_readl(field) | mask);
- }
-@@ -1306,7 +1306,11 @@ static void reload_tss(void)
- struct desc_struct *descs;
-
- descs = (void *)gdt->address;
-+
-+ pax_open_kernel();
- descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
-+ pax_close_kernel();
-+
- load_TR_desc();
- }
-
-@@ -1505,6 +1509,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
- vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
- vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
-
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
-+#endif
-+
- rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
- vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
- vmx->loaded_vmcs->cpu = cpu;
-@@ -2635,8 +2643,11 @@ static __init int hardware_setup(void)
- if (!cpu_has_vmx_flexpriority())
- flexpriority_enabled = 0;
-
-- if (!cpu_has_vmx_tpr_shadow())
-- kvm_x86_ops->update_cr8_intercept = NULL;
-+ if (!cpu_has_vmx_tpr_shadow()) {
-+ pax_open_kernel();
-+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
-+ pax_close_kernel();
-+ }
-
- if (enable_ept && !cpu_has_vmx_ept_2m_page())
- kvm_disable_largepages();
-@@ -3638,7 +3649,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
- unsigned long cr4;
-
- vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */
-+
-+#ifndef CONFIG_PAX_PER_CPU_PGD
- vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
-+#endif
-
- /* Save the most likely value for this task's CR4 in the VMCS. */
- cr4 = read_cr4();
-@@ -3655,7 +3669,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
- vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
-
- asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
-- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
-+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
-
- rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
- vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
-@@ -6206,6 +6220,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
- "jmp .Lkvm_vmx_return \n\t"
- ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
- ".Lkvm_vmx_return: "
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
-+ ".Lkvm_vmx_return2: "
-+#endif
-+
- /* Save guest registers, load host registers, keep flags */
- "mov %0, %c[wordsize](%%"R"sp) \n\t"
- "pop %0 \n\t"
-@@ -6254,6 +6274,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
- #endif
- [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
- [wordsize]"i"(sizeof(ulong))
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ ,[cs]"i"(__KERNEL_CS)
-+#endif
-+
- : "cc", "memory"
- , R"ax", R"bx", R"di", R"si"
- #ifdef CONFIG_X86_64
-@@ -6282,7 +6307,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
- }
- }
-
-- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
-+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ loadsegment(fs, __KERNEL_PERCPU);
-+#endif
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ __set_fs(current_thread_info()->addr_limit);
-+#endif
-+
- vmx->loaded_vmcs->launched = 1;
-
- vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index bb179cc..2de279c0 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -668,6 +668,8 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);
-
- int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
- {
-+ cr3 &= ~CR3_PCID_INVD;
-+
- if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
- kvm_mmu_sync_roots(vcpu);
- kvm_mmu_flush_tlb(vcpu);
-@@ -1369,8 +1371,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
- {
- struct kvm *kvm = vcpu->kvm;
- int lm = is_long_mode(vcpu);
-- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
-- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
-+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
-+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
- u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
- : kvm->arch.xen_hvm_config.blob_size_32;
- u32 page_num = data & ~PAGE_MASK;
-@@ -2187,6 +2189,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
- if (n < msr_list.nmsrs)
- goto out;
- r = -EFAULT;
-+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
-+ goto out;
- if (copy_to_user(user_msr_list->indices, &msrs_to_save,
- num_msrs_to_save * sizeof(u32)))
- goto out;
-@@ -2362,15 +2366,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
- struct kvm_cpuid2 *cpuid,
- struct kvm_cpuid_entry2 __user *entries)
- {
-- int r;
-+ int r, i;
-
- r = -E2BIG;
- if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
- goto out;
- r = -EFAULT;
-- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
-- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
-+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
- goto out;
-+ for (i = 0; i < cpuid->nent; ++i) {
-+ struct kvm_cpuid_entry2 cpuid_entry;
-+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
-+ goto out;
-+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
-+ }
- vcpu->arch.cpuid_nent = cpuid->nent;
- kvm_apic_set_version(vcpu);
- kvm_x86_ops->cpuid_update(vcpu);
-@@ -2385,15 +2394,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
- struct kvm_cpuid2 *cpuid,
- struct kvm_cpuid_entry2 __user *entries)
- {
-- int r;
-+ int r, i;
-
- r = -E2BIG;
- if (cpuid->nent < vcpu->arch.cpuid_nent)
- goto out;
- r = -EFAULT;
-- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
-- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
-+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
- goto out;
-+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
-+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
-+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
-+ goto out;
-+ }
- return 0;
-
- out:
-@@ -2768,7 +2781,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
- static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
- struct kvm_interrupt *irq)
- {
-- if (irq->irq < 0 || irq->irq >= 256)
-+ if (irq->irq >= 256)
- return -EINVAL;
- if (irqchip_in_kernel(vcpu->kvm))
- return -ENXIO;
-@@ -5209,7 +5222,7 @@ static void kvm_set_mmio_spte_mask(void)
- kvm_mmu_set_mmio_spte_mask(mask);
- }
-
--int kvm_arch_init(void *opaque)
-+int kvm_arch_init(const void *opaque)
- {
- int r;
- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
-diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
-index 8f4fda4..353d5cc 100644
---- a/arch/x86/lguest/boot.c
-+++ b/arch/x86/lguest/boot.c
-@@ -1195,9 +1195,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
- * Rebooting also tells the Host we're finished, but the RESTART flag tells the
- * Launcher to reboot us.
- */
--static void lguest_restart(char *reason)
-+static __noreturn void lguest_restart(char *reason)
- {
- hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
-+ BUG();
- }
-
- /*G:050
-diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
-index 042f682..c92afb6 100644
---- a/arch/x86/lib/atomic64_32.c
-+++ b/arch/x86/lib/atomic64_32.c
-@@ -8,18 +8,30 @@
-
- long long atomic64_read_cx8(long long, const atomic64_t *v);
- EXPORT_SYMBOL(atomic64_read_cx8);
-+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
-+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
- long long atomic64_set_cx8(long long, const atomic64_t *v);
- EXPORT_SYMBOL(atomic64_set_cx8);
-+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
-+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
- long long atomic64_xchg_cx8(long long, unsigned high);
- EXPORT_SYMBOL(atomic64_xchg_cx8);
- long long atomic64_add_return_cx8(long long a, atomic64_t *v);
- EXPORT_SYMBOL(atomic64_add_return_cx8);
-+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
-+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
- long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
- EXPORT_SYMBOL(atomic64_sub_return_cx8);
-+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
-+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
- long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
- EXPORT_SYMBOL(atomic64_inc_return_cx8);
-+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
-+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
- long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
- EXPORT_SYMBOL(atomic64_dec_return_cx8);
-+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
-+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
- long long atomic64_dec_if_positive_cx8(atomic64_t *v);
- EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
- int atomic64_inc_not_zero_cx8(atomic64_t *v);
-@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
- #ifndef CONFIG_X86_CMPXCHG64
- long long atomic64_read_386(long long, const atomic64_t *v);
- EXPORT_SYMBOL(atomic64_read_386);
-+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
-+EXPORT_SYMBOL(atomic64_read_unchecked_386);
- long long atomic64_set_386(long long, const atomic64_t *v);
- EXPORT_SYMBOL(atomic64_set_386);
-+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
-+EXPORT_SYMBOL(atomic64_set_unchecked_386);
- long long atomic64_xchg_386(long long, unsigned high);
- EXPORT_SYMBOL(atomic64_xchg_386);
- long long atomic64_add_return_386(long long a, atomic64_t *v);
- EXPORT_SYMBOL(atomic64_add_return_386);
-+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
-+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
- long long atomic64_sub_return_386(long long a, atomic64_t *v);
- EXPORT_SYMBOL(atomic64_sub_return_386);
-+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
-+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
- long long atomic64_inc_return_386(long long a, atomic64_t *v);
- EXPORT_SYMBOL(atomic64_inc_return_386);
-+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
-+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
- long long atomic64_dec_return_386(long long a, atomic64_t *v);
- EXPORT_SYMBOL(atomic64_dec_return_386);
-+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
-+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
- long long atomic64_add_386(long long a, atomic64_t *v);
- EXPORT_SYMBOL(atomic64_add_386);
-+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
-+EXPORT_SYMBOL(atomic64_add_unchecked_386);
- long long atomic64_sub_386(long long a, atomic64_t *v);
- EXPORT_SYMBOL(atomic64_sub_386);
-+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
-+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
- long long atomic64_inc_386(long long a, atomic64_t *v);
- EXPORT_SYMBOL(atomic64_inc_386);
-+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
-+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
- long long atomic64_dec_386(long long a, atomic64_t *v);
- EXPORT_SYMBOL(atomic64_dec_386);
-+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
-+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
- long long atomic64_dec_if_positive_386(atomic64_t *v);
- EXPORT_SYMBOL(atomic64_dec_if_positive_386);
- int atomic64_inc_not_zero_386(atomic64_t *v);
-diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
-index e8e7e0d..56fd1b0 100644
---- a/arch/x86/lib/atomic64_386_32.S
-+++ b/arch/x86/lib/atomic64_386_32.S
-@@ -48,6 +48,10 @@ BEGIN(read)
- movl (v), %eax
- movl 4(v), %edx
- RET_ENDP
-+BEGIN(read_unchecked)
-+ movl (v), %eax
-+ movl 4(v), %edx
-+RET_ENDP
- #undef v
-
- #define v %esi
-@@ -55,6 +59,10 @@ BEGIN(set)
- movl %ebx, (v)
- movl %ecx, 4(v)
- RET_ENDP
-+BEGIN(set_unchecked)
-+ movl %ebx, (v)
-+ movl %ecx, 4(v)
-+RET_ENDP
- #undef v
-
- #define v %esi
-@@ -70,6 +78,20 @@ RET_ENDP
- BEGIN(add)
- addl %eax, (v)
- adcl %edx, 4(v)
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ jno 0f
-+ subl %eax, (v)
-+ sbbl %edx, 4(v)
-+ int $4
-+0:
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+RET_ENDP
-+BEGIN(add_unchecked)
-+ addl %eax, (v)
-+ adcl %edx, 4(v)
- RET_ENDP
- #undef v
-
-@@ -77,6 +99,24 @@ RET_ENDP
- BEGIN(add_return)
- addl (v), %eax
- adcl 4(v), %edx
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ into
-+1234:
-+ _ASM_EXTABLE(1234b, 2f)
-+#endif
-+
-+ movl %eax, (v)
-+ movl %edx, 4(v)
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+2:
-+#endif
-+
-+RET_ENDP
-+BEGIN(add_return_unchecked)
-+ addl (v), %eax
-+ adcl 4(v), %edx
- movl %eax, (v)
- movl %edx, 4(v)
- RET_ENDP
-@@ -86,6 +126,20 @@ RET_ENDP
- BEGIN(sub)
- subl %eax, (v)
- sbbl %edx, 4(v)
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ jno 0f
-+ addl %eax, (v)
-+ adcl %edx, 4(v)
-+ int $4
-+0:
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+RET_ENDP
-+BEGIN(sub_unchecked)
-+ subl %eax, (v)
-+ sbbl %edx, 4(v)
- RET_ENDP
- #undef v
-
-@@ -96,6 +150,27 @@ BEGIN(sub_return)
- sbbl $0, %edx
- addl (v), %eax
- adcl 4(v), %edx
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ into
-+1234:
-+ _ASM_EXTABLE(1234b, 2f)
-+#endif
-+
-+ movl %eax, (v)
-+ movl %edx, 4(v)
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+2:
-+#endif
-+
-+RET_ENDP
-+BEGIN(sub_return_unchecked)
-+ negl %edx
-+ negl %eax
-+ sbbl $0, %edx
-+ addl (v), %eax
-+ adcl 4(v), %edx
- movl %eax, (v)
- movl %edx, 4(v)
- RET_ENDP
-@@ -105,6 +180,20 @@ RET_ENDP
- BEGIN(inc)
- addl $1, (v)
- adcl $0, 4(v)
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ jno 0f
-+ subl $1, (v)
-+ sbbl $0, 4(v)
-+ int $4
-+0:
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+RET_ENDP
-+BEGIN(inc_unchecked)
-+ addl $1, (v)
-+ adcl $0, 4(v)
- RET_ENDP
- #undef v
-
-@@ -114,6 +203,26 @@ BEGIN(inc_return)
- movl 4(v), %edx
- addl $1, %eax
- adcl $0, %edx
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ into
-+1234:
-+ _ASM_EXTABLE(1234b, 2f)
-+#endif
-+
-+ movl %eax, (v)
-+ movl %edx, 4(v)
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+2:
-+#endif
-+
-+RET_ENDP
-+BEGIN(inc_return_unchecked)
-+ movl (v), %eax
-+ movl 4(v), %edx
-+ addl $1, %eax
-+ adcl $0, %edx
- movl %eax, (v)
- movl %edx, 4(v)
- RET_ENDP
-@@ -123,6 +232,20 @@ RET_ENDP
- BEGIN(dec)
- subl $1, (v)
- sbbl $0, 4(v)
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ jno 0f
-+ addl $1, (v)
-+ adcl $0, 4(v)
-+ int $4
-+0:
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+RET_ENDP
-+BEGIN(dec_unchecked)
-+ subl $1, (v)
-+ sbbl $0, 4(v)
- RET_ENDP
- #undef v
-
-@@ -132,6 +255,26 @@ BEGIN(dec_return)
- movl 4(v), %edx
- subl $1, %eax
- sbbl $0, %edx
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ into
-+1234:
-+ _ASM_EXTABLE(1234b, 2f)
-+#endif
-+
-+ movl %eax, (v)
-+ movl %edx, 4(v)
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+2:
-+#endif
-+
-+RET_ENDP
-+BEGIN(dec_return_unchecked)
-+ movl (v), %eax
-+ movl 4(v), %edx
-+ subl $1, %eax
-+ sbbl $0, %edx
- movl %eax, (v)
- movl %edx, 4(v)
- RET_ENDP
-@@ -143,6 +286,13 @@ BEGIN(add_unless)
- adcl %edx, %edi
- addl (v), %eax
- adcl 4(v), %edx
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ into
-+1234:
-+ _ASM_EXTABLE(1234b, 2f)
-+#endif
-+
- cmpl %eax, %esi
- je 3f
- 1:
-@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
- 1:
- addl $1, %eax
- adcl $0, %edx
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ into
-+1234:
-+ _ASM_EXTABLE(1234b, 2f)
-+#endif
-+
- movl %eax, (v)
- movl %edx, 4(v)
- movl $1, %eax
-@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
- movl 4(v), %edx
- subl $1, %eax
- sbbl $0, %edx
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ into
-+1234:
-+ _ASM_EXTABLE(1234b, 1f)
-+#endif
-+
- js 1f
- movl %eax, (v)
- movl %edx, 4(v)
-diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
-index 391a083..3a2cf39 100644
---- a/arch/x86/lib/atomic64_cx8_32.S
-+++ b/arch/x86/lib/atomic64_cx8_32.S
-@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
- CFI_STARTPROC
-
- read64 %ecx
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- ENDPROC(atomic64_read_cx8)
-
-+ENTRY(atomic64_read_unchecked_cx8)
-+ CFI_STARTPROC
-+
-+ read64 %ecx
-+ pax_force_retaddr
-+ ret
-+ CFI_ENDPROC
-+ENDPROC(atomic64_read_unchecked_cx8)
-+
- ENTRY(atomic64_set_cx8)
- CFI_STARTPROC
-
-@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
- cmpxchg8b (%esi)
- jne 1b
-
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- ENDPROC(atomic64_set_cx8)
-
-+ENTRY(atomic64_set_unchecked_cx8)
-+ CFI_STARTPROC
-+
-+1:
-+/* we don't need LOCK_PREFIX since aligned 64-bit writes
-+ * are atomic on 586 and newer */
-+ cmpxchg8b (%esi)
-+ jne 1b
-+
-+ pax_force_retaddr
-+ ret
-+ CFI_ENDPROC
-+ENDPROC(atomic64_set_unchecked_cx8)
-+
- ENTRY(atomic64_xchg_cx8)
- CFI_STARTPROC
-
-@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
- cmpxchg8b (%esi)
- jne 1b
-
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- ENDPROC(atomic64_xchg_cx8)
-
--.macro addsub_return func ins insc
--ENTRY(atomic64_\func\()_return_cx8)
-+.macro addsub_return func ins insc unchecked=""
-+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
- CFI_STARTPROC
- SAVE ebp
- SAVE ebx
-@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
- movl %edx, %ecx
- \ins\()l %esi, %ebx
- \insc\()l %edi, %ecx
-+
-+.ifb \unchecked
-+#ifdef CONFIG_PAX_REFCOUNT
-+ into
-+2:
-+ _ASM_EXTABLE(2b, 3f)
-+#endif
-+.endif
-+
- LOCK_PREFIX
- cmpxchg8b (%ebp)
- jne 1b
--
--10:
- movl %ebx, %eax
- movl %ecx, %edx
-+
-+.ifb \unchecked
-+#ifdef CONFIG_PAX_REFCOUNT
-+3:
-+#endif
-+.endif
-+
- RESTORE edi
- RESTORE esi
- RESTORE ebx
- RESTORE ebp
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
--ENDPROC(atomic64_\func\()_return_cx8)
-+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
- .endm
-
- addsub_return add add adc
- addsub_return sub sub sbb
-+addsub_return add add adc _unchecked
-+addsub_return sub sub sbb _unchecked
-
--.macro incdec_return func ins insc
--ENTRY(atomic64_\func\()_return_cx8)
-+.macro incdec_return func ins insc unchecked=""
-+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
- CFI_STARTPROC
- SAVE ebx
-
-@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
- movl %edx, %ecx
- \ins\()l $1, %ebx
- \insc\()l $0, %ecx
-+
-+.ifb \unchecked
-+#ifdef CONFIG_PAX_REFCOUNT
-+ into
-+2:
-+ _ASM_EXTABLE(2b, 3f)
-+#endif
-+.endif
-+
- LOCK_PREFIX
- cmpxchg8b (%esi)
- jne 1b
-
--10:
- movl %ebx, %eax
- movl %ecx, %edx
-+
-+.ifb \unchecked
-+#ifdef CONFIG_PAX_REFCOUNT
-+3:
-+#endif
-+.endif
-+
- RESTORE ebx
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
--ENDPROC(atomic64_\func\()_return_cx8)
-+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
- .endm
-
- incdec_return inc add adc
- incdec_return dec sub sbb
-+incdec_return inc add adc _unchecked
-+incdec_return dec sub sbb _unchecked
-
- ENTRY(atomic64_dec_if_positive_cx8)
- CFI_STARTPROC
-@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
- movl %edx, %ecx
- subl $1, %ebx
- sbb $0, %ecx
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ into
-+1234:
-+ _ASM_EXTABLE(1234b, 2f)
-+#endif
-+
- js 2f
- LOCK_PREFIX
- cmpxchg8b (%esi)
-@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
- movl %ebx, %eax
- movl %ecx, %edx
- RESTORE ebx
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- ENDPROC(atomic64_dec_if_positive_cx8)
-@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
- movl %edx, %ecx
- addl %esi, %ebx
- adcl %edi, %ecx
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ into
-+1234:
-+ _ASM_EXTABLE(1234b, 3f)
-+#endif
-+
- LOCK_PREFIX
- cmpxchg8b (%ebp)
- jne 1b
-@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
- CFI_ADJUST_CFA_OFFSET -8
- RESTORE ebx
- RESTORE ebp
-+ pax_force_retaddr
- ret
- 4:
- cmpl %edx, 4(%esp)
-@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
- movl %edx, %ecx
- addl $1, %ebx
- adcl $0, %ecx
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ into
-+1234:
-+ _ASM_EXTABLE(1234b, 3f)
-+#endif
-+
- LOCK_PREFIX
- cmpxchg8b (%esi)
- jne 1b
-@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
- movl $1, %eax
- 3:
- RESTORE ebx
-+ pax_force_retaddr
- ret
- 4:
- testl %edx, %edx
-diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
-index 78d16a5..fbcf666 100644
---- a/arch/x86/lib/checksum_32.S
-+++ b/arch/x86/lib/checksum_32.S
-@@ -28,7 +28,8 @@
- #include <linux/linkage.h>
- #include <asm/dwarf2.h>
- #include <asm/errno.h>
--
-+#include <asm/segment.h>
-+
- /*
- * computes a partial checksum, e.g. for TCP/UDP fragments
- */
-@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
-
- #define ARGBASE 16
- #define FP 12
--
--ENTRY(csum_partial_copy_generic)
-+
-+ENTRY(csum_partial_copy_generic_to_user)
- CFI_STARTPROC
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ pushl_cfi %gs
-+ popl_cfi %es
-+ jmp csum_partial_copy_generic
-+#endif
-+
-+ENTRY(csum_partial_copy_generic_from_user)
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ pushl_cfi %gs
-+ popl_cfi %ds
-+#endif
-+
-+ENTRY(csum_partial_copy_generic)
- subl $4,%esp
- CFI_ADJUST_CFA_OFFSET 4
- pushl_cfi %edi
-@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
- jmp 4f
- SRC(1: movw (%esi), %bx )
- addl $2, %esi
--DST( movw %bx, (%edi) )
-+DST( movw %bx, %es:(%edi) )
- addl $2, %edi
- addw %bx, %ax
- adcl $0, %eax
-@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
- SRC(1: movl (%esi), %ebx )
- SRC( movl 4(%esi), %edx )
- adcl %ebx, %eax
--DST( movl %ebx, (%edi) )
-+DST( movl %ebx, %es:(%edi) )
- adcl %edx, %eax
--DST( movl %edx, 4(%edi) )
-+DST( movl %edx, %es:4(%edi) )
-
- SRC( movl 8(%esi), %ebx )
- SRC( movl 12(%esi), %edx )
- adcl %ebx, %eax
--DST( movl %ebx, 8(%edi) )
-+DST( movl %ebx, %es:8(%edi) )
- adcl %edx, %eax
--DST( movl %edx, 12(%edi) )
-+DST( movl %edx, %es:12(%edi) )
-
- SRC( movl 16(%esi), %ebx )
- SRC( movl 20(%esi), %edx )
- adcl %ebx, %eax
--DST( movl %ebx, 16(%edi) )
-+DST( movl %ebx, %es:16(%edi) )
- adcl %edx, %eax
--DST( movl %edx, 20(%edi) )
-+DST( movl %edx, %es:20(%edi) )
-
- SRC( movl 24(%esi), %ebx )
- SRC( movl 28(%esi), %edx )
- adcl %ebx, %eax
--DST( movl %ebx, 24(%edi) )
-+DST( movl %ebx, %es:24(%edi) )
- adcl %edx, %eax
--DST( movl %edx, 28(%edi) )
-+DST( movl %edx, %es:28(%edi) )
-
- lea 32(%esi), %esi
- lea 32(%edi), %edi
-@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
- shrl $2, %edx # This clears CF
- SRC(3: movl (%esi), %ebx )
- adcl %ebx, %eax
--DST( movl %ebx, (%edi) )
-+DST( movl %ebx, %es:(%edi) )
- lea 4(%esi), %esi
- lea 4(%edi), %edi
- dec %edx
-@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
- jb 5f
- SRC( movw (%esi), %cx )
- leal 2(%esi), %esi
--DST( movw %cx, (%edi) )
-+DST( movw %cx, %es:(%edi) )
- leal 2(%edi), %edi
- je 6f
- shll $16,%ecx
- SRC(5: movb (%esi), %cl )
--DST( movb %cl, (%edi) )
-+DST( movb %cl, %es:(%edi) )
- 6: addl %ecx, %eax
- adcl $0, %eax
- 7:
-@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
-
- 6001:
- movl ARGBASE+20(%esp), %ebx # src_err_ptr
-- movl $-EFAULT, (%ebx)
-+ movl $-EFAULT, %ss:(%ebx)
-
- # zero the complete destination - computing the rest
- # is too much work
-@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
-
- 6002:
- movl ARGBASE+24(%esp), %ebx # dst_err_ptr
-- movl $-EFAULT,(%ebx)
-+ movl $-EFAULT,%ss:(%ebx)
- jmp 5000b
-
- .previous
-
-+ pushl_cfi %ss
-+ popl_cfi %ds
-+ pushl_cfi %ss
-+ popl_cfi %es
- popl_cfi %ebx
- CFI_RESTORE ebx
- popl_cfi %esi
-@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
- popl_cfi %ecx # equivalent to addl $4,%esp
- ret
- CFI_ENDPROC
--ENDPROC(csum_partial_copy_generic)
-+ENDPROC(csum_partial_copy_generic_to_user)
-
- #else
-
- /* Version for PentiumII/PPro */
-
- #define ROUND1(x) \
-+ nop; nop; nop; \
- SRC(movl x(%esi), %ebx ) ; \
- addl %ebx, %eax ; \
-- DST(movl %ebx, x(%edi) ) ;
-+ DST(movl %ebx, %es:x(%edi)) ;
-
- #define ROUND(x) \
-+ nop; nop; nop; \
- SRC(movl x(%esi), %ebx ) ; \
- adcl %ebx, %eax ; \
-- DST(movl %ebx, x(%edi) ) ;
-+ DST(movl %ebx, %es:x(%edi)) ;
-
- #define ARGBASE 12
--
--ENTRY(csum_partial_copy_generic)
-+
-+ENTRY(csum_partial_copy_generic_to_user)
- CFI_STARTPROC
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ pushl_cfi %gs
-+ popl_cfi %es
-+ jmp csum_partial_copy_generic
-+#endif
-+
-+ENTRY(csum_partial_copy_generic_from_user)
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ pushl_cfi %gs
-+ popl_cfi %ds
-+#endif
-+
-+ENTRY(csum_partial_copy_generic)
- pushl_cfi %ebx
- CFI_REL_OFFSET ebx, 0
- pushl_cfi %edi
-@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
- subl %ebx, %edi
- lea -1(%esi),%edx
- andl $-32,%edx
-- lea 3f(%ebx,%ebx), %ebx
-+ lea 3f(%ebx,%ebx,2), %ebx
- testl %esi, %esi
- jmp *%ebx
- 1: addl $64,%esi
-@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
- jb 5f
- SRC( movw (%esi), %dx )
- leal 2(%esi), %esi
--DST( movw %dx, (%edi) )
-+DST( movw %dx, %es:(%edi) )
- leal 2(%edi), %edi
- je 6f
- shll $16,%edx
- 5:
- SRC( movb (%esi), %dl )
--DST( movb %dl, (%edi) )
-+DST( movb %dl, %es:(%edi) )
- 6: addl %edx, %eax
- adcl $0, %eax
- 7:
- .section .fixup, "ax"
- 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
-- movl $-EFAULT, (%ebx)
-+ movl $-EFAULT, %ss:(%ebx)
- # zero the complete destination (computing the rest is too much work)
- movl ARGBASE+8(%esp),%edi # dst
- movl ARGBASE+12(%esp),%ecx # len
-@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
- rep; stosb
- jmp 7b
- 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
-- movl $-EFAULT, (%ebx)
-+ movl $-EFAULT, %ss:(%ebx)
- jmp 7b
- .previous
-
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ pushl_cfi %ss
-+ popl_cfi %ds
-+ pushl_cfi %ss
-+ popl_cfi %es
-+#endif
-+
- popl_cfi %esi
- CFI_RESTORE esi
- popl_cfi %edi
-@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
- CFI_RESTORE ebx
- ret
- CFI_ENDPROC
--ENDPROC(csum_partial_copy_generic)
-+ENDPROC(csum_partial_copy_generic_to_user)
-
- #undef ROUND
- #undef ROUND1
-diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
-index f2145cf..cea889d 100644
---- a/arch/x86/lib/clear_page_64.S
-+++ b/arch/x86/lib/clear_page_64.S
-@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
- movl $4096/8,%ecx
- xorl %eax,%eax
- rep stosq
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- ENDPROC(clear_page_c)
-@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
- movl $4096,%ecx
- xorl %eax,%eax
- rep stosb
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- ENDPROC(clear_page_c_e)
-@@ -43,6 +45,7 @@ ENTRY(clear_page)
- leaq 64(%rdi),%rdi
- jnz .Lloop
- nop
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- .Lclear_page_end:
-@@ -58,7 +61,7 @@ ENDPROC(clear_page)
-
- #include <asm/cpufeature.h>
-
-- .section .altinstr_replacement,"ax"
-+ .section .altinstr_replacement,"a"
- 1: .byte 0xeb /* jmp <disp8> */
- .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
- 2: .byte 0xeb /* jmp <disp8> */
-diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
-index 1e572c5..2a162cd 100644
---- a/arch/x86/lib/cmpxchg16b_emu.S
-+++ b/arch/x86/lib/cmpxchg16b_emu.S
-@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
-
- popf
- mov $1, %al
-+ pax_force_retaddr
- ret
-
- not_same:
- popf
- xor %al,%al
-+ pax_force_retaddr
- ret
-
- CFI_ENDPROC
-diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
-index 01c805b..16da7cf 100644
---- a/arch/x86/lib/copy_page_64.S
-+++ b/arch/x86/lib/copy_page_64.S
-@@ -9,6 +9,7 @@ copy_page_c:
- CFI_STARTPROC
- movl $4096/8,%ecx
- rep movsq
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- ENDPROC(copy_page_c)
-@@ -24,7 +25,7 @@ ENTRY(copy_page)
- CFI_ADJUST_CFA_OFFSET 3*8
- movq %rbx,(%rsp)
- CFI_REL_OFFSET rbx, 0
-- movq %r12,1*8(%rsp)
-+ movq %r14,1*8(%rsp)
- CFI_REL_OFFSET r12, 1*8
- movq %r13,2*8(%rsp)
- CFI_REL_OFFSET r13, 2*8
-@@ -41,7 +42,7 @@ ENTRY(copy_page)
- movq 32 (%rsi), %r9
- movq 40 (%rsi), %r10
- movq 48 (%rsi), %r11
-- movq 56 (%rsi), %r12
-+ movq 56 (%rsi), %r14
-
- prefetcht0 5*64(%rsi)
-
-@@ -52,7 +53,7 @@ ENTRY(copy_page)
- movq %r9, 32 (%rdi)
- movq %r10, 40 (%rdi)
- movq %r11, 48 (%rdi)
-- movq %r12, 56 (%rdi)
-+ movq %r14, 56 (%rdi)
-
- leaq 64 (%rsi), %rsi
- leaq 64 (%rdi), %rdi
-@@ -71,7 +72,7 @@ ENTRY(copy_page)
- movq 32 (%rsi), %r9
- movq 40 (%rsi), %r10
- movq 48 (%rsi), %r11
-- movq 56 (%rsi), %r12
-+ movq 56 (%rsi), %r14
-
- movq %rax, (%rdi)
- movq %rbx, 8 (%rdi)
-@@ -80,7 +81,7 @@ ENTRY(copy_page)
- movq %r9, 32 (%rdi)
- movq %r10, 40 (%rdi)
- movq %r11, 48 (%rdi)
-- movq %r12, 56 (%rdi)
-+ movq %r14, 56 (%rdi)
-
- leaq 64(%rdi),%rdi
- leaq 64(%rsi),%rsi
-@@ -89,12 +90,13 @@ ENTRY(copy_page)
-
- movq (%rsp),%rbx
- CFI_RESTORE rbx
-- movq 1*8(%rsp),%r12
-+ movq 1*8(%rsp),%r14
- CFI_RESTORE r12
- movq 2*8(%rsp),%r13
- CFI_RESTORE r13
- addq $3*8,%rsp
- CFI_ADJUST_CFA_OFFSET -3*8
-+ pax_force_retaddr
- ret
- .Lcopy_page_end:
- CFI_ENDPROC
-@@ -105,7 +107,7 @@ ENDPROC(copy_page)
-
- #include <asm/cpufeature.h>
-
-- .section .altinstr_replacement,"ax"
-+ .section .altinstr_replacement,"a"
- 1: .byte 0xeb /* jmp <disp8> */
- .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
- 2:
-diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
-index 0248402..416b737 100644
---- a/arch/x86/lib/copy_user_64.S
-+++ b/arch/x86/lib/copy_user_64.S
-@@ -16,6 +16,7 @@
- #include <asm/thread_info.h>
- #include <asm/cpufeature.h>
- #include <asm/alternative-asm.h>
-+#include <asm/pgtable.h>
-
- /*
- * By placing feature2 after feature1 in altinstructions section, we logically
-@@ -29,7 +30,7 @@
- .byte 0xe9 /* 32bit jump */
- .long \orig-1f /* by default jump to orig */
- 1:
-- .section .altinstr_replacement,"ax"
-+ .section .altinstr_replacement,"a"
- 2: .byte 0xe9 /* near jump with 32bit immediate */
- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
- 3: .byte 0xe9 /* near jump with 32bit immediate */
-@@ -71,47 +72,20 @@
- #endif
- .endm
-
--/* Standard copy_to_user with segment limit checking */
--ENTRY(_copy_to_user)
-- CFI_STARTPROC
-- GET_THREAD_INFO(%rax)
-- movq %rdi,%rcx
-- addq %rdx,%rcx
-- jc bad_to_user
-- cmpq TI_addr_limit(%rax),%rcx
-- ja bad_to_user
-- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
-- copy_user_generic_unrolled,copy_user_generic_string, \
-- copy_user_enhanced_fast_string
-- CFI_ENDPROC
--ENDPROC(_copy_to_user)
--
--/* Standard copy_from_user with segment limit checking */
--ENTRY(_copy_from_user)
-- CFI_STARTPROC
-- GET_THREAD_INFO(%rax)
-- movq %rsi,%rcx
-- addq %rdx,%rcx
-- jc bad_from_user
-- cmpq TI_addr_limit(%rax),%rcx
-- ja bad_from_user
-- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
-- copy_user_generic_unrolled,copy_user_generic_string, \
-- copy_user_enhanced_fast_string
-- CFI_ENDPROC
--ENDPROC(_copy_from_user)
--
- .section .fixup,"ax"
- /* must zero dest */
- ENTRY(bad_from_user)
- bad_from_user:
- CFI_STARTPROC
-+ testl %edx,%edx
-+ js bad_to_user
- movl %edx,%ecx
- xorl %eax,%eax
- rep
- stosb
- bad_to_user:
- movl %edx,%eax
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- ENDPROC(bad_from_user)
-@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
- decl %ecx
- jnz 21b
- 23: xor %eax,%eax
-+ pax_force_retaddr
- ret
-
- .section .fixup,"ax"
-@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
- 3: rep
- movsb
- 4: xorl %eax,%eax
-+ pax_force_retaddr
- ret
-
- .section .fixup,"ax"
-@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
- 1: rep
- movsb
- 2: xorl %eax,%eax
-+ pax_force_retaddr
- ret
-
- .section .fixup,"ax"
-diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
-index cb0c112..cb2d3c5 100644
---- a/arch/x86/lib/copy_user_nocache_64.S
-+++ b/arch/x86/lib/copy_user_nocache_64.S
-@@ -8,12 +8,14 @@
-
- #include <linux/linkage.h>
- #include <asm/dwarf2.h>
-+#include <asm/alternative-asm.h>
-
- #define FIX_ALIGNMENT 1
-
- #include <asm/current.h>
- #include <asm/asm-offsets.h>
- #include <asm/thread_info.h>
-+#include <asm/pgtable.h>
-
- .macro ALIGN_DESTINATION
- #ifdef FIX_ALIGNMENT
-@@ -50,6 +52,15 @@
- */
- ENTRY(__copy_user_nocache)
- CFI_STARTPROC
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ mov pax_user_shadow_base,%rcx
-+ cmp %rcx,%rsi
-+ jae 1f
-+ add %rcx,%rsi
-+1:
-+#endif
-+
- cmpl $8,%edx
- jb 20f /* less then 8 bytes, go to byte copy loop */
- ALIGN_DESTINATION
-@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
- jnz 21b
- 23: xorl %eax,%eax
- sfence
-+ pax_force_retaddr
- ret
-
- .section .fixup,"ax"
-diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
-index fb903b7..83cc6fb 100644
---- a/arch/x86/lib/csum-copy_64.S
-+++ b/arch/x86/lib/csum-copy_64.S
-@@ -8,6 +8,7 @@
- #include <linux/linkage.h>
- #include <asm/dwarf2.h>
- #include <asm/errno.h>
-+#include <asm/alternative-asm.h>
-
- /*
- * Checksum copy with exception handling.
-@@ -64,8 +65,8 @@ ENTRY(csum_partial_copy_generic)
- CFI_ADJUST_CFA_OFFSET 7*8
- movq %rbx, 2*8(%rsp)
- CFI_REL_OFFSET rbx, 2*8
-- movq %r12, 3*8(%rsp)
-- CFI_REL_OFFSET r12, 3*8
-+ movq %r15, 3*8(%rsp)
-+ CFI_REL_OFFSET r15, 3*8
- movq %r14, 4*8(%rsp)
- CFI_REL_OFFSET r14, 4*8
- movq %r13, 5*8(%rsp)
-@@ -80,16 +81,16 @@ ENTRY(csum_partial_copy_generic)
- movl %edx, %ecx
-
- xorl %r9d, %r9d
-- movq %rcx, %r12
-+ movq %rcx, %r15
-
-- shrq $6, %r12
-+ shrq $6, %r15
- jz .Lhandle_tail /* < 64 */
-
- clc
-
- /* main loop. clear in 64 byte blocks */
- /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
-- /* r11: temp3, rdx: temp4, r12 loopcnt */
-+ /* r11: temp3, rdx: temp4, r15 loopcnt */
- /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
- .p2align 4
- .Lloop:
-@@ -123,7 +124,7 @@ ENTRY(csum_partial_copy_generic)
- adcq %r14, %rax
- adcq %r13, %rax
-
-- decl %r12d
-+ decl %r15d
-
- dest
- movq %rbx, (%rsi)
-@@ -218,8 +219,8 @@ ENTRY(csum_partial_copy_generic)
- .Lende:
- movq 2*8(%rsp), %rbx
- CFI_RESTORE rbx
-- movq 3*8(%rsp), %r12
-- CFI_RESTORE r12
-+ movq 3*8(%rsp), %r15
-+ CFI_RESTORE r15
- movq 4*8(%rsp), %r14
- CFI_RESTORE r14
- movq 5*8(%rsp), %r13
-@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
- CFI_RESTORE rbp
- addq $7*8, %rsp
- CFI_ADJUST_CFA_OFFSET -7*8
-+ pax_force_retaddr
- ret
- CFI_RESTORE_STATE
-
-diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
-index 459b58a..d67737f 100644
---- a/arch/x86/lib/csum-wrappers_64.c
-+++ b/arch/x86/lib/csum-wrappers_64.c
-@@ -52,7 +52,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
- len -= 2;
- }
- }
-- isum = csum_partial_copy_generic((__force const void *)src,
-+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
- dst, len, isum, errp, NULL);
- if (unlikely(*errp))
- goto out_err;
-@@ -105,7 +105,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
- }
-
- *errp = 0;
-- return csum_partial_copy_generic(src, (void __force *)dst,
-+ return csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
- len, isum, NULL, errp);
- }
- EXPORT_SYMBOL(csum_partial_copy_to_user);
-diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
-index 51f1504..144f6bd 100644
---- a/arch/x86/lib/getuser.S
-+++ b/arch/x86/lib/getuser.S
-@@ -33,15 +33,38 @@
- #include <asm/asm-offsets.h>
- #include <asm/thread_info.h>
- #include <asm/asm.h>
-+#include <asm/segment.h>
-+#include <asm/pgtable.h>
-+#include <asm/alternative-asm.h>
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+#define __copyuser_seg gs;
-+#else
-+#define __copyuser_seg
-+#endif
-
- .text
- ENTRY(__get_user_1)
- CFI_STARTPROC
-+
-+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
- GET_THREAD_INFO(%_ASM_DX)
- cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
- jae bad_get_user
--1: movzb (%_ASM_AX),%edx
-+
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ mov pax_user_shadow_base,%_ASM_DX
-+ cmp %_ASM_DX,%_ASM_AX
-+ jae 1234f
-+ add %_ASM_DX,%_ASM_AX
-+1234:
-+#endif
-+
-+#endif
-+
-+1: __copyuser_seg movzb (%_ASM_AX),%edx
- xor %eax,%eax
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- ENDPROC(__get_user_1)
-@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
- ENTRY(__get_user_2)
- CFI_STARTPROC
- add $1,%_ASM_AX
-+
-+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
- jc bad_get_user
- GET_THREAD_INFO(%_ASM_DX)
- cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
- jae bad_get_user
--2: movzwl -1(%_ASM_AX),%edx
-+
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ mov pax_user_shadow_base,%_ASM_DX
-+ cmp %_ASM_DX,%_ASM_AX
-+ jae 1234f
-+ add %_ASM_DX,%_ASM_AX
-+1234:
-+#endif
-+
-+#endif
-+
-+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
- xor %eax,%eax
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- ENDPROC(__get_user_2)
-@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
- ENTRY(__get_user_4)
- CFI_STARTPROC
- add $3,%_ASM_AX
-+
-+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
- jc bad_get_user
- GET_THREAD_INFO(%_ASM_DX)
- cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
- jae bad_get_user
--3: mov -3(%_ASM_AX),%edx
-+
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ mov pax_user_shadow_base,%_ASM_DX
-+ cmp %_ASM_DX,%_ASM_AX
-+ jae 1234f
-+ add %_ASM_DX,%_ASM_AX
-+1234:
-+#endif
-+
-+#endif
-+
-+3: __copyuser_seg mov -3(%_ASM_AX),%edx
- xor %eax,%eax
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- ENDPROC(__get_user_4)
-@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
- GET_THREAD_INFO(%_ASM_DX)
- cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
- jae bad_get_user
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ mov pax_user_shadow_base,%_ASM_DX
-+ cmp %_ASM_DX,%_ASM_AX
-+ jae 1234f
-+ add %_ASM_DX,%_ASM_AX
-+1234:
-+#endif
-+
- 4: movq -7(%_ASM_AX),%_ASM_DX
- xor %eax,%eax
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- ENDPROC(__get_user_8)
-@@ -91,6 +152,7 @@ bad_get_user:
- CFI_STARTPROC
- xor %edx,%edx
- mov $(-EFAULT),%_ASM_AX
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- END(bad_get_user)
-diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
-index 374562e..a75830b 100644
---- a/arch/x86/lib/insn.c
-+++ b/arch/x86/lib/insn.c
-@@ -21,6 +21,11 @@
- #include <linux/string.h>
- #include <asm/inat.h>
- #include <asm/insn.h>
-+#ifdef __KERNEL__
-+#include <asm/pgtable_types.h>
-+#else
-+#define ktla_ktva(addr) addr
-+#endif
-
- /* Verify next sizeof(t) bytes can be on the same instruction */
- #define validate_next(t, insn, n) \
-@@ -49,8 +54,8 @@
- void insn_init(struct insn *insn, const void *kaddr, int x86_64)
- {
- memset(insn, 0, sizeof(*insn));
-- insn->kaddr = kaddr;
-- insn->next_byte = kaddr;
-+ insn->kaddr = ktla_ktva(kaddr);
-+ insn->next_byte = ktla_ktva(kaddr);
- insn->x86_64 = x86_64 ? 1 : 0;
- insn->opnd_bytes = 4;
- if (x86_64)
-diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
-index 05a95e7..326f2fa 100644
---- a/arch/x86/lib/iomap_copy_64.S
-+++ b/arch/x86/lib/iomap_copy_64.S
-@@ -17,6 +17,7 @@
-
- #include <linux/linkage.h>
- #include <asm/dwarf2.h>
-+#include <asm/alternative-asm.h>
-
- /*
- * override generic version in lib/iomap_copy.c
-@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
- CFI_STARTPROC
- movl %edx,%ecx
- rep movsd
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- ENDPROC(__iowrite32_copy)
-diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
-index efbf2a0..8090894 100644
---- a/arch/x86/lib/memcpy_64.S
-+++ b/arch/x86/lib/memcpy_64.S
-@@ -34,6 +34,7 @@
- rep movsq
- movl %edx, %ecx
- rep movsb
-+ pax_force_retaddr
- ret
- .Lmemcpy_e:
- .previous
-@@ -51,6 +52,7 @@
-
- movl %edx, %ecx
- rep movsb
-+ pax_force_retaddr
- ret
- .Lmemcpy_e_e:
- .previous
-@@ -141,6 +143,7 @@ ENTRY(memcpy)
- movq %r9, 1*8(%rdi)
- movq %r10, -2*8(%rdi, %rdx)
- movq %r11, -1*8(%rdi, %rdx)
-+ pax_force_retaddr
- retq
- .p2align 4
- .Lless_16bytes:
-@@ -153,6 +156,7 @@ ENTRY(memcpy)
- movq -1*8(%rsi, %rdx), %r9
- movq %r8, 0*8(%rdi)
- movq %r9, -1*8(%rdi, %rdx)
-+ pax_force_retaddr
- retq
- .p2align 4
- .Lless_8bytes:
-@@ -166,6 +170,7 @@ ENTRY(memcpy)
- movl -4(%rsi, %rdx), %r8d
- movl %ecx, (%rdi)
- movl %r8d, -4(%rdi, %rdx)
-+ pax_force_retaddr
- retq
- .p2align 4
- .Lless_3bytes:
-@@ -183,6 +188,7 @@ ENTRY(memcpy)
- jnz .Lloop_1
-
- .Lend:
-+ pax_force_retaddr
- retq
- CFI_ENDPROC
- ENDPROC(memcpy)
-diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
-index ee16461..c4f4918 100644
---- a/arch/x86/lib/memmove_64.S
-+++ b/arch/x86/lib/memmove_64.S
-@@ -202,6 +202,7 @@ ENTRY(memmove)
- movb (%rsi), %r11b
- movb %r11b, (%rdi)
- 13:
-+ pax_force_retaddr
- retq
- CFI_ENDPROC
-
-@@ -210,6 +211,7 @@ ENTRY(memmove)
- /* Forward moving data. */
- movq %rdx, %rcx
- rep movsb
-+ pax_force_retaddr
- retq
- .Lmemmove_end_forward_efs:
- .previous
-diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
-index 79bd454..24b3780 100644
---- a/arch/x86/lib/memset_64.S
-+++ b/arch/x86/lib/memset_64.S
-@@ -31,6 +31,7 @@
- movl %r8d,%ecx
- rep stosb
- movq %r9,%rax
-+ pax_force_retaddr
- ret
- .Lmemset_e:
- .previous
-@@ -53,6 +54,7 @@
- movl %edx,%ecx
- rep stosb
- movq %r9,%rax
-+ pax_force_retaddr
- ret
- .Lmemset_e_e:
- .previous
-@@ -121,6 +123,7 @@ ENTRY(__memset)
-
- .Lende:
- movq %r10,%rax
-+ pax_force_retaddr
- ret
-
- CFI_RESTORE_STATE
-diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
-index c9f2d9b..e7fd2c0 100644
---- a/arch/x86/lib/mmx_32.c
-+++ b/arch/x86/lib/mmx_32.c
-@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
- {
- void *p;
- int i;
-+ unsigned long cr0;
-
- if (unlikely(in_interrupt()))
- return __memcpy(to, from, len);
-@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
-- "1: prefetch (%0)\n" /* This set is 28 bytes */
-- " prefetch 64(%0)\n"
-- " prefetch 128(%0)\n"
-- " prefetch 192(%0)\n"
-- " prefetch 256(%0)\n"
-+ "1: prefetch (%1)\n" /* This set is 28 bytes */
-+ " prefetch 64(%1)\n"
-+ " prefetch 128(%1)\n"
-+ " prefetch 192(%1)\n"
-+ " prefetch 256(%1)\n"
- "2: \n"
- ".section .fixup, \"ax\"\n"
-- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
-+ "3: \n"
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ " movl %%cr0, %0\n"
-+ " movl %0, %%eax\n"
-+ " andl $0xFFFEFFFF, %%eax\n"
-+ " movl %%eax, %%cr0\n"
-+#endif
-+
-+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ " movl %0, %%cr0\n"
-+#endif
-+
- " jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE(1b, 3b)
-- : : "r" (from));
-+ : "=&r" (cr0) : "r" (from) : "ax");
-
- for ( ; i > 5; i--) {
- __asm__ __volatile__ (
-- "1: prefetch 320(%0)\n"
-- "2: movq (%0), %%mm0\n"
-- " movq 8(%0), %%mm1\n"
-- " movq 16(%0), %%mm2\n"
-- " movq 24(%0), %%mm3\n"
-- " movq %%mm0, (%1)\n"
-- " movq %%mm1, 8(%1)\n"
-- " movq %%mm2, 16(%1)\n"
-- " movq %%mm3, 24(%1)\n"
-- " movq 32(%0), %%mm0\n"
-- " movq 40(%0), %%mm1\n"
-- " movq 48(%0), %%mm2\n"
-- " movq 56(%0), %%mm3\n"
-- " movq %%mm0, 32(%1)\n"
-- " movq %%mm1, 40(%1)\n"
-- " movq %%mm2, 48(%1)\n"
-- " movq %%mm3, 56(%1)\n"
-+ "1: prefetch 320(%1)\n"
-+ "2: movq (%1), %%mm0\n"
-+ " movq 8(%1), %%mm1\n"
-+ " movq 16(%1), %%mm2\n"
-+ " movq 24(%1), %%mm3\n"
-+ " movq %%mm0, (%2)\n"
-+ " movq %%mm1, 8(%2)\n"
-+ " movq %%mm2, 16(%2)\n"
-+ " movq %%mm3, 24(%2)\n"
-+ " movq 32(%1), %%mm0\n"
-+ " movq 40(%1), %%mm1\n"
-+ " movq 48(%1), %%mm2\n"
-+ " movq 56(%1), %%mm3\n"
-+ " movq %%mm0, 32(%2)\n"
-+ " movq %%mm1, 40(%2)\n"
-+ " movq %%mm2, 48(%2)\n"
-+ " movq %%mm3, 56(%2)\n"
- ".section .fixup, \"ax\"\n"
-- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
-+ "3:\n"
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ " movl %%cr0, %0\n"
-+ " movl %0, %%eax\n"
-+ " andl $0xFFFEFFFF, %%eax\n"
-+ " movl %%eax, %%cr0\n"
-+#endif
-+
-+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ " movl %0, %%cr0\n"
-+#endif
-+
- " jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE(1b, 3b)
-- : : "r" (from), "r" (to) : "memory");
-+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
-
- from += 64;
- to += 64;
-@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
- static void fast_copy_page(void *to, void *from)
- {
- int i;
-+ unsigned long cr0;
-
- kernel_fpu_begin();
-
-@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
- * but that is for later. -AV
- */
- __asm__ __volatile__(
-- "1: prefetch (%0)\n"
-- " prefetch 64(%0)\n"
-- " prefetch 128(%0)\n"
-- " prefetch 192(%0)\n"
-- " prefetch 256(%0)\n"
-+ "1: prefetch (%1)\n"
-+ " prefetch 64(%1)\n"
-+ " prefetch 128(%1)\n"
-+ " prefetch 192(%1)\n"
-+ " prefetch 256(%1)\n"
- "2: \n"
- ".section .fixup, \"ax\"\n"
-- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
-+ "3: \n"
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ " movl %%cr0, %0\n"
-+ " movl %0, %%eax\n"
-+ " andl $0xFFFEFFFF, %%eax\n"
-+ " movl %%eax, %%cr0\n"
-+#endif
-+
-+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ " movl %0, %%cr0\n"
-+#endif
-+
- " jmp 2b\n"
- ".previous\n"
-- _ASM_EXTABLE(1b, 3b) : : "r" (from));
-+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
-
- for (i = 0; i < (4096-320)/64; i++) {
- __asm__ __volatile__ (
-- "1: prefetch 320(%0)\n"
-- "2: movq (%0), %%mm0\n"
-- " movntq %%mm0, (%1)\n"
-- " movq 8(%0), %%mm1\n"
-- " movntq %%mm1, 8(%1)\n"
-- " movq 16(%0), %%mm2\n"
-- " movntq %%mm2, 16(%1)\n"
-- " movq 24(%0), %%mm3\n"
-- " movntq %%mm3, 24(%1)\n"
-- " movq 32(%0), %%mm4\n"
-- " movntq %%mm4, 32(%1)\n"
-- " movq 40(%0), %%mm5\n"
-- " movntq %%mm5, 40(%1)\n"
-- " movq 48(%0), %%mm6\n"
-- " movntq %%mm6, 48(%1)\n"
-- " movq 56(%0), %%mm7\n"
-- " movntq %%mm7, 56(%1)\n"
-+ "1: prefetch 320(%1)\n"
-+ "2: movq (%1), %%mm0\n"
-+ " movntq %%mm0, (%2)\n"
-+ " movq 8(%1), %%mm1\n"
-+ " movntq %%mm1, 8(%2)\n"
-+ " movq 16(%1), %%mm2\n"
-+ " movntq %%mm2, 16(%2)\n"
-+ " movq 24(%1), %%mm3\n"
-+ " movntq %%mm3, 24(%2)\n"
-+ " movq 32(%1), %%mm4\n"
-+ " movntq %%mm4, 32(%2)\n"
-+ " movq 40(%1), %%mm5\n"
-+ " movntq %%mm5, 40(%2)\n"
-+ " movq 48(%1), %%mm6\n"
-+ " movntq %%mm6, 48(%2)\n"
-+ " movq 56(%1), %%mm7\n"
-+ " movntq %%mm7, 56(%2)\n"
- ".section .fixup, \"ax\"\n"
-- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
-+ "3:\n"
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ " movl %%cr0, %0\n"
-+ " movl %0, %%eax\n"
-+ " andl $0xFFFEFFFF, %%eax\n"
-+ " movl %%eax, %%cr0\n"
-+#endif
-+
-+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ " movl %0, %%cr0\n"
-+#endif
-+
- " jmp 2b\n"
- ".previous\n"
-- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
-+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
-
- from += 64;
- to += 64;
-@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
- static void fast_copy_page(void *to, void *from)
- {
- int i;
-+ unsigned long cr0;
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
-- "1: prefetch (%0)\n"
-- " prefetch 64(%0)\n"
-- " prefetch 128(%0)\n"
-- " prefetch 192(%0)\n"
-- " prefetch 256(%0)\n"
-+ "1: prefetch (%1)\n"
-+ " prefetch 64(%1)\n"
-+ " prefetch 128(%1)\n"
-+ " prefetch 192(%1)\n"
-+ " prefetch 256(%1)\n"
- "2: \n"
- ".section .fixup, \"ax\"\n"
-- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
-+ "3: \n"
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ " movl %%cr0, %0\n"
-+ " movl %0, %%eax\n"
-+ " andl $0xFFFEFFFF, %%eax\n"
-+ " movl %%eax, %%cr0\n"
-+#endif
-+
-+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ " movl %0, %%cr0\n"
-+#endif
-+
- " jmp 2b\n"
- ".previous\n"
-- _ASM_EXTABLE(1b, 3b) : : "r" (from));
-+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
-
- for (i = 0; i < 4096/64; i++) {
- __asm__ __volatile__ (
-- "1: prefetch 320(%0)\n"
-- "2: movq (%0), %%mm0\n"
-- " movq 8(%0), %%mm1\n"
-- " movq 16(%0), %%mm2\n"
-- " movq 24(%0), %%mm3\n"
-- " movq %%mm0, (%1)\n"
-- " movq %%mm1, 8(%1)\n"
-- " movq %%mm2, 16(%1)\n"
-- " movq %%mm3, 24(%1)\n"
-- " movq 32(%0), %%mm0\n"
-- " movq 40(%0), %%mm1\n"
-- " movq 48(%0), %%mm2\n"
-- " movq 56(%0), %%mm3\n"
-- " movq %%mm0, 32(%1)\n"
-- " movq %%mm1, 40(%1)\n"
-- " movq %%mm2, 48(%1)\n"
-- " movq %%mm3, 56(%1)\n"
-+ "1: prefetch 320(%1)\n"
-+ "2: movq (%1), %%mm0\n"
-+ " movq 8(%1), %%mm1\n"
-+ " movq 16(%1), %%mm2\n"
-+ " movq 24(%1), %%mm3\n"
-+ " movq %%mm0, (%2)\n"
-+ " movq %%mm1, 8(%2)\n"
-+ " movq %%mm2, 16(%2)\n"
-+ " movq %%mm3, 24(%2)\n"
-+ " movq 32(%1), %%mm0\n"
-+ " movq 40(%1), %%mm1\n"
-+ " movq 48(%1), %%mm2\n"
-+ " movq 56(%1), %%mm3\n"
-+ " movq %%mm0, 32(%2)\n"
-+ " movq %%mm1, 40(%2)\n"
-+ " movq %%mm2, 48(%2)\n"
-+ " movq %%mm3, 56(%2)\n"
- ".section .fixup, \"ax\"\n"
-- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
-+ "3:\n"
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ " movl %%cr0, %0\n"
-+ " movl %0, %%eax\n"
-+ " andl $0xFFFEFFFF, %%eax\n"
-+ " movl %%eax, %%cr0\n"
-+#endif
-+
-+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ " movl %0, %%cr0\n"
-+#endif
-+
- " jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE(1b, 3b)
-- : : "r" (from), "r" (to) : "memory");
-+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
-
- from += 64;
- to += 64;
-diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
-index 69fa106..234ac7f 100644
---- a/arch/x86/lib/msr-reg.S
-+++ b/arch/x86/lib/msr-reg.S
-@@ -3,6 +3,7 @@
- #include <asm/dwarf2.h>
- #include <asm/asm.h>
- #include <asm/msr.h>
-+#include <asm/alternative-asm.h>
-
- #ifdef CONFIG_X86_64
- /*
-@@ -37,6 +38,7 @@ ENTRY(native_\op\()_safe_regs)
- movl %edi, 28(%r10)
- popq_cfi %rbp
- popq_cfi %rbx
-+ pax_force_retaddr
- ret
- 3:
- CFI_RESTORE_STATE
-diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
-index 36b0d15..3edf573 100644
---- a/arch/x86/lib/putuser.S
-+++ b/arch/x86/lib/putuser.S
-@@ -15,7 +15,9 @@
- #include <asm/thread_info.h>
- #include <asm/errno.h>
- #include <asm/asm.h>
--
-+#include <asm/segment.h>
-+#include <asm/pgtable.h>
-+#include <asm/alternative-asm.h>
-
- /*
- * __put_user_X
-@@ -29,52 +31,119 @@
- * as they get called from within inline assembly.
- */
-
--#define ENTER CFI_STARTPROC ; \
-- GET_THREAD_INFO(%_ASM_BX)
--#define EXIT ret ; \
-+#define ENTER CFI_STARTPROC
-+#define EXIT pax_force_retaddr; ret ; \
- CFI_ENDPROC
-
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+#define _DEST %_ASM_CX,%_ASM_BX
-+#else
-+#define _DEST %_ASM_CX
-+#endif
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+#define __copyuser_seg gs;
-+#else
-+#define __copyuser_seg
-+#endif
-+
- .text
- ENTRY(__put_user_1)
- ENTER
-+
-+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
-+ GET_THREAD_INFO(%_ASM_BX)
- cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
- jae bad_put_user
--1: movb %al,(%_ASM_CX)
-+
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ mov pax_user_shadow_base,%_ASM_BX
-+ cmp %_ASM_BX,%_ASM_CX
-+ jb 1234f
-+ xor %ebx,%ebx
-+1234:
-+#endif
-+
-+#endif
-+
-+1: __copyuser_seg movb %al,(_DEST)
- xor %eax,%eax
- EXIT
- ENDPROC(__put_user_1)
-
- ENTRY(__put_user_2)
- ENTER
-+
-+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
-+ GET_THREAD_INFO(%_ASM_BX)
- mov TI_addr_limit(%_ASM_BX),%_ASM_BX
- sub $1,%_ASM_BX
- cmp %_ASM_BX,%_ASM_CX
- jae bad_put_user
--2: movw %ax,(%_ASM_CX)
-+
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ mov pax_user_shadow_base,%_ASM_BX
-+ cmp %_ASM_BX,%_ASM_CX
-+ jb 1234f
-+ xor %ebx,%ebx
-+1234:
-+#endif
-+
-+#endif
-+
-+2: __copyuser_seg movw %ax,(_DEST)
- xor %eax,%eax
- EXIT
- ENDPROC(__put_user_2)
-
- ENTRY(__put_user_4)
- ENTER
-+
-+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
-+ GET_THREAD_INFO(%_ASM_BX)
- mov TI_addr_limit(%_ASM_BX),%_ASM_BX
- sub $3,%_ASM_BX
- cmp %_ASM_BX,%_ASM_CX
- jae bad_put_user
--3: movl %eax,(%_ASM_CX)
-+
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ mov pax_user_shadow_base,%_ASM_BX
-+ cmp %_ASM_BX,%_ASM_CX
-+ jb 1234f
-+ xor %ebx,%ebx
-+1234:
-+#endif
-+
-+#endif
-+
-+3: __copyuser_seg movl %eax,(_DEST)
- xor %eax,%eax
- EXIT
- ENDPROC(__put_user_4)
-
- ENTRY(__put_user_8)
- ENTER
-+
-+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
-+ GET_THREAD_INFO(%_ASM_BX)
- mov TI_addr_limit(%_ASM_BX),%_ASM_BX
- sub $7,%_ASM_BX
- cmp %_ASM_BX,%_ASM_CX
- jae bad_put_user
--4: mov %_ASM_AX,(%_ASM_CX)
-+
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ mov pax_user_shadow_base,%_ASM_BX
-+ cmp %_ASM_BX,%_ASM_CX
-+ jb 1234f
-+ xor %ebx,%ebx
-+1234:
-+#endif
-+
-+#endif
-+
-+4: __copyuser_seg mov %_ASM_AX,(_DEST)
- #ifdef CONFIG_X86_32
--5: movl %edx,4(%_ASM_CX)
-+5: __copyuser_seg movl %edx,4(_DEST)
- #endif
- xor %eax,%eax
- EXIT
-diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
-index 1cad221..de671ee 100644
---- a/arch/x86/lib/rwlock.S
-+++ b/arch/x86/lib/rwlock.S
-@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
- FRAME
- 0: LOCK_PREFIX
- WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ jno 1234f
-+ LOCK_PREFIX
-+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
-+ int $4
-+1234:
-+ _ASM_EXTABLE(1234b, 1234b)
-+#endif
-+
- 1: rep; nop
- cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
- jne 1b
- LOCK_PREFIX
- WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ jno 1234f
-+ LOCK_PREFIX
-+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
-+ int $4
-+1234:
-+ _ASM_EXTABLE(1234b, 1234b)
-+#endif
-+
- jnz 0b
- ENDFRAME
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- END(__write_lock_failed)
-@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
- FRAME
- 0: LOCK_PREFIX
- READ_LOCK_SIZE(inc) (%__lock_ptr)
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ jno 1234f
-+ LOCK_PREFIX
-+ READ_LOCK_SIZE(dec) (%__lock_ptr)
-+ int $4
-+1234:
-+ _ASM_EXTABLE(1234b, 1234b)
-+#endif
-+
- 1: rep; nop
- READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
- js 1b
- LOCK_PREFIX
- READ_LOCK_SIZE(dec) (%__lock_ptr)
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ jno 1234f
-+ LOCK_PREFIX
-+ READ_LOCK_SIZE(inc) (%__lock_ptr)
-+ int $4
-+1234:
-+ _ASM_EXTABLE(1234b, 1234b)
-+#endif
-+
- js 0b
- ENDFRAME
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- END(__read_lock_failed)
-diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
-index 5dff5f0..cadebf4 100644
---- a/arch/x86/lib/rwsem.S
-+++ b/arch/x86/lib/rwsem.S
-@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
- __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
- CFI_RESTORE __ASM_REG(dx)
- restore_common_regs
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- ENDPROC(call_rwsem_down_read_failed)
-@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
- movq %rax,%rdi
- call rwsem_down_write_failed
- restore_common_regs
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- ENDPROC(call_rwsem_down_write_failed)
-@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
- movq %rax,%rdi
- call rwsem_wake
- restore_common_regs
--1: ret
-+1: pax_force_retaddr
-+ ret
- CFI_ENDPROC
- ENDPROC(call_rwsem_wake)
-
-@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
- __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
- CFI_RESTORE __ASM_REG(dx)
- restore_common_regs
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- ENDPROC(call_rwsem_downgrade_wake)
-diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
-index a63efd6..8149fbe 100644
---- a/arch/x86/lib/thunk_64.S
-+++ b/arch/x86/lib/thunk_64.S
-@@ -8,6 +8,7 @@
- #include <linux/linkage.h>
- #include <asm/dwarf2.h>
- #include <asm/calling.h>
-+#include <asm/alternative-asm.h>
-
- /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
- .macro THUNK name, func, put_ret_addr_in_rdi=0
-@@ -15,11 +16,11 @@
- \name:
- CFI_STARTPROC
-
-- /* this one pushes 9 elems, the next one would be %rIP */
-- SAVE_ARGS
-+ /* this one pushes 15+1 elems, the next one would be %rIP */
-+ SAVE_ARGS 8
-
- .if \put_ret_addr_in_rdi
-- movq_cfi_restore 9*8, rdi
-+ movq_cfi_restore RIP, rdi
- .endif
-
- call \func
-@@ -38,8 +39,9 @@
-
- /* SAVE_ARGS below is used only for the .cfi directives it contains. */
- CFI_STARTPROC
-- SAVE_ARGS
-+ SAVE_ARGS 8
- restore:
-- RESTORE_ARGS
-+ RESTORE_ARGS 1,8
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
-diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
-index e218d5d..3966c85 100644
---- a/arch/x86/lib/usercopy_32.c
-+++ b/arch/x86/lib/usercopy_32.c
-@@ -43,7 +43,7 @@ do { \
- __asm__ __volatile__( \
- " testl %1,%1\n" \
- " jz 2f\n" \
-- "0: lodsb\n" \
-+ "0: "__copyuser_seg"lodsb\n" \
- " stosb\n" \
- " testb %%al,%%al\n" \
- " jz 1f\n" \
-@@ -128,10 +128,12 @@ do { \
- int __d0; \
- might_fault(); \
- __asm__ __volatile__( \
-+ __COPYUSER_SET_ES \
- "0: rep; stosl\n" \
- " movl %2,%0\n" \
- "1: rep; stosb\n" \
- "2:\n" \
-+ __COPYUSER_RESTORE_ES \
- ".section .fixup,\"ax\"\n" \
- "3: lea 0(%2,%0,4),%0\n" \
- " jmp 2b\n" \
-@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
- might_fault();
-
- __asm__ __volatile__(
-+ __COPYUSER_SET_ES
- " testl %0, %0\n"
- " jz 3f\n"
- " andl %0,%%ecx\n"
-@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
- " subl %%ecx,%0\n"
- " addl %0,%%eax\n"
- "1:\n"
-+ __COPYUSER_RESTORE_ES
- ".section .fixup,\"ax\"\n"
- "2: xorl %%eax,%%eax\n"
- " jmp 1b\n"
-@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
-
- #ifdef CONFIG_X86_INTEL_USERCOPY
- static unsigned long
--__copy_user_intel(void __user *to, const void *from, unsigned long size)
-+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
- {
- int d0, d1;
- __asm__ __volatile__(
-@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
- " .align 2,0x90\n"
- "3: movl 0(%4), %%eax\n"
- "4: movl 4(%4), %%edx\n"
-- "5: movl %%eax, 0(%3)\n"
-- "6: movl %%edx, 4(%3)\n"
-+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
-+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
- "7: movl 8(%4), %%eax\n"
- "8: movl 12(%4),%%edx\n"
-- "9: movl %%eax, 8(%3)\n"
-- "10: movl %%edx, 12(%3)\n"
-+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
-+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
- "11: movl 16(%4), %%eax\n"
- "12: movl 20(%4), %%edx\n"
-- "13: movl %%eax, 16(%3)\n"
-- "14: movl %%edx, 20(%3)\n"
-+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
-+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
- "15: movl 24(%4), %%eax\n"
- "16: movl 28(%4), %%edx\n"
-- "17: movl %%eax, 24(%3)\n"
-- "18: movl %%edx, 28(%3)\n"
-+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
-+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
- "19: movl 32(%4), %%eax\n"
- "20: movl 36(%4), %%edx\n"
-- "21: movl %%eax, 32(%3)\n"
-- "22: movl %%edx, 36(%3)\n"
-+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
-+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
- "23: movl 40(%4), %%eax\n"
- "24: movl 44(%4), %%edx\n"
-- "25: movl %%eax, 40(%3)\n"
-- "26: movl %%edx, 44(%3)\n"
-+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
-+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
- "27: movl 48(%4), %%eax\n"
- "28: movl 52(%4), %%edx\n"
-- "29: movl %%eax, 48(%3)\n"
-- "30: movl %%edx, 52(%3)\n"
-+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
-+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
- "31: movl 56(%4), %%eax\n"
- "32: movl 60(%4), %%edx\n"
-- "33: movl %%eax, 56(%3)\n"
-- "34: movl %%edx, 60(%3)\n"
-+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
-+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
- " addl $-64, %0\n"
- " addl $64, %4\n"
- " addl $64, %3\n"
-@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
- " shrl $2, %0\n"
- " andl $3, %%eax\n"
- " cld\n"
-+ __COPYUSER_SET_ES
- "99: rep; movsl\n"
- "36: movl %%eax, %0\n"
- "37: rep; movsb\n"
- "100:\n"
-+ __COPYUSER_RESTORE_ES
-+ ".section .fixup,\"ax\"\n"
-+ "101: lea 0(%%eax,%0,4),%0\n"
-+ " jmp 100b\n"
-+ ".previous\n"
-+ ".section __ex_table,\"a\"\n"
-+ " .align 4\n"
-+ " .long 1b,100b\n"
-+ " .long 2b,100b\n"
-+ " .long 3b,100b\n"
-+ " .long 4b,100b\n"
-+ " .long 5b,100b\n"
-+ " .long 6b,100b\n"
-+ " .long 7b,100b\n"
-+ " .long 8b,100b\n"
-+ " .long 9b,100b\n"
-+ " .long 10b,100b\n"
-+ " .long 11b,100b\n"
-+ " .long 12b,100b\n"
-+ " .long 13b,100b\n"
-+ " .long 14b,100b\n"
-+ " .long 15b,100b\n"
-+ " .long 16b,100b\n"
-+ " .long 17b,100b\n"
-+ " .long 18b,100b\n"
-+ " .long 19b,100b\n"
-+ " .long 20b,100b\n"
-+ " .long 21b,100b\n"
-+ " .long 22b,100b\n"
-+ " .long 23b,100b\n"
-+ " .long 24b,100b\n"
-+ " .long 25b,100b\n"
-+ " .long 26b,100b\n"
-+ " .long 27b,100b\n"
-+ " .long 28b,100b\n"
-+ " .long 29b,100b\n"
-+ " .long 30b,100b\n"
-+ " .long 31b,100b\n"
-+ " .long 32b,100b\n"
-+ " .long 33b,100b\n"
-+ " .long 34b,100b\n"
-+ " .long 35b,100b\n"
-+ " .long 36b,100b\n"
-+ " .long 37b,100b\n"
-+ " .long 99b,101b\n"
-+ ".previous"
-+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
-+ : "1"(to), "2"(from), "0"(size)
-+ : "eax", "edx", "memory");
-+ return size;
-+}
-+
-+static unsigned long
-+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
-+{
-+ int d0, d1;
-+ __asm__ __volatile__(
-+ " .align 2,0x90\n"
-+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
-+ " cmpl $67, %0\n"
-+ " jbe 3f\n"
-+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
-+ " .align 2,0x90\n"
-+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
-+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
-+ "5: movl %%eax, 0(%3)\n"
-+ "6: movl %%edx, 4(%3)\n"
-+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
-+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
-+ "9: movl %%eax, 8(%3)\n"
-+ "10: movl %%edx, 12(%3)\n"
-+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
-+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
-+ "13: movl %%eax, 16(%3)\n"
-+ "14: movl %%edx, 20(%3)\n"
-+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
-+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
-+ "17: movl %%eax, 24(%3)\n"
-+ "18: movl %%edx, 28(%3)\n"
-+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
-+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
-+ "21: movl %%eax, 32(%3)\n"
-+ "22: movl %%edx, 36(%3)\n"
-+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
-+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
-+ "25: movl %%eax, 40(%3)\n"
-+ "26: movl %%edx, 44(%3)\n"
-+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
-+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
-+ "29: movl %%eax, 48(%3)\n"
-+ "30: movl %%edx, 52(%3)\n"
-+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
-+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
-+ "33: movl %%eax, 56(%3)\n"
-+ "34: movl %%edx, 60(%3)\n"
-+ " addl $-64, %0\n"
-+ " addl $64, %4\n"
-+ " addl $64, %3\n"
-+ " cmpl $63, %0\n"
-+ " ja 1b\n"
-+ "35: movl %0, %%eax\n"
-+ " shrl $2, %0\n"
-+ " andl $3, %%eax\n"
-+ " cld\n"
-+ "99: rep; "__copyuser_seg" movsl\n"
-+ "36: movl %%eax, %0\n"
-+ "37: rep; "__copyuser_seg" movsb\n"
-+ "100:\n"
- ".section .fixup,\"ax\"\n"
- "101: lea 0(%%eax,%0,4),%0\n"
- " jmp 100b\n"
-@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
- int d0, d1;
- __asm__ __volatile__(
- " .align 2,0x90\n"
-- "0: movl 32(%4), %%eax\n"
-+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
- " cmpl $67, %0\n"
- " jbe 2f\n"
-- "1: movl 64(%4), %%eax\n"
-+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
- " .align 2,0x90\n"
-- "2: movl 0(%4), %%eax\n"
-- "21: movl 4(%4), %%edx\n"
-+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
-+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
- " movl %%eax, 0(%3)\n"
- " movl %%edx, 4(%3)\n"
-- "3: movl 8(%4), %%eax\n"
-- "31: movl 12(%4),%%edx\n"
-+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
-+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
- " movl %%eax, 8(%3)\n"
- " movl %%edx, 12(%3)\n"
-- "4: movl 16(%4), %%eax\n"
-- "41: movl 20(%4), %%edx\n"
-+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
-+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
- " movl %%eax, 16(%3)\n"
- " movl %%edx, 20(%3)\n"
-- "10: movl 24(%4), %%eax\n"
-- "51: movl 28(%4), %%edx\n"
-+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
-+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
- " movl %%eax, 24(%3)\n"
- " movl %%edx, 28(%3)\n"
-- "11: movl 32(%4), %%eax\n"
-- "61: movl 36(%4), %%edx\n"
-+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
-+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
- " movl %%eax, 32(%3)\n"
- " movl %%edx, 36(%3)\n"
-- "12: movl 40(%4), %%eax\n"
-- "71: movl 44(%4), %%edx\n"
-+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
-+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
- " movl %%eax, 40(%3)\n"
- " movl %%edx, 44(%3)\n"
-- "13: movl 48(%4), %%eax\n"
-- "81: movl 52(%4), %%edx\n"
-+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
-+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
- " movl %%eax, 48(%3)\n"
- " movl %%edx, 52(%3)\n"
-- "14: movl 56(%4), %%eax\n"
-- "91: movl 60(%4), %%edx\n"
-+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
-+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
- " movl %%eax, 56(%3)\n"
- " movl %%edx, 60(%3)\n"
- " addl $-64, %0\n"
-@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
- " shrl $2, %0\n"
- " andl $3, %%eax\n"
- " cld\n"
-- "6: rep; movsl\n"
-+ "6: rep; "__copyuser_seg" movsl\n"
- " movl %%eax,%0\n"
-- "7: rep; movsb\n"
-+ "7: rep; "__copyuser_seg" movsb\n"
- "8:\n"
- ".section .fixup,\"ax\"\n"
- "9: lea 0(%%eax,%0,4),%0\n"
-@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
-
- __asm__ __volatile__(
- " .align 2,0x90\n"
-- "0: movl 32(%4), %%eax\n"
-+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
- " cmpl $67, %0\n"
- " jbe 2f\n"
-- "1: movl 64(%4), %%eax\n"
-+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
- " .align 2,0x90\n"
-- "2: movl 0(%4), %%eax\n"
-- "21: movl 4(%4), %%edx\n"
-+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
-+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
- " movnti %%eax, 0(%3)\n"
- " movnti %%edx, 4(%3)\n"
-- "3: movl 8(%4), %%eax\n"
-- "31: movl 12(%4),%%edx\n"
-+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
-+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
- " movnti %%eax, 8(%3)\n"
- " movnti %%edx, 12(%3)\n"
-- "4: movl 16(%4), %%eax\n"
-- "41: movl 20(%4), %%edx\n"
-+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
-+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
- " movnti %%eax, 16(%3)\n"
- " movnti %%edx, 20(%3)\n"
-- "10: movl 24(%4), %%eax\n"
-- "51: movl 28(%4), %%edx\n"
-+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
-+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
- " movnti %%eax, 24(%3)\n"
- " movnti %%edx, 28(%3)\n"
-- "11: movl 32(%4), %%eax\n"
-- "61: movl 36(%4), %%edx\n"
-+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
-+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
- " movnti %%eax, 32(%3)\n"
- " movnti %%edx, 36(%3)\n"
-- "12: movl 40(%4), %%eax\n"
-- "71: movl 44(%4), %%edx\n"
-+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
-+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
- " movnti %%eax, 40(%3)\n"
- " movnti %%edx, 44(%3)\n"
-- "13: movl 48(%4), %%eax\n"
-- "81: movl 52(%4), %%edx\n"
-+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
-+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
- " movnti %%eax, 48(%3)\n"
- " movnti %%edx, 52(%3)\n"
-- "14: movl 56(%4), %%eax\n"
-- "91: movl 60(%4), %%edx\n"
-+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
-+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
- " movnti %%eax, 56(%3)\n"
- " movnti %%edx, 60(%3)\n"
- " addl $-64, %0\n"
-@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
- " shrl $2, %0\n"
- " andl $3, %%eax\n"
- " cld\n"
-- "6: rep; movsl\n"
-+ "6: rep; "__copyuser_seg" movsl\n"
- " movl %%eax,%0\n"
-- "7: rep; movsb\n"
-+ "7: rep; "__copyuser_seg" movsb\n"
- "8:\n"
- ".section .fixup,\"ax\"\n"
- "9: lea 0(%%eax,%0,4),%0\n"
-@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
-
- __asm__ __volatile__(
- " .align 2,0x90\n"
-- "0: movl 32(%4), %%eax\n"
-+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
- " cmpl $67, %0\n"
- " jbe 2f\n"
-- "1: movl 64(%4), %%eax\n"
-+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
- " .align 2,0x90\n"
-- "2: movl 0(%4), %%eax\n"
-- "21: movl 4(%4), %%edx\n"
-+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
-+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
- " movnti %%eax, 0(%3)\n"
- " movnti %%edx, 4(%3)\n"
-- "3: movl 8(%4), %%eax\n"
-- "31: movl 12(%4),%%edx\n"
-+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
-+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
- " movnti %%eax, 8(%3)\n"
- " movnti %%edx, 12(%3)\n"
-- "4: movl 16(%4), %%eax\n"
-- "41: movl 20(%4), %%edx\n"
-+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
-+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
- " movnti %%eax, 16(%3)\n"
- " movnti %%edx, 20(%3)\n"
-- "10: movl 24(%4), %%eax\n"
-- "51: movl 28(%4), %%edx\n"
-+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
-+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
- " movnti %%eax, 24(%3)\n"
- " movnti %%edx, 28(%3)\n"
-- "11: movl 32(%4), %%eax\n"
-- "61: movl 36(%4), %%edx\n"
-+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
-+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
- " movnti %%eax, 32(%3)\n"
- " movnti %%edx, 36(%3)\n"
-- "12: movl 40(%4), %%eax\n"
-- "71: movl 44(%4), %%edx\n"
-+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
-+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
- " movnti %%eax, 40(%3)\n"
- " movnti %%edx, 44(%3)\n"
-- "13: movl 48(%4), %%eax\n"
-- "81: movl 52(%4), %%edx\n"
-+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
-+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
- " movnti %%eax, 48(%3)\n"
- " movnti %%edx, 52(%3)\n"
-- "14: movl 56(%4), %%eax\n"
-- "91: movl 60(%4), %%edx\n"
-+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
-+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
- " movnti %%eax, 56(%3)\n"
- " movnti %%edx, 60(%3)\n"
- " addl $-64, %0\n"
-@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
- " shrl $2, %0\n"
- " andl $3, %%eax\n"
- " cld\n"
-- "6: rep; movsl\n"
-+ "6: rep; "__copyuser_seg" movsl\n"
- " movl %%eax,%0\n"
-- "7: rep; movsb\n"
-+ "7: rep; "__copyuser_seg" movsb\n"
- "8:\n"
- ".section .fixup,\"ax\"\n"
- "9: lea 0(%%eax,%0,4),%0\n"
-@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
- */
- unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
- unsigned long size);
--unsigned long __copy_user_intel(void __user *to, const void *from,
-+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
-+ unsigned long size);
-+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
- unsigned long size);
- unsigned long __copy_user_zeroing_intel_nocache(void *to,
- const void __user *from, unsigned long size);
- #endif /* CONFIG_X86_INTEL_USERCOPY */
-
- /* Generic arbitrary sized copy. */
--#define __copy_user(to, from, size) \
-+#define __copy_user(to, from, size, prefix, set, restore) \
- do { \
- int __d0, __d1, __d2; \
- __asm__ __volatile__( \
-+ set \
- " cmp $7,%0\n" \
- " jbe 1f\n" \
- " movl %1,%0\n" \
- " negl %0\n" \
- " andl $7,%0\n" \
- " subl %0,%3\n" \
-- "4: rep; movsb\n" \
-+ "4: rep; "prefix"movsb\n" \
- " movl %3,%0\n" \
- " shrl $2,%0\n" \
- " andl $3,%3\n" \
- " .align 2,0x90\n" \
-- "0: rep; movsl\n" \
-+ "0: rep; "prefix"movsl\n" \
- " movl %3,%0\n" \
-- "1: rep; movsb\n" \
-+ "1: rep; "prefix"movsb\n" \
- "2:\n" \
-+ restore \
- ".section .fixup,\"ax\"\n" \
- "5: addl %3,%0\n" \
- " jmp 2b\n" \
-@@ -682,14 +799,14 @@ do { \
- " negl %0\n" \
- " andl $7,%0\n" \
- " subl %0,%3\n" \
-- "4: rep; movsb\n" \
-+ "4: rep; "__copyuser_seg"movsb\n" \
- " movl %3,%0\n" \
- " shrl $2,%0\n" \
- " andl $3,%3\n" \
- " .align 2,0x90\n" \
-- "0: rep; movsl\n" \
-+ "0: rep; "__copyuser_seg"movsl\n" \
- " movl %3,%0\n" \
-- "1: rep; movsb\n" \
-+ "1: rep; "__copyuser_seg"movsb\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "5: addl %3,%0\n" \
-@@ -775,9 +892,9 @@ survive:
- }
- #endif
- if (movsl_is_ok(to, from, n))
-- __copy_user(to, from, n);
-+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
- else
-- n = __copy_user_intel(to, from, n);
-+ n = __generic_copy_to_user_intel(to, from, n);
- return n;
- }
- EXPORT_SYMBOL(__copy_to_user_ll);
-@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
- unsigned long n)
- {
- if (movsl_is_ok(to, from, n))
-- __copy_user(to, from, n);
-+ __copy_user(to, from, n, __copyuser_seg, "", "");
- else
-- n = __copy_user_intel((void __user *)to,
-- (const void *)from, n);
-+ n = __generic_copy_from_user_intel(to, from, n);
- return n;
- }
- EXPORT_SYMBOL(__copy_from_user_ll_nozero);
-@@ -827,65 +943,49 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
- if (n > 64 && cpu_has_xmm2)
- n = __copy_user_intel_nocache(to, from, n);
- else
-- __copy_user(to, from, n);
-+ __copy_user(to, from, n, __copyuser_seg, "", "");
- #else
-- __copy_user(to, from, n);
-+ __copy_user(to, from, n, __copyuser_seg, "", "");
- #endif
- return n;
- }
- EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
-
--/**
-- * copy_to_user: - Copy a block of data into user space.
-- * @to: Destination address, in user space.
-- * @from: Source address, in kernel space.
-- * @n: Number of bytes to copy.
-- *
-- * Context: User context only. This function may sleep.
-- *
-- * Copy data from kernel space to user space.
-- *
-- * Returns number of bytes that could not be copied.
-- * On success, this will be zero.
-- */
--unsigned long
--copy_to_user(void __user *to, const void *from, unsigned long n)
--{
-- if (access_ok(VERIFY_WRITE, to, n))
-- n = __copy_to_user(to, from, n);
-- return n;
--}
--EXPORT_SYMBOL(copy_to_user);
--
--/**
-- * copy_from_user: - Copy a block of data from user space.
-- * @to: Destination address, in kernel space.
-- * @from: Source address, in user space.
-- * @n: Number of bytes to copy.
-- *
-- * Context: User context only. This function may sleep.
-- *
-- * Copy data from user space to kernel space.
-- *
-- * Returns number of bytes that could not be copied.
-- * On success, this will be zero.
-- *
-- * If some data could not be copied, this function will pad the copied
-- * data to the requested size using zero bytes.
-- */
--unsigned long
--_copy_from_user(void *to, const void __user *from, unsigned long n)
--{
-- if (access_ok(VERIFY_READ, from, n))
-- n = __copy_from_user(to, from, n);
-- else
-- memset(to, 0, n);
-- return n;
--}
--EXPORT_SYMBOL(_copy_from_user);
--
- void copy_from_user_overflow(void)
- {
- WARN(1, "Buffer overflow detected!\n");
- }
- EXPORT_SYMBOL(copy_from_user_overflow);
-+
-+void copy_to_user_overflow(void)
-+{
-+ WARN(1, "Buffer overflow detected!\n");
-+}
-+EXPORT_SYMBOL(copy_to_user_overflow);
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+void __set_fs(mm_segment_t x)
-+{
-+ switch (x.seg) {
-+ case 0:
-+ loadsegment(gs, 0);
-+ break;
-+ case TASK_SIZE_MAX:
-+ loadsegment(gs, __USER_DS);
-+ break;
-+ case -1UL:
-+ loadsegment(gs, __KERNEL_DS);
-+ break;
-+ default:
-+ BUG();
-+ }
-+}
-+EXPORT_SYMBOL(__set_fs);
-+
-+void set_fs(mm_segment_t x)
-+{
-+ current_thread_info()->addr_limit = x;
-+ __set_fs(x);
-+}
-+EXPORT_SYMBOL(set_fs);
-+#endif
-diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
-index 554b7b5..4027e2c 100644
---- a/arch/x86/lib/usercopy_64.c
-+++ b/arch/x86/lib/usercopy_64.c
-@@ -42,6 +42,12 @@ long
- __strncpy_from_user(char *dst, const char __user *src, long count)
- {
- long res;
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ if ((unsigned long)src < pax_user_shadow_base)
-+ src += pax_user_shadow_base;
-+#endif
-+
- __do_strncpy_from_user(dst, src, count, res);
- return res;
- }
-@@ -87,7 +93,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
- _ASM_EXTABLE(0b,3b)
- _ASM_EXTABLE(1b,2b)
- : [size8] "=&c"(size), [dst] "=&D" (__d0)
-- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
-+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
- [zero] "r" (0UL), [eight] "r" (8UL));
- return size;
- }
-@@ -149,12 +155,11 @@ long strlen_user(const char __user *s)
- }
- EXPORT_SYMBOL(strlen_user);
-
--unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
-+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
- {
-- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
-- return copy_user_generic((__force void *)to, (__force void *)from, len);
-- }
-- return len;
-+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
-+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
-+ return len;
- }
- EXPORT_SYMBOL(copy_in_user);
-
-@@ -164,7 +169,7 @@ EXPORT_SYMBOL(copy_in_user);
- * it is not necessary to optimize tail handling.
- */
- unsigned long
--copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
-+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
- {
- char c;
- unsigned zero_len;
-@@ -181,3 +186,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
- break;
- return len;
- }
-+
-+void copy_from_user_overflow(void)
-+{
-+ WARN(1, "Buffer overflow detected!\n");
-+}
-+EXPORT_SYMBOL(copy_from_user_overflow);
-+
-+void copy_to_user_overflow(void)
-+{
-+ WARN(1, "Buffer overflow detected!\n");
-+}
-+EXPORT_SYMBOL(copy_to_user_overflow);
-diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
-index d0474ad..36e9257 100644
---- a/arch/x86/mm/extable.c
-+++ b/arch/x86/mm/extable.c
-@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
- const struct exception_table_entry *fixup;
-
- #ifdef CONFIG_PNPBIOS
-- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
-+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
- extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
- extern u32 pnp_bios_is_utter_crap;
- pnp_bios_is_utter_crap = 1;
-diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
-index 351590e..ad0d399 100644
---- a/arch/x86/mm/fault.c
-+++ b/arch/x86/mm/fault.c
-@@ -13,11 +13,18 @@
- #include <linux/perf_event.h> /* perf_sw_event */
- #include <linux/hugetlb.h> /* hstate_index_to_shift */
- #include <linux/prefetch.h> /* prefetchw */
-+#include <linux/unistd.h>
-+#include <linux/compiler.h>
-
- #include <asm/traps.h> /* dotraplinkage, ... */
- #include <asm/pgalloc.h> /* pgd_*(), ... */
- #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
- #include <asm/fixmap.h> /* VSYSCALL_START */
-+#include <asm/tlbflush.h>
-+
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+#include <asm/stacktrace.h>
-+#endif
-
- /*
- * Page fault error code bits:
-@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
- int ret = 0;
-
- /* kprobe_running() needs smp_processor_id() */
-- if (kprobes_built_in() && !user_mode_vm(regs)) {
-+ if (kprobes_built_in() && !user_mode(regs)) {
- preempt_disable();
- if (kprobe_running() && kprobe_fault_handler(regs, 14))
- ret = 1;
-@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
- return !instr_lo || (instr_lo>>1) == 1;
- case 0x00:
- /* Prefetch instruction is 0x0F0D or 0x0F18 */
-- if (probe_kernel_address(instr, opcode))
-+ if (user_mode(regs)) {
-+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
-+ return 0;
-+ } else if (probe_kernel_address(instr, opcode))
- return 0;
-
- *prefetch = (instr_lo == 0xF) &&
-@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
- while (instr < max_instr) {
- unsigned char opcode;
-
-- if (probe_kernel_address(instr, opcode))
-+ if (user_mode(regs)) {
-+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
-+ break;
-+ } else if (probe_kernel_address(instr, opcode))
- break;
-
- instr++;
-@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
- force_sig_info(si_signo, &info, tsk);
- }
-
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+static int pax_handle_fetch_fault(struct pt_regs *regs);
-+#endif
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+
-+ pgd = pgd_offset(mm, address);
-+ if (!pgd_present(*pgd))
-+ return NULL;
-+ pud = pud_offset(pgd, address);
-+ if (!pud_present(*pud))
-+ return NULL;
-+ pmd = pmd_offset(pud, address);
-+ if (!pmd_present(*pmd))
-+ return NULL;
-+ return pmd;
-+}
-+#endif
-+
- DEFINE_SPINLOCK(pgd_lock);
- LIST_HEAD(pgd_list);
-
-@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
- for (address = VMALLOC_START & PMD_MASK;
- address >= TASK_SIZE && address < FIXADDR_TOP;
- address += PMD_SIZE) {
-+
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ unsigned long cpu;
-+#else
- struct page *page;
-+#endif
-
- spin_lock(&pgd_lock);
-+
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
-+ pgd_t *pgd = get_cpu_pgd(cpu);
-+ pmd_t *ret;
-+#else
- list_for_each_entry(page, &pgd_list, lru) {
-+ pgd_t *pgd;
- spinlock_t *pgt_lock;
- pmd_t *ret;
-
-@@ -242,8 +295,14 @@ void vmalloc_sync_all(void)
- pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
-
- spin_lock(pgt_lock);
-- ret = vmalloc_sync_one(page_address(page), address);
-+ pgd = page_address(page);
-+#endif
-+
-+ ret = vmalloc_sync_one(pgd, address);
-+
-+#ifndef CONFIG_PAX_PER_CPU_PGD
- spin_unlock(pgt_lock);
-+#endif
-
- if (!ret)
- break;
-@@ -277,6 +336,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
- * an interrupt in the middle of a task switch..
- */
- pgd_paddr = read_cr3();
-+
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
-+#endif
-+
- pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
- if (!pmd_k)
- return -1;
-@@ -372,7 +436,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
- * happen within a race in page table update. In the later
- * case just flush:
- */
-+
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
-+ pgd = pgd_offset_cpu(smp_processor_id(), address);
-+#else
- pgd = pgd_offset(current->active_mm, address);
-+#endif
-+
- pgd_ref = pgd_offset_k(address);
- if (pgd_none(*pgd_ref))
- return -1;
-@@ -542,7 +613,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
- static int is_errata100(struct pt_regs *regs, unsigned long address)
- {
- #ifdef CONFIG_X86_64
-- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
-+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
- return 1;
- #endif
- return 0;
-@@ -569,7 +640,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
- }
-
- static const char nx_warning[] = KERN_CRIT
--"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
-+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
-
- static void
- show_fault_oops(struct pt_regs *regs, unsigned long error_code,
-@@ -578,15 +649,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
- if (!oops_may_print())
- return;
-
-- if (error_code & PF_INSTR) {
-+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
- unsigned int level;
-
- pte_t *pte = lookup_address(address, &level);
-
- if (pte && pte_present(*pte) && !pte_exec(*pte))
-- printk(nx_warning, current_uid());
-+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
- }
-
-+#ifdef CONFIG_PAX_KERNEXEC
-+ if (init_mm.start_code <= address && address < init_mm.end_code) {
-+ if (current->signal->curr_ip)
-+ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
-+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
-+ else
-+ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
-+ current->comm, task_pid_nr(current), current_uid(), current_euid());
-+ }
-+#endif
-+
- printk(KERN_ALERT "BUG: unable to handle kernel ");
- if (address < PAGE_SIZE)
- printk(KERN_CONT "NULL pointer dereference");
-@@ -740,6 +822,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
- return;
- }
- #endif
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+ if (pax_is_fetch_fault(regs, error_code, address)) {
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+ switch (pax_handle_fetch_fault(regs)) {
-+ case 2:
-+ return;
-+ }
-+#endif
-+
-+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
-+ do_group_exit(SIGKILL);
-+ }
-+#endif
-+
- /* Kernel addresses are always protection faults: */
- if (address >= TASK_SIZE)
- error_code |= PF_PROT;
-@@ -839,7 +937,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
- if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
- printk(KERN_ERR
- "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
-- tsk->comm, tsk->pid, address);
-+ tsk->comm, task_pid_nr(tsk), address);
- code = BUS_MCEERR_AR;
- }
- #endif
-@@ -896,6 +994,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
- return 1;
- }
-
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
-+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
-+{
-+ pte_t *pte;
-+ pmd_t *pmd;
-+ spinlock_t *ptl;
-+ unsigned char pte_mask;
-+
-+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
-+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
-+ return 0;
-+
-+ /* PaX: it's our fault, let's handle it if we can */
-+
-+ /* PaX: take a look at read faults before acquiring any locks */
-+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
-+ /* instruction fetch attempt from a protected page in user mode */
-+ up_read(&mm->mmap_sem);
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+ switch (pax_handle_fetch_fault(regs)) {
-+ case 2:
-+ return 1;
-+ }
-+#endif
-+
-+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
-+ do_group_exit(SIGKILL);
-+ }
-+
-+ pmd = pax_get_pmd(mm, address);
-+ if (unlikely(!pmd))
-+ return 0;
-+
-+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
-+ pte_unmap_unlock(pte, ptl);
-+ return 0;
-+ }
-+
-+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
-+ /* write attempt to a protected page in user mode */
-+ pte_unmap_unlock(pte, ptl);
-+ return 0;
-+ }
-+
-+#ifdef CONFIG_SMP
-+ if (likely(address > get_limit(regs->cs) && cpumask_test_cpu(smp_processor_id(), &mm->context.cpu_user_cs_mask)))
-+#else
-+ if (likely(address > get_limit(regs->cs)))
-+#endif
-+ {
-+ set_pte(pte, pte_mkread(*pte));
-+ __flush_tlb_one(address);
-+ pte_unmap_unlock(pte, ptl);
-+ up_read(&mm->mmap_sem);
-+ return 1;
-+ }
-+
-+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
-+
-+ /*
-+ * PaX: fill DTLB with user rights and retry
-+ */
-+ __asm__ __volatile__ (
-+ "orb %2,(%1)\n"
-+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
-+/*
-+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
-+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
-+ * page fault when examined during a TLB load attempt. this is true not only
-+ * for PTEs holding a non-present entry but also present entries that will
-+ * raise a page fault (such as those set up by PaX, or the copy-on-write
-+ * mechanism). in effect it means that we do *not* need to flush the TLBs
-+ * for our target pages since their PTEs are simply not in the TLBs at all.
-+
-+ * the best thing in omitting it is that we gain around 15-20% speed in the
-+ * fast path of the page fault handler and can get rid of tracing since we
-+ * can no longer flush unintended entries.
-+ */
-+ "invlpg (%0)\n"
-+#endif
-+ __copyuser_seg"testb $0,(%0)\n"
-+ "xorb %3,(%1)\n"
-+ :
-+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
-+ : "memory", "cc");
-+ pte_unmap_unlock(pte, ptl);
-+ up_read(&mm->mmap_sem);
-+ return 1;
-+}
-+#endif
-+
- /*
- * Handle a spurious fault caused by a stale TLB entry.
- *
-@@ -968,6 +1159,9 @@ int show_unhandled_signals = 1;
- static inline int
- access_error(unsigned long error_code, struct vm_area_struct *vma)
- {
-+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
-+ return 1;
-+
- if (error_code & PF_WRITE) {
- /* write, present and write, not present: */
- if (unlikely(!(vma->vm_flags & VM_WRITE)))
-@@ -1001,18 +1195,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
- {
- struct vm_area_struct *vma;
- struct task_struct *tsk;
-- unsigned long address;
- struct mm_struct *mm;
- int fault;
- int write = error_code & PF_WRITE;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
- (write ? FAULT_FLAG_WRITE : 0);
-
-- tsk = current;
-- mm = tsk->mm;
--
- /* Get the faulting address: */
-- address = read_cr2();
-+ unsigned long address = read_cr2();
-+
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
-+ if (!search_exception_tables(regs->ip)) {
-+ bad_area_nosemaphore(regs, error_code, address);
-+ return;
-+ }
-+ if (address < pax_user_shadow_base) {
-+ printk(KERN_EMERG "PAX: please report this to pageexec@freemail.hu\n");
-+ printk(KERN_EMERG "PAX: faulting IP: %pS\n", (void *)regs->ip);
-+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
-+ } else
-+ address -= pax_user_shadow_base;
-+ }
-+#endif
-+
-+ tsk = current;
-+ mm = tsk->mm;
-
- /*
- * Detect and handle instructions that would cause a page fault for
-@@ -1073,7 +1281,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
- * User-mode registers count as a user access even for any
- * potential system fault or CPU buglet:
- */
-- if (user_mode_vm(regs)) {
-+ if (user_mode(regs)) {
- local_irq_enable();
- error_code |= PF_USER;
- } else {
-@@ -1128,6 +1336,11 @@ retry:
- might_sleep();
- }
-
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
-+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
-+ return;
-+#endif
-+
- vma = find_vma(mm, address);
- if (unlikely(!vma)) {
- bad_area(regs, error_code, address);
-@@ -1139,18 +1352,24 @@ retry:
- bad_area(regs, error_code, address);
- return;
- }
-- if (error_code & PF_USER) {
-- /*
-- * Accessing the stack below %sp is always a bug.
-- * The large cushion allows instructions like enter
-- * and pusha to work. ("enter $65535, $31" pushes
-- * 32 pointers and then decrements %sp by 65535.)
-- */
-- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
-- bad_area(regs, error_code, address);
-- return;
-- }
-+ /*
-+ * Accessing the stack below %sp is always a bug.
-+ * The large cushion allows instructions like enter
-+ * and pusha to work. ("enter $65535, $31" pushes
-+ * 32 pointers and then decrements %sp by 65535.)
-+ */
-+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
-+ bad_area(regs, error_code, address);
-+ return;
- }
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
-+ bad_area(regs, error_code, address);
-+ return;
-+ }
-+#endif
-+
- if (unlikely(expand_stack(vma, address))) {
- bad_area(regs, error_code, address);
- return;
-@@ -1205,3 +1424,292 @@ good_area:
-
- up_read(&mm->mmap_sem);
- }
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
-+{
-+ struct mm_struct *mm = current->mm;
-+ unsigned long ip = regs->ip;
-+
-+ if (v8086_mode(regs))
-+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
-+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
-+ return true;
-+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
-+ return true;
-+ return false;
-+ }
-+#endif
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
-+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
-+ return true;
-+ return false;
-+ }
-+#endif
-+
-+ return false;
-+}
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
-+{
-+ int err;
-+
-+ do { /* PaX: libffi trampoline emulation */
-+ unsigned char mov, jmp;
-+ unsigned int addr1, addr2;
-+
-+#ifdef CONFIG_X86_64
-+ if ((regs->ip + 9) >> 32)
-+ break;
-+#endif
-+
-+ err = get_user(mov, (unsigned char __user *)regs->ip);
-+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
-+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
-+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
-+
-+ if (err)
-+ break;
-+
-+ if (mov == 0xB8 && jmp == 0xE9) {
-+ regs->ax = addr1;
-+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: gcc trampoline emulation #1 */
-+ unsigned char mov1, mov2;
-+ unsigned short jmp;
-+ unsigned int addr1, addr2;
-+
-+#ifdef CONFIG_X86_64
-+ if ((regs->ip + 11) >> 32)
-+ break;
-+#endif
-+
-+ err = get_user(mov1, (unsigned char __user *)regs->ip);
-+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
-+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
-+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
-+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
-+
-+ if (err)
-+ break;
-+
-+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
-+ regs->cx = addr1;
-+ regs->ax = addr2;
-+ regs->ip = addr2;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: gcc trampoline emulation #2 */
-+ unsigned char mov, jmp;
-+ unsigned int addr1, addr2;
-+
-+#ifdef CONFIG_X86_64
-+ if ((regs->ip + 9) >> 32)
-+ break;
-+#endif
-+
-+ err = get_user(mov, (unsigned char __user *)regs->ip);
-+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
-+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
-+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
-+
-+ if (err)
-+ break;
-+
-+ if (mov == 0xB9 && jmp == 0xE9) {
-+ regs->cx = addr1;
-+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
-+ return 2;
-+ }
-+ } while (0);
-+
-+ return 1; /* PaX in action */
-+}
-+
-+#ifdef CONFIG_X86_64
-+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
-+{
-+ int err;
-+
-+ do { /* PaX: libffi trampoline emulation */
-+ unsigned short mov1, mov2, jmp1;
-+ unsigned char stcclc, jmp2;
-+ unsigned long addr1, addr2;
-+
-+ err = get_user(mov1, (unsigned short __user *)regs->ip);
-+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
-+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
-+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
-+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
-+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
-+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
-+
-+ if (err)
-+ break;
-+
-+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
-+ regs->r11 = addr1;
-+ regs->r10 = addr2;
-+ if (stcclc == 0xF8)
-+ regs->flags &= ~X86_EFLAGS_CF;
-+ else
-+ regs->flags |= X86_EFLAGS_CF;
-+ regs->ip = addr1;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: gcc trampoline emulation #1 */
-+ unsigned short mov1, mov2, jmp1;
-+ unsigned char jmp2;
-+ unsigned int addr1;
-+ unsigned long addr2;
-+
-+ err = get_user(mov1, (unsigned short __user *)regs->ip);
-+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
-+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
-+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
-+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
-+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
-+
-+ if (err)
-+ break;
-+
-+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
-+ regs->r11 = addr1;
-+ regs->r10 = addr2;
-+ regs->ip = addr1;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: gcc trampoline emulation #2 */
-+ unsigned short mov1, mov2, jmp1;
-+ unsigned char jmp2;
-+ unsigned long addr1, addr2;
-+
-+ err = get_user(mov1, (unsigned short __user *)regs->ip);
-+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
-+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
-+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
-+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
-+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
-+
-+ if (err)
-+ break;
-+
-+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
-+ regs->r11 = addr1;
-+ regs->r10 = addr2;
-+ regs->ip = addr1;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ return 1; /* PaX in action */
-+}
-+#endif
-+
-+/*
-+ * PaX: decide what to do with offenders (regs->ip = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ * 2 when gcc trampoline was detected
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+ if (v8086_mode(regs))
-+ return 1;
-+
-+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
-+ return 1;
-+
-+#ifdef CONFIG_X86_32
-+ return pax_handle_fetch_fault_32(regs);
-+#else
-+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
-+ return pax_handle_fetch_fault_32(regs);
-+ else
-+ return pax_handle_fetch_fault_64(regs);
-+#endif
-+}
-+#endif
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 20; i++) {
-+ unsigned char c;
-+ if (get_user(c, (unsigned char __force_user *)pc+i))
-+ printk(KERN_CONT "?? ");
-+ else
-+ printk(KERN_CONT "%02x ", c);
-+ }
-+ printk("\n");
-+
-+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
-+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
-+ unsigned long c;
-+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
-+#ifdef CONFIG_X86_32
-+ printk(KERN_CONT "???????? ");
-+#else
-+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
-+ printk(KERN_CONT "???????? ???????? ");
-+ else
-+ printk(KERN_CONT "???????????????? ");
-+#endif
-+ } else {
-+#ifdef CONFIG_X86_64
-+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
-+ printk(KERN_CONT "%08x ", (unsigned int)c);
-+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
-+ } else
-+#endif
-+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
-+ }
-+ }
-+ printk("\n");
-+}
-+#endif
-+
-+/**
-+ * probe_kernel_write(): safely attempt to write to a location
-+ * @dst: address to write to
-+ * @src: pointer to the data that shall be written
-+ * @size: size of the data chunk
-+ *
-+ * Safely write to address @dst from the buffer at @src. If a kernel fault
-+ * happens, handle that and return -EFAULT.
-+ */
-+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
-+{
-+ long ret;
-+ mm_segment_t old_fs = get_fs();
-+
-+ set_fs(KERNEL_DS);
-+ pagefault_disable();
-+ pax_open_kernel();
-+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
-+ pax_close_kernel();
-+ pagefault_enable();
-+ set_fs(old_fs);
-+
-+ return ret ? -EFAULT : 0;
-+}
-diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
-index dd74e46..3f2d038 100644
---- a/arch/x86/mm/gup.c
-+++ b/arch/x86/mm/gup.c
-@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
-- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
-+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
- (void __user *)start, len)))
- return 0;
-
-@@ -331,6 +331,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
- goto slow_irqon;
- #endif
-
-+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
-+ (void __user *)start, len)))
-+ return 0;
-+
- /*
- * XXX: batch / limit 'nr', to avoid large irq off latency
- * needs some instrumenting to determine the common sizes used by
-diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
-index f4f29b1..5cac4fb 100644
---- a/arch/x86/mm/highmem_32.c
-+++ b/arch/x86/mm/highmem_32.c
-@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- BUG_ON(!pte_none(*(kmap_pte-idx)));
-+
-+ pax_open_kernel();
- set_pte(kmap_pte-idx, mk_pte(page, prot));
-+ pax_close_kernel();
-+
- arch_flush_lazy_mmu_mode();
-
- return (void *)vaddr;
-diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
-index df7d12c..93fae8e 100644
---- a/arch/x86/mm/hugetlbpage.c
-+++ b/arch/x86/mm/hugetlbpage.c
-@@ -277,13 +277,21 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
- struct hstate *h = hstate_file(file);
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
-- unsigned long start_addr;
-+ unsigned long start_addr, pax_task_size = TASK_SIZE;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
-+ pax_task_size = SEGMEXEC_TASK_SIZE;
-+#endif
-+
-+ pax_task_size -= PAGE_SIZE;
-
- if (len > mm->cached_hole_size) {
-- start_addr = mm->free_area_cache;
-+ start_addr = mm->free_area_cache;
- } else {
-- start_addr = TASK_UNMAPPED_BASE;
-- mm->cached_hole_size = 0;
-+ start_addr = mm->mmap_base;
-+ mm->cached_hole_size = 0;
- }
-
- full_search:
-@@ -291,26 +299,27 @@ full_search:
-
- for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
- /* At this point: (!vma || addr < vma->vm_end). */
-- if (TASK_SIZE - len < addr) {
-+ if (pax_task_size - len < addr) {
- /*
- * Start a new search - just in case we missed
- * some holes.
- */
-- if (start_addr != TASK_UNMAPPED_BASE) {
-- start_addr = TASK_UNMAPPED_BASE;
-+ if (start_addr != mm->mmap_base) {
-+ start_addr = mm->mmap_base;
- mm->cached_hole_size = 0;
- goto full_search;
- }
- return -ENOMEM;
- }
-- if (!vma || addr + len <= vma->vm_start) {
-- mm->free_area_cache = addr + len;
-- return addr;
-- }
-+ if (check_heap_stack_gap(vma, &addr, len, offset))
-+ break;
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
- addr = ALIGN(vma->vm_end, huge_page_size(h));
- }
-+
-+ mm->free_area_cache = addr + len;
-+ return addr;
- }
-
- static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
-@@ -319,10 +328,10 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
- {
- struct hstate *h = hstate_file(file);
- struct mm_struct *mm = current->mm;
-- struct vm_area_struct *vma, *prev_vma;
-- unsigned long base = mm->mmap_base, addr = addr0;
-+ struct vm_area_struct *vma;
-+ unsigned long base = mm->mmap_base, addr;
- unsigned long largest_hole = mm->cached_hole_size;
-- int first_time = 1;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
-
- /* don't allow allocations above current base */
- if (mm->free_area_cache > base)
-@@ -332,64 +341,68 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
- largest_hole = 0;
- mm->free_area_cache = base;
- }
--try_again:
-+
- /* make sure it can fit in the remaining address space */
- if (mm->free_area_cache < len)
- goto fail;
-
- /* either no address requested or can't fit in requested address hole */
-- addr = (mm->free_area_cache - len) & huge_page_mask(h);
-+ addr = (mm->free_area_cache - len);
- do {
-+ addr &= huge_page_mask(h);
- /*
- * Lookup failure means no vma is above this address,
- * i.e. return with success:
- */
-- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
-+ vma = find_vma(mm, addr);
-+ if (!vma)
- return addr;
-
- /*
- * new region fits between prev_vma->vm_end and
- * vma->vm_start, use it:
- */
-- if (addr + len <= vma->vm_start &&
-- (!prev_vma || (addr >= prev_vma->vm_end))) {
-+ if (check_heap_stack_gap(vma, &addr, len, offset)) {
- /* remember the address as a hint for next time */
-- mm->cached_hole_size = largest_hole;
-- return (mm->free_area_cache = addr);
-- } else {
-- /* pull free_area_cache down to the first hole */
-- if (mm->free_area_cache == vma->vm_end) {
-- mm->free_area_cache = vma->vm_start;
-- mm->cached_hole_size = largest_hole;
-- }
-+ mm->cached_hole_size = largest_hole;
-+ return (mm->free_area_cache = addr);
-+ }
-+ /* pull free_area_cache down to the first hole */
-+ if (mm->free_area_cache == vma->vm_end) {
-+ mm->free_area_cache = vma->vm_start;
-+ mm->cached_hole_size = largest_hole;
- }
-
- /* remember the largest hole we saw so far */
- if (addr + largest_hole < vma->vm_start)
-- largest_hole = vma->vm_start - addr;
-+ largest_hole = vma->vm_start - addr;
-
- /* try just below the current vma->vm_start */
-- addr = (vma->vm_start - len) & huge_page_mask(h);
-- } while (len <= vma->vm_start);
-+ addr = skip_heap_stack_gap(vma, len, offset);
-+ } while (!IS_ERR_VALUE(addr));
-
- fail:
- /*
-- * if hint left us with no space for the requested
-- * mapping then try again:
-- */
-- if (first_time) {
-- mm->free_area_cache = base;
-- largest_hole = 0;
-- first_time = 0;
-- goto try_again;
-- }
-- /*
- * A failed mmap() very likely causes application failure,
- * so fall back to the bottom-up function here. This scenario
- * can happen with large stack limits and large mmap()
- * allocations.
- */
-- mm->free_area_cache = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
-+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
-+ else
-+#endif
-+
-+ mm->mmap_base = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
-+ mm->free_area_cache = mm->mmap_base;
- mm->cached_hole_size = ~0UL;
- addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
- len, pgoff, flags);
-@@ -397,6 +410,7 @@ fail:
- /*
- * Restore the topdown base:
- */
-+ mm->mmap_base = base;
- mm->free_area_cache = base;
- mm->cached_hole_size = ~0UL;
-
-@@ -410,10 +424,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- struct hstate *h = hstate_file(file);
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
-+ unsigned long pax_task_size = TASK_SIZE;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
-
- if (len & ~huge_page_mask(h))
- return -EINVAL;
-- if (len > TASK_SIZE)
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
-+ pax_task_size = SEGMEXEC_TASK_SIZE;
-+#endif
-+
-+ pax_task_size -= PAGE_SIZE;
-+
-+ if (len > pax_task_size)
- return -ENOMEM;
-
- if (flags & MAP_FIXED) {
-@@ -422,11 +446,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- return addr;
- }
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (addr) {
- addr = ALIGN(addr, huge_page_size(h));
- vma = find_vma(mm, addr);
-- if (TASK_SIZE - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
- return addr;
- }
- if (mm->get_unmapped_area == arch_get_unmapped_area)
-diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
-index a4cca06..9e00106 100644
---- a/arch/x86/mm/init.c
-+++ b/arch/x86/mm/init.c
-@@ -3,6 +3,7 @@
- #include <linux/ioport.h>
- #include <linux/swap.h>
- #include <linux/memblock.h>
-+#include <linux/tboot.h>
-
- #include <asm/cacheflush.h>
- #include <asm/e820.h>
-@@ -15,6 +16,8 @@
- #include <asm/tlbflush.h>
- #include <asm/tlb.h>
- #include <asm/proto.h>
-+#include <asm/desc.h>
-+#include <asm/bios_ebda.h>
-
- unsigned long __initdata pgt_buf_start;
- unsigned long __meminitdata pgt_buf_end;
-@@ -43,7 +46,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
- {
- int i;
- unsigned long puds = 0, pmds = 0, ptes = 0, tables;
-- unsigned long start = 0, good_end;
-+ unsigned long start = 0x100000, good_end;
- unsigned long pgd_extra = 0;
- phys_addr_t base;
-
-@@ -282,7 +285,14 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
-
- #ifdef CONFIG_X86_32
- early_ioremap_page_table_range_init();
-+#endif
-
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
-+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-+ KERNEL_PGD_PTRS);
-+ load_cr3(get_cpu_pgd(0));
-+#elif defined(CONFIG_X86_32)
- load_cr3(swapper_pg_dir);
- #endif
-
-@@ -324,10 +334,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
- */
-+
-+#ifdef CONFIG_GRKERNSEC_KMEM
-+static unsigned int ebda_start __read_only;
-+static unsigned int ebda_end __read_only;
-+#endif
-+
- int devmem_is_allowed(unsigned long pagenr)
- {
-+#ifdef CONFIG_GRKERNSEC_KMEM
-+ /* allow BDA */
-+ if (!pagenr)
-+ return 1;
-+ /* allow EBDA */
-+ if (pagenr >= ebda_start && pagenr < ebda_end)
-+ return 1;
-+ /* if tboot is in use, allow access to its hardcoded serial log range */
-+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
-+ return 1;
-+#else
-+ if (!pagenr)
-+ return 1;
-+#ifdef CONFIG_VM86
-+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
-+ return 1;
-+#endif
-+#endif
-+
-+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
-+ return 1;
-+#ifdef CONFIG_GRKERNSEC_KMEM
-+ /* throw out everything else below 1MB */
- if (pagenr <= 256)
-- return 1;
-+ return 0;
-+#endif
- if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
- return 0;
- if (!page_is_ram(pagenr))
-@@ -384,8 +424,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
- #endif
- }
-
-+#ifdef CONFIG_GRKERNSEC_KMEM
-+static inline void gr_init_ebda(void)
-+{
-+ unsigned int ebda_addr;
-+ unsigned int ebda_size = 0;
-+
-+ ebda_addr = get_bios_ebda();
-+ if (ebda_addr) {
-+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
-+ ebda_size <<= 10;
-+ }
-+ if (ebda_addr && ebda_size) {
-+ ebda_start = ebda_addr >> PAGE_SHIFT;
-+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
-+ } else {
-+ ebda_start = 0x9f000 >> PAGE_SHIFT;
-+ ebda_end = 0xa0000 >> PAGE_SHIFT;
-+ }
-+}
-+#else
-+static inline void gr_init_ebda(void) { }
-+#endif
-+
- void free_initmem(void)
- {
-+#ifdef CONFIG_PAX_KERNEXEC
-+#ifdef CONFIG_X86_32
-+ /* PaX: limit KERNEL_CS to actual size */
-+ unsigned long addr, limit;
-+ struct desc_struct d;
-+ int cpu;
-+#else
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ unsigned long addr, end;
-+#endif
-+#endif
-+
-+ gr_init_ebda();
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+#ifdef CONFIG_X86_32
-+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
-+ limit = (limit - 1UL) >> PAGE_SHIFT;
-+
-+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
-+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
-+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
-+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
-+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
-+ }
-+
-+ /* PaX: make KERNEL_CS read-only */
-+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
-+ if (!paravirt_enabled())
-+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
-+/*
-+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
-+ }
-+*/
-+#ifdef CONFIG_X86_PAE
-+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
-+/*
-+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
-+ }
-+*/
-+#endif
-+
-+#ifdef CONFIG_MODULES
-+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
-+#endif
-+
-+#else
-+ /* PaX: make kernel code/rodata read-only, rest non-executable */
-+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ if (!pmd_present(*pmd))
-+ continue;
-+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
-+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
-+ else
-+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
-+ }
-+
-+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
-+ end = addr + KERNEL_IMAGE_SIZE;
-+ for (; addr < end; addr += PMD_SIZE) {
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ if (!pmd_present(*pmd))
-+ continue;
-+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
-+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
-+ }
-+#endif
-+
-+ flush_tlb_all();
-+#endif
-+
- free_init_pages("unused kernel memory",
- (unsigned long)(&__init_begin),
- (unsigned long)(&__init_end));
-diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
-index 29f7c6d9..5122941 100644
---- a/arch/x86/mm/init_32.c
-+++ b/arch/x86/mm/init_32.c
-@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
- }
-
- /*
-- * Creates a middle page table and puts a pointer to it in the
-- * given global directory entry. This only returns the gd entry
-- * in non-PAE compilation mode, since the middle layer is folded.
-- */
--static pmd_t * __init one_md_table_init(pgd_t *pgd)
--{
-- pud_t *pud;
-- pmd_t *pmd_table;
--
--#ifdef CONFIG_X86_PAE
-- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
-- if (after_bootmem)
-- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
-- else
-- pmd_table = (pmd_t *)alloc_low_page();
-- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
-- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
-- pud = pud_offset(pgd, 0);
-- BUG_ON(pmd_table != pmd_offset(pud, 0));
--
-- return pmd_table;
-- }
--#endif
-- pud = pud_offset(pgd, 0);
-- pmd_table = pmd_offset(pud, 0);
--
-- return pmd_table;
--}
--
--/*
- * Create a page table and place a pointer to it in a middle page
- * directory entry:
- */
-@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
- page_table = (pte_t *)alloc_low_page();
-
- paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
-+#else
- set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
-+#endif
- BUG_ON(page_table != pte_offset_kernel(pmd, 0));
- }
-
- return pte_offset_kernel(pmd, 0);
- }
-
-+static pmd_t * __init one_md_table_init(pgd_t *pgd)
-+{
-+ pud_t *pud;
-+ pmd_t *pmd_table;
-+
-+ pud = pud_offset(pgd, 0);
-+ pmd_table = pmd_offset(pud, 0);
-+
-+ return pmd_table;
-+}
-+
- pmd_t * __init populate_extra_pmd(unsigned long vaddr)
- {
- int pgd_idx = pgd_index(vaddr);
-@@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
- int pgd_idx, pmd_idx;
- unsigned long vaddr;
- pgd_t *pgd;
-+ pud_t *pud;
- pmd_t *pmd;
- pte_t *pte = NULL;
-
-@@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
- pgd = pgd_base + pgd_idx;
-
- for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
-- pmd = one_md_table_init(pgd);
-- pmd = pmd + pmd_index(vaddr);
-+ pud = pud_offset(pgd, vaddr);
-+ pmd = pmd_offset(pud, vaddr);
-+
-+#ifdef CONFIG_X86_PAE
-+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
-+#endif
-+
- for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
- pmd++, pmd_idx++) {
- pte = page_table_kmap_check(one_page_table_init(pmd),
-@@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
- }
- }
-
--static inline int is_kernel_text(unsigned long addr)
-+static inline int is_kernel_text(unsigned long start, unsigned long end)
- {
-- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
-- return 1;
-- return 0;
-+ if ((start >= ktla_ktva((unsigned long)_etext) ||
-+ end <= ktla_ktva((unsigned long)_stext)) &&
-+ (start >= ktla_ktva((unsigned long)_einittext) ||
-+ end <= ktla_ktva((unsigned long)_sinittext)) &&
-+
-+#ifdef CONFIG_ACPI_SLEEP
-+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
-+#endif
-+
-+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
-+ return 0;
-+ return 1;
- }
-
- /*
-@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
- unsigned long last_map_addr = end;
- unsigned long start_pfn, end_pfn;
- pgd_t *pgd_base = swapper_pg_dir;
-- int pgd_idx, pmd_idx, pte_ofs;
-+ unsigned int pgd_idx, pmd_idx, pte_ofs;
- unsigned long pfn;
- pgd_t *pgd;
-+ pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- unsigned pages_2m, pages_4k;
-@@ -281,8 +282,13 @@ repeat:
- pfn = start_pfn;
- pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
- pgd = pgd_base + pgd_idx;
-- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
-- pmd = one_md_table_init(pgd);
-+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
-+ pud = pud_offset(pgd, 0);
-+ pmd = pmd_offset(pud, 0);
-+
-+#ifdef CONFIG_X86_PAE
-+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
-+#endif
-
- if (pfn >= end_pfn)
- continue;
-@@ -294,14 +300,13 @@ repeat:
- #endif
- for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
- pmd++, pmd_idx++) {
-- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
-+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
-
- /*
- * Map with big pages if possible, otherwise
- * create normal page tables:
- */
- if (use_pse) {
-- unsigned int addr2;
- pgprot_t prot = PAGE_KERNEL_LARGE;
- /*
- * first pass will use the same initial
-@@ -311,11 +316,7 @@ repeat:
- __pgprot(PTE_IDENT_ATTR |
- _PAGE_PSE);
-
-- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
-- PAGE_OFFSET + PAGE_SIZE-1;
--
-- if (is_kernel_text(addr) ||
-- is_kernel_text(addr2))
-+ if (is_kernel_text(address, address + PMD_SIZE))
- prot = PAGE_KERNEL_LARGE_EXEC;
-
- pages_2m++;
-@@ -332,7 +333,7 @@ repeat:
- pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
- pte += pte_ofs;
- for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
-- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
-+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
- pgprot_t prot = PAGE_KERNEL;
- /*
- * first pass will use the same initial
-@@ -340,7 +341,7 @@ repeat:
- */
- pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
-
-- if (is_kernel_text(addr))
-+ if (is_kernel_text(address, address + PAGE_SIZE))
- prot = PAGE_KERNEL_EXEC;
-
- pages_4k++;
-@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
-
- pud = pud_offset(pgd, va);
- pmd = pmd_offset(pud, va);
-- if (!pmd_present(*pmd))
-+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
- break;
-
- pte = pte_offset_kernel(pmd, va);
-@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
-
- static void __init pagetable_init(void)
- {
-- pgd_t *pgd_base = swapper_pg_dir;
--
-- permanent_kmaps_init(pgd_base);
-+ permanent_kmaps_init(swapper_pg_dir);
- }
-
--pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
-+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
- EXPORT_SYMBOL_GPL(__supported_pte_mask);
-
- /* user-defined highmem size */
-@@ -774,7 +773,7 @@ void __init mem_init(void)
- set_highmem_pages_init();
-
- codesize = (unsigned long) &_etext - (unsigned long) &_text;
-- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
-+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
- initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
-
- printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
-@@ -815,10 +814,10 @@ void __init mem_init(void)
- ((unsigned long)&__init_end -
- (unsigned long)&__init_begin) >> 10,
-
-- (unsigned long)&_etext, (unsigned long)&_edata,
-- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
-+ (unsigned long)&_sdata, (unsigned long)&_edata,
-+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
-
-- (unsigned long)&_text, (unsigned long)&_etext,
-+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
- ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
-
- /*
-@@ -896,6 +895,7 @@ void set_kernel_text_rw(void)
- if (!kernel_set_to_readonly)
- return;
-
-+ start = ktla_ktva(start);
- pr_debug("Set kernel text: %lx - %lx for read write\n",
- start, start+size);
-
-@@ -910,6 +910,7 @@ void set_kernel_text_ro(void)
- if (!kernel_set_to_readonly)
- return;
-
-+ start = ktla_ktva(start);
- pr_debug("Set kernel text: %lx - %lx for read only\n",
- start, start+size);
-
-@@ -938,6 +939,7 @@ void mark_rodata_ro(void)
- unsigned long start = PFN_ALIGN(_text);
- unsigned long size = PFN_ALIGN(_etext) - start;
-
-+ start = ktla_ktva(start);
- set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
- printk(KERN_INFO "Write protecting the kernel text: %luk\n",
- size >> 10);
-diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
-index 44b93da..79d59f5 100644
---- a/arch/x86/mm/init_64.c
-+++ b/arch/x86/mm/init_64.c
-@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
- * around without checking the pgd every time.
- */
-
--pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
-+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
- EXPORT_SYMBOL_GPL(__supported_pte_mask);
-
- int force_personality32;
-@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
-
- for (address = start; address <= end; address += PGDIR_SIZE) {
- const pgd_t *pgd_ref = pgd_offset_k(address);
-+
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ unsigned long cpu;
-+#else
- struct page *page;
-+#endif
-
- if (pgd_none(*pgd_ref))
- continue;
-
- spin_lock(&pgd_lock);
-+
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
-+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
-+#else
- list_for_each_entry(page, &pgd_list, lru) {
- pgd_t *pgd;
- spinlock_t *pgt_lock;
-@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
- /* the pgt_lock only for Xen */
- pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
- spin_lock(pgt_lock);
-+#endif
-
- if (pgd_none(*pgd))
- set_pgd(pgd, *pgd_ref);
-@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
- BUG_ON(pgd_page_vaddr(*pgd)
- != pgd_page_vaddr(*pgd_ref));
-
-+#ifndef CONFIG_PAX_PER_CPU_PGD
- spin_unlock(pgt_lock);
-+#endif
-+
- }
- spin_unlock(&pgd_lock);
- }
-@@ -162,7 +176,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
- {
- if (pgd_none(*pgd)) {
- pud_t *pud = (pud_t *)spp_getpage();
-- pgd_populate(&init_mm, pgd, pud);
-+ pgd_populate_kernel(&init_mm, pgd, pud);
- if (pud != pud_offset(pgd, 0))
- printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
- pud, pud_offset(pgd, 0));
-@@ -174,7 +188,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
- {
- if (pud_none(*pud)) {
- pmd_t *pmd = (pmd_t *) spp_getpage();
-- pud_populate(&init_mm, pud, pmd);
-+ pud_populate_kernel(&init_mm, pud, pmd);
- if (pmd != pmd_offset(pud, 0))
- printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
- pmd, pmd_offset(pud, 0));
-@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
- pmd = fill_pmd(pud, vaddr);
- pte = fill_pte(pmd, vaddr);
-
-+ pax_open_kernel();
- set_pte(pte, new_pte);
-+ pax_close_kernel();
-
- /*
- * It's enough to flush this one mapping.
-@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
- pgd = pgd_offset_k((unsigned long)__va(phys));
- if (pgd_none(*pgd)) {
- pud = (pud_t *) spp_getpage();
-- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
-- _PAGE_USER));
-+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
- }
- pud = pud_offset(pgd, (unsigned long)__va(phys));
- if (pud_none(*pud)) {
- pmd = (pmd_t *) spp_getpage();
-- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
-- _PAGE_USER));
-+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
- }
- pmd = pmd_offset(pud, phys);
- BUG_ON(!pmd_none(*pmd));
-@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
- if (pfn >= pgt_buf_top)
- panic("alloc_low_page: ran out of memory");
-
-- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
-+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
- clear_page(adr);
- *phys = pfn * PAGE_SIZE;
- return adr;
-@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
-
- phys = __pa(virt);
- left = phys & (PAGE_SIZE - 1);
-- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
-+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
- adr = (void *)(((unsigned long)adr) | left);
-
- return adr;
-@@ -413,7 +427,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
-
- int i = pmd_index(address);
-
-- for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
-+ for (; i < PTRS_PER_PMD; i++, address = (address & PMD_MASK) + PMD_SIZE) {
- unsigned long pte_phys;
- pmd_t *pmd = pmd_page + pmd_index(address);
- pte_t *pte;
-@@ -546,7 +560,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
- unmap_low_page(pmd);
-
- spin_lock(&init_mm.page_table_lock);
-- pud_populate(&init_mm, pud, __va(pmd_phys));
-+ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
- spin_unlock(&init_mm.page_table_lock);
- }
- __flush_tlb_all();
-@@ -592,7 +606,7 @@ kernel_physical_mapping_init(unsigned long start,
- unmap_low_page(pud);
-
- spin_lock(&init_mm.page_table_lock);
-- pgd_populate(&init_mm, pgd, __va(pud_phys));
-+ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
- spin_unlock(&init_mm.page_table_lock);
- pgd_changed = true;
- }
-@@ -856,8 +870,8 @@ int kern_addr_valid(unsigned long addr)
- static struct vm_area_struct gate_vma = {
- .vm_start = VSYSCALL_START,
- .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
-- .vm_page_prot = PAGE_READONLY_EXEC,
-- .vm_flags = VM_READ | VM_EXEC
-+ .vm_page_prot = PAGE_READONLY,
-+ .vm_flags = VM_READ
- };
-
- struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-@@ -891,7 +905,7 @@ int in_gate_area_no_mm(unsigned long addr)
-
- const char *arch_vma_name(struct vm_area_struct *vma)
- {
-- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
-+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
- return "[vdso]";
- if (vma == &gate_vma)
- return "[vsyscall]";
-diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
-index 7b179b49..6bd17777 100644
---- a/arch/x86/mm/iomap_32.c
-+++ b/arch/x86/mm/iomap_32.c
-@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-+
-+ pax_open_kernel();
- set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
-+ pax_close_kernel();
-+
- arch_flush_lazy_mmu_mode();
-
- return (void *)vaddr;
-diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
-index dec49d3..1943563 100644
---- a/arch/x86/mm/ioremap.c
-+++ b/arch/x86/mm/ioremap.c
-@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
- unsigned long i;
-
- for (i = 0; i < nr_pages; ++i)
-- if (pfn_valid(start_pfn + i) &&
-- !PageReserved(pfn_to_page(start_pfn + i)))
-+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
-+ !PageReserved(pfn_to_page(start_pfn + i))))
- return 1;
-
- WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
-@@ -268,7 +268,7 @@ EXPORT_SYMBOL(ioremap_prot);
- *
- * Caller must ensure there is only one unmapping for the same pointer.
- */
--void iounmap(volatile void __iomem *addr)
-+void iounmap(const volatile void __iomem *addr)
- {
- struct vm_struct *p, *o;
-
-@@ -322,23 +322,22 @@ EXPORT_SYMBOL(iounmap);
- */
- void *xlate_dev_mem_ptr(unsigned long phys)
- {
-- void *addr;
-- unsigned long start = phys & PAGE_MASK;
--
- /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
-- if (page_is_ram(start >> PAGE_SHIFT))
-+ if (page_is_ram(phys >> PAGE_SHIFT))
-+#ifdef CONFIG_HIGHMEM
-+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
-+#endif
- return __va(phys);
-
-- addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
-- if (addr)
-- addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
--
-- return addr;
-+ return (void __force *)ioremap_cache(phys, PAGE_SIZE);
- }
-
- void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
- {
- if (page_is_ram(phys >> PAGE_SHIFT))
-+#ifdef CONFIG_HIGHMEM
-+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
-+#endif
- return;
-
- iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
-@@ -356,7 +355,7 @@ static int __init early_ioremap_debug_setup(char *str)
- early_param("early_ioremap_debug", early_ioremap_debug_setup);
-
- static __initdata int after_paging_init;
--static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
-+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
-
- static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
- {
-@@ -393,8 +392,7 @@ void __init early_ioremap_init(void)
- slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
-
- pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
-- memset(bm_pte, 0, sizeof(bm_pte));
-- pmd_populate_kernel(&init_mm, pmd, bm_pte);
-+ pmd_populate_user(&init_mm, pmd, bm_pte);
-
- /*
- * The boot-ioremap range spans multiple pmds, for which
-diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
-index d87dd6d..bf3fa66 100644
---- a/arch/x86/mm/kmemcheck/kmemcheck.c
-+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
-@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
- * memory (e.g. tracked pages)? For now, we need this to avoid
- * invoking kmemcheck for PnP BIOS calls.
- */
-- if (regs->flags & X86_VM_MASK)
-+ if (v8086_mode(regs))
- return false;
-- if (regs->cs != __KERNEL_CS)
-+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
- return false;
-
- pte = kmemcheck_pte_lookup(address);
-diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
-index 75f9e5d..77b1e62 100644
---- a/arch/x86/mm/mmap.c
-+++ b/arch/x86/mm/mmap.c
-@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
- * Leave an at least ~128 MB hole with possible stack randomization.
- */
- #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
--#define MAX_GAP (TASK_SIZE/6*5)
-+#define MAX_GAP (pax_task_size/6*5)
-
- static int mmap_is_legacy(void)
- {
-@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
- return rnd << PAGE_SHIFT;
- }
-
--static unsigned long mmap_base(void)
-+static unsigned long mmap_base(struct mm_struct *mm)
- {
- unsigned long gap = rlimit(RLIMIT_STACK);
-+ unsigned long pax_task_size = TASK_SIZE;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
-+ pax_task_size = SEGMEXEC_TASK_SIZE;
-+#endif
-
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
-- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
-+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
- }
-
- /*
- * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
- * does, but not when emulating X86_32
- */
--static unsigned long mmap_legacy_base(void)
-+static unsigned long mmap_legacy_base(struct mm_struct *mm)
- {
-- if (mmap_is_ia32())
-+ if (mmap_is_ia32()) {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
-+ return SEGMEXEC_TASK_UNMAPPED_BASE;
-+ else
-+#endif
-+
- return TASK_UNMAPPED_BASE;
-- else
-+ } else
- return TASK_UNMAPPED_BASE + mmap_rnd();
- }
-
-@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
- */
- void arch_pick_mmap_layout(struct mm_struct *mm)
- {
-- mm->mmap_legacy_base = mmap_legacy_base();
-- mm->mmap_base = mmap_base();
-+ mm->mmap_legacy_base = mmap_legacy_base(mm);
-+ mm->mmap_base = mmap_base(mm);
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
-+ mm->mmap_legacy_base += mm->delta_mmap;
-+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+ }
-+#endif
-
- if (mmap_is_legacy()) {
- mm->mmap_base = mm->mmap_legacy_base;
-diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
-index de54b9b..935281f 100644
---- a/arch/x86/mm/mmio-mod.c
-+++ b/arch/x86/mm/mmio-mod.c
-@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
- break;
- default:
- {
-- unsigned char *ip = (unsigned char *)instptr;
-+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
- my_trace->opcode = MMIO_UNKNOWN_OP;
- my_trace->width = 0;
- my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
-@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
- static void ioremap_trace_core(resource_size_t offset, unsigned long size,
- void __iomem *addr)
- {
-- static atomic_t next_id;
-+ static atomic_unchecked_t next_id;
- struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
- /* These are page-unaligned. */
- struct mmiotrace_map map = {
-@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
- .private = trace
- },
- .phys = offset,
-- .id = atomic_inc_return(&next_id)
-+ .id = atomic_inc_return_unchecked(&next_id)
- };
- map.map_id = trace->id;
-
-@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
- ioremap_trace_core(offset, size, addr);
- }
-
--static void iounmap_trace_core(volatile void __iomem *addr)
-+static void iounmap_trace_core(const volatile void __iomem *addr)
- {
- struct mmiotrace_map map = {
- .phys = 0,
-@@ -328,7 +328,7 @@ not_enabled:
- }
- }
-
--void mmiotrace_iounmap(volatile void __iomem *addr)
-+void mmiotrace_iounmap(const volatile void __iomem *addr)
- {
- might_sleep();
- if (is_enabled()) /* recheck and proper locking in *_core() */
-diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
-index fbeaaf4..559063f 100644
---- a/arch/x86/mm/numa.c
-+++ b/arch/x86/mm/numa.c
-@@ -494,7 +494,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
- return true;
- }
-
--static int __init numa_register_memblks(struct numa_meminfo *mi)
-+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
- {
- unsigned long uninitialized_var(pfn_align);
- int i, nid;
-diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
-index b008656..773eac2 100644
---- a/arch/x86/mm/pageattr-test.c
-+++ b/arch/x86/mm/pageattr-test.c
-@@ -36,7 +36,7 @@ enum {
-
- static int pte_testbit(pte_t pte)
- {
-- return pte_flags(pte) & _PAGE_UNUSED1;
-+ return pte_flags(pte) & _PAGE_CPA_TEST;
- }
-
- struct split_state {
-diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
-index f9e5267..5c194c9 100644
---- a/arch/x86/mm/pageattr.c
-+++ b/arch/x86/mm/pageattr.c
-@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
- */
- #ifdef CONFIG_PCI_BIOS
- if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
-- pgprot_val(forbidden) |= _PAGE_NX;
-+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
- #endif
-
- /*
-@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
- * Does not cover __inittext since that is gone later on. On
- * 64bit we do not enforce !NX on the low mapping
- */
-- if (within(address, (unsigned long)_text, (unsigned long)_etext))
-- pgprot_val(forbidden) |= _PAGE_NX;
-+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
-+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
-
-+#ifdef CONFIG_DEBUG_RODATA
- /*
- * The .rodata section needs to be read-only. Using the pfn
- * catches all aliases.
-@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
- if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
- __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
- pgprot_val(forbidden) |= _PAGE_RW;
-+#endif
-
- #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
- /*
-@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
- }
- #endif
-
-+#ifdef CONFIG_PAX_KERNEXEC
-+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
-+ pgprot_val(forbidden) |= _PAGE_RW;
-+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
-+ }
-+#endif
-+
- prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
-
- return prot;
-@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
- static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
- {
- /* change init_mm */
-+ pax_open_kernel();
- set_pte_atomic(kpte, pte);
-+
- #ifdef CONFIG_X86_32
- if (!SHARED_KERNEL_PMD) {
-+
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ unsigned long cpu;
-+#else
- struct page *page;
-+#endif
-
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
-+ pgd_t *pgd = get_cpu_pgd(cpu);
-+#else
- list_for_each_entry(page, &pgd_list, lru) {
-- pgd_t *pgd;
-+ pgd_t *pgd = (pgd_t *)page_address(page);
-+#endif
-+
- pud_t *pud;
- pmd_t *pmd;
-
-- pgd = (pgd_t *)page_address(page) + pgd_index(address);
-+ pgd += pgd_index(address);
- pud = pud_offset(pgd, address);
- pmd = pmd_offset(pud, address);
- set_pte_atomic((pte_t *)pmd, pte);
- }
- }
- #endif
-+ pax_close_kernel();
- }
-
- static int
-diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
-index f6ff57b..481690f 100644
---- a/arch/x86/mm/pat.c
-+++ b/arch/x86/mm/pat.c
-@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
-
- if (!entry) {
- printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
-- current->comm, current->pid, start, end);
-+ current->comm, task_pid_nr(current), start, end);
- return -EINVAL;
- }
-
-@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
- while (cursor < to) {
- if (!devmem_is_allowed(pfn)) {
- printk(KERN_INFO
-- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
-- current->comm, from, to);
-+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
-+ current->comm, from, to, cursor);
- return 0;
- }
- cursor += PAGE_SIZE;
-@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
- printk(KERN_INFO
- "%s:%d ioremap_change_attr failed %s "
- "for %Lx-%Lx\n",
-- current->comm, current->pid,
-+ current->comm, task_pid_nr(current),
- cattr_name(flags),
- base, (unsigned long long)(base + size));
- return -EINVAL;
-@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
- if (want_flags != flags) {
- printk(KERN_WARNING
- "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
-- current->comm, current->pid,
-+ current->comm, task_pid_nr(current),
- cattr_name(want_flags),
- (unsigned long long)paddr,
- (unsigned long long)(paddr + size),
-@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
- free_memtype(paddr, paddr + size);
- printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
- " for %Lx-%Lx, got %s\n",
-- current->comm, current->pid,
-+ current->comm, task_pid_nr(current),
- cattr_name(want_flags),
- (unsigned long long)paddr,
- (unsigned long long)(paddr + size),
-diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
-index 8acaddd..4eaa657 100644
---- a/arch/x86/mm/pat_rbtree.c
-+++ b/arch/x86/mm/pat_rbtree.c
-@@ -165,7 +165,7 @@ success:
-
- failure:
- printk(KERN_INFO "%s:%d conflicting memory types "
-- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
-+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
- end, cattr_name(found_type), cattr_name(match->type));
- return -EBUSY;
- }
-diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
-index 9f0614d..92ae64a 100644
---- a/arch/x86/mm/pf_in.c
-+++ b/arch/x86/mm/pf_in.c
-@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
- int i;
- enum reason_type rv = OTHERS;
-
-- p = (unsigned char *)ins_addr;
-+ p = (unsigned char *)ktla_ktva(ins_addr);
- p += skip_prefix(p, &prf);
- p += get_opcode(p, &opcode);
-
-@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
- struct prefix_bits prf;
- int i;
-
-- p = (unsigned char *)ins_addr;
-+ p = (unsigned char *)ktla_ktva(ins_addr);
- p += skip_prefix(p, &prf);
- p += get_opcode(p, &opcode);
-
-@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
- struct prefix_bits prf;
- int i;
-
-- p = (unsigned char *)ins_addr;
-+ p = (unsigned char *)ktla_ktva(ins_addr);
- p += skip_prefix(p, &prf);
- p += get_opcode(p, &opcode);
-
-@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
- struct prefix_bits prf;
- int i;
-
-- p = (unsigned char *)ins_addr;
-+ p = (unsigned char *)ktla_ktva(ins_addr);
- p += skip_prefix(p, &prf);
- p += get_opcode(p, &opcode);
- for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
-@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
- struct prefix_bits prf;
- int i;
-
-- p = (unsigned char *)ins_addr;
-+ p = (unsigned char *)ktla_ktva(ins_addr);
- p += skip_prefix(p, &prf);
- p += get_opcode(p, &opcode);
- for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
-diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
-index 8573b83..4f3ed7e 100644
---- a/arch/x86/mm/pgtable.c
-+++ b/arch/x86/mm/pgtable.c
-@@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
- list_del(&page->lru);
- }
-
--#define UNSHARED_PTRS_PER_PGD \
-- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
-
-+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
-+{
-+ unsigned int count = USER_PGD_PTRS;
-
-+ while (count--)
-+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
-+}
-+#endif
-+
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
-+{
-+ unsigned int count = USER_PGD_PTRS;
-+
-+ while (count--) {
-+ pgd_t pgd;
-+
-+#ifdef CONFIG_X86_64
-+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
-+#else
-+ pgd = *src++;
-+#endif
-+
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
-+#endif
-+
-+ *dst++ = pgd;
-+ }
-+
-+}
-+#endif
-+
-+#ifdef CONFIG_X86_64
-+#define pxd_t pud_t
-+#define pyd_t pgd_t
-+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
-+#define pxd_free(mm, pud) pud_free((mm), (pud))
-+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
-+#define pyd_offset(mm, address) pgd_offset((mm), (address))
-+#define PYD_SIZE PGDIR_SIZE
-+#else
-+#define pxd_t pmd_t
-+#define pyd_t pud_t
-+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
-+#define pxd_free(mm, pud) pmd_free((mm), (pud))
-+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
-+#define pyd_offset(mm, address) pud_offset((mm), (address))
-+#define PYD_SIZE PUD_SIZE
-+#endif
-+
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
-+static inline void pgd_dtor(pgd_t *pgd) {}
-+#else
- static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
- {
- BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
-@@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
- pgd_list_del(pgd);
- spin_unlock(&pgd_lock);
- }
-+#endif
-
- /*
- * List of all pgd's needed for non-PAE so it can invalidate entries
-@@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
- * -- wli
- */
-
--#ifdef CONFIG_X86_PAE
-+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
- /*
- * In PAE mode, we need to do a cr3 reload (=tlb flush) when
- * updating the top-level pagetable entries to guarantee the
-@@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
- * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
- * and initialize the kernel pmds here.
- */
--#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
-+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
-
- void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
- {
-@@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
- */
- flush_tlb_mm(mm);
- }
-+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
-+#define PREALLOCATED_PXDS USER_PGD_PTRS
- #else /* !CONFIG_X86_PAE */
-
- /* No need to prepopulate any pagetable entries in non-PAE modes. */
--#define PREALLOCATED_PMDS 0
-+#define PREALLOCATED_PXDS 0
-
- #endif /* CONFIG_X86_PAE */
-
--static void free_pmds(pmd_t *pmds[])
-+static void free_pxds(pxd_t *pxds[])
- {
- int i;
-
-- for(i = 0; i < PREALLOCATED_PMDS; i++)
-- if (pmds[i])
-- free_page((unsigned long)pmds[i]);
-+ for(i = 0; i < PREALLOCATED_PXDS; i++)
-+ if (pxds[i])
-+ free_page((unsigned long)pxds[i]);
- }
-
--static int preallocate_pmds(pmd_t *pmds[])
-+static int preallocate_pxds(pxd_t *pxds[])
- {
- int i;
- bool failed = false;
-
-- for(i = 0; i < PREALLOCATED_PMDS; i++) {
-- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
-- if (pmd == NULL)
-+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
-+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
-+ if (pxd == NULL)
- failed = true;
-- pmds[i] = pmd;
-+ pxds[i] = pxd;
- }
-
- if (failed) {
-- free_pmds(pmds);
-+ free_pxds(pxds);
- return -ENOMEM;
- }
-
-@@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
- * preallocate which never got a corresponding vma will need to be
- * freed manually.
- */
--static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
-+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
- {
- int i;
-
-- for(i = 0; i < PREALLOCATED_PMDS; i++) {
-+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
- pgd_t pgd = pgdp[i];
-
- if (pgd_val(pgd) != 0) {
-- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
-+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
-
-- pgdp[i] = native_make_pgd(0);
-+ set_pgd(pgdp + i, native_make_pgd(0));
-
-- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
-- pmd_free(mm, pmd);
-+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
-+ pxd_free(mm, pxd);
- }
- }
- }
-
--static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
-+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
- {
-- pud_t *pud;
-+ pyd_t *pyd;
- unsigned long addr;
- int i;
-
-- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
-+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
- return;
-
-- pud = pud_offset(pgd, 0);
-+#ifdef CONFIG_X86_64
-+ pyd = pyd_offset(mm, 0L);
-+#else
-+ pyd = pyd_offset(pgd, 0L);
-+#endif
-
-- for (addr = i = 0; i < PREALLOCATED_PMDS;
-- i++, pud++, addr += PUD_SIZE) {
-- pmd_t *pmd = pmds[i];
-+ for (addr = i = 0; i < PREALLOCATED_PXDS;
-+ i++, pyd++, addr += PYD_SIZE) {
-+ pxd_t *pxd = pxds[i];
-
- if (i >= KERNEL_PGD_BOUNDARY)
-- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
-- sizeof(pmd_t) * PTRS_PER_PMD);
-+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
-+ sizeof(pxd_t) * PTRS_PER_PMD);
-
-- pud_populate(mm, pud, pmd);
-+ pyd_populate(mm, pyd, pxd);
- }
- }
-
- pgd_t *pgd_alloc(struct mm_struct *mm)
- {
- pgd_t *pgd;
-- pmd_t *pmds[PREALLOCATED_PMDS];
-+ pxd_t *pxds[PREALLOCATED_PXDS];
-
- pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
-
-@@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
-
- mm->pgd = pgd;
-
-- if (preallocate_pmds(pmds) != 0)
-+ if (preallocate_pxds(pxds) != 0)
- goto out_free_pgd;
-
- if (paravirt_pgd_alloc(mm) != 0)
-- goto out_free_pmds;
-+ goto out_free_pxds;
-
- /*
- * Make sure that pre-populating the pmds is atomic with
-@@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
- spin_lock(&pgd_lock);
-
- pgd_ctor(mm, pgd);
-- pgd_prepopulate_pmd(mm, pgd, pmds);
-+ pgd_prepopulate_pxd(mm, pgd, pxds);
-
- spin_unlock(&pgd_lock);
-
- return pgd;
-
--out_free_pmds:
-- free_pmds(pmds);
-+out_free_pxds:
-+ free_pxds(pxds);
- out_free_pgd:
- free_page((unsigned long)pgd);
- out:
-@@ -295,7 +356,7 @@ out:
-
- void pgd_free(struct mm_struct *mm, pgd_t *pgd)
- {
-- pgd_mop_up_pmds(mm, pgd);
-+ pgd_mop_up_pxds(mm, pgd);
- pgd_dtor(pgd);
- paravirt_pgd_free(mm, pgd);
- free_page((unsigned long)pgd);
-diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
-index cac7184..09a39fa 100644
---- a/arch/x86/mm/pgtable_32.c
-+++ b/arch/x86/mm/pgtable_32.c
-@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
- return;
- }
- pte = pte_offset_kernel(pmd, vaddr);
-+
-+ pax_open_kernel();
- if (pte_val(pteval))
- set_pte_at(&init_mm, vaddr, pte, pteval);
- else
- pte_clear(&init_mm, vaddr, pte);
-+ pax_close_kernel();
-
- /*
- * It's enough to flush this one mapping.
-diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
-index d2e2735..5c6586f 100644
---- a/arch/x86/mm/physaddr.c
-+++ b/arch/x86/mm/physaddr.c
-@@ -8,7 +8,7 @@
-
- #ifdef CONFIG_X86_64
-
--unsigned long __phys_addr(unsigned long x)
-+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
- {
- if (x >= __START_KERNEL_map) {
- x -= __START_KERNEL_map;
-@@ -45,7 +45,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
- #else
-
- #ifdef CONFIG_DEBUG_VIRTUAL
--unsigned long __phys_addr(unsigned long x)
-+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
- {
- /* VMALLOC_* aren't constants */
- VIRTUAL_BUG_ON(x < PAGE_OFFSET);
-diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
-index 410531d..0f16030 100644
---- a/arch/x86/mm/setup_nx.c
-+++ b/arch/x86/mm/setup_nx.c
-@@ -5,8 +5,10 @@
- #include <asm/pgtable.h>
- #include <asm/proto.h>
-
-+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
- static int disable_nx __cpuinitdata;
-
-+#ifndef CONFIG_PAX_PAGEEXEC
- /*
- * noexec = on|off
- *
-@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
- return 0;
- }
- early_param("noexec", noexec_setup);
-+#endif
-+
-+#endif
-
- void __cpuinit x86_configure_nx(void)
- {
-+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
- if (cpu_has_nx && !disable_nx)
- __supported_pte_mask |= _PAGE_NX;
- else
-+#endif
- __supported_pte_mask &= ~_PAGE_NX;
- }
-
-diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
-index d6c0418..06a0ad5 100644
---- a/arch/x86/mm/tlb.c
-+++ b/arch/x86/mm/tlb.c
-@@ -65,7 +65,11 @@ void leave_mm(int cpu)
- BUG();
- cpumask_clear_cpu(cpu,
- mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
-+
-+#ifndef CONFIG_PAX_PER_CPU_PGD
- load_cr3(swapper_pg_dir);
-+#endif
-+
- }
- EXPORT_SYMBOL_GPL(leave_mm);
-
-diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
-index 6687022..ceabcfa 100644
---- a/arch/x86/net/bpf_jit.S
-+++ b/arch/x86/net/bpf_jit.S
-@@ -9,6 +9,7 @@
- */
- #include <linux/linkage.h>
- #include <asm/dwarf2.h>
-+#include <asm/alternative-asm.h>
-
- /*
- * Calling convention :
-@@ -35,6 +36,7 @@ sk_load_word:
- jle bpf_slow_path_word
- mov (SKBDATA,%rsi),%eax
- bswap %eax /* ntohl() */
-+ pax_force_retaddr
- ret
-
-
-@@ -53,6 +55,7 @@ sk_load_half:
- jle bpf_slow_path_half
- movzwl (SKBDATA,%rsi),%eax
- rol $8,%ax # ntohs()
-+ pax_force_retaddr
- ret
-
- sk_load_byte_ind:
-@@ -66,6 +69,7 @@ sk_load_byte:
- cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
- jle bpf_slow_path_byte
- movzbl (SKBDATA,%rsi),%eax
-+ pax_force_retaddr
- ret
-
- /**
-@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
- movzbl (SKBDATA,%rsi),%ebx
- and $15,%bl
- shl $2,%bl
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- ENDPROC(sk_load_byte_msh)
-@@ -91,6 +96,7 @@ bpf_error:
- xor %eax,%eax
- mov -8(%rbp),%rbx
- leaveq
-+ pax_force_retaddr
- ret
-
- /* rsi contains offset and can be scratched */
-@@ -113,6 +119,7 @@ bpf_slow_path_word:
- js bpf_error
- mov -12(%rbp),%eax
- bswap %eax
-+ pax_force_retaddr
- ret
-
- bpf_slow_path_half:
-@@ -121,12 +128,14 @@ bpf_slow_path_half:
- mov -12(%rbp),%ax
- rol $8,%ax
- movzwl %ax,%eax
-+ pax_force_retaddr
- ret
-
- bpf_slow_path_byte:
- bpf_slow_path_common(1)
- js bpf_error
- movzbl -12(%rbp),%eax
-+ pax_force_retaddr
- ret
-
- bpf_slow_path_byte_msh:
-@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
- and $15,%al
- shl $2,%al
- xchg %eax,%ebx
-+ pax_force_retaddr
- ret
-diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
-index 5a5b6e4..07b4acb 100644
---- a/arch/x86/net/bpf_jit_comp.c
-+++ b/arch/x86/net/bpf_jit_comp.c
-@@ -11,6 +11,7 @@
- #include <asm/cacheflush.h>
- #include <linux/netdevice.h>
- #include <linux/filter.h>
-+#include <linux/random.h>
-
- /*
- * Conventions :
-@@ -45,13 +46,96 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
- return ptr + len;
- }
-
-+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
-+#define MAX_INSTR_CODE_SIZE 96
-+#else
-+#define MAX_INSTR_CODE_SIZE 64
-+#endif
-+
- #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
-
- #define EMIT1(b1) EMIT(b1, 1)
- #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
- #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
- #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
-+
-+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
-+/* original constant will appear in ecx */
-+#define DILUTE_CONST_SEQUENCE(_off, _key) \
-+do { \
-+ /* mov ecx, randkey */ \
-+ EMIT1(0xb9); \
-+ EMIT(_key, 4); \
-+ /* xor ecx, randkey ^ off */ \
-+ EMIT2(0x81, 0xf1); \
-+ EMIT((_key) ^ (_off), 4); \
-+} while (0)
-+#define SHORT_JMP_LENGTH 2
-+#define NEAR_JMP_LENGTH (5 + 8)
-+#define EMIT1_off32(b1, _off) \
-+do { \
-+ switch (b1) { \
-+ case 0x05: /* add eax, imm32 */ \
-+ case 0x2d: /* sub eax, imm32 */ \
-+ case 0x25: /* and eax, imm32 */ \
-+ case 0x0d: /* or eax, imm32 */ \
-+ case 0x3d: /* cmp eax, imm32 */ \
-+ DILUTE_CONST_SEQUENCE(_off, randkey); \
-+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
-+ break; \
-+ case 0xb8: /* mov eax, imm32 */ \
-+ DILUTE_CONST_SEQUENCE(_off, randkey); \
-+ /* mov eax, ecx */ \
-+ EMIT2(0x89, 0xc8); \
-+ break; \
-+ case 0xa9: /* test eax, imm32 */ \
-+ DILUTE_CONST_SEQUENCE(_off, randkey); \
-+ /* test eax, ecx */ \
-+ EMIT2(0x85, 0xc8); \
-+ break; \
-+ case 0xbb: /* mov ebx, imm32 */ \
-+ DILUTE_CONST_SEQUENCE(_off, randkey); \
-+ /* mov ebx, ecx */ \
-+ EMIT2(0x89, 0xcb); \
-+ break; \
-+ case 0xbe: /* mov esi, imm32 */ \
-+ DILUTE_CONST_SEQUENCE(_off, randkey); \
-+ /* mov esi, ecx */ \
-+ EMIT2(0x89, 0xce); \
-+ break; \
-+ case 0xe8: /* call rel imm32, always to known funcs */ \
-+ EMIT1(b1); \
-+ EMIT(_off, 4); \
-+ break; \
-+ case 0xe9: /* jmp rel imm32 */ \
-+ BUG_ON((int)(_off) < 0); \
-+ EMIT1(b1); \
-+ EMIT(_off + 8, 4); \
-+ /* prevent fall-through, we're not called if off = 0 */ \
-+ EMIT(0xcccccccc, 4); \
-+ EMIT(0xcccccccc, 4); \
-+ break; \
-+ default: \
-+ BUILD_BUG_ON(1); \
-+ } \
-+} while (0)
-+
-+#define EMIT2_off32(b1, b2, _off) \
-+do { \
-+ if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */ \
-+ DILUTE_CONST_SEQUENCE(_off, randkey); \
-+ /* imul eax, ecx */ \
-+ EMIT3(0x0f, 0xaf, 0xc1); \
-+ } else { \
-+ BUILD_BUG_ON(1); \
-+ } \
-+} while (0)
-+#else
- #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
-+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
-+#define SHORT_JMP_LENGTH 2
-+#define NEAR_JMP_LENGTH 5
-+#endif
-
- #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
- #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
-@@ -68,6 +152,7 @@ static inline bool is_near(int offset)
-
- #define EMIT_JMP(offset) \
- do { \
-+ BUG_ON((int)(offset) < 0); \
- if (offset) { \
- if (is_near(offset)) \
- EMIT2(0xeb, offset); /* jmp .+off8 */ \
-@@ -86,13 +171,33 @@ do { \
- #define X86_JBE 0x76
- #define X86_JA 0x77
-
-+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
-+#define APPEND_FLOW_VERIFY() \
-+do { \
-+ /* mov ecx, randkey */ \
-+ EMIT1(0xb9); \
-+ EMIT(randkey, 4); \
-+ /* cmp ecx, randkey */ \
-+ EMIT2(0x81, 0xf9); \
-+ EMIT(randkey, 4); \
-+ /* jz after 8 int 3s */ \
-+ EMIT2(0x74, 0x08); \
-+ EMIT(0xcccccccc, 4); \
-+ EMIT(0xcccccccc, 4); \
-+} while (0)
-+#else
-+#define APPEND_FLOW_VERIFY() do { } while (0)
-+#endif
-+
- #define EMIT_COND_JMP(op, offset) \
- do { \
-+ BUG_ON((int)(offset) < 0); \
- if (is_near(offset)) \
- EMIT2(op, offset); /* jxx .+off8 */ \
- else { \
- EMIT2(0x0f, op + 0x10); \
-- EMIT(offset, 4); /* jxx .+off32 */ \
-+ EMIT((offset) + 21, 4); /* jxx .+off32 */ \
-+ APPEND_FLOW_VERIFY(); \
- } \
- } while (0)
-
-@@ -117,10 +222,14 @@ static inline void bpf_flush_icache(void *start, void *end)
- set_fs(old_fs);
- }
-
-+struct bpf_jit_work {
-+ struct work_struct work;
-+ void *image;
-+};
-
- void bpf_jit_compile(struct sk_filter *fp)
- {
-- u8 temp[64];
-+ u8 temp[MAX_INSTR_CODE_SIZE];
- u8 *prog;
- unsigned int proglen, oldproglen = 0;
- int ilen, i;
-@@ -133,6 +242,9 @@ void bpf_jit_compile(struct sk_filter *fp)
- unsigned int *addrs;
- const struct sock_filter *filter = fp->insns;
- int flen = fp->len;
-+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
-+ unsigned int randkey;
-+#endif
-
- if (!bpf_jit_enable)
- return;
-@@ -141,11 +253,15 @@ void bpf_jit_compile(struct sk_filter *fp)
- if (addrs == NULL)
- return;
-
-+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
-+ if (!fp->work)
-+ goto out;
-+
- /* Before first pass, make a rough estimation of addrs[]
-- * each bpf instruction is translated to less than 64 bytes
-+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
- */
- for (proglen = 0, i = 0; i < flen; i++) {
-- proglen += 64;
-+ proglen += MAX_INSTR_CODE_SIZE;
- addrs[i] = proglen;
- }
- cleanup_addr = proglen; /* epilogue address */
-@@ -221,6 +337,10 @@ void bpf_jit_compile(struct sk_filter *fp)
- for (i = 0; i < flen; i++) {
- unsigned int K = filter[i].k;
-
-+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
-+ randkey = prandom_u32();
-+#endif
-+
- switch (filter[i].code) {
- case BPF_S_ALU_ADD_X: /* A += X; */
- seen |= SEEN_XREG;
-@@ -253,10 +373,8 @@ void bpf_jit_compile(struct sk_filter *fp)
- case BPF_S_ALU_MUL_K: /* A *= K */
- if (is_imm8(K))
- EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
-- else {
-- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
-- EMIT(K, 4);
-- }
-+ else
-+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
- break;
- case BPF_S_ALU_DIV_X: /* A /= X; */
- seen |= SEEN_XREG;
-@@ -269,15 +387,21 @@ void bpf_jit_compile(struct sk_filter *fp)
- EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
- (addrs[i] - 4));
- } else {
-- EMIT_COND_JMP(X86_JNE, 2 + 5);
-+ EMIT_COND_JMP(X86_JNE, 2 + NEAR_JMP_LENGTH);
- CLEAR_A();
- EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
- }
- EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
- break;
- case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
-+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
-+ DILUTE_CONST_SEQUENCE(K, randkey);
-+ // imul rax, rcx
-+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
-+#else
- EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
- EMIT(K, 4);
-+#endif
- EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
- break;
- case BPF_S_ALU_AND_X:
-@@ -477,7 +601,7 @@ void bpf_jit_compile(struct sk_filter *fp)
- common_load: seen |= SEEN_DATAREF;
- if ((int)K < 0) {
- /* Abort the JIT because __load_pointer() is needed. */
-- goto out;
-+ goto error;
- }
- t_offset = func - (image + addrs[i]);
- EMIT1_off32(0xbe, K); /* mov imm32,%esi */
-@@ -492,7 +616,7 @@ common_load: seen |= SEEN_DATAREF;
- case BPF_S_LDX_B_MSH:
- if ((int)K < 0) {
- /* Abort the JIT because __load_pointer() is needed. */
-- goto out;
-+ goto error;
- }
- seen |= SEEN_DATAREF | SEEN_XREG;
- t_offset = sk_load_byte_msh - (image + addrs[i]);
-@@ -572,7 +696,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
- }
- if (filter[i].jt != 0) {
- if (filter[i].jf && f_offset)
-- t_offset += is_near(f_offset) ? 2 : 5;
-+ t_offset += is_near(f_offset) ? SHORT_JMP_LENGTH : NEAR_JMP_LENGTH;
- EMIT_COND_JMP(t_op, t_offset);
- if (filter[i].jf)
- EMIT_JMP(f_offset);
-@@ -582,17 +706,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
- break;
- default:
- /* hmm, too complex filter, give up with jit compiler */
-- goto out;
-+ goto error;
- }
- ilen = prog - temp;
- if (image) {
- if (unlikely(proglen + ilen > oldproglen)) {
- pr_err("bpb_jit_compile fatal error\n");
-- kfree(addrs);
-- module_free(NULL, image);
-- return;
-+ module_free_exec(NULL, image);
-+ goto error;
- }
-+ pax_open_kernel();
- memcpy(image + proglen, temp, ilen);
-+ pax_close_kernel();
- }
- proglen += ilen;
- addrs[i] = proglen;
-@@ -613,11 +738,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
- break;
- }
- if (proglen == oldproglen) {
-- image = module_alloc(max_t(unsigned int,
-- proglen,
-- sizeof(struct work_struct)));
-+ image = module_alloc_exec(proglen);
- if (!image)
-- goto out;
-+ goto error;
- }
- oldproglen = proglen;
- }
-@@ -633,7 +756,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
- bpf_flush_icache(image, image + proglen);
-
- fp->bpf_func = (void *)image;
-- }
-+ } else
-+error:
-+ kfree(fp->work);
-+
- out:
- kfree(addrs);
- return;
-@@ -641,18 +767,20 @@ out:
-
- static void jit_free_defer(struct work_struct *arg)
- {
-- module_free(NULL, arg);
-+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
-+ kfree(arg);
- }
-
- /* run from softirq, we must use a work_struct to call
-- * module_free() from process context
-+ * module_free_exec() from process context
- */
- void bpf_jit_free(struct sk_filter *fp)
- {
- if (fp->bpf_func != sk_run_filter) {
-- struct work_struct *work = (struct work_struct *)fp->bpf_func;
-+ struct work_struct *work = &fp->work->work;
-
- INIT_WORK(work, jit_free_defer);
-+ fp->work->image = fp->bpf_func;
- schedule_work(work);
- }
- }
-diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
-index bff89df..377758a 100644
---- a/arch/x86/oprofile/backtrace.c
-+++ b/arch/x86/oprofile/backtrace.c
-@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
- struct stack_frame_ia32 *fp;
- unsigned long bytes;
-
-- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
-+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
- if (bytes != sizeof(bufhead))
- return NULL;
-
-- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
-+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
-
- oprofile_add_trace(bufhead[0].return_address);
-
-@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
- struct stack_frame bufhead[2];
- unsigned long bytes;
-
-- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
-+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
- if (bytes != sizeof(bufhead))
- return NULL;
-
-@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
- {
- struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
-
-- if (!user_mode_vm(regs)) {
-+ if (!user_mode(regs)) {
- unsigned long stack = kernel_stack_pointer(regs);
- if (depth)
- dump_trace(NULL, regs, (unsigned long *)stack, 0,
-diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
-index 6bc0899..13d2579 100644
---- a/arch/x86/oprofile/nmi_int.c
-+++ b/arch/x86/oprofile/nmi_int.c
-@@ -23,6 +23,7 @@
- #include <asm/nmi.h>
- #include <asm/msr.h>
- #include <asm/apic.h>
-+#include <asm/pgtable.h>
-
- #include "op_counter.h"
- #include "op_x86_model.h"
-@@ -759,8 +760,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
- if (ret)
- return ret;
-
-- if (!model->num_virt_counters)
-- model->num_virt_counters = model->num_counters;
-+ if (!model->num_virt_counters) {
-+ pax_open_kernel();
-+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
-+ pax_close_kernel();
-+ }
-
- mux_init(ops);
-
-diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
-index 303f086..d020916 100644
---- a/arch/x86/oprofile/op_model_amd.c
-+++ b/arch/x86/oprofile/op_model_amd.c
-@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
- num_counters = AMD64_NUM_COUNTERS;
- }
-
-- op_amd_spec.num_counters = num_counters;
-- op_amd_spec.num_controls = num_counters;
-- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
-+ pax_open_kernel();
-+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
-+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
-+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
-+ pax_close_kernel();
-
- return 0;
- }
-diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
-index d90528e..0127e2b 100644
---- a/arch/x86/oprofile/op_model_ppro.c
-+++ b/arch/x86/oprofile/op_model_ppro.c
-@@ -19,6 +19,7 @@
- #include <asm/msr.h>
- #include <asm/apic.h>
- #include <asm/nmi.h>
-+#include <asm/pgtable.h>
-
- #include "op_x86_model.h"
- #include "op_counter.h"
-@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
-
- num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
-
-- op_arch_perfmon_spec.num_counters = num_counters;
-- op_arch_perfmon_spec.num_controls = num_counters;
-+ pax_open_kernel();
-+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
-+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
-+ pax_close_kernel();
- }
-
- static int arch_perfmon_init(struct oprofile_operations *ignore)
-diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
-index 71e8a67..6a313bb 100644
---- a/arch/x86/oprofile/op_x86_model.h
-+++ b/arch/x86/oprofile/op_x86_model.h
-@@ -52,7 +52,7 @@ struct op_x86_model_spec {
- void (*switch_ctrl)(struct op_x86_model_spec const *model,
- struct op_msrs const * const msrs);
- #endif
--};
-+} __do_const;
-
- struct op_counter_config;
-
-diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
-index 385a940..b11662d 100644
---- a/arch/x86/pci/amd_bus.c
-+++ b/arch/x86/pci/amd_bus.c
-@@ -355,7 +355,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata amd_cpu_notifier = {
-+static struct notifier_block amd_cpu_notifier = {
- .notifier_call = amd_cpu_notify,
- };
-
-diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
-index 372e9b8..e775a6c 100644
---- a/arch/x86/pci/irq.c
-+++ b/arch/x86/pci/irq.c
-@@ -50,7 +50,7 @@ struct irq_router {
- struct irq_router_handler {
- u16 vendor;
- int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
--};
-+} __do_const;
-
- int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
- void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
-@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
- return 0;
- }
-
--static __initdata struct irq_router_handler pirq_routers[] = {
-+static __initconst const struct irq_router_handler pirq_routers[] = {
- { PCI_VENDOR_ID_INTEL, intel_router_probe },
- { PCI_VENDOR_ID_AL, ali_router_probe },
- { PCI_VENDOR_ID_ITE, ite_router_probe },
-@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
- static void __init pirq_find_router(struct irq_router *r)
- {
- struct irq_routing_table *rt = pirq_table;
-- struct irq_router_handler *h;
-+ const struct irq_router_handler *h;
-
- #ifdef CONFIG_PCI_BIOS
- if (!rt->signature) {
-@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
- return 0;
- }
-
--static struct dmi_system_id __initdata pciirq_dmi_table[] = {
-+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
- {
- .callback = fix_broken_hp_bios_irq9,
- .ident = "HP Pavilion N5400 Series Laptop",
-diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
-index cb29191..036766d 100644
---- a/arch/x86/pci/mrst.c
-+++ b/arch/x86/pci/mrst.c
-@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
- printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
- pci_mmcfg_late_init();
- pcibios_enable_irq = mrst_pci_irq_enable;
-- pci_root_ops = pci_mrst_ops;
-+ pax_open_kernel();
-+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
-+ pax_close_kernel();
- /* Continue with standard init */
- return 1;
- }
-diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
-index db0e9a5..0372c14 100644
---- a/arch/x86/pci/pcbios.c
-+++ b/arch/x86/pci/pcbios.c
-@@ -79,50 +79,93 @@ union bios32 {
- static struct {
- unsigned long address;
- unsigned short segment;
--} bios32_indirect = { 0, __KERNEL_CS };
-+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
-
- /*
- * Returns the entry point for the given service, NULL on error
- */
-
--static unsigned long bios32_service(unsigned long service)
-+static unsigned long __devinit bios32_service(unsigned long service)
- {
- unsigned char return_code; /* %al */
- unsigned long address; /* %ebx */
- unsigned long length; /* %ecx */
- unsigned long entry; /* %edx */
- unsigned long flags;
-+ struct desc_struct d, *gdt;
-
- local_irq_save(flags);
-- __asm__("lcall *(%%edi); cld"
-+
-+ gdt = get_cpu_gdt_table(smp_processor_id());
-+
-+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
-+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
-+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
-+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
-+
-+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
- : "=a" (return_code),
- "=b" (address),
- "=c" (length),
- "=d" (entry)
- : "0" (service),
- "1" (0),
-- "D" (&bios32_indirect));
-+ "D" (&bios32_indirect),
-+ "r"(__PCIBIOS_DS)
-+ : "memory");
-+
-+ pax_open_kernel();
-+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
-+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
-+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
-+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
-+ pax_close_kernel();
-+
- local_irq_restore(flags);
-
- switch (return_code) {
-- case 0:
-- return address + entry;
-- case 0x80: /* Not present */
-- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
-- return 0;
-- default: /* Shouldn't happen */
-- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
-- service, return_code);
-+ case 0: {
-+ int cpu;
-+ unsigned char flags;
-+
-+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
-+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
-+ printk(KERN_WARNING "bios32_service: not valid\n");
- return 0;
-+ }
-+ address = address + PAGE_OFFSET;
-+ length += 16UL; /* some BIOSs underreport this... */
-+ flags = 4;
-+ if (length >= 64*1024*1024) {
-+ length >>= PAGE_SHIFT;
-+ flags |= 8;
-+ }
-+
-+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
-+ gdt = get_cpu_gdt_table(cpu);
-+ pack_descriptor(&d, address, length, 0x9b, flags);
-+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
-+ pack_descriptor(&d, address, length, 0x93, flags);
-+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
-+ }
-+ return entry;
-+ }
-+ case 0x80: /* Not present */
-+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
-+ return 0;
-+ default: /* Shouldn't happen */
-+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
-+ service, return_code);
-+ return 0;
- }
- }
-
- static struct {
- unsigned long address;
- unsigned short segment;
--} pci_indirect = { 0, __KERNEL_CS };
-+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
-
--static int pci_bios_present;
-+static int pci_bios_present __read_only;
-
- static int __devinit check_pcibios(void)
- {
-@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
- unsigned long flags, pcibios_entry;
-
- if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
-- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
-+ pci_indirect.address = pcibios_entry;
-
- local_irq_save(flags);
-- __asm__(
-- "lcall *(%%edi); cld\n\t"
-+ __asm__("movw %w6, %%ds\n\t"
-+ "lcall *%%ss:(%%edi); cld\n\t"
-+ "push %%ss\n\t"
-+ "pop %%ds\n\t"
- "jc 1f\n\t"
- "xor %%ah, %%ah\n"
- "1:"
-@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
- "=b" (ebx),
- "=c" (ecx)
- : "1" (PCIBIOS_PCI_BIOS_PRESENT),
-- "D" (&pci_indirect)
-+ "D" (&pci_indirect),
-+ "r" (__PCIBIOS_DS)
- : "memory");
- local_irq_restore(flags);
-
-@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
-
- switch (len) {
- case 1:
-- __asm__("lcall *(%%esi); cld\n\t"
-+ __asm__("movw %w6, %%ds\n\t"
-+ "lcall *%%ss:(%%esi); cld\n\t"
-+ "push %%ss\n\t"
-+ "pop %%ds\n\t"
- "jc 1f\n\t"
- "xor %%ah, %%ah\n"
- "1:"
-@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
- : "1" (PCIBIOS_READ_CONFIG_BYTE),
- "b" (bx),
- "D" ((long)reg),
-- "S" (&pci_indirect));
-+ "S" (&pci_indirect),
-+ "r" (__PCIBIOS_DS));
- /*
- * Zero-extend the result beyond 8 bits, do not trust the
- * BIOS having done it:
-@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
- *value &= 0xff;
- break;
- case 2:
-- __asm__("lcall *(%%esi); cld\n\t"
-+ __asm__("movw %w6, %%ds\n\t"
-+ "lcall *%%ss:(%%esi); cld\n\t"
-+ "push %%ss\n\t"
-+ "pop %%ds\n\t"
- "jc 1f\n\t"
- "xor %%ah, %%ah\n"
- "1:"
-@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
- : "1" (PCIBIOS_READ_CONFIG_WORD),
- "b" (bx),
- "D" ((long)reg),
-- "S" (&pci_indirect));
-+ "S" (&pci_indirect),
-+ "r" (__PCIBIOS_DS));
- /*
- * Zero-extend the result beyond 16 bits, do not trust the
- * BIOS having done it:
-@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
- *value &= 0xffff;
- break;
- case 4:
-- __asm__("lcall *(%%esi); cld\n\t"
-+ __asm__("movw %w6, %%ds\n\t"
-+ "lcall *%%ss:(%%esi); cld\n\t"
-+ "push %%ss\n\t"
-+ "pop %%ds\n\t"
- "jc 1f\n\t"
- "xor %%ah, %%ah\n"
- "1:"
-@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
- : "1" (PCIBIOS_READ_CONFIG_DWORD),
- "b" (bx),
- "D" ((long)reg),
-- "S" (&pci_indirect));
-+ "S" (&pci_indirect),
-+ "r" (__PCIBIOS_DS));
- break;
- }
-
-@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
-
- switch (len) {
- case 1:
-- __asm__("lcall *(%%esi); cld\n\t"
-+ __asm__("movw %w6, %%ds\n\t"
-+ "lcall *%%ss:(%%esi); cld\n\t"
-+ "push %%ss\n\t"
-+ "pop %%ds\n\t"
- "jc 1f\n\t"
- "xor %%ah, %%ah\n"
- "1:"
-@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
- "c" (value),
- "b" (bx),
- "D" ((long)reg),
-- "S" (&pci_indirect));
-+ "S" (&pci_indirect),
-+ "r" (__PCIBIOS_DS));
- break;
- case 2:
-- __asm__("lcall *(%%esi); cld\n\t"
-+ __asm__("movw %w6, %%ds\n\t"
-+ "lcall *%%ss:(%%esi); cld\n\t"
-+ "push %%ss\n\t"
-+ "pop %%ds\n\t"
- "jc 1f\n\t"
- "xor %%ah, %%ah\n"
- "1:"
-@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
- "c" (value),
- "b" (bx),
- "D" ((long)reg),
-- "S" (&pci_indirect));
-+ "S" (&pci_indirect),
-+ "r" (__PCIBIOS_DS));
- break;
- case 4:
-- __asm__("lcall *(%%esi); cld\n\t"
-+ __asm__("movw %w6, %%ds\n\t"
-+ "lcall *%%ss:(%%esi); cld\n\t"
-+ "push %%ss\n\t"
-+ "pop %%ds\n\t"
- "jc 1f\n\t"
- "xor %%ah, %%ah\n"
- "1:"
-@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
- "c" (value),
- "b" (bx),
- "D" ((long)reg),
-- "S" (&pci_indirect));
-+ "S" (&pci_indirect),
-+ "r" (__PCIBIOS_DS));
- break;
- }
-
-@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
-
- DBG("PCI: Fetching IRQ routing table... ");
- __asm__("push %%es\n\t"
-+ "movw %w8, %%ds\n\t"
- "push %%ds\n\t"
- "pop %%es\n\t"
-- "lcall *(%%esi); cld\n\t"
-+ "lcall *%%ss:(%%esi); cld\n\t"
- "pop %%es\n\t"
-+ "push %%ss\n\t"
-+ "pop %%ds\n"
- "jc 1f\n\t"
- "xor %%ah, %%ah\n"
- "1:"
-@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
- "1" (0),
- "D" ((long) &opt),
- "S" (&pci_indirect),
-- "m" (opt)
-+ "m" (opt),
-+ "r" (__PCIBIOS_DS)
- : "memory");
- DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
- if (ret & 0xff00)
-@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
- {
- int ret;
-
-- __asm__("lcall *(%%esi); cld\n\t"
-+ __asm__("movw %w5, %%ds\n\t"
-+ "lcall *%%ss:(%%esi); cld\n\t"
-+ "push %%ss\n\t"
-+ "pop %%ds\n"
- "jc 1f\n\t"
- "xor %%ah, %%ah\n"
- "1:"
-@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
- : "0" (PCIBIOS_SET_PCI_HW_INT),
- "b" ((dev->bus->number << 8) | dev->devfn),
- "c" ((irq << 8) | (pin + 10)),
-- "S" (&pci_indirect));
-+ "S" (&pci_indirect),
-+ "r" (__PCIBIOS_DS));
- return !(ret & 0xff00);
- }
- EXPORT_SYMBOL(pcibios_set_irq_routing);
-diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
-index e56da77..cba13dc 100644
---- a/arch/x86/platform/efi/efi.c
-+++ b/arch/x86/platform/efi/efi.c
-@@ -747,6 +747,8 @@ void __init efi_enter_virtual_mode(void)
-
- BUG_ON(!efi.systab);
-
-+ efi_setup_pgd();
-+
- status = phys_efi_set_virtual_address_map(
- memmap.desc_size * count,
- memmap.desc_size,
-diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
-index 40e4469..94d16d7 100644
---- a/arch/x86/platform/efi/efi_32.c
-+++ b/arch/x86/platform/efi/efi_32.c
-@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
- {
- struct desc_ptr gdt_descr;
-
-+#ifdef CONFIG_PAX_KERNEXEC
-+ struct desc_struct d;
-+#endif
-+
- local_irq_save(efi_rt_eflags);
-
- load_cr3(initial_page_table);
- __flush_tlb_all();
-
-+#ifdef CONFIG_PAX_KERNEXEC
-+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
-+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
-+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
-+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
-+#endif
-+
- gdt_descr.address = __pa(get_cpu_gdt_table(0));
- gdt_descr.size = GDT_SIZE - 1;
- load_gdt(&gdt_descr);
-@@ -58,12 +69,29 @@ void efi_call_phys_epilog(void)
- {
- struct desc_ptr gdt_descr;
-
-+#ifdef CONFIG_PAX_KERNEXEC
-+ struct desc_struct d;
-+
-+ memset(&d, 0, sizeof d);
-+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
-+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
-+#endif
-+
- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
- gdt_descr.size = GDT_SIZE - 1;
- load_gdt(&gdt_descr);
-
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ load_cr3(get_cpu_pgd(smp_processor_id()));
-+#else
- load_cr3(swapper_pg_dir);
-+#endif
-+
- __flush_tlb_all();
-
- local_irq_restore(efi_rt_eflags);
- }
-+
-+void __init efi_setup_pgd(void)
-+{
-+}
-diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
-index 0fba86d..2363f0b 100644
---- a/arch/x86/platform/efi/efi_64.c
-+++ b/arch/x86/platform/efi/efi_64.c
-@@ -75,6 +75,11 @@ void __init efi_call_phys_prelog(void)
- vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
- set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
- }
-+
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ load_cr3(swapper_pg_dir);
-+#endif
-+
- __flush_tlb_all();
- }
-
-@@ -88,6 +93,11 @@ void __init efi_call_phys_epilog(void)
- for (pgd = 0; pgd < n_pgds; pgd++)
- set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
- kfree(save_pgd);
-+
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ load_cr3(get_cpu_pgd(smp_processor_id()));
-+#endif
-+
- __flush_tlb_all();
- local_irq_restore(efi_flags);
- early_code_mapping_set_exec(0);
-@@ -109,3 +119,19 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
-
- return (void __iomem *)__va(phys_addr);
- }
-+
-+void __init efi_setup_pgd(void)
-+{
-+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
-+ * able to execute the EFI services.
-+ */
-+ if (__supported_pte_mask & _PAGE_NX) {
-+ unsigned long addr = (unsigned long) __va(0);
-+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
-+ pr_info("PAX: Disabling NX protection for low memory map.\n");
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ set_pgd(pgd_offset_cpu(0, addr), pe);
-+#endif
-+ set_pgd(pgd_offset_k(addr), pe);
-+ }
-+}
-diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
-index fbe66e6..eae5e38 100644
---- a/arch/x86/platform/efi/efi_stub_32.S
-+++ b/arch/x86/platform/efi/efi_stub_32.S
-@@ -6,7 +6,9 @@
- */
-
- #include <linux/linkage.h>
-+#include <linux/init.h>
- #include <asm/page_types.h>
-+#include <asm/segment.h>
-
- /*
- * efi_call_phys(void *, ...) is a function with variable parameters.
-@@ -20,7 +22,7 @@
- * service functions will comply with gcc calling convention, too.
- */
-
--.text
-+__INIT
- ENTRY(efi_call_phys)
- /*
- * 0. The function can only be called in Linux kernel. So CS has been
-@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
- * The mapping of lower virtual memory has been created in prelog and
- * epilog.
- */
-- movl $1f, %edx
-- subl $__PAGE_OFFSET, %edx
-- jmp *%edx
-+#ifdef CONFIG_PAX_KERNEXEC
-+ movl $(__KERNEXEC_EFI_DS), %edx
-+ mov %edx, %ds
-+ mov %edx, %es
-+ mov %edx, %ss
-+ addl $2f,(1f)
-+ ljmp *(1f)
-+
-+__INITDATA
-+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
-+.previous
-+
-+2:
-+ subl $2b,(1b)
-+#else
-+ jmp 1f-__PAGE_OFFSET
- 1:
-+#endif
-
- /*
- * 2. Now on the top of stack is the return
-@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
- * parameter 2, ..., param n. To make things easy, we save the return
- * address of efi_call_phys in a global variable.
- */
-- popl %edx
-- movl %edx, saved_return_addr
-- /* get the function pointer into ECX*/
-- popl %ecx
-- movl %ecx, efi_rt_function_ptr
-- movl $2f, %edx
-- subl $__PAGE_OFFSET, %edx
-- pushl %edx
-+ popl (saved_return_addr)
-+ popl (efi_rt_function_ptr)
-
- /*
- * 3. Clear PG bit in %CR0.
-@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
- /*
- * 5. Call the physical function.
- */
-- jmp *%ecx
-+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
-
--2:
- /*
- * 6. After EFI runtime service returns, control will return to
- * following instruction. We'd better readjust stack pointer first.
-@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
- movl %cr0, %edx
- orl $0x80000000, %edx
- movl %edx, %cr0
-- jmp 1f
--1:
-+
- /*
- * 8. Now restore the virtual mode from flat mode by
- * adding EIP with PAGE_OFFSET.
- */
-- movl $1f, %edx
-- jmp *%edx
-+#ifdef CONFIG_PAX_KERNEXEC
-+ movl $(__KERNEL_DS), %edx
-+ mov %edx, %ds
-+ mov %edx, %es
-+ mov %edx, %ss
-+ ljmp $(__KERNEL_CS),$1f
-+#else
-+ jmp 1f+__PAGE_OFFSET
-+#endif
- 1:
-
- /*
- * 9. Balance the stack. And because EAX contain the return value,
- * we'd better not clobber it.
- */
-- leal efi_rt_function_ptr, %edx
-- movl (%edx), %ecx
-- pushl %ecx
-+ pushl (efi_rt_function_ptr)
-
- /*
-- * 10. Push the saved return address onto the stack and return.
-+ * 10. Return to the saved return address.
- */
-- leal saved_return_addr, %edx
-- movl (%edx), %ecx
-- pushl %ecx
-- ret
-+ jmpl *(saved_return_addr)
- ENDPROC(efi_call_phys)
- .previous
-
--.data
-+__INITDATA
- saved_return_addr:
- .long 0
- efi_rt_function_ptr:
-diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
-index 4c07cca..2c8427d 100644
---- a/arch/x86/platform/efi/efi_stub_64.S
-+++ b/arch/x86/platform/efi/efi_stub_64.S
-@@ -7,6 +7,7 @@
- */
-
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
-
- #define SAVE_XMM \
- mov %rsp, %rax; \
-@@ -40,6 +41,7 @@ ENTRY(efi_call0)
- call *%rdi
- addq $32, %rsp
- RESTORE_XMM
-+ pax_force_retaddr 0, 1
- ret
- ENDPROC(efi_call0)
-
-@@ -50,6 +52,7 @@ ENTRY(efi_call1)
- call *%rdi
- addq $32, %rsp
- RESTORE_XMM
-+ pax_force_retaddr 0, 1
- ret
- ENDPROC(efi_call1)
-
-@@ -60,6 +63,7 @@ ENTRY(efi_call2)
- call *%rdi
- addq $32, %rsp
- RESTORE_XMM
-+ pax_force_retaddr 0, 1
- ret
- ENDPROC(efi_call2)
-
-@@ -71,6 +75,7 @@ ENTRY(efi_call3)
- call *%rdi
- addq $32, %rsp
- RESTORE_XMM
-+ pax_force_retaddr 0, 1
- ret
- ENDPROC(efi_call3)
-
-@@ -83,6 +88,7 @@ ENTRY(efi_call4)
- call *%rdi
- addq $32, %rsp
- RESTORE_XMM
-+ pax_force_retaddr 0, 1
- ret
- ENDPROC(efi_call4)
-
-@@ -96,6 +102,7 @@ ENTRY(efi_call5)
- call *%rdi
- addq $48, %rsp
- RESTORE_XMM
-+ pax_force_retaddr 0, 1
- ret
- ENDPROC(efi_call5)
-
-@@ -112,5 +119,6 @@ ENTRY(efi_call6)
- call *%rdi
- addq $48, %rsp
- RESTORE_XMM
-+ pax_force_retaddr 0, 1
- ret
- ENDPROC(efi_call6)
-diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
-index ad4ec1c..686479e 100644
---- a/arch/x86/platform/mrst/mrst.c
-+++ b/arch/x86/platform/mrst/mrst.c
-@@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
- EXPORT_SYMBOL_GPL(sfi_mrtc_array);
- int sfi_mrtc_num;
-
--static void mrst_power_off(void)
-+static __noreturn void mrst_power_off(void)
- {
- if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
- intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
-+ BUG();
- }
-
--static void mrst_reboot(void)
-+static __noreturn void mrst_reboot(void)
- {
- if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
- intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
- else
- intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
-+ BUG();
- }
-
- /* parse all the mtimer info to a static mtimer array */
-diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
-index d6ee929..3637cb5 100644
---- a/arch/x86/platform/olpc/olpc_dt.c
-+++ b/arch/x86/platform/olpc/olpc_dt.c
-@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
- return res;
- }
-
--static struct of_pdt_ops prom_olpc_ops __initdata = {
-+static struct of_pdt_ops prom_olpc_ops __initconst = {
- .nextprop = olpc_dt_nextprop,
- .getproplen = olpc_dt_getproplen,
- .getproperty = olpc_dt_getproperty,
-diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
-index 43c9f6a..2b63a0b 100644
---- a/arch/x86/power/cpu.c
-+++ b/arch/x86/power/cpu.c
-@@ -132,7 +132,7 @@ static void do_fpu_end(void)
- static void fix_processor_context(void)
- {
- int cpu = smp_processor_id();
-- struct tss_struct *t = &per_cpu(init_tss, cpu);
-+ struct tss_struct *t = init_tss + cpu;
-
- set_tss_desc(cpu, t); /*
- * This just modifies memory; should not be
-@@ -142,8 +142,6 @@ static void fix_processor_context(void)
- */
-
- #ifdef CONFIG_X86_64
-- get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
--
- syscall_init(); /* This sets MSR_*STAR and related */
- #endif
- load_TR_desc(); /* This does ltr */
-diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
-index e529730..8d08690 100644
---- a/arch/x86/tools/relocs.c
-+++ b/arch/x86/tools/relocs.c
-@@ -11,10 +11,13 @@
- #include <endian.h>
- #include <regex.h>
-
-+#include "../../../include/generated/autoconf.h"
-+
- static void die(char *fmt, ...);
-
- #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
- static Elf32_Ehdr ehdr;
-+static Elf32_Phdr *phdr;
- static unsigned long reloc_count, reloc_idx;
- static unsigned long *relocs;
- static unsigned long reloc16_count, reloc16_idx;
-@@ -322,9 +325,39 @@ static void read_ehdr(FILE *fp)
- }
- }
-
-+static void read_phdrs(FILE *fp)
-+{
-+ unsigned int i;
-+
-+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
-+ if (!phdr) {
-+ die("Unable to allocate %d program headers\n",
-+ ehdr.e_phnum);
-+ }
-+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
-+ die("Seek to %d failed: %s\n",
-+ ehdr.e_phoff, strerror(errno));
-+ }
-+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
-+ die("Cannot read ELF program headers: %s\n",
-+ strerror(errno));
-+ }
-+ for(i = 0; i < ehdr.e_phnum; i++) {
-+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
-+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
-+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
-+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
-+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
-+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
-+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
-+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
-+ }
-+
-+}
-+
- static void read_shdrs(FILE *fp)
- {
-- int i;
-+ unsigned int i;
- Elf32_Shdr shdr;
-
- secs = calloc(ehdr.e_shnum, sizeof(struct section));
-@@ -359,7 +392,7 @@ static void read_shdrs(FILE *fp)
-
- static void read_strtabs(FILE *fp)
- {
-- int i;
-+ unsigned int i;
- for (i = 0; i < ehdr.e_shnum; i++) {
- struct section *sec = &secs[i];
- if (sec->shdr.sh_type != SHT_STRTAB) {
-@@ -384,7 +417,7 @@ static void read_strtabs(FILE *fp)
-
- static void read_symtabs(FILE *fp)
- {
-- int i,j;
-+ unsigned int i,j;
- for (i = 0; i < ehdr.e_shnum; i++) {
- struct section *sec = &secs[i];
- if (sec->shdr.sh_type != SHT_SYMTAB) {
-@@ -417,7 +450,9 @@ static void read_symtabs(FILE *fp)
-
- static void read_relocs(FILE *fp)
- {
-- int i,j;
-+ unsigned int i,j;
-+ uint32_t base;
-+
- for (i = 0; i < ehdr.e_shnum; i++) {
- struct section *sec = &secs[i];
- if (sec->shdr.sh_type != SHT_REL) {
-@@ -437,9 +472,22 @@ static void read_relocs(FILE *fp)
- die("Cannot read symbol table: %s\n",
- strerror(errno));
- }
-+ base = 0;
-+
-+#ifdef CONFIG_X86_32
-+ for (j = 0; j < ehdr.e_phnum; j++) {
-+ if (phdr[j].p_type != PT_LOAD )
-+ continue;
-+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
-+ continue;
-+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
-+ break;
-+ }
-+#endif
-+
- for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
- Elf32_Rel *rel = &sec->reltab[j];
-- rel->r_offset = elf32_to_cpu(rel->r_offset);
-+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
- rel->r_info = elf32_to_cpu(rel->r_info);
- }
- }
-@@ -448,13 +496,13 @@ static void read_relocs(FILE *fp)
-
- static void print_absolute_symbols(void)
- {
-- int i;
-+ unsigned int i;
- printf("Absolute symbols\n");
- printf(" Num: Value Size Type Bind Visibility Name\n");
- for (i = 0; i < ehdr.e_shnum; i++) {
- struct section *sec = &secs[i];
- char *sym_strtab;
-- int j;
-+ unsigned int j;
-
- if (sec->shdr.sh_type != SHT_SYMTAB) {
- continue;
-@@ -481,14 +529,14 @@ static void print_absolute_symbols(void)
-
- static void print_absolute_relocs(void)
- {
-- int i, printed = 0;
-+ unsigned int i, printed = 0;
-
- for (i = 0; i < ehdr.e_shnum; i++) {
- struct section *sec = &secs[i];
- struct section *sec_applies, *sec_symtab;
- char *sym_strtab;
- Elf32_Sym *sh_symtab;
-- int j;
-+ unsigned int j;
- if (sec->shdr.sh_type != SHT_REL) {
- continue;
- }
-@@ -550,13 +598,13 @@ static void print_absolute_relocs(void)
- static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
- int use_real_mode)
- {
-- int i;
-+ unsigned int i;
- /* Walk through the relocations */
- for (i = 0; i < ehdr.e_shnum; i++) {
- char *sym_strtab;
- Elf32_Sym *sh_symtab;
- struct section *sec_applies, *sec_symtab;
-- int j;
-+ unsigned int j;
- struct section *sec = &secs[i];
-
- if (sec->shdr.sh_type != SHT_REL) {
-@@ -580,6 +628,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
- sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
- r_type = ELF32_R_TYPE(rel->r_info);
-
-+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
-+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
-+ continue;
-+
-+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
-+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
-+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
-+ continue;
-+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
-+ continue;
-+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
-+ continue;
-+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
-+ continue;
-+#endif
-+
- shn_abs = sym->st_shndx == SHN_ABS;
-
- switch (r_type) {
-@@ -676,7 +740,7 @@ static int write32(unsigned int v, FILE *f)
-
- static void emit_relocs(int as_text, int use_real_mode)
- {
-- int i;
-+ unsigned int i;
- /* Count how many relocations I have and allocate space for them. */
- reloc_count = 0;
- walk_relocs(count_reloc, use_real_mode);
-@@ -803,6 +867,7 @@ int main(int argc, char **argv)
- fname, strerror(errno));
- }
- read_ehdr(fp);
-+ read_phdrs(fp);
- read_shdrs(fp);
- read_strtabs(fp);
- read_symtabs(fp);
-diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
-index c6c7131..2851e03 100644
---- a/arch/x86/um/tls_32.c
-+++ b/arch/x86/um/tls_32.c
-@@ -259,7 +259,7 @@ out:
- if (unlikely(task == current &&
- !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
- printk(KERN_ERR "get_tls_entry: task with pid %d got here "
-- "without flushed TLS.", current->pid);
-+ "without flushed TLS.", task_pid_nr(current));
- }
-
- return 0;
-diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
-index 5d17950..2253fc9 100644
---- a/arch/x86/vdso/Makefile
-+++ b/arch/x86/vdso/Makefile
-@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
- -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
- sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
-
--VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
-+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
- GCOV_PROFILE := n
-
- #
-diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
-index 468d591..8e80a0a 100644
---- a/arch/x86/vdso/vdso32-setup.c
-+++ b/arch/x86/vdso/vdso32-setup.c
-@@ -25,6 +25,7 @@
- #include <asm/tlbflush.h>
- #include <asm/vdso.h>
- #include <asm/proto.h>
-+#include <asm/mman.h>
-
- enum {
- VDSO_DISABLED = 0,
-@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
- void enable_sep_cpu(void)
- {
- int cpu = get_cpu();
-- struct tss_struct *tss = &per_cpu(init_tss, cpu);
-+ struct tss_struct *tss = init_tss + cpu;
-
- if (!boot_cpu_has(X86_FEATURE_SEP)) {
- put_cpu();
-@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
- gate_vma.vm_start = FIXADDR_USER_START;
- gate_vma.vm_end = FIXADDR_USER_END;
- gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
-- gate_vma.vm_page_prot = __P101;
-+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
- /*
- * Make sure the vDSO gets into every core dump.
- * Dumping its contents makes post-mortem fully interpretable later
-@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
- if (compat)
- addr = VDSO_HIGH_BASE;
- else {
-- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
-+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
- if (IS_ERR_VALUE(addr)) {
- ret = addr;
- goto up_fail;
- }
- }
-
-- current->mm->context.vdso = (void *)addr;
-+ current->mm->context.vdso = addr;
-
- if (compat_uses_vma || !compat) {
- /*
-@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
- }
-
- current_thread_info()->sysenter_return =
-- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
-+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
-
- up_fail:
- if (ret)
-- current->mm->context.vdso = NULL;
-+ current->mm->context.vdso = 0;
-
- up_write(&mm->mmap_sem);
-
-@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
-
- const char *arch_vma_name(struct vm_area_struct *vma)
- {
-- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
-+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
- return "[vdso]";
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
-+ return "[vdso]";
-+#endif
-+
- return NULL;
- }
-
-@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
- * Check to see if the corresponding task was created in compat vdso
- * mode.
- */
-- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
-+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
- return &gate_vma;
- return NULL;
- }
-diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
-index 0ff8815..7abe843 100644
---- a/arch/x86/vdso/vma.c
-+++ b/arch/x86/vdso/vma.c
-@@ -16,8 +16,6 @@
- #include <asm/vdso.h>
- #include <asm/page.h>
-
--unsigned int __read_mostly vdso_enabled = 1;
--
- extern char vdso_start[], vdso_end[];
- extern unsigned short vdso_sync_cpuid;
-
-@@ -119,13 +117,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
- int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
- {
- struct mm_struct *mm = current->mm;
-- unsigned long addr;
-+ unsigned long addr = 0;
- int ret;
-
-- if (!vdso_enabled)
-- return 0;
--
- down_write(&mm->mmap_sem);
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- addr = vdso_addr(mm->start_stack, vdso_size);
- addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
- if (IS_ERR_VALUE(addr)) {
-@@ -133,26 +133,18 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
- goto up_fail;
- }
-
-- current->mm->context.vdso = (void *)addr;
-+ mm->context.vdso = addr;
-
- ret = install_special_mapping(mm, addr, vdso_size,
- VM_READ|VM_EXEC|
- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
- VM_ALWAYSDUMP,
- vdso_pages);
-- if (ret) {
-- current->mm->context.vdso = NULL;
-- goto up_fail;
-- }
-+
-+ if (ret)
-+ mm->context.vdso = 0;
-
- up_fail:
- up_write(&mm->mmap_sem);
- return ret;
- }
--
--static __init int vdso_setup(char *s)
--{
-- vdso_enabled = simple_strtoul(s, NULL, 0);
-- return 0;
--}
--__setup("vdso=", vdso_setup);
-diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
-index 26c731a..fb510c7 100644
---- a/arch/x86/xen/Kconfig
-+++ b/arch/x86/xen/Kconfig
-@@ -8,6 +8,7 @@ config XEN
- select PARAVIRT_CLOCK
- depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS)
- depends on X86_CMPXCHG && X86_TSC
-+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
- help
- This is the Linux Xen port. Enabling this will allow the
- kernel to boot in a paravirtualized environment under the
-diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index 5189fe8..1bf8944 100644
---- a/arch/x86/xen/enlighten.c
-+++ b/arch/x86/xen/enlighten.c
-@@ -86,8 +86,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
-
- struct shared_info xen_dummy_shared_info;
-
--void *xen_initial_gdt;
--
- RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
- __read_mostly int xen_have_vector_callback;
- EXPORT_SYMBOL_GPL(xen_have_vector_callback);
-@@ -382,8 +380,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
- {
- unsigned long va = dtr->address;
- unsigned int size = dtr->size + 1;
-- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-- unsigned long frames[pages];
-+ unsigned long frames[65536 / PAGE_SIZE];
- int f;
-
- /*
-@@ -431,8 +428,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
- {
- unsigned long va = dtr->address;
- unsigned int size = dtr->size + 1;
-- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-- unsigned long frames[pages];
-+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
- int f;
-
- /*
-@@ -440,7 +436,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
- * 8-byte entries, or 16 4k pages..
- */
-
-- BUG_ON(size > 65536);
-+ BUG_ON(size > GDT_SIZE);
- BUG_ON(va & ~PAGE_MASK);
-
- for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
-@@ -1072,30 +1068,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
- #endif
- };
-
--static void xen_reboot(int reason)
-+static __noreturn void xen_reboot(int reason)
- {
- struct sched_shutdown r = { .reason = reason };
-
-- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
-- BUG();
-+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
-+ BUG();
- }
-
--static void xen_restart(char *msg)
-+static __noreturn void xen_restart(char *msg)
- {
- xen_reboot(SHUTDOWN_reboot);
- }
-
--static void xen_emergency_restart(void)
-+static __noreturn void xen_emergency_restart(void)
- {
- xen_reboot(SHUTDOWN_reboot);
- }
-
--static void xen_machine_halt(void)
-+static __noreturn void xen_machine_halt(void)
- {
- xen_reboot(SHUTDOWN_poweroff);
- }
-
--static void xen_machine_power_off(void)
-+static void __noreturn xen_machine_power_off(void)
- {
- if (pm_power_off)
- pm_power_off();
-@@ -1144,6 +1140,9 @@ static void __init xen_setup_stackprotector(void)
- pv_cpu_ops.load_gdt = xen_load_gdt_boot;
-
- setup_stack_canary_segment(0);
-+#ifdef CONFIG_X86_64
-+ load_percpu_segment(0);
-+#endif
- switch_to_new_gdt(0);
-
- pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
-@@ -1196,7 +1195,17 @@ asmlinkage void __init xen_start_kernel(void)
- __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
-
- /* Work out if we support NX */
-- x86_configure_nx();
-+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
-+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
-+ unsigned l, h;
-+
-+ __supported_pte_mask |= _PAGE_NX;
-+ rdmsr(MSR_EFER, l, h);
-+ l |= EFER_NX;
-+ wrmsr(MSR_EFER, l, h);
-+ }
-+#endif
-
- xen_setup_features();
-
-@@ -1227,13 +1236,6 @@ asmlinkage void __init xen_start_kernel(void)
-
- machine_ops = xen_machine_ops;
-
-- /*
-- * The only reliable way to retain the initial address of the
-- * percpu gdt_page is to remember it here, so we can go and
-- * mark it RW later, when the initial percpu area is freed.
-- */
-- xen_initial_gdt = &per_cpu(gdt_page, 0);
--
- xen_smp_init();
-
- #ifdef CONFIG_ACPI_NUMA
-@@ -1418,7 +1420,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
- return NOTIFY_OK;
- }
-
--static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
-+static struct notifier_block xen_hvm_cpu_notifier = {
- .notifier_call = xen_hvm_cpu_notify,
- };
-
-diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
-index fe00be69..c51170f 100644
---- a/arch/x86/xen/mmu.c
-+++ b/arch/x86/xen/mmu.c
-@@ -365,7 +365,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
- return val;
- }
-
--static pteval_t pte_pfn_to_mfn(pteval_t val)
-+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
- {
- if (val & _PAGE_PRESENT) {
- unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
-@@ -1757,6 +1757,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
- convert_pfn_mfn(init_level4_pgt);
- convert_pfn_mfn(level3_ident_pgt);
- convert_pfn_mfn(level3_kernel_pgt);
-+ convert_pfn_mfn(level3_vmalloc_start_pgt);
-+ convert_pfn_mfn(level3_vmalloc_end_pgt);
-+ convert_pfn_mfn(level3_vmemmap_pgt);
-
- l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
- l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
-@@ -1775,7 +1778,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
- set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
- set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
- set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
-+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
-+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
-+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
- set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
-+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
- set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
- set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
-
-@@ -1986,6 +1993,7 @@ static void __init xen_post_allocator_init(void)
- pv_mmu_ops.set_pud = xen_set_pud;
- #if PAGETABLE_LEVELS == 4
- pv_mmu_ops.set_pgd = xen_set_pgd;
-+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
- #endif
-
- /* This will work as long as patching hasn't happened yet
-@@ -2067,6 +2075,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
- .pud_val = PV_CALLEE_SAVE(xen_pud_val),
- .make_pud = PV_CALLEE_SAVE(xen_make_pud),
- .set_pgd = xen_set_pgd_hyper,
-+ .set_pgd_batched = xen_set_pgd_hyper,
-
- .alloc_pud = xen_alloc_pmd_init,
- .release_pud = xen_release_pmd_init,
-diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
-index 6e4d5dc..78c131b 100644
---- a/arch/x86/xen/smp.c
-+++ b/arch/x86/xen/smp.c
-@@ -209,11 +209,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
- {
- BUG_ON(smp_processor_id() != 0);
- native_smp_prepare_boot_cpu();
--
-- /* We've switched to the "real" per-cpu gdt, so make sure the
-- old memory can be recycled */
-- make_lowmem_page_readwrite(xen_initial_gdt);
--
- xen_filter_cpu_maps();
- xen_setup_vcpu_info_placement();
- }
-@@ -290,12 +285,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
- gdt = get_cpu_gdt_table(cpu);
-
- ctxt->flags = VGCF_IN_KERNEL;
-- ctxt->user_regs.ds = __USER_DS;
-- ctxt->user_regs.es = __USER_DS;
-+ ctxt->user_regs.ds = __KERNEL_DS;
-+ ctxt->user_regs.es = __KERNEL_DS;
- ctxt->user_regs.ss = __KERNEL_DS;
- #ifdef CONFIG_X86_32
- ctxt->user_regs.fs = __KERNEL_PERCPU;
-- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
-+ savesegment(gs, ctxt->user_regs.gs);
- #else
- ctxt->gs_base_kernel = per_cpu_offset(cpu);
- #endif
-@@ -346,13 +341,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
- int rc;
-
- per_cpu(current_task, cpu) = idle;
-+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
- #ifdef CONFIG_X86_32
- irq_ctx_init(cpu);
- #else
- clear_tsk_thread_flag(idle, TIF_FORK);
-- per_cpu(kernel_stack, cpu) =
-- (unsigned long)task_stack_page(idle) -
-- KERNEL_STACK_OFFSET + THREAD_SIZE;
-+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
- #endif
- xen_setup_runstate_info(cpu);
- xen_setup_timer(cpu);
-@@ -536,7 +530,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
-
- void __init xen_smp_init(void)
- {
-- smp_ops = xen_smp_ops;
-+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
- xen_fill_possible_map();
- xen_init_spinlocks();
- }
-diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
-index 7328f71..c457aa7 100644
---- a/arch/x86/xen/xen-asm_32.S
-+++ b/arch/x86/xen/xen-asm_32.S
-@@ -83,14 +83,14 @@ ENTRY(xen_iret)
- ESP_OFFSET=4 # bytes pushed onto stack
-
- /*
-- * Store vcpu_info pointer for easy access. Do it this way to
-- * avoid having to reload %fs
-+ * Store vcpu_info pointer for easy access.
- */
- #ifdef CONFIG_SMP
-- GET_THREAD_INFO(%eax)
-- movl %ss:TI_cpu(%eax), %eax
-- movl %ss:__per_cpu_offset(,%eax,4), %eax
-- mov %ss:xen_vcpu(%eax), %eax
-+ push %fs
-+ mov $(__KERNEL_PERCPU), %eax
-+ mov %eax, %fs
-+ mov PER_CPU_VAR(xen_vcpu), %eax
-+ pop %fs
- #else
- movl %ss:xen_vcpu, %eax
- #endif
-diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
-index aaa7291..3f77960 100644
---- a/arch/x86/xen/xen-head.S
-+++ b/arch/x86/xen/xen-head.S
-@@ -19,6 +19,17 @@ ENTRY(startup_xen)
- #ifdef CONFIG_X86_32
- mov %esi,xen_start_info
- mov $init_thread_union+THREAD_SIZE,%esp
-+#ifdef CONFIG_SMP
-+ movl $cpu_gdt_table,%edi
-+ movl $__per_cpu_load,%eax
-+ movw %ax,__KERNEL_PERCPU + 2(%edi)
-+ rorl $16,%eax
-+ movb %al,__KERNEL_PERCPU + 4(%edi)
-+ movb %ah,__KERNEL_PERCPU + 7(%edi)
-+ movl $__per_cpu_end - 1,%eax
-+ subl $__per_cpu_start,%eax
-+ movw %ax,__KERNEL_PERCPU + 0(%edi)
-+#endif
- #else
- mov %rsi,xen_start_info
- mov $init_thread_union+THREAD_SIZE,%rsp
-diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
-index b095739..8c17bcd 100644
---- a/arch/x86/xen/xen-ops.h
-+++ b/arch/x86/xen/xen-ops.h
-@@ -10,8 +10,6 @@
- extern const char xen_hypervisor_callback[];
- extern const char xen_failsafe_callback[];
-
--extern void *xen_initial_gdt;
--
- struct trap_info;
- void xen_copy_trap_info(struct trap_info *traps);
-
-diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
-index 525bd3d..ef888b1 100644
---- a/arch/xtensa/variants/dc232b/include/variant/core.h
-+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
-@@ -119,9 +119,9 @@
- ----------------------------------------------------------------------*/
-
- #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
--#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
- #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
- #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
-+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
-
- #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
- #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
-diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
-index 2f33760..835e50a 100644
---- a/arch/xtensa/variants/fsf/include/variant/core.h
-+++ b/arch/xtensa/variants/fsf/include/variant/core.h
-@@ -11,6 +11,7 @@
- #ifndef _XTENSA_CORE_H
- #define _XTENSA_CORE_H
-
-+#include <linux/const.h>
-
- /****************************************************************************
- Parameters Useful for Any Code, USER or PRIVILEGED
-@@ -112,9 +113,9 @@
- ----------------------------------------------------------------------*/
-
- #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
--#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
- #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
- #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
-+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
-
- #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
- #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
-diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
-index af00795..2bb8105 100644
---- a/arch/xtensa/variants/s6000/include/variant/core.h
-+++ b/arch/xtensa/variants/s6000/include/variant/core.h
-@@ -11,6 +11,7 @@
- #ifndef _XTENSA_CORE_CONFIGURATION_H
- #define _XTENSA_CORE_CONFIGURATION_H
-
-+#include <linux/const.h>
-
- /****************************************************************************
- Parameters Useful for Any Code, USER or PRIVILEGED
-@@ -118,9 +119,9 @@
- ----------------------------------------------------------------------*/
-
- #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
--#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
- #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
- #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
-+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
-
- #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
- #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
-diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
-index 58916af..9b538a6 100644
---- a/block/blk-iopoll.c
-+++ b/block/blk-iopoll.c
-@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
- }
- EXPORT_SYMBOL(blk_iopoll_complete);
-
--static void blk_iopoll_softirq(struct softirq_action *h)
-+static __latent_entropy void blk_iopoll_softirq(void)
- {
- struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
- int rearm = 0, budget = blk_iopoll_budget;
-@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
-+static struct notifier_block blk_iopoll_cpu_notifier = {
- .notifier_call = blk_iopoll_cpu_notify,
- };
-
-diff --git a/block/blk-map.c b/block/blk-map.c
-index 623e1cd..ca1e109 100644
---- a/block/blk-map.c
-+++ b/block/blk-map.c
-@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
- if (!len || !kbuf)
- return -EINVAL;
-
-- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
-+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
- if (do_copy)
- bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
- else
-diff --git a/block/blk-softirq.c b/block/blk-softirq.c
-index 1366a89..88178fe 100644
---- a/block/blk-softirq.c
-+++ b/block/blk-softirq.c
-@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
- * Softirq action handler - move entries to local list and loop over them
- * while passing them to the queue registered handler.
- */
--static void blk_done_softirq(struct softirq_action *h)
-+static __latent_entropy void blk_done_softirq(void)
- {
- struct list_head *cpu_list, local_list;
-
-@@ -97,7 +97,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata blk_cpu_notifier = {
-+static struct notifier_block blk_cpu_notifier = {
- .notifier_call = blk_cpu_notify,
- };
-
-diff --git a/block/blk-throttle.c b/block/blk-throttle.c
-index 5eed6a7..0e8abe9 100644
---- a/block/blk-throttle.c
-+++ b/block/blk-throttle.c
-@@ -361,6 +361,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
- /* Group allocation failed. Account the IO to root group */
- if (!tg) {
- tg = td->root_tg;
-+ rcu_read_unlock();
- return tg;
- }
-
-diff --git a/block/bsg.c b/block/bsg.c
-index c0ab25c..9d49f8f 100644
---- a/block/bsg.c
-+++ b/block/bsg.c
-@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
- struct sg_io_v4 *hdr, struct bsg_device *bd,
- fmode_t has_write_perm)
- {
-+ unsigned char tmpcmd[sizeof(rq->__cmd)];
-+ unsigned char *cmdptr;
-+
- if (hdr->request_len > BLK_MAX_CDB) {
- rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
- if (!rq->cmd)
- return -ENOMEM;
-- }
-+ cmdptr = rq->cmd;
-+ } else
-+ cmdptr = tmpcmd;
-
-- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
-+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
- hdr->request_len))
- return -EFAULT;
-
-+ if (cmdptr != rq->cmd)
-+ memcpy(rq->cmd, cmdptr, hdr->request_len);
-+
- if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
- if (blk_verify_command(rq->cmd, has_write_perm))
- return -EPERM;
-diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
-index 7b72502..3d7b647 100644
---- a/block/compat_ioctl.c
-+++ b/block/compat_ioctl.c
-@@ -155,7 +155,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
- cgc = compat_alloc_user_space(sizeof(*cgc));
- cgc32 = compat_ptr(arg);
-
-- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
-+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
- get_user(data, &cgc32->buffer) ||
- put_user(compat_ptr(data), &cgc->buffer) ||
- copy_in_user(&cgc->buflen, &cgc32->buflen,
-@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
- err |= __get_user(f->spec1, &uf->spec1);
- err |= __get_user(f->fmt_gap, &uf->fmt_gap);
- err |= __get_user(name, &uf->name);
-- f->name = compat_ptr(name);
-+ f->name = (void __force_kernel *)compat_ptr(name);
- if (err) {
- err = -EFAULT;
- goto out;
-diff --git a/block/genhd.c b/block/genhd.c
-index 424d1fa..8e99b22 100644
---- a/block/genhd.c
-+++ b/block/genhd.c
-@@ -472,21 +472,24 @@ static char *bdevt_str(dev_t devt, char *buf)
-
- /*
- * Register device numbers dev..(dev+range-1)
-- * range must be nonzero
-+ * Noop if @range is zero.
- * The hash chain is sorted on range, so that subranges can override.
- */
- void blk_register_region(dev_t devt, unsigned long range, struct module *module,
- struct kobject *(*probe)(dev_t, int *, void *),
- int (*lock)(dev_t, void *), void *data)
- {
-- kobj_map(bdev_map, devt, range, module, probe, lock, data);
-+ if (range)
-+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
- }
-
- EXPORT_SYMBOL(blk_register_region);
-
-+/* undo blk_register_region(), noop if @range is zero */
- void blk_unregister_region(dev_t devt, unsigned long range)
- {
-- kobj_unmap(bdev_map, devt, range);
-+ if (range)
-+ kobj_unmap(bdev_map, devt, range);
- }
-
- EXPORT_SYMBOL(blk_unregister_region);
-diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
-index f124268..e5bfd12 100644
---- a/block/scsi_ioctl.c
-+++ b/block/scsi_ioctl.c
-@@ -66,7 +66,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
- return put_user(0, p);
- }
-
--static int sg_get_timeout(struct request_queue *q)
-+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
- {
- return jiffies_to_clock_t(q->sg_timeout);
- }
-@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
- static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
- struct sg_io_hdr *hdr, fmode_t mode)
- {
-- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
-+ unsigned char tmpcmd[sizeof(rq->__cmd)];
-+ unsigned char *cmdptr;
-+
-+ if (rq->cmd != rq->__cmd)
-+ cmdptr = rq->cmd;
-+ else
-+ cmdptr = tmpcmd;
-+
-+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
- return -EFAULT;
-+
-+ if (cmdptr != rq->cmd)
-+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
-+
- if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
- return -EPERM;
-
-@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
- int err;
- unsigned int in_len, out_len, bytes, opcode, cmdlen;
- char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
-+ unsigned char tmpcmd[sizeof(rq->__cmd)];
-+ unsigned char *cmdptr;
-
- if (!sic)
- return -EINVAL;
-@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
- */
- err = -EFAULT;
- rq->cmd_len = cmdlen;
-- if (copy_from_user(rq->cmd, sic->data, cmdlen))
-+
-+ if (rq->cmd != rq->__cmd)
-+ cmdptr = rq->cmd;
-+ else
-+ cmdptr = tmpcmd;
-+
-+ if (copy_from_user(cmdptr, sic->data, cmdlen))
- goto error;
-
-+ if (rq->cmd != cmdptr)
-+ memcpy(rq->cmd, cmdptr, cmdlen);
-+
- if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
- goto error;
-
-diff --git a/crypto/api.c b/crypto/api.c
-index ac80794..dd053f8 100644
---- a/crypto/api.c
-+++ b/crypto/api.c
-@@ -42,6 +42,8 @@ static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
- return alg;
- }
-
-+static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
-+
- struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
- {
- return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
-diff --git a/crypto/cryptd.c b/crypto/cryptd.c
-index 75c415d..0b21cd8 100644
---- a/crypto/cryptd.c
-+++ b/crypto/cryptd.c
-@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
-
- struct cryptd_blkcipher_request_ctx {
- crypto_completion_t complete;
--};
-+} __no_const;
-
- struct cryptd_hash_ctx {
- struct crypto_shash *child;
-@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
-
- struct cryptd_aead_request_ctx {
- crypto_completion_t complete;
--};
-+} __no_const;
-
- static void cryptd_queue_worker(struct work_struct *work);
-
-diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
-index 5b63b8d..6f46ba0 100644
---- a/crypto/crypto_user.c
-+++ b/crypto/crypto_user.c
-@@ -26,6 +26,8 @@
- #include <net/net_namespace.h>
- #include "internal.h"
-
-+#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
-+
- DEFINE_MUTEX(crypto_cfg_mutex);
-
- /* The crypto netlink socket */
-@@ -192,7 +194,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
- struct crypto_dump_info info;
- int err;
-
-- if (!p->cru_driver_name)
-+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
-+ return -EINVAL;
-+
-+ if (!p->cru_driver_name[0])
- return -EINVAL;
-
- alg = crypto_alg_match(p, 1);
-@@ -256,6 +261,9 @@ static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
- LIST_HEAD(list);
-
-+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
-+ return -EINVAL;
-+
- if (priority && !strlen(p->cru_driver_name))
- return -EINVAL;
-
-@@ -283,6 +291,9 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct crypto_alg *alg;
- struct crypto_user_alg *p = nlmsg_data(nlh);
-
-+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
-+ return -EINVAL;
-+
- alg = crypto_alg_match(p, 1);
- if (!alg)
- return -ENOENT;
-@@ -310,6 +321,9 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct crypto_user_alg *p = nlmsg_data(nlh);
- struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
-
-+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
-+ return -EINVAL;
-+
- if (strlen(p->cru_driver_name))
- exact = 1;
-
-diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
-index ba92046..2d5921a 100644
---- a/crypto/pcrypt.c
-+++ b/crypto/pcrypt.c
-@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
- int ret;
-
- pinst->kobj.kset = pcrypt_kset;
-- ret = kobject_add(&pinst->kobj, NULL, name);
-+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
- if (!ret)
- kobject_uevent(&pinst->kobj, KOBJ_ADD);
-
-diff --git a/crypto/zlib.c b/crypto/zlib.c
-index d980788..2422b3d 100644
---- a/crypto/zlib.c
-+++ b/crypto/zlib.c
-@@ -95,10 +95,10 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
- zlib_comp_exit(ctx);
-
- window_bits = tb[ZLIB_COMP_WINDOWBITS]
-- ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
-+ ? nla_get_s32(tb[ZLIB_COMP_WINDOWBITS])
- : MAX_WBITS;
- mem_level = tb[ZLIB_COMP_MEMLEVEL]
-- ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
-+ ? nla_get_s32(tb[ZLIB_COMP_MEMLEVEL])
- : DEF_MEM_LEVEL;
-
- workspacesize = zlib_deflate_workspacesize(window_bits, mem_level);
-diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
-index f57050e..7ccfc74 100644
---- a/drivers/acpi/apei/apei-internal.h
-+++ b/drivers/acpi/apei/apei-internal.h
-@@ -18,7 +18,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
- struct apei_exec_ins_type {
- u32 flags;
- apei_exec_ins_func_t run;
--};
-+} __do_const;
-
- struct apei_exec_context {
- u32 ip;
-diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
-index 5d41894..22021e4 100644
---- a/drivers/acpi/apei/cper.c
-+++ b/drivers/acpi/apei/cper.c
-@@ -38,12 +38,12 @@
- */
- u64 cper_next_record_id(void)
- {
-- static atomic64_t seq;
-+ static atomic64_unchecked_t seq;
-
-- if (!atomic64_read(&seq))
-- atomic64_set(&seq, ((u64)get_seconds()) << 32);
-+ if (!atomic64_read_unchecked(&seq))
-+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
-
-- return atomic64_inc_return(&seq);
-+ return atomic64_inc_return_unchecked(&seq);
- }
- EXPORT_SYMBOL_GPL(cper_next_record_id);
-
-diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
-index cfc0cc1..61fdbaa 100644
---- a/drivers/acpi/atomicio.c
-+++ b/drivers/acpi/atomicio.c
-@@ -286,6 +286,7 @@ static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
- break;
- #endif
- default:
-+ rcu_read_unlock();
- return -EINVAL;
- }
- rcu_read_unlock();
-@@ -315,6 +316,7 @@ static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
- break;
- #endif
- default:
-+ rcu_read_unlock();
- return -EINVAL;
- }
- rcu_read_unlock();
-diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
-index cb96296..b81293b 100644
---- a/drivers/acpi/blacklist.c
-+++ b/drivers/acpi/blacklist.c
-@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
- u32 is_critical_error;
- };
-
--static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
-+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
-
- /*
- * POLICY: If *anything* doesn't work, put it on the blacklist.
-@@ -193,7 +193,7 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
- return 0;
- }
-
--static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
-+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
- {
- .callback = dmi_disable_osi_vista,
- .ident = "Fujitsu Siemens",
-diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
-index 3a0e92a..4348f2a 100644
---- a/drivers/acpi/bus.c
-+++ b/drivers/acpi/bus.c
-@@ -72,7 +72,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
- }
- #endif
-
--static struct dmi_system_id dsdt_dmi_table[] __initdata = {
-+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
- /*
- * Invoke DSDT corruption work-around on all Toshiba Satellite.
- * https://bugzilla.kernel.org/show_bug.cgi?id=14679
-@@ -88,7 +88,7 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = {
- {}
- };
- #else
--static struct dmi_system_id dsdt_dmi_table[] __initdata = {
-+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
- {}
- };
- #endif
-diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
-index 5d42c24..4964b94 100644
---- a/drivers/acpi/custom_method.c
-+++ b/drivers/acpi/custom_method.c
-@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
- struct acpi_table_header table;
- acpi_status status;
-
-+#ifdef CONFIG_GRKERNSEC_KMEM
-+ return -EPERM;
-+#endif
-+
- if (!(*ppos)) {
- /* parse the table header to get the table length */
- if (count <= sizeof(struct acpi_table_header))
-diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
-index 30229af..eaddfaf 100644
---- a/drivers/acpi/ec.c
-+++ b/drivers/acpi/ec.c
-@@ -1018,7 +1018,7 @@ static int ec_clear_on_resume(const struct dmi_system_id *id)
- return 0;
- }
-
--static struct dmi_system_id __initdata ec_dmi_table[] = {
-+static const struct dmi_system_id __initconst ec_dmi_table[] = {
- {
- ec_skip_dsdt_scan, "Compal JFL92", {
- DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
-diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
-index 6c47ae9..abfdd63 100644
---- a/drivers/acpi/ec_sys.c
-+++ b/drivers/acpi/ec_sys.c
-@@ -12,6 +12,7 @@
- #include <linux/acpi.h>
- #include <linux/debugfs.h>
- #include <linux/module.h>
-+#include <linux/uaccess.h>
- #include "internal.h"
-
- MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
-@@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
- * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
- */
- unsigned int size = EC_SPACE_SIZE;
-- u8 *data = (u8 *) buf;
-+ u8 data;
- loff_t init_off = *off;
- int err = 0;
-
-@@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
- size = count;
-
- while (size) {
-- err = ec_read(*off, &data[*off - init_off]);
-+ err = ec_read(*off, &data);
- if (err)
- return err;
-+ if (put_user(data, &buf[*off - init_off]))
-+ return -EFAULT;
- *off += 1;
- size--;
- }
-@@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
-
- unsigned int size = count;
- loff_t init_off = *off;
-- u8 *data = (u8 *) buf;
- int err = 0;
-
- if (*off >= EC_SPACE_SIZE)
-@@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
- }
-
- while (size) {
-- u8 byte_write = data[*off - init_off];
-+ u8 byte_write;
-+ if (get_user(byte_write, &buf[*off - init_off]))
-+ return -EFAULT;
- err = ec_write(*off, byte_write);
- if (err)
- return err;
-diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
-index 07f7fea..118388f 100644
---- a/drivers/acpi/pci_slot.c
-+++ b/drivers/acpi/pci_slot.c
-@@ -336,7 +336,7 @@ static int do_sta_before_sun(const struct dmi_system_id *d)
- return 0;
- }
-
--static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
-+static const struct dmi_system_id acpi_pci_slot_dmi_table[] __initconst = {
- /*
- * Fujitsu Primequest machines will return 1023 to indicate an
- * error if the _SUN method is evaluated on SxFy objects that
-diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
-index 251c7b62..feab1d6 100644
---- a/drivers/acpi/proc.c
-+++ b/drivers/acpi/proc.c
-@@ -345,16 +345,13 @@ acpi_system_write_wakeup_device(struct file *file,
- struct list_head *node, *next;
- char strbuf[5];
- char str[5] = "";
-- unsigned int len = count;
-
-- if (len > 4)
-- len = 4;
-- if (len < 0)
-- return -EFAULT;
-+ if (count > 4)
-+ count = 4;
-
-- if (copy_from_user(strbuf, buffer, len))
-+ if (copy_from_user(strbuf, buffer, count))
- return -EFAULT;
-- strbuf[len] = '\0';
-+ strbuf[count] = '\0';
- sscanf(strbuf, "%s", str);
-
- mutex_lock(&acpi_device_lock);
-diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
-index eff7222..fea8ae4 100644
---- a/drivers/acpi/processor_core.c
-+++ b/drivers/acpi/processor_core.c
-@@ -28,7 +28,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id)
- return 0;
- }
-
--static struct dmi_system_id __initdata processor_idle_dmi_table[] = {
-+static const struct dmi_system_id __initconst processor_idle_dmi_table[] = {
- {
- set_no_mwait, "Extensa 5220", {
- DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
-diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
-index ac28db3..0848b37 100644
---- a/drivers/acpi/processor_driver.c
-+++ b/drivers/acpi/processor_driver.c
-@@ -474,7 +474,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
- return 0;
- #endif
-
-- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
-+ BUG_ON(pr->id >= nr_cpu_ids);
-
- /*
- * Buggy BIOS check
-diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
-index 388ba10..d509dbb 100644
---- a/drivers/acpi/processor_idle.c
-+++ b/drivers/acpi/processor_idle.c
-@@ -1036,7 +1036,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
- {
- int i, count = CPUIDLE_DRIVER_STATE_START;
- struct acpi_processor_cx *cx;
-- struct cpuidle_state *state;
-+ cpuidle_state_no_const *state;
- struct cpuidle_driver *drv = &acpi_idle_driver;
-
- if (!pr->flags.power_setup_done)
-diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
-index cc9d020..b72fc03 100644
---- a/drivers/acpi/sleep.c
-+++ b/drivers/acpi/sleep.c
-@@ -120,7 +120,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
- return 0;
- }
-
--static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
-+static const struct dmi_system_id __initconst acpisleep_dmi_table[] = {
- {
- .callback = init_old_suspend_ordering,
- .ident = "Abit KN9 (nForce4 variant)",
-diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
-index 240a244..bc6239e 100644
---- a/drivers/acpi/sysfs.c
-+++ b/drivers/acpi/sysfs.c
-@@ -420,11 +420,11 @@ static u32 num_counters;
- static struct attribute **all_attrs;
- static u32 acpi_gpe_count;
-
--static struct attribute_group interrupt_stats_attr_group = {
-+static attribute_group_no_const interrupt_stats_attr_group = {
- .name = "interrupts",
- };
-
--static struct kobj_attribute *counter_attrs;
-+static kobj_attribute_no_const *counter_attrs;
-
- static void delete_gpe_attr_array(void)
- {
-diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
-index 48fbc64..d0a31d4 100644
---- a/drivers/acpi/thermal.c
-+++ b/drivers/acpi/thermal.c
-@@ -1110,7 +1110,7 @@ static int thermal_psv(const struct dmi_system_id *d) {
- return 0;
- }
-
--static struct dmi_system_id thermal_dmi_table[] __initdata = {
-+static const struct dmi_system_id thermal_dmi_table[] __initconst = {
- /*
- * Award BIOS on this AOpen makes thermal control almost worthless.
- * http://bugzilla.kernel.org/show_bug.cgi?id=8842
-diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
-index cb842a8..6688e24 100644
---- a/drivers/acpi/video.c
-+++ b/drivers/acpi/video.c
-@@ -395,7 +395,7 @@ static int video_ignore_initial_backlight(const struct dmi_system_id *d)
- return 0;
- }
-
--static struct dmi_system_id video_dmi_table[] __initdata = {
-+static const struct dmi_system_id video_dmi_table[] __initconst = {
- /*
- * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
- */
-diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
-index de2802c..2260da9 100644
---- a/drivers/ata/libahci.c
-+++ b/drivers/ata/libahci.c
-@@ -1211,7 +1211,7 @@ int ahci_kick_engine(struct ata_port *ap)
- }
- EXPORT_SYMBOL_GPL(ahci_kick_engine);
-
--static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
-+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
- struct ata_taskfile *tf, int is_cmd, u16 flags,
- unsigned long timeout_msec)
- {
-diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
-index 5d8fc3d..d537f03 100644
---- a/drivers/ata/libata-core.c
-+++ b/drivers/ata/libata-core.c
-@@ -4790,7 +4790,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
- struct ata_port *ap;
- unsigned int tag;
-
-- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
-+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
- ap = qc->ap;
-
- qc->flags = 0;
-@@ -4806,7 +4806,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
- struct ata_port *ap;
- struct ata_link *link;
-
-- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
-+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
- WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
- ap = qc->ap;
- link = qc->dev->link;
-@@ -5811,6 +5811,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
- return;
-
- spin_lock(&lock);
-+ pax_open_kernel();
-
- for (cur = ops->inherits; cur; cur = cur->inherits) {
- void **inherit = (void **)cur;
-@@ -5824,8 +5825,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
- if (IS_ERR(*pp))
- *pp = NULL;
-
-- ops->inherits = NULL;
-+ *(struct ata_port_operations **)&ops->inherits = NULL;
-
-+ pax_close_kernel();
- spin_unlock(&lock);
- }
-
-diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
-index e8574bb..f9f6a72 100644
---- a/drivers/ata/pata_arasan_cf.c
-+++ b/drivers/ata/pata_arasan_cf.c
-@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
- /* Handle platform specific quirks */
- if (pdata->quirk) {
- if (pdata->quirk & CF_BROKEN_PIO) {
-- ap->ops->set_piomode = NULL;
-+ pax_open_kernel();
-+ *(void **)&ap->ops->set_piomode = NULL;
-+ pax_close_kernel();
- ap->pio_mask = 0;
- }
- if (pdata->quirk & CF_BROKEN_MWDMA)
-diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
-index f9b983a..887b9d8 100644
---- a/drivers/atm/adummy.c
-+++ b/drivers/atm/adummy.c
-@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
- vcc->pop(vcc, skb);
- else
- dev_kfree_skb_any(skb);
-- atomic_inc(&vcc->stats->tx);
-+ atomic_inc_unchecked(&vcc->stats->tx);
-
- return 0;
- }
-diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
-index 89b30f3..7964211d4 100644
---- a/drivers/atm/ambassador.c
-+++ b/drivers/atm/ambassador.c
-@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
- PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
-
- // VC layer stats
-- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
-+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
-
- // free the descriptor
- kfree (tx_descr);
-@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
- dump_skb ("<<<", vc, skb);
-
- // VC layer stats
-- atomic_inc(&atm_vcc->stats->rx);
-+ atomic_inc_unchecked(&atm_vcc->stats->rx);
- __net_timestamp(skb);
- // end of our responsibility
- atm_vcc->push (atm_vcc, skb);
-@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
- } else {
- PRINTK (KERN_INFO, "dropped over-size frame");
- // should we count this?
-- atomic_inc(&atm_vcc->stats->rx_drop);
-+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
- }
-
- } else {
-@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
- }
-
- if (check_area (skb->data, skb->len)) {
-- atomic_inc(&atm_vcc->stats->tx_err);
-+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
- return -ENOMEM; // ?
- }
-
-diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
-index b22d71c..d6e1049 100644
---- a/drivers/atm/atmtcp.c
-+++ b/drivers/atm/atmtcp.c
-@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
- if (vcc->pop) vcc->pop(vcc,skb);
- else dev_kfree_skb(skb);
- if (dev_data) return 0;
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
- return -ENOLINK;
- }
- size = skb->len+sizeof(struct atmtcp_hdr);
-@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
- if (!new_skb) {
- if (vcc->pop) vcc->pop(vcc,skb);
- else dev_kfree_skb(skb);
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
- return -ENOBUFS;
- }
- hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
-@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
- if (vcc->pop) vcc->pop(vcc,skb);
- else dev_kfree_skb(skb);
- out_vcc->push(out_vcc,new_skb);
-- atomic_inc(&vcc->stats->tx);
-- atomic_inc(&out_vcc->stats->rx);
-+ atomic_inc_unchecked(&vcc->stats->tx);
-+ atomic_inc_unchecked(&out_vcc->stats->rx);
- return 0;
- }
-
-@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
- out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
- read_unlock(&vcc_sklist_lock);
- if (!out_vcc) {
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
- goto done;
- }
- skb_pull(skb,sizeof(struct atmtcp_hdr));
-@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
- __net_timestamp(new_skb);
- skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
- out_vcc->push(out_vcc,new_skb);
-- atomic_inc(&vcc->stats->tx);
-- atomic_inc(&out_vcc->stats->rx);
-+ atomic_inc_unchecked(&vcc->stats->tx);
-+ atomic_inc_unchecked(&out_vcc->stats->rx);
- done:
- if (vcc->pop) vcc->pop(vcc,skb);
- else dev_kfree_skb(skb);
-diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
-index 956e9ac..133516d 100644
---- a/drivers/atm/eni.c
-+++ b/drivers/atm/eni.c
-@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
- DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
- vcc->dev->number);
- length = 0;
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- }
- else {
- length = ATM_CELL_SIZE-1; /* no HEC */
-@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
- size);
- }
- eff = length = 0;
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- }
- else {
- size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
-@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
- "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
- vcc->dev->number,vcc->vci,length,size << 2,descr);
- length = eff = 0;
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- }
- }
- skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
-@@ -771,7 +771,7 @@ rx_dequeued++;
- vcc->push(vcc,skb);
- pushed++;
- }
-- atomic_inc(&vcc->stats->rx);
-+ atomic_inc_unchecked(&vcc->stats->rx);
- }
- wake_up(&eni_dev->rx_wait);
- }
-@@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
- PCI_DMA_TODEVICE);
- if (vcc->pop) vcc->pop(vcc,skb);
- else dev_kfree_skb_irq(skb);
-- atomic_inc(&vcc->stats->tx);
-+ atomic_inc_unchecked(&vcc->stats->tx);
- wake_up(&eni_dev->tx_wait);
- dma_complete++;
- }
-@@ -1569,7 +1569,7 @@ tx_complete++;
- /*--------------------------------- entries ---------------------------------*/
-
-
--static const char *media_name[] __devinitdata = {
-+static const char *media_name[] __devinitconst = {
- "MMF", "SMF", "MMF", "03?", /* 0- 3 */
- "UTP", "05?", "06?", "07?", /* 4- 7 */
- "TAXI","09?", "10?", "11?", /* 8-11 */
-diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
-index 5072f8a..fa52520d 100644
---- a/drivers/atm/firestream.c
-+++ b/drivers/atm/firestream.c
-@@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
- }
- }
-
-- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
-+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
-
- fs_dprintk (FS_DEBUG_TXMEM, "i");
- fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
-@@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
- #endif
- skb_put (skb, qe->p1 & 0xffff);
- ATM_SKB(skb)->vcc = atm_vcc;
-- atomic_inc(&atm_vcc->stats->rx);
-+ atomic_inc_unchecked(&atm_vcc->stats->rx);
- __net_timestamp(skb);
- fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
- atm_vcc->push (atm_vcc, skb);
-@@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
- kfree (pe);
- }
- if (atm_vcc)
-- atomic_inc(&atm_vcc->stats->rx_drop);
-+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
- break;
- case 0x1f: /* Reassembly abort: no buffers. */
- /* Silently increment error counter. */
- if (atm_vcc)
-- atomic_inc(&atm_vcc->stats->rx_drop);
-+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
- break;
- default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
- printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
-diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
-index 361f5ae..7fc552d 100644
---- a/drivers/atm/fore200e.c
-+++ b/drivers/atm/fore200e.c
-@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
- #endif
- /* check error condition */
- if (*entry->status & STATUS_ERROR)
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
- else
-- atomic_inc(&vcc->stats->tx);
-+ atomic_inc_unchecked(&vcc->stats->tx);
- }
- }
-
-@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
- if (skb == NULL) {
- DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
-
-- atomic_inc(&vcc->stats->rx_drop);
-+ atomic_inc_unchecked(&vcc->stats->rx_drop);
- return -ENOMEM;
- }
-
-@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
-
- dev_kfree_skb_any(skb);
-
-- atomic_inc(&vcc->stats->rx_drop);
-+ atomic_inc_unchecked(&vcc->stats->rx_drop);
- return -ENOMEM;
- }
-
- ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
-
- vcc->push(vcc, skb);
-- atomic_inc(&vcc->stats->rx);
-+ atomic_inc_unchecked(&vcc->stats->rx);
-
- ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
-
-@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
- DPRINTK(2, "damaged PDU on %d.%d.%d\n",
- fore200e->atm_dev->number,
- entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- }
- }
-
-@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
- goto retry_here;
- }
-
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
-
- fore200e->tx_sat++;
- DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
-diff --git a/drivers/atm/he.c b/drivers/atm/he.c
-index 9a51df4..f3bb5f8 100644
---- a/drivers/atm/he.c
-+++ b/drivers/atm/he.c
-@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
-
- if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
- hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
-- atomic_inc(&vcc->stats->rx_drop);
-+ atomic_inc_unchecked(&vcc->stats->rx_drop);
- goto return_host_buffers;
- }
-
-@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
- RBRQ_LEN_ERR(he_dev->rbrq_head)
- ? "LEN_ERR" : "",
- vcc->vpi, vcc->vci);
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- goto return_host_buffers;
- }
-
-@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
- vcc->push(vcc, skb);
- spin_lock(&he_dev->global_lock);
-
-- atomic_inc(&vcc->stats->rx);
-+ atomic_inc_unchecked(&vcc->stats->rx);
-
- return_host_buffers:
- ++pdus_assembled;
-@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
- tpd->vcc->pop(tpd->vcc, tpd->skb);
- else
- dev_kfree_skb_any(tpd->skb);
-- atomic_inc(&tpd->vcc->stats->tx_err);
-+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
- }
- pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
- return;
-@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
- vcc->pop(vcc, skb);
- else
- dev_kfree_skb_any(skb);
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
- return -EINVAL;
- }
-
-@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
- vcc->pop(vcc, skb);
- else
- dev_kfree_skb_any(skb);
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
- return -EINVAL;
- }
- #endif
-@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
- vcc->pop(vcc, skb);
- else
- dev_kfree_skb_any(skb);
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
- spin_unlock_irqrestore(&he_dev->global_lock, flags);
- return -ENOMEM;
- }
-@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
- vcc->pop(vcc, skb);
- else
- dev_kfree_skb_any(skb);
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
- spin_unlock_irqrestore(&he_dev->global_lock, flags);
- return -ENOMEM;
- }
-@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
- __enqueue_tpd(he_dev, tpd, cid);
- spin_unlock_irqrestore(&he_dev->global_lock, flags);
-
-- atomic_inc(&vcc->stats->tx);
-+ atomic_inc_unchecked(&vcc->stats->tx);
-
- return 0;
- }
-diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
-index b812103..e391a49 100644
---- a/drivers/atm/horizon.c
-+++ b/drivers/atm/horizon.c
-@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
- {
- struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
- // VC layer stats
-- atomic_inc(&vcc->stats->rx);
-+ atomic_inc_unchecked(&vcc->stats->rx);
- __net_timestamp(skb);
- // end of our responsibility
- vcc->push (vcc, skb);
-@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
- dev->tx_iovec = NULL;
-
- // VC layer stats
-- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
-+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
-
- // free the skb
- hrz_kfree_skb (skb);
-diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
-index 81845fa..a4367d7 100644
---- a/drivers/atm/idt77252.c
-+++ b/drivers/atm/idt77252.c
-@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
- else
- dev_kfree_skb(skb);
-
-- atomic_inc(&vcc->stats->tx);
-+ atomic_inc_unchecked(&vcc->stats->tx);
- }
-
- atomic_dec(&scq->used);
-@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
- if ((sb = dev_alloc_skb(64)) == NULL) {
- printk("%s: Can't allocate buffers for aal0.\n",
- card->name);
-- atomic_add(i, &vcc->stats->rx_drop);
-+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
- break;
- }
- if (!atm_charge(vcc, sb->truesize)) {
- RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
- card->name);
-- atomic_add(i - 1, &vcc->stats->rx_drop);
-+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
- dev_kfree_skb(sb);
- break;
- }
-@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
- ATM_SKB(sb)->vcc = vcc;
- __net_timestamp(sb);
- vcc->push(vcc, sb);
-- atomic_inc(&vcc->stats->rx);
-+ atomic_inc_unchecked(&vcc->stats->rx);
-
- cell += ATM_CELL_PAYLOAD;
- }
-@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
- "(CDC: %08x)\n",
- card->name, len, rpp->len, readl(SAR_REG_CDC));
- recycle_rx_pool_skb(card, rpp);
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- return;
- }
- if (stat & SAR_RSQE_CRC) {
- RXPRINTK("%s: AAL5 CRC error.\n", card->name);
- recycle_rx_pool_skb(card, rpp);
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- return;
- }
- if (skb_queue_len(&rpp->queue) > 1) {
-@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
- RXPRINTK("%s: Can't alloc RX skb.\n",
- card->name);
- recycle_rx_pool_skb(card, rpp);
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- return;
- }
- if (!atm_charge(vcc, skb->truesize)) {
-@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
- __net_timestamp(skb);
-
- vcc->push(vcc, skb);
-- atomic_inc(&vcc->stats->rx);
-+ atomic_inc_unchecked(&vcc->stats->rx);
-
- return;
- }
-@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
- __net_timestamp(skb);
-
- vcc->push(vcc, skb);
-- atomic_inc(&vcc->stats->rx);
-+ atomic_inc_unchecked(&vcc->stats->rx);
-
- if (skb->truesize > SAR_FB_SIZE_3)
- add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
-@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
- if (vcc->qos.aal != ATM_AAL0) {
- RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
- card->name, vpi, vci);
-- atomic_inc(&vcc->stats->rx_drop);
-+ atomic_inc_unchecked(&vcc->stats->rx_drop);
- goto drop;
- }
-
- if ((sb = dev_alloc_skb(64)) == NULL) {
- printk("%s: Can't allocate buffers for AAL0.\n",
- card->name);
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- goto drop;
- }
-
-@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
- ATM_SKB(sb)->vcc = vcc;
- __net_timestamp(sb);
- vcc->push(vcc, sb);
-- atomic_inc(&vcc->stats->rx);
-+ atomic_inc_unchecked(&vcc->stats->rx);
-
- drop:
- skb_pull(queue, 64);
-@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
-
- if (vc == NULL) {
- printk("%s: NULL connection in send().\n", card->name);
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
- dev_kfree_skb(skb);
- return -EINVAL;
- }
- if (!test_bit(VCF_TX, &vc->flags)) {
- printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
- dev_kfree_skb(skb);
- return -EINVAL;
- }
-@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
- break;
- default:
- printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
- dev_kfree_skb(skb);
- return -EINVAL;
- }
-
- if (skb_shinfo(skb)->nr_frags != 0) {
- printk("%s: No scatter-gather yet.\n", card->name);
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
- dev_kfree_skb(skb);
- return -EINVAL;
- }
-@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
-
- err = queue_skb(card, vc, skb, oam);
- if (err) {
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
- dev_kfree_skb(skb);
- return err;
- }
-@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
- skb = dev_alloc_skb(64);
- if (!skb) {
- printk("%s: Out of memory in send_oam().\n", card->name);
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
- return -ENOMEM;
- }
- atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
-diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
-index 3d0c2b0..45441fa 100644
---- a/drivers/atm/iphase.c
-+++ b/drivers/atm/iphase.c
-@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
- status = (u_short) (buf_desc_ptr->desc_mode);
- if (status & (RX_CER | RX_PTE | RX_OFL))
- {
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- IF_ERR(printk("IA: bad packet, dropping it");)
- if (status & RX_CER) {
- IF_ERR(printk(" cause: packet CRC error\n");)
-@@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
- len = dma_addr - buf_addr;
- if (len > iadev->rx_buf_sz) {
- printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- goto out_free_desc;
- }
-
-@@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
- ia_vcc = INPH_IA_VCC(vcc);
- if (ia_vcc == NULL)
- {
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- dev_kfree_skb_any(skb);
- atm_return(vcc, atm_guess_pdu2truesize(len));
- goto INCR_DLE;
-@@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
- if ((length > iadev->rx_buf_sz) || (length >
- (skb->len - sizeof(struct cpcs_trailer))))
- {
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
- length, skb->len);)
- dev_kfree_skb_any(skb);
-@@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
-
- IF_RX(printk("rx_dle_intr: skb push");)
- vcc->push(vcc,skb);
-- atomic_inc(&vcc->stats->rx);
-+ atomic_inc_unchecked(&vcc->stats->rx);
- iadev->rx_pkt_cnt++;
- }
- INCR_DLE:
-@@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
- {
- struct k_sonet_stats *stats;
- stats = &PRIV(_ia_dev[board])->sonet_stats;
-- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
-- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
-- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
-- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
-- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
-- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
-- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
-- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
-- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
-+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
-+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
-+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
-+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
-+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
-+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
-+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
-+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
-+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
- }
- ia_cmds.status = 0;
- break;
-@@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
- if ((desc == 0) || (desc > iadev->num_tx_desc))
- {
- IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
-- atomic_inc(&vcc->stats->tx);
-+ atomic_inc_unchecked(&vcc->stats->tx);
- if (vcc->pop)
- vcc->pop(vcc, skb);
- else
-@@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
- ATM_DESC(skb) = vcc->vci;
- skb_queue_tail(&iadev->tx_dma_q, skb);
-
-- atomic_inc(&vcc->stats->tx);
-+ atomic_inc_unchecked(&vcc->stats->tx);
- iadev->tx_pkt_cnt++;
- /* Increment transaction counter */
- writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
-
- #if 0
- /* add flow control logic */
-- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
-+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
- if (iavcc->vc_desc_cnt > 10) {
- vcc->tx_quota = vcc->tx_quota * 3 / 4;
- printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
-diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
-index f5569699..0da15eb 100644
---- a/drivers/atm/lanai.c
-+++ b/drivers/atm/lanai.c
-@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
- vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
- lanai_endtx(lanai, lvcc);
- lanai_free_skb(lvcc->tx.atmvcc, skb);
-- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
-+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
- }
-
- /* Try to fill the buffer - don't call unless there is backlog */
-@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
- ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
- __net_timestamp(skb);
- lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
-- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
-+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
- out:
- lvcc->rx.buf.ptr = end;
- cardvcc_write(lvcc, endptr, vcc_rxreadptr);
-@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
- DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
- "vcc %d\n", lanai->number, (unsigned int) s, vci);
- lanai->stats.service_rxnotaal5++;
-- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
-+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
- return 0;
- }
- if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
-@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
- int bytes;
- read_unlock(&vcc_sklist_lock);
- DPRINTK("got trashed rx pdu on vci %d\n", vci);
-- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
-+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
- lvcc->stats.x.aal5.service_trash++;
- bytes = (SERVICE_GET_END(s) * 16) -
- (((unsigned long) lvcc->rx.buf.ptr) -
-@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
- }
- if (s & SERVICE_STREAM) {
- read_unlock(&vcc_sklist_lock);
-- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
-+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
- lvcc->stats.x.aal5.service_stream++;
- printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
- "PDU on VCI %d!\n", lanai->number, vci);
-@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
- return 0;
- }
- DPRINTK("got rx crc error on vci %d\n", vci);
-- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
-+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
- lvcc->stats.x.aal5.service_rxcrc++;
- lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
- cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
-diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
-index 1c70c45..300718d 100644
---- a/drivers/atm/nicstar.c
-+++ b/drivers/atm/nicstar.c
-@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
- if ((vc = (vc_map *) vcc->dev_data) == NULL) {
- printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
- card->index);
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
- dev_kfree_skb_any(skb);
- return -EINVAL;
- }
-@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
- if (!vc->tx) {
- printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
- card->index);
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
- dev_kfree_skb_any(skb);
- return -EINVAL;
- }
-@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
- if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
- printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
- card->index);
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
- dev_kfree_skb_any(skb);
- return -EINVAL;
- }
-
- if (skb_shinfo(skb)->nr_frags != 0) {
- printk("nicstar%d: No scatter-gather yet.\n", card->index);
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
- dev_kfree_skb_any(skb);
- return -EINVAL;
- }
-@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
- }
-
- if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
-- atomic_inc(&vcc->stats->tx_err);
-+ atomic_inc_unchecked(&vcc->stats->tx_err);
- dev_kfree_skb_any(skb);
- return -EIO;
- }
-- atomic_inc(&vcc->stats->tx);
-+ atomic_inc_unchecked(&vcc->stats->tx);
-
- return 0;
- }
-@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
- printk
- ("nicstar%d: Can't allocate buffers for aal0.\n",
- card->index);
-- atomic_add(i, &vcc->stats->rx_drop);
-+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
- break;
- }
- if (!atm_charge(vcc, sb->truesize)) {
- RXPRINTK
- ("nicstar%d: atm_charge() dropped aal0 packets.\n",
- card->index);
-- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
-+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
- dev_kfree_skb_any(sb);
- break;
- }
-@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
- ATM_SKB(sb)->vcc = vcc;
- __net_timestamp(sb);
- vcc->push(vcc, sb);
-- atomic_inc(&vcc->stats->rx);
-+ atomic_inc_unchecked(&vcc->stats->rx);
- cell += ATM_CELL_PAYLOAD;
- }
-
-@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
- if (iovb == NULL) {
- printk("nicstar%d: Out of iovec buffers.\n",
- card->index);
-- atomic_inc(&vcc->stats->rx_drop);
-+ atomic_inc_unchecked(&vcc->stats->rx_drop);
- recycle_rx_buf(card, skb);
- return;
- }
-@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
- small or large buffer itself. */
- } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
- printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
- NS_MAX_IOVECS);
- NS_PRV_IOVCNT(iovb) = 0;
-@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
- ("nicstar%d: Expected a small buffer, and this is not one.\n",
- card->index);
- which_list(card, skb);
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- recycle_rx_buf(card, skb);
- vc->rx_iov = NULL;
- recycle_iov_buf(card, iovb);
-@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
- ("nicstar%d: Expected a large buffer, and this is not one.\n",
- card->index);
- which_list(card, skb);
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
- NS_PRV_IOVCNT(iovb));
- vc->rx_iov = NULL;
-@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
- printk(" - PDU size mismatch.\n");
- else
- printk(".\n");
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
- NS_PRV_IOVCNT(iovb));
- vc->rx_iov = NULL;
-@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
- /* skb points to a small buffer */
- if (!atm_charge(vcc, skb->truesize)) {
- push_rxbufs(card, skb);
-- atomic_inc(&vcc->stats->rx_drop);
-+ atomic_inc_unchecked(&vcc->stats->rx_drop);
- } else {
- skb_put(skb, len);
- dequeue_sm_buf(card, skb);
-@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
- ATM_SKB(skb)->vcc = vcc;
- __net_timestamp(skb);
- vcc->push(vcc, skb);
-- atomic_inc(&vcc->stats->rx);
-+ atomic_inc_unchecked(&vcc->stats->rx);
- }
- } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
- struct sk_buff *sb;
-@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
- if (len <= NS_SMBUFSIZE) {
- if (!atm_charge(vcc, sb->truesize)) {
- push_rxbufs(card, sb);
-- atomic_inc(&vcc->stats->rx_drop);
-+ atomic_inc_unchecked(&vcc->stats->rx_drop);
- } else {
- skb_put(sb, len);
- dequeue_sm_buf(card, sb);
-@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
- ATM_SKB(sb)->vcc = vcc;
- __net_timestamp(sb);
- vcc->push(vcc, sb);
-- atomic_inc(&vcc->stats->rx);
-+ atomic_inc_unchecked(&vcc->stats->rx);
- }
-
- push_rxbufs(card, skb);
-@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
-
- if (!atm_charge(vcc, skb->truesize)) {
- push_rxbufs(card, skb);
-- atomic_inc(&vcc->stats->rx_drop);
-+ atomic_inc_unchecked(&vcc->stats->rx_drop);
- } else {
- dequeue_lg_buf(card, skb);
- #ifdef NS_USE_DESTRUCTORS
-@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
- ATM_SKB(skb)->vcc = vcc;
- __net_timestamp(skb);
- vcc->push(vcc, skb);
-- atomic_inc(&vcc->stats->rx);
-+ atomic_inc_unchecked(&vcc->stats->rx);
- }
-
- push_rxbufs(card, sb);
-@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
- printk
- ("nicstar%d: Out of huge buffers.\n",
- card->index);
-- atomic_inc(&vcc->stats->rx_drop);
-+ atomic_inc_unchecked(&vcc->stats->rx_drop);
- recycle_iovec_rx_bufs(card,
- (struct iovec *)
- iovb->data,
-@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
- card->hbpool.count++;
- } else
- dev_kfree_skb_any(hb);
-- atomic_inc(&vcc->stats->rx_drop);
-+ atomic_inc_unchecked(&vcc->stats->rx_drop);
- } else {
- /* Copy the small buffer to the huge buffer */
- sb = (struct sk_buff *)iov->iov_base;
-@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
- #endif /* NS_USE_DESTRUCTORS */
- __net_timestamp(hb);
- vcc->push(vcc, hb);
-- atomic_inc(&vcc->stats->rx);
-+ atomic_inc_unchecked(&vcc->stats->rx);
- }
- }
-
-diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
-index adfce9f..35501e1 100644
---- a/drivers/atm/solos-pci.c
-+++ b/drivers/atm/solos-pci.c
-@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
- }
- atm_charge(vcc, skb->truesize);
- vcc->push(vcc, skb);
-- atomic_inc(&vcc->stats->rx);
-+ atomic_inc_unchecked(&vcc->stats->rx);
- break;
-
- case PKT_STATUS:
-@@ -1010,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_card *card)
- vcc = SKB_CB(oldskb)->vcc;
-
- if (vcc) {
-- atomic_inc(&vcc->stats->tx);
-+ atomic_inc_unchecked(&vcc->stats->tx);
- solos_pop(vcc, oldskb);
- } else
- dev_kfree_skb_irq(oldskb);
-diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
-index 90f1ccc..04c4a1e 100644
---- a/drivers/atm/suni.c
-+++ b/drivers/atm/suni.c
-@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
-
-
- #define ADD_LIMITED(s,v) \
-- atomic_add((v),&stats->s); \
-- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
-+ atomic_add_unchecked((v),&stats->s); \
-+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
-
-
- static void suni_hz(unsigned long from_timer)
-diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
-index 5120a96..e2572bd 100644
---- a/drivers/atm/uPD98402.c
-+++ b/drivers/atm/uPD98402.c
-@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
- struct sonet_stats tmp;
- int error = 0;
-
-- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
-+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
- sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
- if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
- if (zero && !error) {
-@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
-
-
- #define ADD_LIMITED(s,v) \
-- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
-- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
-- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
-+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
-+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
-+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
-
-
- static void stat_event(struct atm_dev *dev)
-@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
- if (reason & uPD98402_INT_PFM) stat_event(dev);
- if (reason & uPD98402_INT_PCO) {
- (void) GET(PCOCR); /* clear interrupt cause */
-- atomic_add(GET(HECCT),
-+ atomic_add_unchecked(GET(HECCT),
- &PRIV(dev)->sonet_stats.uncorr_hcs);
- }
- if ((reason & uPD98402_INT_RFO) &&
-@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
- PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
- uPD98402_INT_LOS),PIMR); /* enable them */
- (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
-- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
-- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
-- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
-+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
-+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
-+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
- return 0;
- }
-
-diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
-index d889f56..17eb71e 100644
---- a/drivers/atm/zatm.c
-+++ b/drivers/atm/zatm.c
-@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
- }
- if (!size) {
- dev_kfree_skb_irq(skb);
-- if (vcc) atomic_inc(&vcc->stats->rx_err);
-+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
- continue;
- }
- if (!atm_charge(vcc,skb->truesize)) {
-@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
- skb->len = size;
- ATM_SKB(skb)->vcc = vcc;
- vcc->push(vcc,skb);
-- atomic_inc(&vcc->stats->rx);
-+ atomic_inc_unchecked(&vcc->stats->rx);
- }
- zout(pos & 0xffff,MTA(mbx));
- #if 0 /* probably a stupid idea */
-@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
- skb_queue_head(&zatm_vcc->backlog,skb);
- break;
- }
-- atomic_inc(&vcc->stats->tx);
-+ atomic_inc_unchecked(&vcc->stats->tx);
- wake_up(&zatm_vcc->tx_wait);
- }
-
-diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
-index 8fc200b..32763bb 100644
---- a/drivers/base/attribute_container.c
-+++ b/drivers/base/attribute_container.c
-@@ -167,7 +167,7 @@ attribute_container_add_device(struct device *dev,
- ic->classdev.parent = get_device(dev);
- ic->classdev.class = cont->class;
- cont->class->dev_release = attribute_container_release;
-- dev_set_name(&ic->classdev, dev_name(dev));
-+ dev_set_name(&ic->classdev, "%s", dev_name(dev));
- if (fn)
- fn(cont, dev, &ic->classdev);
- else
-diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
-index a4760e0..ea524a0 100644
---- a/drivers/base/devtmpfs.c
-+++ b/drivers/base/devtmpfs.c
-@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
- if (!thread)
- return 0;
-
-- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
-+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
- if (err)
- printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
- else
-@@ -393,11 +393,11 @@ static int devtmpfsd(void *p)
- *err = sys_unshare(CLONE_NEWNS);
- if (*err)
- goto out;
-- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
-+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
- if (*err)
- goto out;
-- sys_chdir("/.."); /* will traverse into overmounted root */
-- sys_chroot(".");
-+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
-+ sys_chroot((char __force_user *)".");
- complete(&setup_done);
- while (1) {
- spin_lock(&req_lock);
-diff --git a/drivers/base/node.c b/drivers/base/node.c
-index 5693ece..e39a621 100644
---- a/drivers/base/node.c
-+++ b/drivers/base/node.c
-@@ -587,18 +587,16 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
- {
- int n;
-
-- n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
-- if (n > 0 && PAGE_SIZE > n + 1) {
-- *(buf + n++) = '\n';
-- *(buf + n++) = '\0';
-- }
-+ n = nodelist_scnprintf(buf, PAGE_SIZE-2, node_states[state]);
-+ buf[n++] = '\n';
-+ buf[n] = '\0';
- return n;
- }
-
- struct node_attr {
- struct sysdev_class_attribute attr;
- enum node_states state;
--};
-+} __do_const;
-
- static ssize_t show_node_state(struct sysdev_class *class,
- struct sysdev_class_attribute *attr, char *buf)
-diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
-index adf41be0..b044daf 100644
---- a/drivers/base/power/sysfs.c
-+++ b/drivers/base/power/sysfs.c
-@@ -184,7 +184,7 @@ static ssize_t rtpm_status_show(struct device *dev,
- return -EIO;
- }
- }
-- return sprintf(buf, p);
-+ return sprintf(buf, "%s", p);
- }
-
- static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
-diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
-index caf995f..6f76697 100644
---- a/drivers/base/power/wakeup.c
-+++ b/drivers/base/power/wakeup.c
-@@ -30,14 +30,14 @@ bool events_check_enabled;
- * They need to be modified together atomically, so it's better to use one
- * atomic variable to hold them both.
- */
--static atomic_t combined_event_count = ATOMIC_INIT(0);
-+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
-
- #define IN_PROGRESS_BITS (sizeof(int) * 4)
- #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
-
- static void split_counters(unsigned int *cnt, unsigned int *inpr)
- {
-- unsigned int comb = atomic_read(&combined_event_count);
-+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
-
- *cnt = (comb >> IN_PROGRESS_BITS);
- *inpr = comb & MAX_IN_PROGRESS;
-@@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
- ws->last_time = ktime_get();
-
- /* Increment the counter of events in progress. */
-- atomic_inc(&combined_event_count);
-+ atomic_inc_unchecked(&combined_event_count);
- }
-
- /**
-@@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
- * Increment the counter of registered wakeup events and decrement the
- * couter of wakeup events in progress simultaneously.
- */
-- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
-+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
- }
-
- /**
-diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
-index e8d11b6..7b1b36f 100644
---- a/drivers/base/syscore.c
-+++ b/drivers/base/syscore.c
-@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
- void register_syscore_ops(struct syscore_ops *ops)
- {
- mutex_lock(&syscore_ops_lock);
-- list_add_tail(&ops->node, &syscore_ops_list);
-+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
- mutex_unlock(&syscore_ops_lock);
- }
- EXPORT_SYMBOL_GPL(register_syscore_ops);
-@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
- void unregister_syscore_ops(struct syscore_ops *ops)
- {
- mutex_lock(&syscore_ops_lock);
-- list_del(&ops->node);
-+ pax_list_del((struct list_head *)&ops->node);
- mutex_unlock(&syscore_ops_lock);
- }
- EXPORT_SYMBOL_GPL(unregister_syscore_ops);
-diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
-index d7ad865..61ddf2c 100644
---- a/drivers/block/cciss.c
-+++ b/drivers/block/cciss.c
-@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
- while (!list_empty(&h->reqQ)) {
- c = list_entry(h->reqQ.next, CommandList_struct, list);
- /* can't do anything if fifo is full */
-- if ((h->access.fifo_full(h))) {
-+ if ((h->access->fifo_full(h))) {
- dev_warn(&h->pdev->dev, "fifo full\n");
- break;
- }
-@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
- h->Qdepth--;
-
- /* Tell the controller execute command */
-- h->access.submit_command(h, c);
-+ h->access->submit_command(h, c);
-
- /* Put job onto the completed Q */
- addQ(&h->cmpQ, c);
-@@ -3444,17 +3444,17 @@ startio:
-
- static inline unsigned long get_next_completion(ctlr_info_t *h)
- {
-- return h->access.command_completed(h);
-+ return h->access->command_completed(h);
- }
-
- static inline int interrupt_pending(ctlr_info_t *h)
- {
-- return h->access.intr_pending(h);
-+ return h->access->intr_pending(h);
- }
-
- static inline long interrupt_not_for_us(ctlr_info_t *h)
- {
-- return ((h->access.intr_pending(h) == 0) ||
-+ return ((h->access->intr_pending(h) == 0) ||
- (h->interrupts_enabled == 0));
- }
-
-@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
- u32 a;
-
- if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
-- return h->access.command_completed(h);
-+ return h->access->command_completed(h);
-
- if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
- a = *(h->reply_pool_head); /* Next cmd in ring buffer */
-@@ -4045,7 +4045,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
- trans_support & CFGTBL_Trans_use_short_tags);
-
- /* Change the access methods to the performant access methods */
-- h->access = SA5_performant_access;
-+ h->access = &SA5_performant_access;
- h->transMethod = CFGTBL_Trans_Performant;
-
- return;
-@@ -4317,7 +4317,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
- if (prod_index < 0)
- return -ENODEV;
- h->product_name = products[prod_index].product_name;
-- h->access = *(products[prod_index].access);
-+ h->access = products[prod_index].access;
-
- if (cciss_board_disabled(h)) {
- dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
-@@ -5042,7 +5042,7 @@ reinit_after_soft_reset:
- }
-
- /* make sure the board interrupts are off */
-- h->access.set_intr_mask(h, CCISS_INTR_OFF);
-+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
- rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
- if (rc)
- goto clean2;
-@@ -5094,7 +5094,7 @@ reinit_after_soft_reset:
- * fake ones to scoop up any residual completions.
- */
- spin_lock_irqsave(&h->lock, flags);
-- h->access.set_intr_mask(h, CCISS_INTR_OFF);
-+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
- spin_unlock_irqrestore(&h->lock, flags);
- free_irq(h->intr[h->intr_mode], h);
- rc = cciss_request_irq(h, cciss_msix_discard_completions,
-@@ -5114,9 +5114,9 @@ reinit_after_soft_reset:
- dev_info(&h->pdev->dev, "Board READY.\n");
- dev_info(&h->pdev->dev,
- "Waiting for stale completions to drain.\n");
-- h->access.set_intr_mask(h, CCISS_INTR_ON);
-+ h->access->set_intr_mask(h, CCISS_INTR_ON);
- msleep(10000);
-- h->access.set_intr_mask(h, CCISS_INTR_OFF);
-+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
-
- rc = controller_reset_failed(h->cfgtable);
- if (rc)
-@@ -5139,7 +5139,7 @@ reinit_after_soft_reset:
- cciss_scsi_setup(h);
-
- /* Turn the interrupts on so we can service requests */
-- h->access.set_intr_mask(h, CCISS_INTR_ON);
-+ h->access->set_intr_mask(h, CCISS_INTR_ON);
-
- /* Get the firmware version */
- inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
-@@ -5212,7 +5212,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
- kfree(flush_buf);
- if (return_code != IO_OK)
- dev_warn(&h->pdev->dev, "Error flushing cache\n");
-- h->access.set_intr_mask(h, CCISS_INTR_OFF);
-+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
- free_irq(h->intr[h->intr_mode], h);
- }
-
-diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
-index 7fda30e..2f27946 100644
---- a/drivers/block/cciss.h
-+++ b/drivers/block/cciss.h
-@@ -101,7 +101,7 @@ struct ctlr_info
- /* information about each logical volume */
- drive_info_struct *drv[CISS_MAX_LUN];
-
-- struct access_method access;
-+ struct access_method *access;
-
- /* queue and queue Info */
- struct list_head reqQ;
-@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
- }
-
- static struct access_method SA5_access = {
-- SA5_submit_command,
-- SA5_intr_mask,
-- SA5_fifo_full,
-- SA5_intr_pending,
-- SA5_completed,
-+ .submit_command = SA5_submit_command,
-+ .set_intr_mask = SA5_intr_mask,
-+ .fifo_full = SA5_fifo_full,
-+ .intr_pending = SA5_intr_pending,
-+ .command_completed = SA5_completed,
- };
-
- static struct access_method SA5B_access = {
-- SA5_submit_command,
-- SA5B_intr_mask,
-- SA5_fifo_full,
-- SA5B_intr_pending,
-- SA5_completed,
-+ .submit_command = SA5_submit_command,
-+ .set_intr_mask = SA5B_intr_mask,
-+ .fifo_full = SA5_fifo_full,
-+ .intr_pending = SA5B_intr_pending,
-+ .command_completed = SA5_completed,
- };
-
- static struct access_method SA5_performant_access = {
-- SA5_submit_command,
-- SA5_performant_intr_mask,
-- SA5_fifo_full,
-- SA5_performant_intr_pending,
-- SA5_performant_completed,
-+ .submit_command = SA5_submit_command,
-+ .set_intr_mask = SA5_performant_intr_mask,
-+ .fifo_full = SA5_fifo_full,
-+ .intr_pending = SA5_performant_intr_pending,
-+ .command_completed = SA5_performant_completed,
- };
-
- struct board_type {
-diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
-index 504bc16..e13b631 100644
---- a/drivers/block/cpqarray.c
-+++ b/drivers/block/cpqarray.c
-@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
- if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
- goto Enomem4;
- }
-- hba[i]->access.set_intr_mask(hba[i], 0);
-+ hba[i]->access->set_intr_mask(hba[i], 0);
- if (request_irq(hba[i]->intr, do_ida_intr,
- IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
- {
-@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
- add_timer(&hba[i]->timer);
-
- /* Enable IRQ now that spinlock and rate limit timer are set up */
-- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
-+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
-
- for(j=0; j<NWD; j++) {
- struct gendisk *disk = ida_gendisk[i][j];
-@@ -694,7 +694,7 @@ DBGINFO(
- for(i=0; i<NR_PRODUCTS; i++) {
- if (board_id == products[i].board_id) {
- c->product_name = products[i].product_name;
-- c->access = *(products[i].access);
-+ c->access = products[i].access;
- break;
- }
- }
-@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
- hba[ctlr]->intr = intr;
- sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
- hba[ctlr]->product_name = products[j].product_name;
-- hba[ctlr]->access = *(products[j].access);
-+ hba[ctlr]->access = products[j].access;
- hba[ctlr]->ctlr = ctlr;
- hba[ctlr]->board_id = board_id;
- hba[ctlr]->pci_dev = NULL; /* not PCI */
-@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
-
- while((c = h->reqQ) != NULL) {
- /* Can't do anything if we're busy */
-- if (h->access.fifo_full(h) == 0)
-+ if (h->access->fifo_full(h) == 0)
- return;
-
- /* Get the first entry from the request Q */
-@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
- h->Qdepth--;
-
- /* Tell the controller to do our bidding */
-- h->access.submit_command(h, c);
-+ h->access->submit_command(h, c);
-
- /* Get onto the completion Q */
- addQ(&h->cmpQ, c);
-@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
- unsigned long flags;
- __u32 a,a1;
-
-- istat = h->access.intr_pending(h);
-+ istat = h->access->intr_pending(h);
- /* Is this interrupt for us? */
- if (istat == 0)
- return IRQ_NONE;
-@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
- */
- spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
- if (istat & FIFO_NOT_EMPTY) {
-- while((a = h->access.command_completed(h))) {
-+ while((a = h->access->command_completed(h))) {
- a1 = a; a &= ~3;
- if ((c = h->cmpQ) == NULL)
- {
-@@ -1450,11 +1450,11 @@ static int sendcmd(
- /*
- * Disable interrupt
- */
-- info_p->access.set_intr_mask(info_p, 0);
-+ info_p->access->set_intr_mask(info_p, 0);
- /* Make sure there is room in the command FIFO */
- /* Actually it should be completely empty at this time. */
- for (i = 200000; i > 0; i--) {
-- temp = info_p->access.fifo_full(info_p);
-+ temp = info_p->access->fifo_full(info_p);
- if (temp != 0) {
- break;
- }
-@@ -1467,7 +1467,7 @@ DBG(
- /*
- * Send the cmd
- */
-- info_p->access.submit_command(info_p, c);
-+ info_p->access->submit_command(info_p, c);
- complete = pollcomplete(ctlr);
-
- pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
-@@ -1550,9 +1550,9 @@ static int revalidate_allvol(ctlr_info_t *host)
- * we check the new geometry. Then turn interrupts back on when
- * we're done.
- */
-- host->access.set_intr_mask(host, 0);
-+ host->access->set_intr_mask(host, 0);
- getgeometry(ctlr);
-- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
-+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
-
- for(i=0; i<NWD; i++) {
- struct gendisk *disk = ida_gendisk[ctlr][i];
-@@ -1592,7 +1592,7 @@ static int pollcomplete(int ctlr)
- /* Wait (up to 2 seconds) for a command to complete */
-
- for (i = 200000; i > 0; i--) {
-- done = hba[ctlr]->access.command_completed(hba[ctlr]);
-+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
- if (done == 0) {
- udelay(10); /* a short fixed delay */
- } else
-diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
-index be73e9d..7fbf140 100644
---- a/drivers/block/cpqarray.h
-+++ b/drivers/block/cpqarray.h
-@@ -99,7 +99,7 @@ struct ctlr_info {
- drv_info_t drv[NWD];
- struct proc_dir_entry *proc;
-
-- struct access_method access;
-+ struct access_method *access;
-
- cmdlist_t *reqQ;
- cmdlist_t *cmpQ;
-diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
-index 912f585..610551e 100644
---- a/drivers/block/drbd/drbd_bitmap.c
-+++ b/drivers/block/drbd/drbd_bitmap.c
-@@ -992,7 +992,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
- submit_bio(rw, bio);
- /* this should not count as user activity and cause the
- * resync to throttle -- see drbd_rs_should_slow_down(). */
-- atomic_add(len >> 9, &mdev->rs_sect_ev);
-+ atomic_add_unchecked(len >> 9, &mdev->rs_sect_ev);
- }
- }
-
-diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
-index 9cf2035..c8cbfe1 100644
---- a/drivers/block/drbd/drbd_int.h
-+++ b/drivers/block/drbd/drbd_int.h
-@@ -736,7 +736,7 @@ struct drbd_request;
- struct drbd_epoch {
- struct list_head list;
- unsigned int barrier_nr;
-- atomic_t epoch_size; /* increased on every request added. */
-+ atomic_unchecked_t epoch_size; /* increased on every request added. */
- atomic_t active; /* increased on every req. added, and dec on every finished. */
- unsigned long flags;
- };
-@@ -1108,7 +1108,7 @@ struct drbd_conf {
- void *int_dig_in;
- void *int_dig_vv;
- wait_queue_head_t seq_wait;
-- atomic_t packet_seq;
-+ atomic_unchecked_t packet_seq;
- unsigned int peer_seq;
- spinlock_t peer_seq_lock;
- unsigned int minor;
-@@ -1118,8 +1118,8 @@ struct drbd_conf {
- u64 ed_uuid; /* UUID of the exposed data */
- struct mutex state_mutex;
- char congestion_reason; /* Why we where congested... */
-- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
-- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
-+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
-+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
- int rs_last_sect_ev; /* counter to compare with */
- int rs_last_events; /* counter of read or write "events" (unit sectors)
- * on the lower level device when we last looked. */
-@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
-
- static inline void drbd_tcp_cork(struct socket *sock)
- {
-- int __user val = 1;
-+ int val = 1;
- (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
-- (char __user *)&val, sizeof(val));
-+ (char __force_user *)&val, sizeof(val));
- }
-
- static inline void drbd_tcp_uncork(struct socket *sock)
- {
-- int __user val = 0;
-+ int val = 0;
- (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
-- (char __user *)&val, sizeof(val));
-+ (char __force_user *)&val, sizeof(val));
- }
-
- static inline void drbd_tcp_nodelay(struct socket *sock)
- {
-- int __user val = 1;
-+ int val = 1;
- (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
-- (char __user *)&val, sizeof(val));
-+ (char __force_user *)&val, sizeof(val));
- }
-
- static inline void drbd_tcp_quickack(struct socket *sock)
- {
-- int __user val = 2;
-+ int val = 2;
- (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
-- (char __user *)&val, sizeof(val));
-+ (char __force_user *)&val, sizeof(val));
- }
-
- void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
-diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
-index 0358e55..4e25ed1 100644
---- a/drivers/block/drbd/drbd_main.c
-+++ b/drivers/block/drbd/drbd_main.c
-@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
- p.sector = sector;
- p.block_id = block_id;
- p.blksize = blksize;
-- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
-+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
-
- if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
- return false;
-@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
- p.sector = cpu_to_be64(req->sector);
- p.block_id = (unsigned long)req;
- p.seq_num = cpu_to_be32(req->seq_num =
-- atomic_add_return(1, &mdev->packet_seq));
-+ atomic_add_return_unchecked(1, &mdev->packet_seq));
-
- dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
-
-@@ -2981,11 +2981,11 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
- atomic_set(&mdev->unacked_cnt, 0);
- atomic_set(&mdev->local_cnt, 0);
- atomic_set(&mdev->net_cnt, 0);
-- atomic_set(&mdev->packet_seq, 0);
-+ atomic_set_unchecked(&mdev->packet_seq, 0);
- atomic_set(&mdev->pp_in_use, 0);
- atomic_set(&mdev->pp_in_use_by_net, 0);
-- atomic_set(&mdev->rs_sect_in, 0);
-- atomic_set(&mdev->rs_sect_ev, 0);
-+ atomic_set_unchecked(&mdev->rs_sect_in, 0);
-+ atomic_set_unchecked(&mdev->rs_sect_ev, 0);
- atomic_set(&mdev->ap_in_flight, 0);
-
- mutex_init(&mdev->md_io_mutex);
-@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
- mdev->receiver.t_state);
-
- /* no need to lock it, I'm the only thread alive */
-- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
-- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
-+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
-+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
- mdev->al_writ_cnt =
- mdev->bm_writ_cnt =
- mdev->read_cnt =
-diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
-index af2a250..0fdeb75 100644
---- a/drivers/block/drbd/drbd_nl.c
-+++ b/drivers/block/drbd/drbd_nl.c
-@@ -2297,7 +2297,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
- return;
- }
-
-- if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) {
-+ if (!capable(CAP_SYS_ADMIN)) {
- retcode = ERR_PERM;
- goto fail;
- }
-@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
- module_put(THIS_MODULE);
- }
-
--static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
-+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
-
- static unsigned short *
- __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
-@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
- cn_reply->id.idx = CN_IDX_DRBD;
- cn_reply->id.val = CN_VAL_DRBD;
-
-- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
-+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
- cn_reply->ack = 0; /* not used here. */
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
- (int)((char *)tl - (char *)reply->tag_list);
-@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
- cn_reply->id.idx = CN_IDX_DRBD;
- cn_reply->id.val = CN_VAL_DRBD;
-
-- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
-+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
- cn_reply->ack = 0; /* not used here. */
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
- (int)((char *)tl - (char *)reply->tag_list);
-@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
- cn_reply->id.idx = CN_IDX_DRBD;
- cn_reply->id.val = CN_VAL_DRBD;
-
-- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
-+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
- cn_reply->ack = 0; // not used here.
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
- (int)((char*)tl - (char*)reply->tag_list);
-@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
- cn_reply->id.idx = CN_IDX_DRBD;
- cn_reply->id.val = CN_VAL_DRBD;
-
-- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
-+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
- cn_reply->ack = 0; /* not used here. */
- cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
- (int)((char *)tl - (char *)reply->tag_list);
-diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
-index 13cbdd3..9c663ab 100644
---- a/drivers/block/drbd/drbd_receiver.c
-+++ b/drivers/block/drbd/drbd_receiver.c
-@@ -894,7 +894,7 @@ retry:
- sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
- sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
-
-- atomic_set(&mdev->packet_seq, 0);
-+ atomic_set_unchecked(&mdev->packet_seq, 0);
- mdev->peer_seq = 0;
-
- drbd_thread_start(&mdev->asender);
-@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
- do {
- next_epoch = NULL;
-
-- epoch_size = atomic_read(&epoch->epoch_size);
-+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
-
- switch (ev & ~EV_CLEANUP) {
- case EV_PUT:
-@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
- rv = FE_DESTROYED;
- } else {
- epoch->flags = 0;
-- atomic_set(&epoch->epoch_size, 0);
-+ atomic_set_unchecked(&epoch->epoch_size, 0);
- /* atomic_set(&epoch->active, 0); is already zero */
- if (rv == FE_STILL_LIVE)
- rv = FE_RECYCLED;
-@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
- drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
- drbd_flush(mdev);
-
-- if (atomic_read(&mdev->current_epoch->epoch_size)) {
-+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
- epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
- if (epoch)
- break;
- }
-
- epoch = mdev->current_epoch;
-- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
-+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
-
- D_ASSERT(atomic_read(&epoch->active) == 0);
- D_ASSERT(epoch->flags == 0);
-@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
- }
-
- epoch->flags = 0;
-- atomic_set(&epoch->epoch_size, 0);
-+ atomic_set_unchecked(&epoch->epoch_size, 0);
- atomic_set(&epoch->active, 0);
-
- spin_lock(&mdev->epoch_lock);
-- if (atomic_read(&mdev->current_epoch->epoch_size)) {
-+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
- list_add(&epoch->list, &mdev->current_epoch->list);
- mdev->current_epoch = epoch;
- mdev->epochs++;
-@@ -1449,7 +1449,7 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
- list_add(&e->w.list, &mdev->sync_ee);
- spin_unlock_irq(&mdev->req_lock);
-
-- atomic_add(data_size >> 9, &mdev->rs_sect_ev);
-+ atomic_add_unchecked(data_size >> 9, &mdev->rs_sect_ev);
- if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
- return true;
-
-@@ -1519,7 +1519,7 @@ static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, un
- drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
- }
-
-- atomic_add(data_size >> 9, &mdev->rs_sect_in);
-+ atomic_add_unchecked(data_size >> 9, &mdev->rs_sect_in);
-
- return ok;
- }
-@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
- spin_unlock(&mdev->peer_seq_lock);
-
- drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
-- atomic_inc(&mdev->current_epoch->epoch_size);
-+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
- return drbd_drain_block(mdev, data_size);
- }
-
-@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
-
- spin_lock(&mdev->epoch_lock);
- e->epoch = mdev->current_epoch;
-- atomic_inc(&e->epoch->epoch_size);
-+ atomic_inc_unchecked(&e->epoch->epoch_size);
- atomic_inc(&e->epoch->active);
- spin_unlock(&mdev->epoch_lock);
-
-@@ -1906,7 +1906,7 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
-
- curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
- (int)part_stat_read(&disk->part0, sectors[1]) -
-- atomic_read(&mdev->rs_sect_ev);
-+ atomic_read_unchecked(&mdev->rs_sect_ev);
-
- if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
- unsigned long rs_left;
-@@ -2034,7 +2034,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
- mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
- } else if (cmd == P_OV_REPLY) {
- /* track progress, we may need to throttle */
-- atomic_add(size >> 9, &mdev->rs_sect_in);
-+ atomic_add_unchecked(size >> 9, &mdev->rs_sect_in);
- e->w.cb = w_e_end_ov_reply;
- dec_rs_pending(mdev);
- /* drbd_rs_begin_io done when we sent this request,
-@@ -2098,7 +2098,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
- goto out_free_e;
-
- submit_for_resync:
-- atomic_add(size >> 9, &mdev->rs_sect_ev);
-+ atomic_add_unchecked(size >> 9, &mdev->rs_sect_ev);
-
- submit:
- inc_unacked(mdev);
-@@ -3637,7 +3637,7 @@ struct data_cmd {
- int expect_payload;
- size_t pkt_size;
- drbd_cmd_handler_f function;
--};
-+} __do_const;
-
- static struct data_cmd drbd_cmd_handler[] = {
- [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
-@@ -3884,7 +3884,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
- D_ASSERT(list_empty(&mdev->done_ee));
-
- /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
-- atomic_set(&mdev->current_epoch->epoch_size, 0);
-+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
- D_ASSERT(list_empty(&mdev->current_epoch->list));
- }
-
-@@ -4240,7 +4240,7 @@ static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
- put_ldev(mdev);
- }
- dec_rs_pending(mdev);
-- atomic_add(blksize >> 9, &mdev->rs_sect_in);
-+ atomic_add_unchecked(blksize >> 9, &mdev->rs_sect_in);
-
- return true;
- }
-@@ -4492,7 +4492,7 @@ static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
- struct asender_cmd {
- size_t pkt_size;
- int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
--};
-+} __do_const;
-
- static struct asender_cmd *get_asender_cmd(int cmd)
- {
-diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
-index 4d3e6f6..5555fc4 100644
---- a/drivers/block/drbd/drbd_worker.c
-+++ b/drivers/block/drbd/drbd_worker.c
-@@ -368,7 +368,7 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
- list_add(&e->w.list, &mdev->read_ee);
- spin_unlock_irq(&mdev->req_lock);
-
-- atomic_add(size >> 9, &mdev->rs_sect_ev);
-+ atomic_add_unchecked(size >> 9, &mdev->rs_sect_ev);
- if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
- return 0;
-
-@@ -448,7 +448,7 @@ static int drbd_rs_controller(struct drbd_conf *mdev)
- int curr_corr;
- int max_sect;
-
-- sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
-+ sect_in = atomic_xchg_unchecked(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
- mdev->rs_in_flight -= sect_in;
-
- spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */
-@@ -1455,8 +1455,8 @@ int drbd_alter_sa(struct drbd_conf *mdev, int na)
-
- void drbd_rs_controller_reset(struct drbd_conf *mdev)
- {
-- atomic_set(&mdev->rs_sect_in, 0);
-- atomic_set(&mdev->rs_sect_ev, 0);
-+ atomic_set_unchecked(&mdev->rs_sect_in, 0);
-+ atomic_set_unchecked(&mdev->rs_sect_ev, 0);
- mdev->rs_in_flight = 0;
- mdev->rs_planed = 0;
- spin_lock(&mdev->peer_seq_lock);
-diff --git a/drivers/block/loop.c b/drivers/block/loop.c
-index d659135..45fe633 100644
---- a/drivers/block/loop.c
-+++ b/drivers/block/loop.c
-@@ -227,7 +227,7 @@ static int __do_lo_send_write(struct file *file,
- mm_segment_t old_fs = get_fs();
-
- set_fs(get_ds());
-- bw = file->f_op->write(file, buf, len, &pos);
-+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
- set_fs(old_fs);
- if (likely(bw == len))
- return 0;
-diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
-index a63b0a2..30228d1 100644
---- a/drivers/block/pktcdvd.c
-+++ b/drivers/block/pktcdvd.c
-@@ -83,7 +83,7 @@
-
- #define MAX_SPEED 0xffff
-
--#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))
-+#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1UL))
-
- static DEFINE_MUTEX(pktcdvd_mutex);
- static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
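The pktcdvd change above widens the ZONE() mask from ~(size - 1) to ~(size - 1UL). Assuming settings.size is a 32-bit field while sector numbers are 64-bit (the usual configuration), the narrow mask is computed in 32-bit arithmetic and then zero-extended, so ANDing it against a large sector silently clears the upper 32 bits; promoting the subtraction to unsigned long keeps the high bits of the mask set. A small stand-alone illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t sector = 0x100001234ULL;   /* a sector offset that needs more than 32 bits */
        uint32_t size   = 32;               /* packet size in sectors, a 32-bit field */

        /* Mask computed in 32-bit arithmetic, then zero-extended: the high
         * half of the sector number is silently cleared by the AND. */
        uint64_t narrow = sector & ~(uint32_t)(size - 1);

        /* Mask computed in 64-bit arithmetic (the "- 1UL" variant on LP64):
         * the high half survives the rounding. */
        uint64_t wide = sector & ~(uint64_t)(size - 1);

        printf("narrow mask: %#llx\n", (unsigned long long)narrow); /* 0x1220 */
        printf("wide mask:   %#llx\n", (unsigned long long)wide);   /* 0x100001220 */
        return 0;
}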
-diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
-index e5565fb..71be10b4 100644
---- a/drivers/block/smart1,2.h
-+++ b/drivers/block/smart1,2.h
-@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
- }
-
- static struct access_method smart4_access = {
-- smart4_submit_command,
-- smart4_intr_mask,
-- smart4_fifo_full,
-- smart4_intr_pending,
-- smart4_completed,
-+ .submit_command = smart4_submit_command,
-+ .set_intr_mask = smart4_intr_mask,
-+ .fifo_full = smart4_fifo_full,
-+ .intr_pending = smart4_intr_pending,
-+ .command_completed = smart4_completed,
- };
-
- /*
-@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
- }
-
- static struct access_method smart2_access = {
-- smart2_submit_command,
-- smart2_intr_mask,
-- smart2_fifo_full,
-- smart2_intr_pending,
-- smart2_completed,
-+ .submit_command = smart2_submit_command,
-+ .set_intr_mask = smart2_intr_mask,
-+ .fifo_full = smart2_fifo_full,
-+ .intr_pending = smart2_intr_pending,
-+ .command_completed = smart2_completed,
- };
-
- /*
-@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
- }
-
- static struct access_method smart2e_access = {
-- smart2e_submit_command,
-- smart2e_intr_mask,
-- smart2e_fifo_full,
-- smart2e_intr_pending,
-- smart2e_completed,
-+ .submit_command = smart2e_submit_command,
-+ .set_intr_mask = smart2e_intr_mask,
-+ .fifo_full = smart2e_fifo_full,
-+ .intr_pending = smart2e_intr_pending,
-+ .command_completed = smart2e_completed,
- };
-
- /*
-@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
- }
-
- static struct access_method smart1_access = {
-- smart1_submit_command,
-- smart1_intr_mask,
-- smart1_fifo_full,
-- smart1_intr_pending,
-- smart1_completed,
-+ .submit_command = smart1_submit_command,
-+ .set_intr_mask = smart1_intr_mask,
-+ .fifo_full = smart1_fifo_full,
-+ .intr_pending = smart1_intr_pending,
-+ .command_completed = smart1_completed,
- };
-diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
-index b5f83b4..2f49d18 100644
---- a/drivers/bluetooth/btwilink.c
-+++ b/drivers/bluetooth/btwilink.c
-@@ -301,7 +301,7 @@ static void ti_st_destruct(struct hci_dev *hdev)
-
- static int bt_ti_probe(struct platform_device *pdev)
- {
-- static struct ti_st *hst;
-+ struct ti_st *hst;
- struct hci_dev *hdev;
- int err;
-
-diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
-index 1331740..a691234 100644
---- a/drivers/cdrom/cdrom.c
-+++ b/drivers/cdrom/cdrom.c
-@@ -419,7 +419,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
- ENSURE(reset, CDC_RESET);
- ENSURE(generic_packet, CDC_GENERIC_PACKET);
- cdi->mc_flags = 0;
-- cdo->n_minors = 0;
- cdi->options = CDO_USE_FFLAGS;
-
- if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
-@@ -439,8 +438,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
- else
- cdi->cdda_method = CDDA_OLD;
-
-- if (!cdo->generic_packet)
-- cdo->generic_packet = cdrom_dummy_generic_packet;
-+ if (!cdo->generic_packet) {
-+ pax_open_kernel();
-+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
-+ pax_close_kernel();
-+ }
-
- cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
- mutex_lock(&cdrom_mutex);
-@@ -461,7 +463,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
- if (cdi->exit)
- cdi->exit(cdi);
-
-- cdi->ops->n_minors--;
- cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
- }
-
-@@ -2110,7 +2111,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
- */
- nr = nframes;
- do {
-- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
-+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
- if (cgc.buffer)
- break;
-
-@@ -3432,7 +3433,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
- struct cdrom_device_info *cdi;
- int ret;
-
-- ret = scnprintf(info + *pos, max_size - *pos, header);
-+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
- if (!ret)
- return 1;
-
-diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
-index 3ceaf00..e3c3d38 100644
---- a/drivers/cdrom/gdrom.c
-+++ b/drivers/cdrom/gdrom.c
-@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
- .audio_ioctl = gdrom_audio_ioctl,
- .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
- CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
-- .n_minors = 1,
- };
-
- static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
-diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
-index 4364303..9adf4ee 100644
---- a/drivers/char/Kconfig
-+++ b/drivers/char/Kconfig
-@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
-
- config DEVKMEM
- bool "/dev/kmem virtual device support"
-- default y
-+ default n
-+ depends on !GRKERNSEC_KMEM
- help
- Say Y here if you want to support the /dev/kmem device. The
- /dev/kmem device is rarely used, but can be used for certain
-@@ -596,6 +597,7 @@ config DEVPORT
- bool
- depends on !M68K
- depends on ISA || PCI
-+ depends on !GRKERNSEC_KMEM
- default y
-
- source "drivers/s390/char/Kconfig"
-diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
-index a48e05b..6bac831 100644
---- a/drivers/char/agp/compat_ioctl.c
-+++ b/drivers/char/agp/compat_ioctl.c
-@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
- return -ENOMEM;
- }
-
-- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
-+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
- sizeof(*usegment) * ureserve.seg_count)) {
- kfree(usegment);
- kfree(ksegment);
-diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
-index 2e04433..3b8afe7 100644
---- a/drivers/char/agp/frontend.c
-+++ b/drivers/char/agp/frontend.c
-@@ -729,6 +729,7 @@ static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
-
- agp_copy_info(agp_bridge, &kerninfo);
-
-+ memset(&userinfo, 0, sizeof(userinfo));
- userinfo.version.major = kerninfo.version.major;
- userinfo.version.minor = kerninfo.version.minor;
- userinfo.bridge_id = kerninfo.device->vendor |
-@@ -817,7 +818,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
- if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
- return -EFAULT;
-
-- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
-+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
- return -EFAULT;
-
- client = agp_find_client_by_pid(reserve.pid);
-@@ -847,7 +848,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
- if (segment == NULL)
- return -ENOMEM;
-
-- if (copy_from_user(segment, (void __user *) reserve.seg_list,
-+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
- sizeof(struct agp_segment) * reserve.seg_count)) {
- kfree(segment);
- return -EFAULT;
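agpioc_info_wrap() above gains a memset() of the local userinfo structure before its fields are filled in and copied out; the genrtc and mwave hunks below do the same for their structures. Without the clear, padding bytes and any member the function never assigns would carry stale kernel stack contents through copy_to_user(), i.e. an information leak. A condensed sketch of the pattern, with made-up structure and helper names:

#include <string.h>

struct info {
        unsigned int version;
        unsigned short id;       /* 2 bytes of padding typically follow here */
        unsigned int flags;
};

/* Stand-in for copy_to_user(): the kernel returns the number of bytes
 * that could not be copied. */
static unsigned long copy_to_user_stub(void *to, const void *from, unsigned long n)
{
        memcpy(to, from, n);
        return 0;
}

static int fill_info(void *user_buf)
{
        struct info info;

        memset(&info, 0, sizeof(info));  /* clear padding and unset fields */
        info.version = 1;
        info.id = 42;
        /* info.flags deliberately left at 0 instead of stack garbage */

        return copy_to_user_stub(user_buf, &info, sizeof(info)) ? -1 : 0;
}

int main(void)
{
        struct info out;
        return fill_info(&out);
}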
-diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
-index 095ab90..afad0a4 100644
---- a/drivers/char/briq_panel.c
-+++ b/drivers/char/briq_panel.c
-@@ -9,6 +9,7 @@
- #include <linux/types.h>
- #include <linux/errno.h>
- #include <linux/tty.h>
-+#include <linux/mutex.h>
- #include <linux/timer.h>
- #include <linux/kernel.h>
- #include <linux/wait.h>
-@@ -34,6 +35,7 @@ static int vfd_is_open;
- static unsigned char vfd[40];
- static int vfd_cursor;
- static unsigned char ledpb, led;
-+static DEFINE_MUTEX(vfd_mutex);
-
- static void update_vfd(void)
- {
-@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
- if (!vfd_is_open)
- return -EBUSY;
-
-+ mutex_lock(&vfd_mutex);
- for (;;) {
- char c;
- if (!indx)
- break;
-- if (get_user(c, buf))
-+ if (get_user(c, buf)) {
-+ mutex_unlock(&vfd_mutex);
- return -EFAULT;
-+ }
- if (esc) {
- set_led(c);
- esc = 0;
-@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
- buf++;
- }
- update_vfd();
-+ mutex_unlock(&vfd_mutex);
-
- return len;
- }
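The briq_panel hunk serializes briq_panel_write() with a new vfd_mutex; note that the early error return inside the critical section (the get_user() failure) now has to drop the lock first, or the next writer would block forever. The same shape in a small stand-alone form, using a pthread mutex in place of the kernel one and a sentinel byte in place of the get_user() failure:

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t vfd_lock = PTHREAD_MUTEX_INITIALIZER;
static char vfd[40];
static int cursor;

/* Every exit from the critical section, including the error path, must
 * release the lock, mirroring the mutex_unlock() added before the
 * -EFAULT return in the hunk above. */
static int vfd_write(const char *buf, int len)
{
        pthread_mutex_lock(&vfd_lock);
        for (int i = 0; i < len; i++) {
                if (buf[i] == '\0') {             /* stand-in for get_user() failing */
                        pthread_mutex_unlock(&vfd_lock);
                        return -EFAULT;
                }
                vfd[cursor] = buf[i];
                cursor = (cursor + 1) % (int)sizeof(vfd);
        }
        pthread_mutex_unlock(&vfd_lock);
        return len;
}

int main(void)
{
        return vfd_write("hello", 5) == 5 ? 0 : 1;
}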
-diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
-index f773a9d..65cd683 100644
---- a/drivers/char/genrtc.c
-+++ b/drivers/char/genrtc.c
-@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
- switch (cmd) {
-
- case RTC_PLL_GET:
-+ memset(&pll, 0, sizeof(pll));
- if (get_rtc_pll(&pll))
- return -EINVAL;
- else
-diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
-index 14d49e4..d331fd8 100644
---- a/drivers/char/hpet.c
-+++ b/drivers/char/hpet.c
-@@ -560,7 +560,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
- }
-
- static int
--hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
-+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
- struct hpet_info *info)
- {
- struct hpet_timer __iomem *timer;
-diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
-index 86fe45c..c0ea948 100644
---- a/drivers/char/hw_random/intel-rng.c
-+++ b/drivers/char/hw_random/intel-rng.c
-@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
-
- if (no_fwh_detect)
- return -ENODEV;
-- printk(warning);
-+ printk("%s", warning);
- return -EBUSY;
- }
-
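Both the intel-rng hunk above and the cdrom_print_info() hunk earlier stop passing a variable string as the printk/scnprintf format and route it through an explicit "%s" instead. If the string ever contains conversion specifiers, the printf machinery interprets them and reads (or, with %n, writes) through arguments that were never supplied; "%s" prints the buffer as plain data. A two-line illustration:

#include <stdio.h>

int main(void)
{
        const char *msg = "rate is 100%s of nominal";   /* text not under the caller's control */

        /* printf(msg);          unsafe: the embedded "%s" is interpreted and
         *                       a nonexistent pointer argument is read       */
        printf("%s\n", msg);  /* safe: the string is printed verbatim */
        return 0;
}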
-diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
-index 51efcbc..980f934 100644
---- a/drivers/char/i8k.c
-+++ b/drivers/char/i8k.c
-@@ -607,7 +607,7 @@ static void __exit i8k_exit_hwmon(void)
- hwmon_device_unregister(i8k_hwmon_dev);
- }
-
--static struct dmi_system_id __initdata i8k_dmi_table[] = {
-+static const struct dmi_system_id __initconst i8k_dmi_table[] = {
- {
- .ident = "Dell Inspiron",
- .matches = {
-diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
-index 58c0e63..46c16bf 100644
---- a/drivers/char/ipmi/ipmi_msghandler.c
-+++ b/drivers/char/ipmi/ipmi_msghandler.c
-@@ -415,7 +415,7 @@ struct ipmi_smi {
- struct proc_dir_entry *proc_dir;
- char proc_dir_name[10];
-
-- atomic_t stats[IPMI_NUM_STATS];
-+ atomic_unchecked_t stats[IPMI_NUM_STATS];
-
- /*
- * run_to_completion duplicate of smb_info, smi_info
-@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
-
-
- #define ipmi_inc_stat(intf, stat) \
-- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
-+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
- #define ipmi_get_stat(intf, stat) \
-- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
-+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
-
- static int is_lan_addr(struct ipmi_addr *addr)
- {
-@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
- INIT_LIST_HEAD(&intf->cmd_rcvrs);
- init_waitqueue_head(&intf->waitq);
- for (i = 0; i < IPMI_NUM_STATS; i++)
-- atomic_set(&intf->stats[i], 0);
-+ atomic_set_unchecked(&intf->stats[i], 0);
-
- intf->proc_dir = NULL;
-
-diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
-index 9397ab4..d01bee1 100644
---- a/drivers/char/ipmi/ipmi_si_intf.c
-+++ b/drivers/char/ipmi/ipmi_si_intf.c
-@@ -277,7 +277,7 @@ struct smi_info {
- unsigned char slave_addr;
-
- /* Counters and things for the proc filesystem. */
-- atomic_t stats[SI_NUM_STATS];
-+ atomic_unchecked_t stats[SI_NUM_STATS];
-
- struct task_struct *thread;
-
-@@ -286,9 +286,9 @@ struct smi_info {
- };
-
- #define smi_inc_stat(smi, stat) \
-- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
-+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
- #define smi_get_stat(smi, stat) \
-- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
-+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
-
- #define SI_MAX_PARMS 4
-
-@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
- atomic_set(&new_smi->req_events, 0);
- new_smi->run_to_completion = 0;
- for (i = 0; i < SI_NUM_STATS; i++)
-- atomic_set(&new_smi->stats[i], 0);
-+ atomic_set_unchecked(&new_smi->stats[i], 0);
-
- new_smi->interrupt_disabled = 1;
- atomic_set(&new_smi->stop_operation, 0);
-diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
-index 1aeaaba..e018570 100644
---- a/drivers/char/mbcs.c
-+++ b/drivers/char/mbcs.c
-@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
- return 0;
- }
-
--static const struct cx_device_id __devinitdata mbcs_id_table[] = {
-+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
- {
- .part_num = MBCS_PART_NUM,
- .mfg_num = MBCS_MFG_NUM,
-diff --git a/drivers/char/mem.c b/drivers/char/mem.c
-index 1451790..a57c233 100644
---- a/drivers/char/mem.c
-+++ b/drivers/char/mem.c
-@@ -18,6 +18,7 @@
- #include <linux/raw.h>
- #include <linux/tty.h>
- #include <linux/capability.h>
-+#include <linux/security.h>
- #include <linux/ptrace.h>
- #include <linux/device.h>
- #include <linux/highmem.h>
-@@ -35,6 +36,10 @@
- # include <linux/efi.h>
- #endif
-
-+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
-+extern const struct file_operations grsec_fops;
-+#endif
-+
- static inline unsigned long size_inside_page(unsigned long start,
- unsigned long size)
- {
-@@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
-
- while (cursor < to) {
- if (!devmem_is_allowed(pfn)) {
-+#ifdef CONFIG_GRKERNSEC_KMEM
-+ gr_handle_mem_readwrite(from, to);
-+#else
- printk(KERN_INFO
- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
- current->comm, from, to);
-+#endif
- return 0;
- }
- cursor += PAGE_SIZE;
-@@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
- }
- return 1;
- }
-+#elif defined(CONFIG_GRKERNSEC_KMEM)
-+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
-+{
-+ return 0;
-+}
- #else
- static inline int range_is_allowed(unsigned long pfn, unsigned long size)
- {
-@@ -117,7 +131,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
- #endif
-
- while (count > 0) {
-- unsigned long remaining;
-+ unsigned long remaining = 0;
-+ char *temp;
-
- sz = size_inside_page(p, count);
-
-@@ -133,7 +148,24 @@ static ssize_t read_mem(struct file *file, char __user *buf,
- if (!ptr)
- return -EFAULT;
-
-- remaining = copy_to_user(buf, ptr, sz);
-+#ifdef CONFIG_PAX_USERCOPY
-+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
-+ if (!temp) {
-+ unxlate_dev_mem_ptr(p, ptr);
-+ return -ENOMEM;
-+ }
-+ remaining = probe_kernel_read(temp, ptr, sz);
-+#else
-+ temp = ptr;
-+#endif
-+
-+ if (!remaining)
-+ remaining = copy_to_user(buf, temp, sz);
-+
-+#ifdef CONFIG_PAX_USERCOPY
-+ kfree(temp);
-+#endif
-+
- unxlate_dev_mem_ptr(p, ptr);
- if (remaining)
- return -EFAULT;
-@@ -376,7 +408,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf,
- else
- csize = count;
-
-- rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
-+ rc = copy_oldmem_page(pfn, (char __force_kernel *)buf, csize, offset, 1);
- if (rc < 0)
- return rc;
- buf += csize;
-@@ -396,9 +428,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
- {
- unsigned long p = *ppos;
-- ssize_t low_count, read, sz;
-+ ssize_t low_count, read, sz, err = 0;
- char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
-- int err = 0;
-
- read = 0;
- if (p < (unsigned long) high_memory) {
-@@ -420,6 +451,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
- }
- #endif
- while (low_count > 0) {
-+ char *temp;
-+
- sz = size_inside_page(p, low_count);
-
- /*
-@@ -429,7 +462,23 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
- */
- kbuf = xlate_dev_kmem_ptr((char *)p);
-
-- if (copy_to_user(buf, kbuf, sz))
-+#ifdef CONFIG_PAX_USERCOPY
-+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
-+ if (!temp)
-+ return -ENOMEM;
-+ err = probe_kernel_read(temp, kbuf, sz);
-+#else
-+ temp = kbuf;
-+#endif
-+
-+ if (!err)
-+ err = copy_to_user(buf, temp, sz);
-+
-+#ifdef CONFIG_PAX_USERCOPY
-+ kfree(temp);
-+#endif
-+
-+ if (err)
- return -EFAULT;
- buf += sz;
- p += sz;
-@@ -815,6 +864,11 @@ static ssize_t kmsg_writev(struct kiocb *iocb, const struct iovec *iv,
- ssize_t ret = -EFAULT;
- size_t len = iov_length(iv, count);
-
-+#ifdef CONFIG_GRKERNSEC_DMESG
-+ if (!capable(CAP_SYSLOG))
-+ return -EPERM;
-+#endif
-+
- line = kmalloc(len + 1, GFP_KERNEL);
- if (line == NULL)
- return -ENOMEM;
-@@ -867,6 +921,9 @@ static const struct memdev {
- #ifdef CONFIG_CRASH_DUMP
- [12] = { "oldmem", 0, &oldmem_fops, NULL },
- #endif
-+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
-+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
-+#endif
- };
-
- static int memory_open(struct inode *inode, struct file *filp)
-@@ -931,7 +988,7 @@ static int __init chr_dev_init(void)
- if (!devlist[minor].name)
- continue;
- device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
-- NULL, devlist[minor].name);
-+ NULL, "%s", devlist[minor].name);
- }
-
- return tty_init();
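The read_mem()/read_kmem() hunks above introduce a bounce buffer under CONFIG_PAX_USERCOPY: the requested kernel bytes are first pulled into a freshly allocated temporary with probe_kernel_read(), and only that temporary is handed to copy_to_user(), so the usercopy size checks see a heap object of known length and a fault on an unmapped kernel address becomes a clean error instead of an oops. A stand-alone sketch of the control flow, with stub helpers standing in for the kernel primitives:

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>

/* Stand-ins for the kernel calls used in the hunk: a "safe" read that may
 * fail, and a user copy that reports the number of bytes not copied. */
static int safe_read(void *dst, const void *src, size_t n)
{
        memcpy(dst, src, n);
        return 0;                       /* probe_kernel_read() returns -EFAULT on fault */
}

static size_t copy_out(void *to, const void *from, size_t n)
{
        memcpy(to, from, n);
        return 0;                       /* copy_to_user() returns bytes NOT copied */
}

/* Bounce-buffer read: never hand the source region to the user copy directly. */
static ssize_t bounced_read(void *user_buf, const void *src, size_t n)
{
        void *temp = malloc(n);
        ssize_t ret = (ssize_t)n;

        if (!temp)
                return -ENOMEM;
        if (safe_read(temp, src, n) || copy_out(user_buf, temp, n))
                ret = -EFAULT;          /* either stage failing aborts the transfer */
        free(temp);                     /* freed on success and on both error paths */
        return ret;
}

int main(void)
{
        char src[16] = "sixteen bytes!!";
        char dst[16];

        return bounced_read(dst, src, sizeof(src)) == (ssize_t)sizeof(src) ? 0 : 1;
}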
-diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
-index c689697..04e6d6a2 100644
---- a/drivers/char/mwave/tp3780i.c
-+++ b/drivers/char/mwave/tp3780i.c
-@@ -479,6 +479,7 @@ int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData);
-
-+ memset(pAbilities, 0, sizeof(*pAbilities));
- /* fill out standard constant fields */
- pAbilities->instr_per_sec = pBDData->rDspSettings.uIps;
- pAbilities->data_size = pBDData->rDspSettings.uDStoreSize;
-diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
-index da3cfee..a5a6606 100644
---- a/drivers/char/nvram.c
-+++ b/drivers/char/nvram.c
-@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
-
- spin_unlock_irq(&rtc_lock);
-
-- if (copy_to_user(buf, contents, tmp - contents))
-+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
- return -EFAULT;
-
- *ppos = i;
-diff --git a/drivers/char/random.c b/drivers/char/random.c
-index edf45ae..2b94e16 100644
---- a/drivers/char/random.c
-+++ b/drivers/char/random.c
-@@ -255,10 +255,8 @@
- #include <linux/fips.h>
- #include <linux/ptrace.h>
- #include <linux/kmemcheck.h>
--
--#ifdef CONFIG_GENERIC_HARDIRQS
--# include <linux/irq.h>
--#endif
-+#include <linux/workqueue.h>
-+#include <linux/irq.h>
-
- #include <asm/processor.h>
- #include <asm/uaccess.h>
-@@ -266,129 +264,135 @@
- #include <asm/irq_regs.h>
- #include <asm/io.h>
-
-+#define CREATE_TRACE_POINTS
-+#include <trace/events/random.h>
-+
- /*
- * Configuration information
- */
--#define INPUT_POOL_WORDS 128
--#define OUTPUT_POOL_WORDS 32
--#define SEC_XFER_SIZE 512
--#define EXTRACT_SIZE 10
-+#define INPUT_POOL_SHIFT 12
-+#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
-+#define OUTPUT_POOL_SHIFT 10
-+#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
-+#define SEC_XFER_SIZE 512
-+#define EXTRACT_SIZE 10
-+
-+#define DEBUG_RANDOM_BOOT 0
-
- #define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
-
- /*
-+ * To allow fractional bits to be tracked, the entropy_count field is
-+ * denominated in units of 1/8th bits.
-+ */
-+#define ENTROPY_SHIFT 3
-+#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
-+
-+/*
- * The minimum number of bits of entropy before we wake up a read on
- * /dev/random. Should be enough to do a significant reseed.
- */
--static int random_read_wakeup_thresh = 64;
-+static int random_read_wakeup_bits = 64;
-
- /*
- * If the entropy count falls under this number of bits, then we
- * should wake up processes which are selecting or polling on write
- * access to /dev/random.
- */
--static int random_write_wakeup_thresh = 128;
-+static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
-
- /*
-- * When the input pool goes over trickle_thresh, start dropping most
-- * samples to avoid wasting CPU time and reduce lock contention.
-+ * The minimum number of seconds between urandom pool reseeding. We
-+ * do this to limit the amount of entropy that can be drained from the
-+ * input pool even if there are heavy demands on /dev/urandom.
- */
--
--static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28;
--
--static DEFINE_PER_CPU(int, trickle_count);
-+static int random_min_urandom_seed = 60;
-
- /*
-- * A pool of size .poolwords is stirred with a primitive polynomial
-- * of degree .poolwords over GF(2). The taps for various sizes are
-- * defined below. They are chosen to be evenly spaced (minimum RMS
-- * distance from evenly spaced; the numbers in the comments are a
-- * scaled squared error sum) except for the last tap, which is 1 to
-- * get the twisting happening as fast as possible.
-+ * Originally, we used a primitive polynomial of degree .poolwords
-+ * over GF(2). The taps for various sizes are defined below. They
-+ * were chosen to be evenly spaced except for the last tap, which is 1
-+ * to get the twisting happening as fast as possible.
-+ *
-+ * For the purposes of better mixing, we use the CRC-32 polynomial as
-+ * well to make a (modified) twisted Generalized Feedback Shift
-+ * Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR
-+ * generators. ACM Transactions on Modeling and Computer Simulation
-+ * 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted
-+ * GFSR generators II. ACM Transactions on Modeling and Computer
-+ * Simulation 4:254-266)
-+ *
-+ * Thanks to Colin Plumb for suggesting this.
-+ *
-+ * The mixing operation is much less sensitive than the output hash,
-+ * where we use SHA-1. All that we want of mixing operation is that
-+ * it be a good non-cryptographic hash; i.e. it not produce collisions
-+ * when fed "random" data of the sort we expect to see. As long as
-+ * the pool state differs for different inputs, we have preserved the
-+ * input entropy and done a good job. The fact that an intelligent
-+ * attacker can construct inputs that will produce controlled
-+ * alterations to the pool's state is not important because we don't
-+ * consider such inputs to contribute any randomness. The only
-+ * property we need with respect to them is that the attacker can't
-+ * increase his/her knowledge of the pool's state. Since all
-+ * additions are reversible (knowing the final state and the input,
-+ * you can reconstruct the initial state), if an attacker has any
-+ * uncertainty about the initial state, he/she can only shuffle that
-+ * uncertainty about, but never cause any collisions (which would
-+ * decrease the uncertainty).
-+ *
-+ * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and
-+ * Videau in their paper, "The Linux Pseudorandom Number Generator
-+ * Revisited" (see: http://eprint.iacr.org/2012/251.pdf). In their
-+ * paper, they point out that we are not using a true Twisted GFSR,
-+ * since Matsumoto & Kurita used a trinomial feedback polynomial (that
-+ * is, with only three taps, instead of the six that we are using).
-+ * As a result, the resulting polynomial is neither primitive nor
-+ * irreducible, and hence does not have a maximal period over
-+ * GF(2**32). They suggest a slight change to the generator
-+ * polynomial which improves the resulting TGFSR polynomial to be
-+ * irreducible, which we have made here.
- */
- static struct poolinfo {
-- int poolwords;
-+ int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
-+#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
- int tap1, tap2, tap3, tap4, tap5;
- } poolinfo_table[] = {
-- /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
-- { 128, 103, 76, 51, 25, 1 },
-- /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
-- { 32, 26, 20, 14, 7, 1 },
-+ /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
-+ /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
-+ { S(128), 104, 76, 51, 25, 1 },
-+ /* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
-+ /* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
-+ { S(32), 26, 19, 14, 7, 1 },
- #if 0
- /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
-- { 2048, 1638, 1231, 819, 411, 1 },
-+ { S(2048), 1638, 1231, 819, 411, 1 },
-
- /* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
-- { 1024, 817, 615, 412, 204, 1 },
-+ { S(1024), 817, 615, 412, 204, 1 },
-
- /* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
-- { 1024, 819, 616, 410, 207, 2 },
-+ { S(1024), 819, 616, 410, 207, 2 },
-
- /* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
-- { 512, 411, 308, 208, 104, 1 },
-+ { S(512), 411, 308, 208, 104, 1 },
-
- /* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
-- { 512, 409, 307, 206, 102, 2 },
-+ { S(512), 409, 307, 206, 102, 2 },
- /* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
-- { 512, 409, 309, 205, 103, 2 },
-+ { S(512), 409, 309, 205, 103, 2 },
-
- /* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
-- { 256, 205, 155, 101, 52, 1 },
-+ { S(256), 205, 155, 101, 52, 1 },
-
- /* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
-- { 128, 103, 78, 51, 27, 2 },
-+ { S(128), 103, 78, 51, 27, 2 },
-
- /* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
-- { 64, 52, 39, 26, 14, 1 },
-+ { S(64), 52, 39, 26, 14, 1 },
- #endif
- };
-
--#define POOLBITS poolwords*32
--#define POOLBYTES poolwords*4
--
--/*
-- * For the purposes of better mixing, we use the CRC-32 polynomial as
-- * well to make a twisted Generalized Feedback Shift Reigster
-- *
-- * (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR generators. ACM
-- * Transactions on Modeling and Computer Simulation 2(3):179-194.
-- * Also see M. Matsumoto & Y. Kurita, 1994. Twisted GFSR generators
-- * II. ACM Transactions on Mdeling and Computer Simulation 4:254-266)
-- *
-- * Thanks to Colin Plumb for suggesting this.
-- *
-- * We have not analyzed the resultant polynomial to prove it primitive;
-- * in fact it almost certainly isn't. Nonetheless, the irreducible factors
-- * of a random large-degree polynomial over GF(2) are more than large enough
-- * that periodicity is not a concern.
-- *
-- * The input hash is much less sensitive than the output hash. All
-- * that we want of it is that it be a good non-cryptographic hash;
-- * i.e. it not produce collisions when fed "random" data of the sort
-- * we expect to see. As long as the pool state differs for different
-- * inputs, we have preserved the input entropy and done a good job.
-- * The fact that an intelligent attacker can construct inputs that
-- * will produce controlled alterations to the pool's state is not
-- * important because we don't consider such inputs to contribute any
-- * randomness. The only property we need with respect to them is that
-- * the attacker can't increase his/her knowledge of the pool's state.
-- * Since all additions are reversible (knowing the final state and the
-- * input, you can reconstruct the initial state), if an attacker has
-- * any uncertainty about the initial state, he/she can only shuffle
-- * that uncertainty about, but never cause any collisions (which would
-- * decrease the uncertainty).
-- *
-- * The chosen system lets the state of the pool be (essentially) the input
-- * modulo the generator polymnomial. Now, for random primitive polynomials,
-- * this is a universal class of hash functions, meaning that the chance
-- * of a collision is limited by the attacker's knowledge of the generator
-- * polynomail, so if it is chosen at random, an attacker can never force
-- * a collision. Here, we use a fixed polynomial, but we *can* assume that
-- * ###--> it is unknown to the processes generating the input entropy. <-###
-- * Because of this important property, this is a good, collision-resistant
-- * hash; hash collisions will occur no more often than chance.
-- */
--
- /*
- * Static global variables
- */
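The rewritten poolinfo table above packs five derived constants into each entry via the S() macro, and entropy accounting moves to fractional units of 1/8 bit (ENTROPY_SHIFT = 3). Working S(128), the input pool's entry, through by hand under the definitions in the hunk:

        poolbitshift = ilog2(128) + 5 = 12        (log2 of the pool size in bits)
        poolwords    = 128
        poolbytes    = 128 * 4  = 512
        poolbits     = 128 * 32 = 4096
        poolfracbits = 128 << (ENTROPY_SHIFT + 5) = 128 << 8 = 32768

so poolfracbits is simply poolbits expressed in 1/8-bit units, and ENTROPY_BITS(r) = entropy_count >> ENTROPY_SHIFT converts the fractional count back to whole bits.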
-@@ -396,21 +400,6 @@ static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
- static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
- static struct fasync_struct *fasync;
-
--#if 0
--static int debug;
--module_param(debug, bool, 0644);
--#define DEBUG_ENT(fmt, arg...) do { \
-- if (debug) \
-- printk(KERN_DEBUG "random %04d %04d %04d: " \
-- fmt,\
-- input_pool.entropy_count,\
-- blocking_pool.entropy_count,\
-- nonblocking_pool.entropy_count,\
-- ## arg); } while (0)
--#else
--#define DEBUG_ENT(fmt, arg...) do {} while (0)
--#endif
--
- /**********************************************************************
- *
- * OS independent entropy store. Here are the functions which handle
-@@ -421,31 +410,35 @@ module_param(debug, bool, 0644);
- struct entropy_store;
- struct entropy_store {
- /* read-only data: */
-- struct poolinfo *poolinfo;
-+ const struct poolinfo *poolinfo;
- __u32 *pool;
- const char *name;
- struct entropy_store *pull;
-- int limit;
-+ struct work_struct push_work;
-
- /* read-write data: */
-+ unsigned long last_pulled;
- spinlock_t lock;
-- unsigned add_ptr;
-- unsigned input_rotate;
-+ unsigned short add_ptr;
-+ unsigned short input_rotate;
- int entropy_count;
- int entropy_total;
- unsigned int initialized:1;
-+ unsigned int limit:1;
-+ unsigned int last_data_init:1;
- __u8 last_data[EXTRACT_SIZE];
- };
-
--static __u32 input_pool_data[INPUT_POOL_WORDS];
--static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
--static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
-+static void push_to_pool(struct work_struct *work);
-+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
-+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
-+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
-
- static struct entropy_store input_pool = {
- .poolinfo = &poolinfo_table[0],
- .name = "input",
- .limit = 1,
-- .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock),
-+ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
- .pool = input_pool_data
- };
-
-@@ -454,16 +447,20 @@ static struct entropy_store blocking_pool = {
- .name = "blocking",
- .limit = 1,
- .pull = &input_pool,
-- .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock),
-- .pool = blocking_pool_data
-+ .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
-+ .pool = blocking_pool_data,
-+ .push_work = __WORK_INITIALIZER(blocking_pool.push_work,
-+ push_to_pool),
- };
-
- static struct entropy_store nonblocking_pool = {
- .poolinfo = &poolinfo_table[1],
- .name = "nonblocking",
- .pull = &input_pool,
-- .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock),
-- .pool = nonblocking_pool_data
-+ .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
-+ .pool = nonblocking_pool_data,
-+ .push_work = __WORK_INITIALIZER(nonblocking_pool.push_work,
-+ push_to_pool),
- };
-
- static __u32 const twist_table[8] = {
-@@ -480,8 +477,8 @@ static __u32 const twist_table[8] = {
- * it's cheap to do so and helps slightly in the expected case where
- * the entropy is concentrated in the low-order bits.
- */
--static void __mix_pool_bytes(struct entropy_store *r, const void *in,
-- int nbytes, __u8 out[64])
-+static void _mix_pool_bytes(struct entropy_store *r, const void *in,
-+ int nbytes, __u8 out[64])
- {
- unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
- int input_rotate;
-@@ -501,7 +498,7 @@ static void __mix_pool_bytes(struct entropy_store *r, const void *in,
-
- /* mix one byte at a time to simplify size handling and churn faster */
- while (nbytes--) {
-- w = rol32(*bytes++, input_rotate & 31);
-+ w = rol32(*bytes++, input_rotate);
- i = (i - 1) & wordmask;
-
- /* XOR in the various taps */
-@@ -521,11 +518,11 @@ static void __mix_pool_bytes(struct entropy_store *r, const void *in,
- * rotation, so that successive passes spread the
- * input bits across the pool evenly.
- */
-- input_rotate += i ? 7 : 14;
-+ input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
- }
-
-- ACCESS_ONCE(r->input_rotate) = input_rotate;
-- ACCESS_ONCE(r->add_ptr) = i;
-+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
-+ ACCESS_ONCE_RW(r->add_ptr) = i;
- smp_wmb();
-
- if (out)
-@@ -533,13 +530,21 @@ static void __mix_pool_bytes(struct entropy_store *r, const void *in,
- ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
- }
-
-+static void __mix_pool_bytes(struct entropy_store *r, const void *in,
-+ int nbytes, __u8 out[64])
-+{
-+ trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
-+ _mix_pool_bytes(r, in, nbytes, out);
-+}
-+
- static void mix_pool_bytes(struct entropy_store *r, const void *in,
-- int nbytes, __u8 out[64])
-+ int nbytes, __u8 out[64])
- {
- unsigned long flags;
-
-+ trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
- spin_lock_irqsave(&r->lock, flags);
-- __mix_pool_bytes(r, in, nbytes, out);
-+ _mix_pool_bytes(r, in, nbytes, out);
- spin_unlock_irqrestore(&r->lock, flags);
- }
-
-@@ -556,58 +561,151 @@ struct fast_pool {
- * collector. It's hardcoded for an 128 bit pool and assumes that any
- * locks that might be needed are taken by the caller.
- */
--static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
-+static void fast_mix(struct fast_pool *f, __u32 input[4])
- {
-- const char *bytes = in;
- __u32 w;
-- unsigned i = f->count;
- unsigned input_rotate = f->rotate;
-
-- while (nbytes--) {
-- w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
-- f->pool[(i + 1) & 3];
-- f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
-- input_rotate += (i++ & 3) ? 7 : 14;
-- }
-- f->count = i;
-+ w = rol32(input[0], input_rotate) ^ f->pool[0] ^ f->pool[3];
-+ f->pool[0] = (w >> 3) ^ twist_table[w & 7];
-+ input_rotate = (input_rotate + 14) & 31;
-+ w = rol32(input[1], input_rotate) ^ f->pool[1] ^ f->pool[0];
-+ f->pool[1] = (w >> 3) ^ twist_table[w & 7];
-+ input_rotate = (input_rotate + 7) & 31;
-+ w = rol32(input[2], input_rotate) ^ f->pool[2] ^ f->pool[1];
-+ f->pool[2] = (w >> 3) ^ twist_table[w & 7];
-+ input_rotate = (input_rotate + 7) & 31;
-+ w = rol32(input[3], input_rotate) ^ f->pool[3] ^ f->pool[2];
-+ f->pool[3] = (w >> 3) ^ twist_table[w & 7];
-+ input_rotate = (input_rotate + 7) & 31;
-+
- f->rotate = input_rotate;
-+ f->count++;
- }
-
- /*
-- * Credit (or debit) the entropy store with n bits of entropy
-+ * Credit (or debit) the entropy store with n bits of entropy.
-+ * Use credit_entropy_bits_safe() if the value comes from userspace
-+ * or otherwise should be checked for extreme values.
- */
- static void credit_entropy_bits(struct entropy_store *r, int nbits)
- {
- int entropy_count, orig;
-+ const int pool_size = r->poolinfo->poolfracbits;
-+ int nfrac = nbits << ENTROPY_SHIFT;
-
- if (!nbits)
- return;
-
-- DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
- retry:
- entropy_count = orig = ACCESS_ONCE(r->entropy_count);
-- entropy_count += nbits;
-- if (entropy_count < 0) {
-- DEBUG_ENT("negative entropy/overflow\n");
-+ if (nfrac < 0) {
-+ /* Debit */
-+ entropy_count += nfrac;
-+ } else {
-+ /*
-+ * Credit: we have to account for the possibility of
-+ * overwriting already present entropy. Even in the
-+ * ideal case of pure Shannon entropy, new contributions
-+ * approach the full value asymptotically:
-+ *
-+ * entropy <- entropy + (pool_size - entropy) *
-+ * (1 - exp(-add_entropy/pool_size))
-+ *
-+ * For add_entropy <= pool_size/2 then
-+ * (1 - exp(-add_entropy/pool_size)) >=
-+ * (add_entropy/pool_size)*0.7869...
-+ * so we can approximate the exponential with
-+ * 3/4*add_entropy/pool_size and still be on the
-+ * safe side by adding at most pool_size/2 at a time.
-+ *
-+ * The use of pool_size-2 in the while statement is to
-+ * prevent rounding artifacts from making the loop
-+ * arbitrarily long; this limits the loop to log2(pool_size)*2
-+ * turns no matter how large nbits is.
-+ */
-+ int pnfrac = nfrac;
-+ const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
-+ /* The +2 corresponds to the /4 in the denominator */
-+
-+ do {
-+ u64 anfrac = min(pnfrac, pool_size/2);
-+ unsigned int add =
-+ ((pool_size - entropy_count)*anfrac*3) >> s;
-+
-+ entropy_count += add;
-+ pnfrac -= anfrac;
-+ } while (unlikely(entropy_count < pool_size-2 && pnfrac));
-+ }
-+
-+ if (unlikely(entropy_count < 0)) {
-+ pr_warn("random: negative entropy/overflow: pool %s count %d\n",
-+ r->name, entropy_count);
-+ WARN_ON(1);
- entropy_count = 0;
-- } else if (entropy_count > r->poolinfo->POOLBITS)
-- entropy_count = r->poolinfo->POOLBITS;
-+ } else if (entropy_count > pool_size)
-+ entropy_count = pool_size;
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
- goto retry;
-
-- if (!r->initialized && nbits > 0) {
-- r->entropy_total += nbits;
-- if (r->entropy_total > 128)
-- r->initialized = 1;
-+ r->entropy_total += nbits;
-+ if (!r->initialized && r->entropy_total > 128) {
-+ r->initialized = 1;
-+ r->entropy_total = 0;
-+ if (r == &nonblocking_pool) {
-+ prandom_reseed_late();
-+ pr_notice("random: %s pool is initialized\n", r->name);
-+ }
- }
-
-- /* should we wake readers? */
-- if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
-- wake_up_interruptible(&random_read_wait);
-- kill_fasync(&fasync, SIGIO, POLL_IN);
-+ trace_credit_entropy_bits(r->name, nbits,
-+ entropy_count >> ENTROPY_SHIFT,
-+ r->entropy_total, _RET_IP_);
-+
-+ if (r == &input_pool) {
-+ int entropy_bits = entropy_count >> ENTROPY_SHIFT;
-+
-+ /* should we wake readers? */
-+ if (entropy_bits >= random_read_wakeup_bits) {
-+ wake_up_interruptible(&random_read_wait);
-+ kill_fasync(&fasync, SIGIO, POLL_IN);
-+ }
-+ /* If the input pool is getting full, send some
-+ * entropy to the two output pools, flipping back and
-+ * forth between them, until the output pools are 75%
-+ * full.
-+ */
-+ if (entropy_bits > random_write_wakeup_bits &&
-+ r->initialized &&
-+ r->entropy_total >= 2*random_read_wakeup_bits) {
-+ static struct entropy_store *last = &blocking_pool;
-+ struct entropy_store *other = &blocking_pool;
-+
-+ if (last == &blocking_pool)
-+ other = &nonblocking_pool;
-+ if (other->entropy_count <=
-+ 3 * other->poolinfo->poolfracbits / 4)
-+ last = other;
-+ if (last->entropy_count <=
-+ 3 * last->poolinfo->poolfracbits / 4) {
-+ schedule_work(&last->push_work);
-+ r->entropy_total = 0;
-+ }
-+ }
- }
- }
-
-+static void credit_entropy_bits_safe(struct entropy_store *r, int nbits)
-+{
-+ const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
-+
-+ /* Cap the value to avoid overflows */
-+ nbits = min(nbits, nbits_max);
-+ nbits = max(nbits, -nbits_max);
-+
-+ credit_entropy_bits(r, nbits);
-+}
-+
- /*********************************************************************
- *
- * Entropy input management
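The credit path above approximates the asymptotic formula in its comment,

        entropy <- entropy + (pool_size - entropy) * (1 - exp(-add / pool_size)),

with integer arithmetic: add = ((pool_size - entropy_count) * anfrac * 3) >> s, where s = poolbitshift + ENTROPY_SHIFT + 2 (the +2 being the /4 in the denominator). A worked example using the input pool's constants from the table above (poolfracbits = 32768, poolbitshift = 12, so s = 17), crediting 64 bits into an empty pool: nfrac = 64 << 3 = 512, anfrac = min(512, 32768/2) = 512, and

        add = (32768 - 0) * 512 * 3 >> 17 = 50331648 >> 17 = 384 fractional bits = 48 bits,

versus roughly 63.5 bits from the exact exponential. The approximation deliberately under-credits, and as the pool fills, (pool_size - entropy_count) shrinks so each further credit is worth less; that is the "approach the full value asymptotically" behaviour the comment describes.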
-@@ -621,42 +719,7 @@ struct timer_rand_state {
- unsigned dont_count_entropy:1;
- };
-
--#ifndef CONFIG_GENERIC_HARDIRQS
--
--static struct timer_rand_state *irq_timer_state[NR_IRQS];
--
--static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
--{
-- return irq_timer_state[irq];
--}
--
--static void set_timer_rand_state(unsigned int irq,
-- struct timer_rand_state *state)
--{
-- irq_timer_state[irq] = state;
--}
--
--#else
--
--static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
--{
-- struct irq_desc *desc;
--
-- desc = irq_to_desc(irq);
--
-- return desc->timer_rand_state;
--}
--
--static void set_timer_rand_state(unsigned int irq,
-- struct timer_rand_state *state)
--{
-- struct irq_desc *desc;
--
-- desc = irq_to_desc(irq);
--
-- desc->timer_rand_state = state;
--}
--#endif
-+#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };
-
- /*
- * Add device- or boot-specific data to the input and nonblocking
-@@ -669,15 +732,22 @@ static void set_timer_rand_state(unsigned int irq,
- void add_device_randomness(const void *buf, unsigned int size)
- {
- unsigned long time = random_get_entropy() ^ jiffies;
-+ unsigned long flags;
-
-- mix_pool_bytes(&input_pool, buf, size, NULL);
-- mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
-- mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
-- mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
-+ trace_add_device_randomness(size, _RET_IP_);
-+ spin_lock_irqsave(&input_pool.lock, flags);
-+ _mix_pool_bytes(&input_pool, buf, size, NULL);
-+ _mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
-+ spin_unlock_irqrestore(&input_pool.lock, flags);
-+
-+ spin_lock_irqsave(&nonblocking_pool.lock, flags);
-+ _mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
-+ _mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
-+ spin_unlock_irqrestore(&nonblocking_pool.lock, flags);
- }
- EXPORT_SYMBOL(add_device_randomness);
-
--static struct timer_rand_state input_timer_state;
-+static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
-
- /*
- * This function adds entropy to the entropy "pool" by using timing
-@@ -691,6 +761,7 @@ static struct timer_rand_state input_timer_state;
- */
- static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
- {
-+ struct entropy_store *r;
- struct {
- long jiffies;
- unsigned cycles;
-@@ -699,15 +770,12 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
- long delta, delta2, delta3;
-
- preempt_disable();
-- /* if over the trickle threshold, use only 1 in 4096 samples */
-- if (input_pool.entropy_count > trickle_thresh &&
-- ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
-- goto out;
-
- sample.jiffies = jiffies;
- sample.cycles = random_get_entropy();
- sample.num = num;
-- mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
-+ r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
-+ mix_pool_bytes(r, &sample, sizeof(sample), NULL);
-
- /*
- * Calculate number of bits of randomness we probably added.
-@@ -741,10 +809,8 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
- * Round down by 1 bit on general principles,
- * and limit entropy entimate to 12 bits.
- */
-- credit_entropy_bits(&input_pool,
-- min_t(int, fls(delta>>1), 11));
-+ credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
- }
--out:
- preempt_enable();
- }
-
-@@ -757,10 +823,10 @@ void add_input_randomness(unsigned int type, unsigned int code,
- if (value == last_value)
- return;
-
-- DEBUG_ENT("input event\n");
- last_value = value;
- add_timer_randomness(&input_timer_state,
- (type << 4) ^ code ^ (code >> 4) ^ value);
-+ trace_add_input_randomness(ENTROPY_BITS(&input_pool));
- }
- EXPORT_SYMBOL_GPL(add_input_randomness);
-
-@@ -772,20 +838,21 @@ void add_interrupt_randomness(int irq, int irq_flags)
- struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
- struct pt_regs *regs = get_irq_regs();
- unsigned long now = jiffies;
-- __u32 input[4], cycles = random_get_entropy();
-+ cycles_t cycles = random_get_entropy();
-+ __u32 input[4], c_high, j_high;
-+ __u64 ip;
-
-- input[0] = cycles ^ jiffies;
-- input[1] = irq;
-- if (regs) {
-- __u64 ip = instruction_pointer(regs);
-- input[2] = ip;
-- input[3] = ip >> 32;
-- }
-+ c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
-+ j_high = (sizeof(now) > 4) ? now >> 32 : 0;
-+ input[0] = cycles ^ j_high ^ irq;
-+ input[1] = now ^ c_high;
-+ ip = regs ? instruction_pointer(regs) : _RET_IP_;
-+ input[2] = ip;
-+ input[3] = ip >> 32;
-
-- fast_mix(fast_pool, input, sizeof(input));
-+ fast_mix(fast_pool, input);
-
-- if ((fast_pool->count & 1023) &&
-- !time_after(now, fast_pool->last + HZ))
-+ if ((fast_pool->count & 63) && !time_after(now, fast_pool->last + HZ))
- return;
-
- fast_pool->last = now;
-@@ -814,10 +881,8 @@ void add_disk_randomness(struct gendisk *disk)
- if (!disk || !disk->random)
- return;
- /* first major is 1, so we get >= 0x200 here */
-- DEBUG_ENT("disk event %d:%d\n",
-- MAJOR(disk_devt(disk)), MINOR(disk_devt(disk)));
--
- add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
-+ trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
- }
- #endif
-
-@@ -835,104 +900,141 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- * from the primary pool to the secondary extraction pool. We make
- * sure we pull enough for a 'catastrophic reseed'.
- */
-+static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
- static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
- {
-- __u32 tmp[OUTPUT_POOL_WORDS];
-+ if (r->limit == 0 && random_min_urandom_seed) {
-+ unsigned long now = jiffies;
-
-- if (r->pull && r->entropy_count < nbytes * 8 &&
-- r->entropy_count < r->poolinfo->POOLBITS) {
-- /* If we're limited, always leave two wakeup worth's BITS */
-- int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
-- int bytes = nbytes;
--
-- /* pull at least as many as BYTES as wakeup BITS */
-- bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
-- /* but never more than the buffer size */
-- bytes = min_t(int, bytes, sizeof(tmp));
--
-- DEBUG_ENT("going to reseed %s with %d bits "
-- "(%d of %d requested)\n",
-- r->name, bytes * 8, nbytes * 8, r->entropy_count);
--
-- bytes = extract_entropy(r->pull, tmp, bytes,
-- random_read_wakeup_thresh / 8, rsvd);
-- mix_pool_bytes(r, tmp, bytes, NULL);
-- credit_entropy_bits(r, bytes*8);
-+ if (time_before(now,
-+ r->last_pulled + random_min_urandom_seed * HZ))
-+ return;
-+ r->last_pulled = now;
- }
-+ if (r->pull &&
-+ r->entropy_count < (nbytes << (ENTROPY_SHIFT + 3)) &&
-+ r->entropy_count < r->poolinfo->poolfracbits)
-+ _xfer_secondary_pool(r, nbytes);
-+}
-+
-+static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
-+{
-+ __u32 tmp[OUTPUT_POOL_WORDS];
-+ int bytes, min_bytes;
-+
-+ /* For /dev/random's pool, always leave two wakeups' worth */
-+ int rsvd_bytes = r->limit ? 0 : random_read_wakeup_bits / 4;
-+
-+ /* pull at least as much as a wakeup */
-+ min_bytes = random_read_wakeup_bits / 8;
-+ /* but never more than the buffer size */
-+ bytes = min(sizeof(tmp), max_t(size_t, min_bytes, nbytes));
-+
-+ trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
-+ ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
-+ bytes = extract_entropy(r->pull, tmp, bytes, min_bytes, rsvd_bytes);
-+ mix_pool_bytes(r, tmp, bytes, NULL);
-+ credit_entropy_bits(r, bytes*8);
- }
-
- /*
-- * These functions extracts randomness from the "entropy pool", and
-- * returns it in a buffer.
-- *
-- * The min parameter specifies the minimum amount we can pull before
-- * failing to avoid races that defeat catastrophic reseeding while the
-- * reserved parameter indicates how much entropy we must leave in the
-- * pool after each pull to avoid starving other readers.
-- *
-- * Note: extract_entropy() assumes that .poolwords is a multiple of 16 words.
-+ * Used as a workqueue function so that when the input pool is getting
-+ * full, we can "spill over" some entropy to the output pools. That
-+ * way the output pools can store some of the excess entropy instead
-+ * of letting it go to waste.
- */
-+static void push_to_pool(struct work_struct *work)
-+{
-+ struct entropy_store *r = container_of(work, struct entropy_store,
-+ push_work);
-+ BUG_ON(!r);
-+ _xfer_secondary_pool(r, random_read_wakeup_bits/8);
-+ trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
-+ r->pull->entropy_count >> ENTROPY_SHIFT);
-+}
-
-+/*
-+ * This function decides how many bytes to actually take from the
-+ * given pool, and also debits the entropy count accordingly.
-+ */
- static size_t account(struct entropy_store *r, size_t nbytes, int min,
- int reserved)
- {
-- unsigned long flags;
-+ int entropy_count, orig;
-+ size_t ibytes, nfrac;
-
-- /* Hold lock while accounting */
-- spin_lock_irqsave(&r->lock, flags);
--
-- BUG_ON(r->entropy_count > r->poolinfo->POOLBITS);
-- DEBUG_ENT("trying to extract %d bits from %s\n",
-- nbytes * 8, r->name);
-+ BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
-
- /* Can we pull enough? */
-- if (r->entropy_count / 8 < min + reserved) {
-- nbytes = 0;
-- } else {
-- int entropy_count, orig;
- retry:
-- entropy_count = orig = ACCESS_ONCE(r->entropy_count);
-- /* If limited, never pull more than available */
-- if (r->limit && nbytes + reserved >= entropy_count / 8)
-- nbytes = entropy_count/8 - reserved;
-+ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
-+ ibytes = nbytes;
-+ /* If limited, never pull more than available */
-+ if (r->limit) {
-+ int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
-
-- if (entropy_count / 8 >= nbytes + reserved) {
-- entropy_count -= nbytes*8;
-- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
-- goto retry;
-- } else {
-- entropy_count = reserved;
-- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
-- goto retry;
-- }
-+ if ((have_bytes -= reserved) < 0)
-+ have_bytes = 0;
-+ ibytes = min_t(size_t, ibytes, have_bytes);
-+ }
-+ if (ibytes < min)
-+ ibytes = 0;
-
-- if (entropy_count < random_write_wakeup_thresh) {
-- wake_up_interruptible(&random_write_wait);
-- kill_fasync(&fasync, SIGIO, POLL_OUT);
-- }
-+ if (unlikely(entropy_count < 0)) {
-+ pr_warn("random: negative entropy count: pool %s count %d\n",
-+ r->name, entropy_count);
-+ WARN_ON(1);
-+ entropy_count = 0;
- }
-+ nfrac = ibytes << (ENTROPY_SHIFT + 3);
-+ if ((size_t) entropy_count > nfrac)
-+ entropy_count -= nfrac;
-+ else
-+ entropy_count = 0;
-
-- DEBUG_ENT("debiting %d entropy credits from %s%s\n",
-- nbytes * 8, r->name, r->limit ? "" : " (unlimited)");
-+ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
-+ goto retry;
-
-- spin_unlock_irqrestore(&r->lock, flags);
-+ trace_debit_entropy(r->name, 8 * ibytes);
-+ if (ibytes &&
-+ (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
-+ wake_up_interruptible(&random_write_wait);
-+ kill_fasync(&fasync, SIGIO, POLL_OUT);
-+ }
-
-- return nbytes;
-+ return ibytes;
- }
-
-+/*
-+ * This function does the actual extraction for extract_entropy and
-+ * extract_entropy_user.
-+ *
-+ * Note: we assume that .poolwords is a multiple of 16 words.
-+ */
- static void extract_buf(struct entropy_store *r, __u8 *out)
- {
- int i;
- union {
- __u32 w[5];
-- unsigned long l[LONGS(EXTRACT_SIZE)];
-+ unsigned long l[LONGS(20)];
- } hash;
- __u32 workspace[SHA_WORKSPACE_WORDS];
- __u8 extract[64];
- unsigned long flags;
-
-- /* Generate a hash across the pool, 16 words (512 bits) at a time */
-+ /*
-+ * If we have an architectural hardware random number
-+ * generator, use it for SHA's initial vector
-+ */
- sha_init(hash.w);
-+ for (i = 0; i < LONGS(20); i++) {
-+ unsigned long v;
-+ if (!arch_get_random_long(&v))
-+ break;
-+ hash.l[i] = v;
-+ }
-+
-+ /* Generate a hash across the pool, 16 words (512 bits) at a time */
- spin_lock_irqsave(&r->lock, flags);
- for (i = 0; i < r->poolinfo->poolwords; i += 16)
- sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
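account() above, like credit_entropy_bits() earlier, updates entropy_count without holding the pool lock: it snapshots the count, computes the debit, and publishes the result with cmpxchg(), retrying from the top if another CPU changed the count in the meantime. The general shape of that lockless read-modify-write, sketched with the C11 equivalent of cmpxchg (the kernel helper returns the old value rather than a success flag, hence the "!= orig" comparison in the hunk); the debit() helper here is illustrative, not the kernel function:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int entropy_count;

/* Debit up to 'nfrac' fractional bits and return how much was actually taken. */
static int debit(int nfrac)
{
        int orig, newv, taken;

        do {
                orig = atomic_load(&entropy_count);       /* snapshot */
                taken = nfrac < orig ? nfrac : orig;      /* never go negative */
                newv = orig - taken;
                /* retry if another CPU updated the count since the snapshot */
        } while (!atomic_compare_exchange_weak(&entropy_count, &orig, newv));

        return taken;
}

int main(void)
{
        atomic_store(&entropy_count, 1000);
        int taken = debit(300);

        printf("debited %d, remaining %d\n", taken, atomic_load(&entropy_count));
        return 0;
}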
-@@ -966,27 +1068,43 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
- hash.w[1] ^= hash.w[4];
- hash.w[2] ^= rol32(hash.w[2], 16);
-
-- /*
-- * If we have a architectural hardware random number
-- * generator, mix that in, too.
-- */
-- for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
-- unsigned long v;
-- if (!arch_get_random_long(&v))
-- break;
-- hash.l[i] ^= v;
-- }
--
- memcpy(out, &hash, EXTRACT_SIZE);
- memzero_explicit(&hash, sizeof(hash));
- }
-
-+/*
-+ * This function extracts randomness from the "entropy pool", and
-+ * returns it in a buffer.
-+ *
-+ * The min parameter specifies the minimum amount we can pull before
-+ * failing to avoid races that defeat catastrophic reseeding while the
-+ * reserved parameter indicates how much entropy we must leave in the
-+ * pool after each pull to avoid starving other readers.
-+ */
- static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int min, int reserved)
- {
- ssize_t ret = 0, i;
- __u8 tmp[EXTRACT_SIZE];
-+ unsigned long flags;
-
-+ /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
-+ if (fips_enabled) {
-+ spin_lock_irqsave(&r->lock, flags);
-+ if (!r->last_data_init) {
-+ r->last_data_init = 1;
-+ spin_unlock_irqrestore(&r->lock, flags);
-+ trace_extract_entropy(r->name, EXTRACT_SIZE,
-+ ENTROPY_BITS(r), _RET_IP_);
-+ xfer_secondary_pool(r, EXTRACT_SIZE);
-+ extract_buf(r, tmp);
-+ spin_lock_irqsave(&r->lock, flags);
-+ memcpy(r->last_data, tmp, EXTRACT_SIZE);
-+ }
-+ spin_unlock_irqrestore(&r->lock, flags);
-+ }
-+
-+ trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
- xfer_secondary_pool(r, nbytes);
- nbytes = account(r, nbytes, min, reserved);
-
-@@ -994,8 +1112,6 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- extract_buf(r, tmp);
-
- if (fips_enabled) {
-- unsigned long flags;
--
- spin_lock_irqsave(&r->lock, flags);
- if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
- panic("Hardware RNG duplicated output!\n");
-@@ -1015,12 +1131,17 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- return ret;
- }
-
-+/*
-+ * This function extracts randomness from the "entropy pool", and
-+ * returns it in a userspace buffer.
-+ */
- static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
- size_t nbytes)
- {
- ssize_t ret = 0, i;
- __u8 tmp[EXTRACT_SIZE];
-
-+ trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
- xfer_secondary_pool(r, nbytes);
- nbytes = account(r, nbytes, 0, 0);
-
-@@ -1036,7 +1157,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
-
- extract_buf(r, tmp);
- i = min_t(int, nbytes, EXTRACT_SIZE);
-- if (copy_to_user(buf, tmp, i)) {
-+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
- ret = -EFAULT;
- break;
- }
-@@ -1055,11 +1176,20 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
- /*
- * This function is the exported kernel interface. It returns some
- * number of good random numbers, suitable for key generation, seeding
-- * TCP sequence numbers, etc. It does not use the hw random number
-- * generator, if available; use get_random_bytes_arch() for that.
-+ * TCP sequence numbers, etc. It does not rely on the hardware random
-+ * number generator. For random bytes direct from the hardware RNG
-+ * (when available), use get_random_bytes_arch().
- */
- void get_random_bytes(void *buf, int nbytes)
- {
-+#if DEBUG_RANDOM_BOOT > 0
-+ if (unlikely(nonblocking_pool.initialized == 0))
-+ printk(KERN_NOTICE "random: %pF get_random_bytes called "
-+ "with %d bits of entropy available\n",
-+ (void *) _RET_IP_,
-+ nonblocking_pool.entropy_total);
-+#endif
-+ trace_get_random_bytes(nbytes, _RET_IP_);
- extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
- }
- EXPORT_SYMBOL(get_random_bytes);
-@@ -1078,6 +1208,7 @@ void get_random_bytes_arch(void *buf, int nbytes)
- {
- char *p = buf;
-
-+ trace_get_random_bytes_arch(nbytes, _RET_IP_);
- while (nbytes) {
- unsigned long v;
- int chunk = min(nbytes, (int)sizeof(unsigned long));
-@@ -1111,12 +1242,11 @@ static void init_std_data(struct entropy_store *r)
- ktime_t now = ktime_get_real();
- unsigned long rv;
-
-- r->entropy_count = 0;
-- r->entropy_total = 0;
-+ r->last_pulled = jiffies;
- mix_pool_bytes(r, &now, sizeof(now), NULL);
-- for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
-+ for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
- if (!arch_get_random_long(&rv))
-- break;
-+ rv = random_get_entropy();
- mix_pool_bytes(r, &rv, sizeof(rv), NULL);
- }
- mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
-@@ -1139,25 +1269,7 @@ static int rand_initialize(void)
- init_std_data(&nonblocking_pool);
- return 0;
- }
--module_init(rand_initialize);
--
--void rand_initialize_irq(int irq)
--{
-- struct timer_rand_state *state;
--
-- state = get_timer_rand_state(irq);
--
-- if (state)
-- return;
--
-- /*
-- * If kzalloc returns null, we just won't use that entropy
-- * source.
-- */
-- state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
-- if (state)
-- set_timer_rand_state(irq, state);
--}
-+early_initcall(rand_initialize);
-
- #ifdef CONFIG_BLOCK
- void rand_initialize_disk(struct gendisk *disk)
-@@ -1169,71 +1281,60 @@ void rand_initialize_disk(struct gendisk *disk)
- * source.
- */
- state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
-- if (state)
-+ if (state) {
-+ state->last_time = INITIAL_JIFFIES;
- disk->random = state;
-+ }
- }
- #endif
-
- static ssize_t
- random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
- {
-- ssize_t n, retval = 0, count = 0;
-+ ssize_t n;
-
- if (nbytes == 0)
- return 0;
-
-- while (nbytes > 0) {
-- n = nbytes;
-- if (n > SEC_XFER_SIZE)
-- n = SEC_XFER_SIZE;
--
-- DEBUG_ENT("reading %d bits\n", n*8);
--
-- n = extract_entropy_user(&blocking_pool, buf, n);
--
-- DEBUG_ENT("read got %d bits (%d still needed)\n",
-- n*8, (nbytes-n)*8);
--
-- if (n == 0) {
-- if (file->f_flags & O_NONBLOCK) {
-- retval = -EAGAIN;
-- break;
-- }
--
-- DEBUG_ENT("sleeping?\n");
--
-- wait_event_interruptible(random_read_wait,
-- input_pool.entropy_count >=
-- random_read_wakeup_thresh);
--
-- DEBUG_ENT("awake\n");
--
-- if (signal_pending(current)) {
-- retval = -ERESTARTSYS;
-- break;
-- }
--
-- continue;
-- }
--
-- if (n < 0) {
-- retval = n;
-- break;
-- }
-- count += n;
-- buf += n;
-- nbytes -= n;
-- break; /* This break makes the device work */
-- /* like a named pipe */
-+ nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE);
-+ while (1) {
-+ n = extract_entropy_user(&blocking_pool, buf, nbytes);
-+ if (n < 0)
-+ return n;
-+ trace_random_read(n*8, (nbytes-n)*8,
-+ ENTROPY_BITS(&blocking_pool),
-+ ENTROPY_BITS(&input_pool));
-+ if (n > 0)
-+ return n;
-+ /* Pool is (near) empty. Maybe wait and retry. */
-+
-+ if (file->f_flags & O_NONBLOCK)
-+ return -EAGAIN;
-+
-+ wait_event_interruptible(random_read_wait,
-+ ENTROPY_BITS(&input_pool) >=
-+ random_read_wakeup_bits);
-+ if (signal_pending(current))
-+ return -ERESTARTSYS;
- }
--
-- return (count ? count : retval);
- }
-
- static ssize_t
- urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
- {
-- return extract_entropy_user(&nonblocking_pool, buf, nbytes);
-+ int ret;
-+
-+ if (unlikely(nonblocking_pool.initialized == 0))
-+ printk_once(KERN_NOTICE "random: %s urandom read "
-+ "with %d bits of entropy available\n",
-+ current->comm, nonblocking_pool.entropy_total);
-+
-+ nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
-+ ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
-+
-+ trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
-+ ENTROPY_BITS(&input_pool));
-+ return ret;
- }
-
- static unsigned int
-@@ -1244,9 +1345,9 @@ random_poll(struct file *file, poll_table * wait)
- poll_wait(file, &random_read_wait, wait);
- poll_wait(file, &random_write_wait, wait);
- mask = 0;
-- if (input_pool.entropy_count >= random_read_wakeup_thresh)
-+ if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
- mask |= POLLIN | POLLRDNORM;
-- if (input_pool.entropy_count < random_write_wakeup_thresh)
-+ if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
- mask |= POLLOUT | POLLWRNORM;
- return mask;
- }
-@@ -1297,7 +1398,8 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
- switch (cmd) {
- case RNDGETENTCNT:
- /* inherently racy, no point locking */
-- if (put_user(input_pool.entropy_count, p))
-+ ent_count = ENTROPY_BITS(&input_pool);
-+ if (put_user(ent_count, p))
- return -EFAULT;
- return 0;
- case RNDADDTOENTCNT:
-@@ -1305,7 +1407,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
- return -EPERM;
- if (get_user(ent_count, p))
- return -EFAULT;
-- credit_entropy_bits(&input_pool, ent_count);
-+ credit_entropy_bits_safe(&input_pool, ent_count);
- return 0;
- case RNDADDENTROPY:
- if (!capable(CAP_SYS_ADMIN))
-@@ -1320,14 +1422,19 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
- size);
- if (retval < 0)
- return retval;
-- credit_entropy_bits(&input_pool, ent_count);
-+ credit_entropy_bits_safe(&input_pool, ent_count);
- return 0;
- case RNDZAPENTCNT:
- case RNDCLEARPOOL:
-- /* Clear the entropy pool counters. */
-+ /*
-+ * Clear the entropy pool counters. We no longer clear
-+ * the entropy pool, as that's silly.
-+ */
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-- rand_initialize();
-+ input_pool.entropy_count = 0;
-+ nonblocking_pool.entropy_count = 0;
-+ blocking_pool.entropy_count = 0;
- return 0;
- default:
- return -EINVAL;
-@@ -1387,23 +1494,23 @@ EXPORT_SYMBOL(generate_random_uuid);
- #include <linux/sysctl.h>
-
- static int min_read_thresh = 8, min_write_thresh;
--static int max_read_thresh = INPUT_POOL_WORDS * 32;
-+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
- static int max_write_thresh = INPUT_POOL_WORDS * 32;
- static char sysctl_bootid[16];
-
- /*
-- * These functions is used to return both the bootid UUID, and random
-+ * This function is used to return both the bootid UUID, and random
- * UUID. The difference is in whether table->data is NULL; if it is,
- * then a new UUID is generated and returned to the user.
- *
-- * If the user accesses this via the proc interface, it will be returned
-- * as an ASCII string in the standard UUID format. If accesses via the
-- * sysctl system call, it is returned as 16 bytes of binary data.
-+ * If the user accesses this via the proc interface, the UUID will be
-+ * returned as an ASCII string in the standard UUID format; if via the
-+ * sysctl system call, as 16 bytes of binary data.
- */
--static int proc_do_uuid(ctl_table *table, int write,
-+static int proc_do_uuid(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
- {
-- ctl_table fake_table;
-+ ctl_table_no_const fake_table;
- unsigned char buf[64], tmp_uuid[16], *uuid;
-
- uuid = table->data;
-@@ -1427,8 +1534,26 @@ static int proc_do_uuid(ctl_table *table, int write,
- return proc_dostring(&fake_table, write, buffer, lenp, ppos);
- }
-
-+/*
-+ * Return entropy available scaled to integral bits
-+ */
-+static int proc_do_entropy(ctl_table *table, int write,
-+ void __user *buffer, size_t *lenp, loff_t *ppos)
-+{
-+ ctl_table_no_const fake_table;
-+ int entropy_count;
-+
-+ entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
-+
-+ fake_table.data = &entropy_count;
-+ fake_table.maxlen = sizeof(entropy_count);
-+
-+ return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
-+}
-+
- static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
--ctl_table random_table[] = {
-+extern struct ctl_table random_table[];
-+struct ctl_table random_table[] = {
- {
- .procname = "poolsize",
- .data = &sysctl_poolsize,
-@@ -1440,12 +1565,12 @@ ctl_table random_table[] = {
- .procname = "entropy_avail",
- .maxlen = sizeof(int),
- .mode = 0444,
-- .proc_handler = proc_dointvec,
-+ .proc_handler = proc_do_entropy,
- .data = &input_pool.entropy_count,
- },
- {
- .procname = "read_wakeup_threshold",
-- .data = &random_read_wakeup_thresh,
-+ .data = &random_read_wakeup_bits,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
-@@ -1454,7 +1579,7 @@ ctl_table random_table[] = {
- },
- {
- .procname = "write_wakeup_threshold",
-- .data = &random_write_wakeup_thresh,
-+ .data = &random_write_wakeup_bits,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
-@@ -1462,6 +1587,13 @@ ctl_table random_table[] = {
- .extra2 = &max_write_thresh,
- },
- {
-+ .procname = "urandom_min_reseed_secs",
-+ .data = &random_min_urandom_seed,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec,
-+ },
-+ {
- .procname = "boot_id",
- .data = &sysctl_bootid,
- .maxlen = 16,
-@@ -1492,7 +1624,7 @@ int random_int_secret_init(void)
- * value is not cryptographically secure but for several uses the cost of
- * depleting entropy is too high
- */
--DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
-+static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
- unsigned int get_random_int(void)
- {
- __u32 *hash;
-@@ -1510,6 +1642,7 @@ unsigned int get_random_int(void)
-
- return ret;
- }
-+EXPORT_SYMBOL(get_random_int);
-
- /*
- * randomize_range() returns a start address such that
-diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
-index 1ee8ce7..586718d 100644
---- a/drivers/char/sonypi.c
-+++ b/drivers/char/sonypi.c
-@@ -55,6 +55,7 @@
- #include <asm/uaccess.h>
- #include <asm/io.h>
- #include <asm/system.h>
-+#include <asm/local.h>
-
- #include <linux/sonypi.h>
-
-@@ -491,7 +492,7 @@ static struct sonypi_device {
- spinlock_t fifo_lock;
- wait_queue_head_t fifo_proc_list;
- struct fasync_struct *fifo_async;
-- int open_count;
-+ local_t open_count;
- int model;
- struct input_dev *input_jog_dev;
- struct input_dev *input_key_dev;
-@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
- static int sonypi_misc_release(struct inode *inode, struct file *file)
- {
- mutex_lock(&sonypi_device.lock);
-- sonypi_device.open_count--;
-+ local_dec(&sonypi_device.open_count);
- mutex_unlock(&sonypi_device.lock);
- return 0;
- }
-@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
- {
- mutex_lock(&sonypi_device.lock);
- /* Flush input queue on first open */
-- if (!sonypi_device.open_count)
-+ if (!local_read(&sonypi_device.open_count))
- kfifo_reset(&sonypi_device.fifo);
-- sonypi_device.open_count++;
-+ local_inc(&sonypi_device.open_count);
- mutex_unlock(&sonypi_device.lock);
-
- return 0;
-@@ -1497,7 +1498,7 @@ static struct platform_driver sonypi_driver = {
-
- static struct platform_device *sonypi_platform_device;
-
--static struct dmi_system_id __initdata sonypi_dmi_table[] = {
-+static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
- {
- .ident = "Sony Vaio",
- .matches = {
-diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
-index f7395c4..93bcc43 100644
---- a/drivers/char/tpm/tpm.c
-+++ b/drivers/char/tpm/tpm.c
-@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
- chip->vendor.req_complete_val)
- goto out_recv;
-
-- if ((status == chip->vendor.req_canceled)) {
-+ if (status == chip->vendor.req_canceled) {
- dev_err(chip->dev, "Operation Canceled\n");
- rc = -ECANCELED;
- goto out;
-diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
-index 0636520..169c1d0 100644
---- a/drivers/char/tpm/tpm_bios.c
-+++ b/drivers/char/tpm/tpm_bios.c
-@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
- event = addr;
-
- if ((event->event_type == 0 && event->event_size == 0) ||
-- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
-+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
- return NULL;
-
- return addr;
-@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
- return NULL;
-
- if ((event->event_type == 0 && event->event_size == 0) ||
-- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
-+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
- return NULL;
-
- (*pos)++;
-@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
- int i;
-
- for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
-- seq_putc(m, data[i]);
-+ if (!seq_putc(m, data[i]))
-+ return -EFAULT;
-
- return 0;
- }
-@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
- log->bios_event_log_end = log->bios_event_log + len;
-
- virt = acpi_os_map_memory(start, len);
-+ if (!virt) {
-+ kfree(log->bios_event_log);
-+ log->bios_event_log = NULL;
-+ return -EFAULT;
-+ }
-
-- memcpy(log->bios_event_log, virt, len);
-+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
-
- acpi_os_unmap_memory(virt, len);
- return 0;
-diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
-index e9d18aa..d0369bc 100644
---- a/drivers/char/virtio_console.c
-+++ b/drivers/char/virtio_console.c
-@@ -571,7 +571,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
- if (to_user) {
- ssize_t ret;
-
-- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
-+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
- if (ret)
- return -EFAULT;
- } else {
-@@ -674,7 +674,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
- if (!port_has_data(port) && !port->host_connected)
- return 0;
-
-- return fill_readbuf(port, ubuf, count, true);
-+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
- }
-
- static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
-diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
-index 56c6c6b..99056e6 100644
---- a/drivers/cpufreq/acpi-cpufreq.c
-+++ b/drivers/cpufreq/acpi-cpufreq.c
-@@ -533,8 +533,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
- data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
- per_cpu(acfreq_data, cpu) = data;
-
-- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
-- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
-+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
-+ pax_open_kernel();
-+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
-+ pax_close_kernel();
-+ }
-
- result = acpi_processor_register_performance(data->acpi_data, cpu);
- if (result)
-@@ -644,7 +647,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
- policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
- break;
- case ACPI_ADR_SPACE_FIXED_HARDWARE:
-- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
-+ pax_open_kernel();
-+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
-+ pax_close_kernel();
- policy->cur = get_cur_freq_on_cpu(cpu);
- break;
- default:
-@@ -655,8 +660,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
- acpi_processor_notify_smm(THIS_MODULE);
-
- /* Check for APERF/MPERF support in hardware */
-- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
-- acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
-+ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
-+ pax_open_kernel();
-+ *(void **)&acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
-+ pax_close_kernel();
-+ }
-
- pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
- for (i = 0; i < perf->state_count; i++)
-diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
-index 987a165..d7f2bcd 100644
---- a/drivers/cpufreq/cpufreq.c
-+++ b/drivers/cpufreq/cpufreq.c
-@@ -1790,7 +1790,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __refdata cpufreq_cpu_notifier = {
-+static struct notifier_block cpufreq_cpu_notifier = {
- .notifier_call = cpufreq_cpu_callback,
- };
-
-@@ -1819,8 +1819,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
-
- pr_debug("trying to register driver %s\n", driver_data->name);
-
-- if (driver_data->setpolicy)
-- driver_data->flags |= CPUFREQ_CONST_LOOPS;
-+ if (driver_data->setpolicy) {
-+ pax_open_kernel();
-+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
-+ pax_close_kernel();
-+ }
-
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- if (cpufreq_driver) {
-diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
-index 4bf374d..b170d80 100644
---- a/drivers/cpufreq/cpufreq_stats.c
-+++ b/drivers/cpufreq/cpufreq_stats.c
-@@ -342,7 +342,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
- }
-
- /* priority=1 so this will get called before cpufreq_remove_dev */
--static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
-+static struct notifier_block cpufreq_stat_cpu_notifier = {
- .notifier_call = cpufreq_stat_cpu_callback,
- .priority = 1,
- };
-diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
-index 6be3e07..dafe020 100644
---- a/drivers/cpufreq/p4-clockmod.c
-+++ b/drivers/cpufreq/p4-clockmod.c
-@@ -166,10 +166,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
- case 0x0F: /* Core Duo */
- case 0x16: /* Celeron Core */
- case 0x1C: /* Atom */
-- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
-+ pax_open_kernel();
-+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
-+ pax_close_kernel();
- return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
- case 0x0D: /* Pentium M (Dothan) */
-- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
-+ pax_open_kernel();
-+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
-+ pax_close_kernel();
- /* fall through */
- case 0x09: /* Pentium M (Banias) */
- return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
-@@ -181,7 +185,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
-
- /* on P-4s, the TSC runs with constant frequency independent whether
- * throttling is active or not. */
-- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
-+ pax_open_kernel();
-+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
-+ pax_close_kernel();
-
- if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
- printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
-diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
-index f6cd315..ce0d3b7 100644
---- a/drivers/cpufreq/powernow-k8.c
-+++ b/drivers/cpufreq/powernow-k8.c
-@@ -1341,8 +1341,11 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
- }
-
- /* Check for APERF/MPERF support in hardware */
-- if (cpu_has(c, X86_FEATURE_APERFMPERF))
-- cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;
-+ if (cpu_has(c, X86_FEATURE_APERFMPERF)) {
-+ pax_open_kernel();
-+ *(void **)&cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;
-+ pax_close_kernel();
-+ }
-
- cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
-
-diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
-index 6ea3455..4a1af8f 100644
---- a/drivers/cpufreq/speedstep-centrino.c
-+++ b/drivers/cpufreq/speedstep-centrino.c
-@@ -352,8 +352,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
- !cpu_has(cpu, X86_FEATURE_EST))
- return -ENODEV;
-
-- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
-- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
-+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
-+ pax_open_kernel();
-+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
-+ pax_close_kernel();
-+ }
-
- if (policy->cpu != 0)
- return -ENODEV;
-diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
-index 06ce268..1e0d3e4 100644
---- a/drivers/cpuidle/cpuidle.c
-+++ b/drivers/cpuidle/cpuidle.c
-@@ -188,7 +188,7 @@ static int poll_idle(struct cpuidle_device *dev,
-
- static void poll_idle_init(struct cpuidle_driver *drv)
- {
-- struct cpuidle_state *state = &drv->states[0];
-+ cpuidle_state_no_const *state = &drv->states[0];
-
- snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
- snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
-diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
-index ea2f8e7..70ac501 100644
---- a/drivers/cpuidle/governor.c
-+++ b/drivers/cpuidle/governor.c
-@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
- mutex_lock(&cpuidle_lock);
- if (__cpuidle_find_governor(gov->name) == NULL) {
- ret = 0;
-- list_add_tail(&gov->governor_list, &cpuidle_governors);
-+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
- if (!cpuidle_curr_governor ||
- cpuidle_curr_governor->rating < gov->rating)
- cpuidle_switch_governor(gov);
-@@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
- new_gov = cpuidle_replace_governor(gov->rating);
- cpuidle_switch_governor(new_gov);
- }
-- list_del(&gov->governor_list);
-+ pax_list_del((struct list_head *)&gov->governor_list);
- mutex_unlock(&cpuidle_lock);
- }
-
-diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
-index 1e756e1..6f7ead5 100644
---- a/drivers/cpuidle/sysfs.c
-+++ b/drivers/cpuidle/sysfs.c
-@@ -131,7 +131,7 @@ static struct attribute *cpuclass_switch_attrs[] = {
- NULL
- };
-
--static struct attribute_group cpuclass_attr_group = {
-+static attribute_group_no_const cpuclass_attr_group = {
- .attrs = cpuclass_default_attrs,
- .name = "cpuidle",
- };
-diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
-index fe765f4..0bc6d6a 100644
---- a/drivers/crypto/hifn_795x.c
-+++ b/drivers/crypto/hifn_795x.c
-@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
- MODULE_PARM_DESC(hifn_pll_ref,
- "PLL reference clock (pci[freq] or ext[freq], default ext)");
-
--static atomic_t hifn_dev_number;
-+static atomic_unchecked_t hifn_dev_number;
-
- #define ACRYPTO_OP_DECRYPT 0
- #define ACRYPTO_OP_ENCRYPT 1
-@@ -2576,7 +2576,7 @@ static int __devinit hifn_probe(struct pci_dev *pdev, const struct pci_device_id
- goto err_out_disable_pci_device;
-
- snprintf(name, sizeof(name), "hifn%d",
-- atomic_inc_return(&hifn_dev_number)-1);
-+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
-
- err = pci_request_regions(pdev, name);
- if (err)
-diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
-index 59d24e9..0d20240 100644
---- a/drivers/devfreq/devfreq.c
-+++ b/drivers/devfreq/devfreq.c
-@@ -372,7 +372,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
- = msecs_to_jiffies(devfreq->profile->polling_ms);
- devfreq->nb.notifier_call = devfreq_notifier_call;
-
-- dev_set_name(&devfreq->dev, dev_name(dev));
-+ dev_set_name(&devfreq->dev, "%s", dev_name(dev));
- err = device_register(&devfreq->dev);
- if (err) {
- put_device(&devfreq->dev);
-diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
-index eb1d864..39ee5a7 100644
---- a/drivers/dma/dmatest.c
-+++ b/drivers/dma/dmatest.c
-@@ -591,7 +591,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
- }
- if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
- cnt = dmatest_add_threads(dtc, DMA_PQ);
-- thread_count += cnt > 0 ?: 0;
-+ thread_count += cnt > 0 ? cnt : 0;
- }
-
- pr_info("dmatest: Started %u threads using %s\n",
-diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
-index 81809c2..6409470 100644
---- a/drivers/dma/shdma.c
-+++ b/drivers/dma/shdma.c
-@@ -1054,7 +1054,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
- return ret;
- }
-
--static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
-+static struct notifier_block sh_dmae_nmi_notifier = {
- .notifier_call = sh_dmae_nmi_handler,
-
- /* Run before NMI debug handler and KGDB */
-diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
-index a9d5482..376077f 100644
---- a/drivers/edac/amd64_edac.c
-+++ b/drivers/edac/amd64_edac.c
-@@ -2682,7 +2682,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
- * PCI core identifies what devices are on a system during boot, and then
- * inquiry this table to see if this driver is for a given device found.
- */
--static const struct pci_device_id amd64_pci_table[] __devinitdata = {
-+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
- {
- .vendor = PCI_VENDOR_ID_AMD,
- .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
-diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
-index e47e73b..348e0bd 100644
---- a/drivers/edac/amd76x_edac.c
-+++ b/drivers/edac/amd76x_edac.c
-@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
- edac_mc_free(mci);
- }
-
--static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
-+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
- {
- PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- AMD762},
-diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
-index 1af531a1..3a8ff27 100644
---- a/drivers/edac/e752x_edac.c
-+++ b/drivers/edac/e752x_edac.c
-@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
- edac_mc_free(mci);
- }
-
--static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
-+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
- {
- PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7520},
-diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
-index 6ffb6d2..383d8d7 100644
---- a/drivers/edac/e7xxx_edac.c
-+++ b/drivers/edac/e7xxx_edac.c
-@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
- edac_mc_free(mci);
- }
-
--static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
-+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
- {
- PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7205},
-diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
-index c3f6743..e2c52b0 100644
---- a/drivers/edac/edac_device.c
-+++ b/drivers/edac/edac_device.c
-@@ -483,9 +483,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
- */
- int edac_device_alloc_index(void)
- {
-- static atomic_t device_indexes = ATOMIC_INIT(0);
-+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
-
-- return atomic_inc_return(&device_indexes) - 1;
-+ return atomic_inc_return_unchecked(&device_indexes) - 1;
- }
- EXPORT_SYMBOL_GPL(edac_device_alloc_index);
-
-diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
-index 2b378207..99ba0bd 100644
---- a/drivers/edac/edac_pci.c
-+++ b/drivers/edac/edac_pci.c
-@@ -30,7 +30,7 @@
-
- static DEFINE_MUTEX(edac_pci_ctls_mutex);
- static LIST_HEAD(edac_pci_list);
--static atomic_t pci_indexes = ATOMIC_INIT(0);
-+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
-
- /*
- * edac_pci_alloc_ctl_info
-@@ -316,7 +316,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
- */
- int edac_pci_alloc_index(void)
- {
-- return atomic_inc_return(&pci_indexes) - 1;
-+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
- }
- EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
-
-diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
-index 8cc8676..90b70b9 100644
---- a/drivers/edac/edac_pci_sysfs.c
-+++ b/drivers/edac/edac_pci_sysfs.c
-@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
- static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
- static int edac_pci_poll_msec = 1000; /* one second workq period */
-
--static atomic_t pci_parity_count = ATOMIC_INIT(0);
--static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
-+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
-+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
-
- static struct kobject *edac_pci_top_main_kobj;
- static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
-@@ -236,7 +236,7 @@ struct edac_pci_dev_attribute {
- void *value;
- ssize_t(*show) (void *, char *);
- ssize_t(*store) (void *, const char *, size_t);
--};
-+} __do_const;
-
- /* Set of show/store abstract level functions for PCI Parity object */
- static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
-@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
- edac_printk(KERN_CRIT, EDAC_PCI,
- "Signaled System Error on %s\n",
- pci_name(dev));
-- atomic_inc(&pci_nonparity_count);
-+ atomic_inc_unchecked(&pci_nonparity_count);
- }
-
- if (status & (PCI_STATUS_PARITY)) {
-@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
- "Master Data Parity Error on %s\n",
- pci_name(dev));
-
-- atomic_inc(&pci_parity_count);
-+ atomic_inc_unchecked(&pci_parity_count);
- }
-
- if (status & (PCI_STATUS_DETECTED_PARITY)) {
-@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
- "Detected Parity Error on %s\n",
- pci_name(dev));
-
-- atomic_inc(&pci_parity_count);
-+ atomic_inc_unchecked(&pci_parity_count);
- }
- }
-
-@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
- edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
- "Signaled System Error on %s\n",
- pci_name(dev));
-- atomic_inc(&pci_nonparity_count);
-+ atomic_inc_unchecked(&pci_nonparity_count);
- }
-
- if (status & (PCI_STATUS_PARITY)) {
-@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
- "Master Data Parity Error on "
- "%s\n", pci_name(dev));
-
-- atomic_inc(&pci_parity_count);
-+ atomic_inc_unchecked(&pci_parity_count);
- }
-
- if (status & (PCI_STATUS_DETECTED_PARITY)) {
-@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
- "Detected Parity Error on %s\n",
- pci_name(dev));
-
-- atomic_inc(&pci_parity_count);
-+ atomic_inc_unchecked(&pci_parity_count);
- }
- }
- }
-@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
- if (!check_pci_errors)
- return;
-
-- before_count = atomic_read(&pci_parity_count);
-+ before_count = atomic_read_unchecked(&pci_parity_count);
-
- /* scan all PCI devices looking for a Parity Error on devices and
- * bridges.
-@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
- /* Only if operator has selected panic on PCI Error */
- if (edac_pci_get_panic_on_pe()) {
- /* If the count is different 'after' from 'before' */
-- if (before_count != atomic_read(&pci_parity_count))
-+ if (before_count != atomic_read_unchecked(&pci_parity_count))
- panic("EDAC: PCI Parity Error");
- }
- }
-diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
-index c0510b3..6e2a954 100644
---- a/drivers/edac/i3000_edac.c
-+++ b/drivers/edac/i3000_edac.c
-@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
- edac_mc_free(mci);
- }
-
--static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
-+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
- {
- PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- I3000},
-diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
-index aa08497..7e6822a 100644
---- a/drivers/edac/i3200_edac.c
-+++ b/drivers/edac/i3200_edac.c
-@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
- edac_mc_free(mci);
- }
-
--static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
-+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
- {
- PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- I3200},
-diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
-index 4dc3ac2..67d05a6 100644
---- a/drivers/edac/i5000_edac.c
-+++ b/drivers/edac/i5000_edac.c
-@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
- *
- * The "E500P" device is the first device supported.
- */
--static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
-+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
- .driver_data = I5000P},
-
-diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
-index bcbdeec..9886d16 100644
---- a/drivers/edac/i5100_edac.c
-+++ b/drivers/edac/i5100_edac.c
-@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
- edac_mc_free(mci);
- }
-
--static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
-+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
- /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
- { 0, }
-diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
-index 74d6ec34..baff517 100644
---- a/drivers/edac/i5400_edac.c
-+++ b/drivers/edac/i5400_edac.c
-@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
- *
- * The "E500P" device is the first device supported.
- */
--static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
-+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
- {0,} /* 0 terminated list. */
- };
-diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
-index 1e08426..87e8c13 100644
---- a/drivers/edac/i7300_edac.c
-+++ b/drivers/edac/i7300_edac.c
-@@ -1194,7 +1194,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
- *
- * Has only 8086:360c PCI ID
- */
--static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
-+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
- {0,} /* 0 terminated list. */
- };
-diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
-index 4c18b3c..f256adc 100644
---- a/drivers/edac/i7core_edac.c
-+++ b/drivers/edac/i7core_edac.c
-@@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
- /*
- * pci_device_id table for which devices we are looking for
- */
--static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
-+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
- {0,} /* 0 terminated list. */
-diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
-index 4329d39..f3022ef 100644
---- a/drivers/edac/i82443bxgx_edac.c
-+++ b/drivers/edac/i82443bxgx_edac.c
-@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
-
- EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
-
--static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
-+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
-diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
-index 931a057..fd28340 100644
---- a/drivers/edac/i82860_edac.c
-+++ b/drivers/edac/i82860_edac.c
-@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
- edac_mc_free(mci);
- }
-
--static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
-+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
- {
- PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- I82860},
-diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
-index 33864c6..01edc61 100644
---- a/drivers/edac/i82875p_edac.c
-+++ b/drivers/edac/i82875p_edac.c
-@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
- edac_mc_free(mci);
- }
-
--static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
-+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
- {
- PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- I82875P},
-diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
-index 01658ca..092a452 100644
---- a/drivers/edac/i82975x_edac.c
-+++ b/drivers/edac/i82975x_edac.c
-@@ -601,7 +601,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
- edac_mc_free(mci);
- }
-
--static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
-+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
- {
- PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- I82975X
-diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
-index 0106747..0b40417 100644
---- a/drivers/edac/mce_amd.h
-+++ b/drivers/edac/mce_amd.h
-@@ -83,7 +83,7 @@ struct amd_decoder_ops {
- bool (*dc_mce)(u16, u8);
- bool (*ic_mce)(u16, u8);
- bool (*nb_mce)(u16, u8);
--};
-+} __no_const;
-
- void amd_report_gart_errors(bool);
- void amd_register_ecc_decoder(void (*f)(int, struct mce *));
-diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
-index b153674..ad2ba9b 100644
---- a/drivers/edac/r82600_edac.c
-+++ b/drivers/edac/r82600_edac.c
-@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
- edac_mc_free(mci);
- }
-
--static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
-+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
- {
- PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
- },
-diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
-index da71881..8d7d62c 100644
---- a/drivers/edac/sb_edac.c
-+++ b/drivers/edac/sb_edac.c
-@@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
- /*
- * pci_device_id table for which devices we are looking for
- */
--static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
-+static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
- {0,} /* 0 terminated list. */
- };
-diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
-index b6f47de..c5acf3a 100644
---- a/drivers/edac/x38_edac.c
-+++ b/drivers/edac/x38_edac.c
-@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
- edac_mc_free(mci);
- }
-
--static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
-+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
- {
- PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- X38},
-diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
-index 85661b0..cdd4560 100644
---- a/drivers/firewire/core-card.c
-+++ b/drivers/firewire/core-card.c
-@@ -512,9 +512,9 @@ void fw_card_initialize(struct fw_card *card,
- const struct fw_card_driver *driver,
- struct device *device)
- {
-- static atomic_t index = ATOMIC_INIT(-1);
-+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
-
-- card->index = atomic_inc_return(&index);
-+ card->index = atomic_inc_return_unchecked(&index);
- card->driver = driver;
- card->device = device;
- card->current_tlabel = 0;
-@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
-
- void fw_core_remove_card(struct fw_card *card)
- {
-- struct fw_card_driver dummy_driver = dummy_driver_template;
-+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
-
- card->driver->update_phy_reg(card, 4,
- PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
-diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
-index ee96b91..86be331 100644
---- a/drivers/firewire/core-cdev.c
-+++ b/drivers/firewire/core-cdev.c
-@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
- int ret;
-
- if ((request->channels == 0 && request->bandwidth == 0) ||
-- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
-- request->bandwidth < 0)
-+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
- return -EINVAL;
-
- r = kmalloc(sizeof(*r), GFP_KERNEL);
-diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
-index 1f3dd51..1ad071c 100644
---- a/drivers/firewire/core-device.c
-+++ b/drivers/firewire/core-device.c
-@@ -232,7 +232,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
- struct config_rom_attribute {
- struct device_attribute attr;
- u32 key;
--};
-+} __do_const;
-
- static ssize_t show_immediate(struct device *dev,
- struct device_attribute *dattr, char *buf)
-diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
-index 855ab3f..11f4bbd 100644
---- a/drivers/firewire/core-transaction.c
-+++ b/drivers/firewire/core-transaction.c
-@@ -37,6 +37,7 @@
- #include <linux/timer.h>
- #include <linux/types.h>
- #include <linux/workqueue.h>
-+#include <linux/sched.h>
-
- #include <asm/byteorder.h>
-
-diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
-index b45be57..5fad18b 100644
---- a/drivers/firewire/core.h
-+++ b/drivers/firewire/core.h
-@@ -101,6 +101,7 @@ struct fw_card_driver {
-
- int (*stop_iso)(struct fw_iso_context *ctx);
- };
-+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
-
- void fw_card_initialize(struct fw_card *card,
- const struct fw_card_driver *driver, struct device *device);
-diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
-index 94a58a0..f5eba42 100644
---- a/drivers/firmware/dmi-id.c
-+++ b/drivers/firmware/dmi-id.c
-@@ -16,7 +16,7 @@
- struct dmi_device_attribute{
- struct device_attribute dev_attr;
- int field;
--};
-+} __do_const;
- #define to_dmi_dev_attr(_dev_attr) \
- container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
-
-diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
-index 2861ef4..9e90c69 100644
---- a/drivers/firmware/dmi_scan.c
-+++ b/drivers/firmware/dmi_scan.c
-@@ -490,11 +490,6 @@ void __init dmi_scan_machine(void)
- }
- }
- else {
-- /*
-- * no iounmap() for that ioremap(); it would be a no-op, but
-- * it's so early in setup that sucker gets confused into doing
-- * what it shouldn't if we actually call it.
-- */
- p = dmi_ioremap(0xF0000, 0x10000);
- if (p == NULL)
- goto error;
-@@ -775,7 +770,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
- if (buf == NULL)
- return -1;
-
-- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
-+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
-
- iounmap(buf);
- return 0;
-diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
-index 2a64e69..ac8fe51 100644
---- a/drivers/firmware/efivars.c
-+++ b/drivers/firmware/efivars.c
-@@ -1221,7 +1221,7 @@ efivar_create_sysfs_entry(struct efivars *efivars,
- static int
- create_efivars_bin_attributes(struct efivars *efivars)
- {
-- struct bin_attribute *attr;
-+ bin_attribute_no_const *attr;
- int error;
-
- /* new_var */
-@@ -1413,7 +1413,7 @@ out:
- }
- EXPORT_SYMBOL_GPL(register_efivars);
-
--static struct efivar_operations ops;
-+static efivar_operations_no_const ops __read_only;
-
- /*
- * For now we register the efi subsystem with the firmware subsystem
-diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
-index c4e7c59..c85004e 100644
---- a/drivers/firmware/google/gsmi.c
-+++ b/drivers/firmware/google/gsmi.c
-@@ -718,7 +718,7 @@ static u32 __init hash_oem_table_id(char s[8])
- return local_hash_64(input, 32);
- }
-
--static struct dmi_system_id gsmi_dmi_table[] __initdata = {
-+static const struct dmi_system_id gsmi_dmi_table[] __initconst = {
- {
- .ident = "Google Board",
- .matches = {
-diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
-index 2a90ba6..72379aa 100644
---- a/drivers/firmware/google/memconsole.c
-+++ b/drivers/firmware/google/memconsole.c
-@@ -126,7 +126,7 @@ static bool found_memconsole(void)
- return false;
- }
-
--static struct dmi_system_id memconsole_dmi_table[] __initdata = {
-+static const struct dmi_system_id memconsole_dmi_table[] __initconst = {
- {
- .ident = "Google Board",
- .matches = {
-@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
- if (!found_memconsole())
- return -ENODEV;
-
-- memconsole_bin_attr.size = memconsole_length;
-+ pax_open_kernel();
-+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
-+ pax_close_kernel();
-
- ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
-
-diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
-index 98723cb..10ca85b 100644
---- a/drivers/gpio/gpio-vr41xx.c
-+++ b/drivers/gpio/gpio-vr41xx.c
-@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
- printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
- maskl, pendl, maskh, pendh);
-
-- atomic_inc(&irq_err_count);
-+ atomic_inc_unchecked(&irq_err_count);
-
- return -EINVAL;
- }
-diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
-index 09851ce..4ba7573 100644
---- a/drivers/gpu/drm/drm_crtc.c
-+++ b/drivers/gpu/drm/drm_crtc.c
-@@ -1379,7 +1379,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
- */
- if ((out_resp->count_modes >= mode_count) && mode_count) {
- copied = 0;
-- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
-+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
- list_for_each_entry(mode, &connector->modes, head) {
- drm_crtc_convert_to_umode(&u_mode, mode);
- if (copy_to_user(mode_ptr + copied,
-@@ -1394,8 +1394,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
-
- if ((out_resp->count_props >= props_count) && props_count) {
- copied = 0;
-- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
-- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
-+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
-+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] != 0) {
- if (put_user(connector->property_ids[i],
-@@ -1417,7 +1417,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
-
- if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
- copied = 0;
-- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
-+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] != 0) {
- if (put_user(connector->encoder_ids[i],
-@@ -1576,7 +1576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
- }
-
- for (i = 0; i < crtc_req->count_connectors; i++) {
-- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
-+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
- if (get_user(out_id, &set_connectors_ptr[i])) {
- ret = -EFAULT;
- goto out;
-@@ -1856,7 +1856,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
- fb = obj_to_fb(obj);
-
- num_clips = r->num_clips;
-- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
-+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
-
- if (!num_clips != !clips_ptr) {
- ret = -EINVAL;
-@@ -2282,7 +2282,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
- out_resp->flags = property->flags;
-
- if ((out_resp->count_values >= value_count) && value_count) {
-- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
-+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
- for (i = 0; i < value_count; i++) {
- if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
- ret = -EFAULT;
-@@ -2295,7 +2295,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
- if (property->flags & DRM_MODE_PROP_ENUM) {
- if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
- copied = 0;
-- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
-+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
- list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
-
- if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
-@@ -2303,7 +2303,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
- goto done;
- }
-
-- if (copy_to_user(&enum_ptr[copied].name,
-+ if (copy_to_user(enum_ptr[copied].name,
- &prop_enum->name, DRM_PROP_NAME_LEN)) {
- ret = -EFAULT;
- goto done;
-@@ -2318,7 +2318,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
- if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
- copied = 0;
- blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
-- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
-+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
-
- list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
- if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
-@@ -2379,7 +2379,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
- struct drm_mode_get_blob *out_resp = data;
- struct drm_property_blob *blob;
- int ret = 0;
-- void *blob_ptr;
-+ void __user *blob_ptr;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-@@ -2393,7 +2393,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
- blob = obj_to_blob(obj);
-
- if (out_resp->length == blob->length) {
-- blob_ptr = (void *)(unsigned long)out_resp->data;
-+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
- if (copy_to_user(blob_ptr, blob->data, blob->length)){
- ret = -EFAULT;
- goto done;
-diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
-index 11788f7..cd469eb 100644
---- a/drivers/gpu/drm/drm_crtc_helper.c
-+++ b/drivers/gpu/drm/drm_crtc_helper.c
-@@ -279,7 +279,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
- struct drm_crtc *tmp;
- int crtc_mask = 1;
-
-- WARN(!crtc, "checking null crtc?\n");
-+ BUG_ON(!crtc);
-
- dev = crtc->dev;
-
-diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
-old mode 100644
-new mode 100755
-index 0731d43..63887a6
---- a/drivers/gpu/drm/drm_drv.c
-+++ b/drivers/gpu/drm/drm_drv.c
-@@ -308,7 +308,7 @@ module_exit(drm_core_exit);
- /**
- * Copy and IOCTL return string to user space
- */
--static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
-+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
- {
- int len;
-
-@@ -378,7 +378,7 @@ long drm_ioctl(struct file *filp,
- struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev;
- struct drm_ioctl_desc *ioctl;
-- drm_ioctl_t *func;
-+ drm_ioctl_no_const_t func;
- unsigned int nr = DRM_IOCTL_NR(cmd);
- int retcode = -EINVAL;
- char stack_kdata[128];
-@@ -387,7 +387,7 @@ long drm_ioctl(struct file *filp,
-
- dev = file_priv->minor->dev;
- atomic_inc(&dev->ioctl_count);
-- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
-+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
- ++file_priv->ioctl_count;
-
- DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
-diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
-index fb94355..e1fcec5 100644
---- a/drivers/gpu/drm/drm_encoder_slave.c
-+++ b/drivers/gpu/drm/drm_encoder_slave.c
-@@ -54,16 +54,12 @@ int drm_i2c_encoder_init(struct drm_device *dev,
- struct i2c_adapter *adap,
- const struct i2c_board_info *info)
- {
-- char modalias[sizeof(I2C_MODULE_PREFIX)
-- + I2C_NAME_SIZE];
- struct module *module = NULL;
- struct i2c_client *client;
- struct drm_i2c_encoder_driver *encoder_drv;
- int err = 0;
-
-- snprintf(modalias, sizeof(modalias),
-- "%s%s", I2C_MODULE_PREFIX, info->type);
-- request_module(modalias);
-+ request_module("%s%s", I2C_MODULE_PREFIX, info->type);
-
- client = i2c_new_device(adap, info);
- if (!client) {
-diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
-index 020b103..68ae292 100644
---- a/drivers/gpu/drm/drm_fops.c
-+++ b/drivers/gpu/drm/drm_fops.c
-@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
- }
-
- for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
-- atomic_set(&dev->counts[i], 0);
-+ atomic_set_unchecked(&dev->counts[i], 0);
-
- dev->sigdata.lock = NULL;
-
-@@ -135,11 +135,11 @@ int drm_open(struct inode *inode, struct file *filp)
-
- retcode = drm_open_helper(inode, filp, dev);
- if (!retcode) {
-- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
-- if (!dev->open_count++) {
-+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
-+ if (local_inc_return(&dev->open_count) == 1) {
- retcode = drm_setup(dev);
- if (retcode)
-- dev->open_count--;
-+ local_dec(&dev->open_count);
- }
- }
- if (!retcode) {
-@@ -476,7 +476,7 @@ int drm_release(struct inode *inode, struct file *filp)
-
- mutex_lock(&drm_global_mutex);
-
-- DRM_DEBUG("open_count = %d\n", dev->open_count);
-+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
-
- if (dev->driver->preclose)
- dev->driver->preclose(dev, file_priv);
-@@ -488,7 +488,7 @@ int drm_release(struct inode *inode, struct file *filp)
- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
- task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->device),
-- dev->open_count);
-+ local_read(&dev->open_count));
-
- /* Release any auth tokens that might point to this file_priv,
- (do that under the drm_global_mutex) */
-@@ -574,8 +574,8 @@ int drm_release(struct inode *inode, struct file *filp)
- * End inline drm_release
- */
-
-- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
-- if (!--dev->open_count) {
-+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
-+ if (local_dec_and_test(&dev->open_count)) {
- if (atomic_read(&dev->ioctl_count)) {
- DRM_ERROR("Device busy: %d\n",
- atomic_read(&dev->ioctl_count));
-diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
-index c87dc96..326055d 100644
---- a/drivers/gpu/drm/drm_global.c
-+++ b/drivers/gpu/drm/drm_global.c
-@@ -36,7 +36,7 @@
- struct drm_global_item {
- struct mutex mutex;
- void *object;
-- int refcount;
-+ atomic_t refcount;
- };
-
- static struct drm_global_item glob[DRM_GLOBAL_NUM];
-@@ -49,7 +49,7 @@ void drm_global_init(void)
- struct drm_global_item *item = &glob[i];
- mutex_init(&item->mutex);
- item->object = NULL;
-- item->refcount = 0;
-+ atomic_set(&item->refcount, 0);
- }
- }
-
-@@ -59,7 +59,7 @@ void drm_global_release(void)
- for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
- struct drm_global_item *item = &glob[i];
- BUG_ON(item->object != NULL);
-- BUG_ON(item->refcount != 0);
-+ BUG_ON(atomic_read(&item->refcount) != 0);
- }
- }
-
-@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
- void *object;
-
- mutex_lock(&item->mutex);
-- if (item->refcount == 0) {
-+ if (atomic_read(&item->refcount) == 0) {
- item->object = kzalloc(ref->size, GFP_KERNEL);
- if (unlikely(item->object == NULL)) {
- ret = -ENOMEM;
-@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
- goto out_err;
-
- }
-- ++item->refcount;
-+ atomic_inc(&item->refcount);
- ref->object = item->object;
- object = item->object;
- mutex_unlock(&item->mutex);
-@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
- struct drm_global_item *item = &glob[ref->global_type];
-
- mutex_lock(&item->mutex);
-- BUG_ON(item->refcount == 0);
-+ BUG_ON(atomic_read(&item->refcount) == 0);
- BUG_ON(ref->object != item->object);
-- if (--item->refcount == 0) {
-+ if (atomic_dec_and_test(&item->refcount)) {
- ref->release(ref);
- item->object = NULL;
- }
-diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
-index ab1162d..42587b2 100644
---- a/drivers/gpu/drm/drm_info.c
-+++ b/drivers/gpu/drm/drm_info.c
-@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
- struct drm_local_map *map;
- struct drm_map_list *r_list;
-
-- /* Hardcoded from _DRM_FRAME_BUFFER,
-- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
-- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
-- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
-+ static const char * const types[] = {
-+ [_DRM_FRAME_BUFFER] = "FB",
-+ [_DRM_REGISTERS] = "REG",
-+ [_DRM_SHM] = "SHM",
-+ [_DRM_AGP] = "AGP",
-+ [_DRM_SCATTER_GATHER] = "SG",
-+ [_DRM_CONSISTENT] = "PCI",
-+ [_DRM_GEM] = "GEM" };
- const char *type;
- int i;
-
-@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
- map = r_list->map;
- if (!map)
- continue;
-- if (map->type < 0 || map->type > 5)
-+ if (map->type >= ARRAY_SIZE(types))
- type = "??";
- else
- type = types[map->type];
-@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
- vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
- vma->vm_flags & VM_LOCKED ? 'l' : '-',
- vma->vm_flags & VM_IO ? 'i' : '-',
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ 0);
-+#else
- vma->vm_pgoff);
-+#endif
-
- #if defined(__i386__)
- pgprot = pgprot_val(vma->vm_page_prot);
-diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
-index ddd70db..d1db604 100644
---- a/drivers/gpu/drm/drm_ioc32.c
-+++ b/drivers/gpu/drm/drm_ioc32.c
-@@ -456,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
- request = compat_alloc_user_space(nbytes);
- if (!access_ok(VERIFY_WRITE, request, nbytes))
- return -EFAULT;
-- list = (struct drm_buf_desc *) (request + 1);
-+ list = (struct drm_buf_desc __user *) (request + 1);
-
- if (__put_user(count, &request->count)
- || __put_user(list, &request->list))
-@@ -517,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
- request = compat_alloc_user_space(nbytes);
- if (!access_ok(VERIFY_WRITE, request, nbytes))
- return -EFAULT;
-- list = (struct drm_buf_pub *) (request + 1);
-+ list = (struct drm_buf_pub __user *) (request + 1);
-
- if (__put_user(count, &request->count)
- || __put_user(list, &request->list))
-@@ -1015,7 +1015,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
- return 0;
- }
-
--drm_ioctl_compat_t *drm_compat_ioctls[] = {
-+drm_ioctl_compat_t drm_compat_ioctls[] = {
- [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
- [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
- [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
-@@ -1061,7 +1061,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
- long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
- {
- unsigned int nr = DRM_IOCTL_NR(cmd);
-- drm_ioctl_compat_t *fn;
- int ret;
-
- /* Assume that ioctls without an explicit compat routine will just
-@@ -1071,10 +1070,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
- if (nr >= ARRAY_SIZE(drm_compat_ioctls))
- return drm_ioctl(filp, cmd, arg);
-
-- fn = drm_compat_ioctls[nr];
--
-- if (fn != NULL)
-- ret = (*fn) (filp, cmd, arg);
-+ if (drm_compat_ioctls[nr] != NULL)
-+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
- else
- ret = drm_ioctl(filp, cmd, arg);
-
-diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
-index 904d7e9..ab88581 100644
---- a/drivers/gpu/drm/drm_ioctl.c
-+++ b/drivers/gpu/drm/drm_ioctl.c
-@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
- stats->data[i].value =
- (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
- else
-- stats->data[i].value = atomic_read(&dev->counts[i]);
-+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
- stats->data[i].type = dev->types[i];
- }
-
-diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
-index 632ae24..244cf4a 100644
---- a/drivers/gpu/drm/drm_lock.c
-+++ b/drivers/gpu/drm/drm_lock.c
-@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
- if (drm_lock_take(&master->lock, lock->context)) {
- master->lock.file_priv = file_priv;
- master->lock.lock_time = jiffies;
-- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
-+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
- break; /* Got lock */
- }
-
-@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
- return -EINVAL;
- }
-
-- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
-+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
-
- if (drm_lock_free(&master->lock, lock->context)) {
- /* FIXME: Should really bail out here. */
-diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
-index 0f9ef9b..48bd6956 100644
---- a/drivers/gpu/drm/drm_sysfs.c
-+++ b/drivers/gpu/drm/drm_sysfs.c
-@@ -495,7 +495,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
- int drm_sysfs_device_add(struct drm_minor *minor)
- {
- int err;
-- char *minor_str;
-+ const char *minor_str;
-
- minor->kdev.parent = minor->dev->dev;
-
-diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
-index 8f371e8..9f85d52 100644
---- a/drivers/gpu/drm/i810/i810_dma.c
-+++ b/drivers/gpu/drm/i810/i810_dma.c
-@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
- dma->buflist[vertex->idx],
- vertex->discard, vertex->used);
-
-- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
-- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
-+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
-+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
- sarea_priv->last_enqueue = dev_priv->counter - 1;
- sarea_priv->last_dispatch = (int)hw_status[5];
-
-@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
- i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
- mc->last_render);
-
-- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
-- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
-+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
-+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
- sarea_priv->last_enqueue = dev_priv->counter - 1;
- sarea_priv->last_dispatch = (int)hw_status[5];
-
-diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
-index c9339f4..f5e1b9d 100644
---- a/drivers/gpu/drm/i810/i810_drv.h
-+++ b/drivers/gpu/drm/i810/i810_drv.h
-@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
- int page_flipping;
-
- wait_queue_head_t irq_queue;
-- atomic_t irq_received;
-- atomic_t irq_emitted;
-+ atomic_unchecked_t irq_received;
-+ atomic_unchecked_t irq_emitted;
-
- int front_offset;
- } drm_i810_private_t;
-diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
-index 9b4e5c6..d7ec240 100644
---- a/drivers/gpu/drm/i915/i915_debugfs.c
-+++ b/drivers/gpu/drm/i915/i915_debugfs.c
-@@ -500,7 +500,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
- I915_READ(GTIMR));
- }
- seq_printf(m, "Interrupts received: %d\n",
-- atomic_read(&dev_priv->irq_received));
-+ atomic_read_unchecked(&dev_priv->irq_received));
- for (i = 0; i < I915_NUM_RINGS; i++) {
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
- seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
-@@ -1234,7 +1234,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
- return ret;
-
- if (opregion->header)
-- seq_write(m, opregion->header, OPREGION_SIZE);
-+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
-
- mutex_unlock(&dev->struct_mutex);
-
-diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index ca67338..0003ba7 100644
---- a/drivers/gpu/drm/i915/i915_dma.c
-+++ b/drivers/gpu/drm/i915/i915_dma.c
-@@ -1172,7 +1172,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
- bool can_switch;
-
- spin_lock(&dev->count_lock);
-- can_switch = (dev->open_count == 0);
-+ can_switch = (local_read(&dev->open_count) == 0);
- spin_unlock(&dev->count_lock);
- return can_switch;
- }
-diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index 61274bf..72cb4a2 100644
---- a/drivers/gpu/drm/i915/i915_drv.h
-+++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -321,7 +321,7 @@ typedef struct drm_i915_private {
- int current_page;
- int page_flipping;
-
-- atomic_t irq_received;
-+ atomic_unchecked_t irq_received;
-
- /* protects the irq masks */
- spinlock_t irq_lock;
-@@ -898,7 +898,7 @@ struct drm_i915_gem_object {
- * will be page flipped away on the next vblank. When it
- * reaches 0, dev_priv->pending_flip_queue will be woken up.
- */
-- atomic_t pending_flip;
-+ atomic_unchecked_t pending_flip;
- };
-
- #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
-@@ -1275,7 +1275,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
- extern void intel_teardown_gmbus(struct drm_device *dev);
- extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
- extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
--extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
-+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
- {
- return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
- }
-diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
-index a0b69ae0..98ea0f3 100644
---- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
-+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
-@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
- i915_gem_clflush_object(obj);
-
- if (obj->base.pending_write_domain)
-- cd->flips |= atomic_read(&obj->pending_flip);
-+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
-
- /* The actual obj->write_domain will be updated with
- * pending_write_domain after we emit the accumulated flush for all
-@@ -904,9 +904,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
-
- static int
- validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
-- int count)
-+ unsigned int count)
- {
-- int i;
-+ unsigned int i;
- int relocs_total = 0;
- int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
-
-@@ -1373,7 +1373,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
- return -ENOMEM;
- }
- ret = copy_from_user(exec2_list,
-- (struct drm_i915_relocation_entry __user *)
-+ (struct drm_i915_gem_exec_object2 __user *)
- (uintptr_t) args->buffers_ptr,
- sizeof(*exec2_list) * args->buffer_count);
- if (ret != 0) {
-diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
-index 13b0289..5c254c1 100644
---- a/drivers/gpu/drm/i915/i915_ioc32.c
-+++ b/drivers/gpu/drm/i915/i915_ioc32.c
-@@ -62,7 +62,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
- || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
- || __put_user(batchbuffer32.num_cliprects,
- &batchbuffer->num_cliprects)
-- || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
-+ || __put_user((struct drm_clip_rect __user *)(unsigned long)batchbuffer32.cliprects,
- &batchbuffer->cliprects))
- return -EFAULT;
-
-@@ -91,13 +91,13 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
-
- cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
- if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
-- || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
-+ || __put_user((char __user *)(unsigned long)cmdbuffer32.buf,
- &cmdbuffer->buf)
- || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
- || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
- || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
- || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
-- || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
-+ || __put_user((struct drm_clip_rect __user *)(unsigned long)cmdbuffer32.cliprects,
- &cmdbuffer->cliprects))
- return -EFAULT;
-
-@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
- (unsigned long)request);
- }
-
--drm_ioctl_compat_t *i915_compat_ioctls[] = {
-+drm_ioctl_compat_t i915_compat_ioctls[] = {
- [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
- [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
- [DRM_I915_GETPARAM] = compat_i915_getparam,
-@@ -201,17 +201,13 @@ drm_ioctl_compat_t *i915_compat_ioctls[] = {
- long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
- {
- unsigned int nr = DRM_IOCTL_NR(cmd);
-- drm_ioctl_compat_t *fn = NULL;
- int ret;
-
- if (nr < DRM_COMMAND_BASE)
- return drm_compat_ioctl(filp, cmd, arg);
-
-- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
-- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
--
-- if (fn != NULL)
-- ret = (*fn) (filp, cmd, arg);
-+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls) && i915_compat_ioctls[nr - DRM_COMMAND_BASE])
-+ ret = (*i915_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
- else
- ret = drm_ioctl(filp, cmd, arg);
-
-diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
-index 93e74fb..4a1182d 100644
---- a/drivers/gpu/drm/i915/i915_irq.c
-+++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -496,7 +496,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
- u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
- struct drm_i915_master_private *master_priv;
-
-- atomic_inc(&dev_priv->irq_received);
-+ atomic_inc_unchecked(&dev_priv->irq_received);
-
- /* disable master interrupt before clearing iir */
- de_ier = I915_READ(DEIER);
-@@ -579,7 +579,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
- struct drm_i915_master_private *master_priv;
- u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
-
-- atomic_inc(&dev_priv->irq_received);
-+ atomic_inc_unchecked(&dev_priv->irq_received);
-
- if (IS_GEN6(dev))
- bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
-@@ -1229,7 +1229,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
- int ret = IRQ_NONE, pipe;
- bool blc_event = false;
-
-- atomic_inc(&dev_priv->irq_received);
-+ atomic_inc_unchecked(&dev_priv->irq_received);
-
- iir = I915_READ(IIR);
-
-@@ -1748,7 +1748,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
- {
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
-- atomic_set(&dev_priv->irq_received, 0);
-+ atomic_set_unchecked(&dev_priv->irq_received, 0);
-
- INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
- INIT_WORK(&dev_priv->error_work, i915_error_work_func);
-@@ -1936,7 +1936,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int pipe;
-
-- atomic_set(&dev_priv->irq_received, 0);
-+ atomic_set_unchecked(&dev_priv->irq_received, 0);
-
- INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
- INIT_WORK(&dev_priv->error_work, i915_error_work_func);
-diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 27999d9..28d110e 100644
---- a/drivers/gpu/drm/i915/intel_display.c
-+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -2215,7 +2215,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
-
- wait_event(dev_priv->pending_flip_queue,
- atomic_read(&dev_priv->mm.wedged) ||
-- atomic_read(&obj->pending_flip) == 0);
-+ atomic_read_unchecked(&obj->pending_flip) == 0);
-
- /* Big Hammer, we also need to ensure that any pending
- * MI_WAIT_FOR_EVENT inside a user batch buffer on the
-@@ -6991,8 +6991,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
-
- obj = work->old_fb_obj;
-
-- atomic_clear_mask(1 << intel_crtc->plane,
-- &obj->pending_flip.counter);
-+ atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
-
- wake_up(&dev_priv->pending_flip_queue);
- schedule_work(&work->work);
-@@ -7201,7 +7200,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
- OUT_RING(fb->pitch | obj->tiling_mode);
- OUT_RING(obj->gtt_offset);
-
-- pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
-+ /* Contrary to the suggestions in the documentation,
-+ * "Enable Panel Fitter" does not seem to be required when page
-+ * flipping with a non-native mode, and worse causes a normal
-+ * modeset to fail.
-+ * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
-+ */
-+ pf = 0;
- pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- OUT_RING(pf | pipesrc);
-
-@@ -7347,7 +7352,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
- /* Block clients from rendering to the new back buffer until
- * the flip occurs and the object is no longer visible.
- */
-- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
-+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
-
- ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
- if (ret)
-@@ -7361,7 +7366,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
- return 0;
-
- cleanup_pending:
-- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
-+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
- crtc->fb = old_fb;
- drm_gem_object_unreference(&work->old_fb_obj->base);
- drm_gem_object_unreference(&obj->base);
-@@ -7496,11 +7501,15 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
- if (HAS_PCH_SPLIT(dev)) {
- if (pipe == 2 && IS_IVYBRIDGE(dev))
- intel_crtc->no_pll = true;
-- intel_helper_funcs.prepare = ironlake_crtc_prepare;
-- intel_helper_funcs.commit = ironlake_crtc_commit;
-+ pax_open_kernel();
-+ *(void **)&intel_helper_funcs.prepare = ironlake_crtc_prepare;
-+ *(void **)&intel_helper_funcs.commit = ironlake_crtc_commit;
-+ pax_close_kernel();
- } else {
-- intel_helper_funcs.prepare = i9xx_crtc_prepare;
-- intel_helper_funcs.commit = i9xx_crtc_commit;
-+ pax_open_kernel();
-+ *(void **)&intel_helper_funcs.prepare = i9xx_crtc_prepare;
-+ *(void **)&intel_helper_funcs.commit = i9xx_crtc_commit;
-+ pax_close_kernel();
- }
-
- drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
-@@ -8876,7 +8885,7 @@ struct intel_quirk {
- int subsystem_vendor;
- int subsystem_device;
- void (*hook)(struct drm_device *dev);
--};
-+} __do_const;
-
- /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
- struct intel_dmi_quirk {
-diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
-index 54558a0..2d97005 100644
---- a/drivers/gpu/drm/mga/mga_drv.h
-+++ b/drivers/gpu/drm/mga/mga_drv.h
-@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
- u32 clear_cmd;
- u32 maccess;
-
-- atomic_t vbl_received; /**< Number of vblanks received. */
-+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
- wait_queue_head_t fence_queue;
-- atomic_t last_fence_retired;
-+ atomic_unchecked_t last_fence_retired;
- u32 next_fence_to_post;
-
- unsigned int fb_cpp;
-diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
-index c1f877b..d5fc7ad 100644
---- a/drivers/gpu/drm/mga/mga_ioc32.c
-+++ b/drivers/gpu/drm/mga/mga_ioc32.c
-@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
- return 0;
- }
-
--drm_ioctl_compat_t *mga_compat_ioctls[] = {
-+drm_ioctl_compat_t mga_compat_ioctls[] = {
- [DRM_MGA_INIT] = compat_mga_init,
- [DRM_MGA_GETPARAM] = compat_mga_getparam,
- [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
-@@ -208,17 +208,13 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
- long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
- {
- unsigned int nr = DRM_IOCTL_NR(cmd);
-- drm_ioctl_compat_t *fn = NULL;
- int ret;
-
- if (nr < DRM_COMMAND_BASE)
- return drm_compat_ioctl(filp, cmd, arg);
-
-- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
-- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
--
-- if (fn != NULL)
-- ret = (*fn) (filp, cmd, arg);
-+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls) && mga_compat_ioctls[nr - DRM_COMMAND_BASE])
-+ ret = (*mga_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
- else
- ret = drm_ioctl(filp, cmd, arg);
-
-diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
-index 2581202..f230a8d9 100644
---- a/drivers/gpu/drm/mga/mga_irq.c
-+++ b/drivers/gpu/drm/mga/mga_irq.c
-@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
- if (crtc != 0)
- return 0;
-
-- return atomic_read(&dev_priv->vbl_received);
-+ return atomic_read_unchecked(&dev_priv->vbl_received);
- }
-
-
-@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
- /* VBLANK interrupt */
- if (status & MGA_VLINEPEN) {
- MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
-- atomic_inc(&dev_priv->vbl_received);
-+ atomic_inc_unchecked(&dev_priv->vbl_received);
- drm_handle_vblank(dev, 0);
- handled = 1;
- }
-@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
- if ((prim_start & ~0x03) != (prim_end & ~0x03))
- MGA_WRITE(MGA_PRIMEND, prim_end);
-
-- atomic_inc(&dev_priv->last_fence_retired);
-+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
- DRM_WAKEUP(&dev_priv->fence_queue);
- handled = 1;
- }
-@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
- * using fences.
- */
- DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
-- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
-+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
- - *sequence) <= (1 << 23)));
-
- *sequence = cur_fence;
-diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
-index 5fc201b..20b6980 100644
---- a/drivers/gpu/drm/nouveau/nouveau_bios.c
-+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
-@@ -5474,7 +5474,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
- struct bit_table {
- const char id;
- int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
--};
-+} __no_const;
-
- #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
-
-diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
-index 4c0be3a..8f2cbb5 100644
---- a/drivers/gpu/drm/nouveau/nouveau_drv.h
-+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
-@@ -238,7 +238,7 @@ struct nouveau_channel {
- struct list_head pending;
- uint32_t sequence;
- uint32_t sequence_ack;
-- atomic_t last_sequence_irq;
-+ atomic_unchecked_t last_sequence_irq;
- struct nouveau_vma vma;
- } fence;
-
-@@ -319,7 +319,7 @@ struct nouveau_exec_engine {
- u32 handle, u16 class);
- void (*set_tile_region)(struct drm_device *dev, int i);
- void (*tlb_flush)(struct drm_device *, int engine);
--};
-+} __no_const;
-
- struct nouveau_instmem_engine {
- void *priv;
-@@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
- struct nouveau_mc_engine {
- int (*init)(struct drm_device *dev);
- void (*takedown)(struct drm_device *dev);
--};
-+} __no_const;
-
- struct nouveau_timer_engine {
- int (*init)(struct drm_device *dev);
- void (*takedown)(struct drm_device *dev);
- uint64_t (*read)(struct drm_device *dev);
--};
-+} __no_const;
-
- struct nouveau_fb_engine {
- int num_tiles;
-@@ -706,7 +706,7 @@ struct drm_nouveau_private {
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_global_ref bo_global_ref;
- struct ttm_bo_device bdev;
-- atomic_t validate_sequence;
-+ atomic_unchecked_t validate_sequence;
- } ttm;
-
- struct {
-diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
-index 2f6daae..c9d7b9e 100644
---- a/drivers/gpu/drm/nouveau/nouveau_fence.c
-+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
-@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
- if (USE_REFCNT(dev))
- sequence = nvchan_rd32(chan, 0x48);
- else
-- sequence = atomic_read(&chan->fence.last_sequence_irq);
-+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
-
- if (chan->fence.sequence_ack == sequence)
- goto out;
-@@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
- return ret;
- }
-
-- atomic_set(&chan->fence.last_sequence_irq, 0);
-+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
- return 0;
- }
-
-diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
-index bd0b1fc..c082986 100644
---- a/drivers/gpu/drm/nouveau/nouveau_gem.c
-+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
-@@ -315,7 +315,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
- int trycnt = 0;
- int ret, i;
-
-- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
-+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
- retry:
- if (++trycnt > 100000) {
- NV_ERROR(dev, "%s failed and gave up.\n", __func__);
-diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
-index 475ba81..a6c530c 100644
---- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
-+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
-@@ -51,7 +51,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
- {
- unsigned int nr = DRM_IOCTL_NR(cmd);
-- drm_ioctl_compat_t *fn = NULL;
-+ drm_ioctl_compat_t fn = NULL;
- int ret;
-
- if (nr < DRM_COMMAND_BASE)
-diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
-index 36bec48..3a128f3 100644
---- a/drivers/gpu/drm/nouveau/nouveau_mem.c
-+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
-@@ -812,11 +812,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
- }
-
- const struct ttm_mem_type_manager_func nouveau_vram_manager = {
-- nouveau_vram_manager_init,
-- nouveau_vram_manager_fini,
-- nouveau_vram_manager_new,
-- nouveau_vram_manager_del,
-- nouveau_vram_manager_debug
-+ .init = nouveau_vram_manager_init,
-+ .takedown = nouveau_vram_manager_fini,
-+ .get_node = nouveau_vram_manager_new,
-+ .put_node = nouveau_vram_manager_del,
-+ .debug = nouveau_vram_manager_debug
- };
-
- static int
-@@ -869,9 +869,9 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
- }
-
- const struct ttm_mem_type_manager_func nouveau_gart_manager = {
-- nouveau_gart_manager_init,
-- nouveau_gart_manager_fini,
-- nouveau_gart_manager_new,
-- nouveau_gart_manager_del,
-- nouveau_gart_manager_debug
-+ .init = nouveau_gart_manager_init,
-+ .takedown = nouveau_gart_manager_fini,
-+ .get_node = nouveau_gart_manager_new,
-+ .put_node = nouveau_gart_manager_del,
-+ .debug = nouveau_gart_manager_debug
- };
-diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
-index 01adcfb..c6726fe 100644
---- a/drivers/gpu/drm/nouveau/nouveau_state.c
-+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
-@@ -544,7 +544,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
- bool can_switch;
-
- spin_lock(&dev->count_lock);
-- can_switch = (dev->open_count == 0);
-+ can_switch = (local_read(&dev->open_count) == 0);
- spin_unlock(&dev->count_lock);
- return can_switch;
- }
-diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
-index dbdea8e..cd6eeeb 100644
---- a/drivers/gpu/drm/nouveau/nv04_graph.c
-+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
-@@ -554,7 +554,7 @@ static int
- nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
- {
-- atomic_set(&chan->fence.last_sequence_irq, data);
-+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
- return 0;
- }
-
-diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
-index bcac90b..53bfc76 100644
---- a/drivers/gpu/drm/r128/r128_cce.c
-+++ b/drivers/gpu/drm/r128/r128_cce.c
-@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
-
- /* GH: Simple idle check.
- */
-- atomic_set(&dev_priv->idle_count, 0);
-+ atomic_set_unchecked(&dev_priv->idle_count, 0);
-
- /* We don't support anything other than bus-mastering ring mode,
- * but the ring can be in either AGP or PCI space for the ring
-diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
-index 930c71b..499aded 100644
---- a/drivers/gpu/drm/r128/r128_drv.h
-+++ b/drivers/gpu/drm/r128/r128_drv.h
-@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
- int is_pci;
- unsigned long cce_buffers_offset;
-
-- atomic_t idle_count;
-+ atomic_unchecked_t idle_count;
-
- int page_flipping;
- int current_page;
- u32 crtc_offset;
- u32 crtc_offset_cntl;
-
-- atomic_t vbl_received;
-+ atomic_unchecked_t vbl_received;
-
- u32 color_fmt;
- unsigned int front_offset;
-diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
-index 51c99fc..bae6026 100644
---- a/drivers/gpu/drm/r128/r128_ioc32.c
-+++ b/drivers/gpu/drm/r128/r128_ioc32.c
-@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
- return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
- }
-
--drm_ioctl_compat_t *r128_compat_ioctls[] = {
-+drm_ioctl_compat_t r128_compat_ioctls[] = {
- [DRM_R128_INIT] = compat_r128_init,
- [DRM_R128_DEPTH] = compat_r128_depth,
- [DRM_R128_STIPPLE] = compat_r128_stipple,
-@@ -197,17 +197,13 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
- long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
- {
- unsigned int nr = DRM_IOCTL_NR(cmd);
-- drm_ioctl_compat_t *fn = NULL;
- int ret;
-
- if (nr < DRM_COMMAND_BASE)
- return drm_compat_ioctl(filp, cmd, arg);
-
-- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
-- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
--
-- if (fn != NULL)
-- ret = (*fn) (filp, cmd, arg);
-+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls) && r128_compat_ioctls[nr - DRM_COMMAND_BASE])
-+ ret = (*r128_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
- else
- ret = drm_ioctl(filp, cmd, arg);
-
-diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
-index 429d5a0..7e899ed 100644
---- a/drivers/gpu/drm/r128/r128_irq.c
-+++ b/drivers/gpu/drm/r128/r128_irq.c
-@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
- if (crtc != 0)
- return 0;
-
-- return atomic_read(&dev_priv->vbl_received);
-+ return atomic_read_unchecked(&dev_priv->vbl_received);
- }
-
- irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
-@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
- /* VBLANK interrupt */
- if (status & R128_CRTC_VBLANK_INT) {
- R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
-- atomic_inc(&dev_priv->vbl_received);
-+ atomic_inc_unchecked(&dev_priv->vbl_received);
- drm_handle_vblank(dev, 0);
- return IRQ_HANDLED;
- }
-diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
-index a9e33ce6..09edd4b 100644
---- a/drivers/gpu/drm/r128/r128_state.c
-+++ b/drivers/gpu/drm/r128/r128_state.c
-@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
-
- static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
- {
-- if (atomic_read(&dev_priv->idle_count) == 0)
-+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
- r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
- else
-- atomic_set(&dev_priv->idle_count, 0);
-+ atomic_set_unchecked(&dev_priv->idle_count, 0);
- }
-
- #endif
-diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
-index 5d78973..2c1fe27 100644
---- a/drivers/gpu/drm/radeon/evergreen.c
-+++ b/drivers/gpu/drm/radeon/evergreen.c
-@@ -3098,7 +3098,9 @@ static int evergreen_startup(struct radeon_device *rdev)
- r = evergreen_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
-- rdev->asic->copy = NULL;
-+ pax_open_kernel();
-+ *(void **)&rdev->asic->copy = NULL;
-+ pax_close_kernel();
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
-
-diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
-index 5a82b6b..9e69c73 100644
---- a/drivers/gpu/drm/radeon/mkregtable.c
-+++ b/drivers/gpu/drm/radeon/mkregtable.c
-@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
- regex_t mask_rex;
- regmatch_t match[4];
- char buf[1024];
-- size_t end;
-+ long end;
- int len;
- int done = 0;
- int r;
- unsigned o;
- struct offset *offset;
- char last_reg_s[10];
-- int last_reg;
-+ unsigned long last_reg;
-
- if (regcomp
- (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
-diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
-index 77e6fb1..43e5aae 100644
---- a/drivers/gpu/drm/radeon/ni.c
-+++ b/drivers/gpu/drm/radeon/ni.c
-@@ -1380,7 +1380,9 @@ static int cayman_startup(struct radeon_device *rdev)
- r = evergreen_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
-- rdev->asic->copy = NULL;
-+ pax_open_kernel();
-+ *(void **)&rdev->asic->copy = NULL;
-+ pax_close_kernel();
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
-
-diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
-index dfd1267..e7251ec 100644
---- a/drivers/gpu/drm/radeon/r100.c
-+++ b/drivers/gpu/drm/radeon/r100.c
-@@ -592,8 +592,10 @@ int r100_pci_gart_init(struct radeon_device *rdev)
- if (r)
- return r;
- rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
-- rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
-- rdev->asic->gart_set_page = &r100_pci_gart_set_page;
-+ pax_open_kernel();
-+ *(void **)&rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
-+ *(void **)&rdev->asic->gart_set_page = &r100_pci_gart_set_page;
-+ pax_close_kernel();
- return radeon_gart_table_ram_alloc(rdev);
- }
-
-diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
-index 441570b..8896094 100644
---- a/drivers/gpu/drm/radeon/r300.c
-+++ b/drivers/gpu/drm/radeon/r300.c
-@@ -105,8 +105,10 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
- if (r)
- DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
- rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
-- rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
-- rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
-+ pax_open_kernel();
-+ *(void **)&rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
-+ *(void **)&rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
-+ pax_close_kernel();
- return radeon_gart_table_vram_alloc(rdev);
- }
-
-diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
-index e5299a0..eb205af 100644
---- a/drivers/gpu/drm/radeon/r600.c
-+++ b/drivers/gpu/drm/radeon/r600.c
-@@ -2442,7 +2442,9 @@ int r600_startup(struct radeon_device *rdev)
- r = r600_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
-- rdev->asic->copy = NULL;
-+ pax_open_kernel();
-+ *(void **)&rdev->asic->copy = NULL;
-+ pax_close_kernel();
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
-
-diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
-index cb1acff..8861bc5 100644
---- a/drivers/gpu/drm/radeon/r600_cs.c
-+++ b/drivers/gpu/drm/radeon/r600_cs.c
-@@ -1304,6 +1304,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
- h0 = G_038004_TEX_HEIGHT(word1) + 1;
- d0 = G_038004_TEX_DEPTH(word1);
- nfaces = 1;
-+ array = 0;
- switch (G_038000_DIM(word0)) {
- case V_038000_SQ_TEX_DIM_1D:
- case V_038000_SQ_TEX_DIM_2D:
-diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
-index 28e69e9..c4a82cc 100644
---- a/drivers/gpu/drm/radeon/radeon.h
-+++ b/drivers/gpu/drm/radeon/radeon.h
-@@ -177,7 +177,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
- */
- struct radeon_fence_driver {
- uint32_t scratch_reg;
-- atomic_t seq;
-+ atomic_unchecked_t seq;
- uint32_t last_seq;
- unsigned long last_jiffies;
- unsigned long last_timeout;
-diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
-index a2e1eae..8e4a0ec 100644
---- a/drivers/gpu/drm/radeon/radeon_asic.c
-+++ b/drivers/gpu/drm/radeon/radeon_asic.c
-@@ -114,13 +114,17 @@ void radeon_agp_disable(struct radeon_device *rdev)
- rdev->family == CHIP_R423) {
- DRM_INFO("Forcing AGP to PCIE mode\n");
- rdev->flags |= RADEON_IS_PCIE;
-- rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
-- rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
-+ pax_open_kernel();
-+ *(void **)&rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
-+ *(void **)&rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
-+ pax_close_kernel();
- } else {
- DRM_INFO("Forcing AGP to PCI mode\n");
- rdev->flags |= RADEON_IS_PCI;
-- rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
-- rdev->asic->gart_set_page = &r100_pci_gart_set_page;
-+ pax_open_kernel();
-+ *(void **)&rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
-+ *(void **)&rdev->asic->gart_set_page = &r100_pci_gart_set_page;
-+ pax_close_kernel();
- }
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
- }
-@@ -974,10 +978,12 @@ int radeon_asic_init(struct radeon_device *rdev)
- rdev->asic = &r420_asic;
- /* handle macs */
- if (rdev->bios == NULL) {
-- rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock;
-- rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock;
-- rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock;
-- rdev->asic->set_memory_clock = NULL;
-+ pax_open_kernel();
-+ *(void **)&rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock;
-+ *(void **)&rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock;
-+ *(void **)&rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock;
-+ *(void **)&rdev->asic->set_memory_clock = NULL;
-+ pax_close_kernel();
- }
- break;
- case CHIP_RS400:
-@@ -1057,8 +1063,10 @@ int radeon_asic_init(struct radeon_device *rdev)
- }
-
- if (rdev->flags & RADEON_IS_IGP) {
-- rdev->asic->get_memory_clock = NULL;
-- rdev->asic->set_memory_clock = NULL;
-+ pax_open_kernel();
-+ *(void **)&rdev->asic->get_memory_clock = NULL;
-+ *(void **)&rdev->asic->set_memory_clock = NULL;
-+ pax_close_kernel();
- }
-
- return 0;
-diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
-index 8cde84b..0d3f11f 100644
---- a/drivers/gpu/drm/radeon/radeon_device.c
-+++ b/drivers/gpu/drm/radeon/radeon_device.c
-@@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
- bool can_switch;
-
- spin_lock(&dev->count_lock);
-- can_switch = (dev->open_count == 0);
-+ can_switch = (local_read(&dev->open_count) == 0);
- spin_unlock(&dev->count_lock);
- return can_switch;
- }
-diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
-index a1b59ca..86f2d44 100644
---- a/drivers/gpu/drm/radeon/radeon_drv.h
-+++ b/drivers/gpu/drm/radeon/radeon_drv.h
-@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
-
- /* SW interrupt */
- wait_queue_head_t swi_queue;
-- atomic_t swi_emitted;
-+ atomic_unchecked_t swi_emitted;
- int vblank_crtc;
- uint32_t irq_enable_reg;
- uint32_t r500_disp_irq_reg;
-diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
-index 76ec0e9..6feb1a3 100644
---- a/drivers/gpu/drm/radeon/radeon_fence.c
-+++ b/drivers/gpu/drm/radeon/radeon_fence.c
-@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
- return 0;
- }
-- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
-+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
- if (!rdev->cp.ready)
- /* FIXME: cp is not running assume everythings is done right
- * away
-@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
- return r;
- }
- radeon_fence_write(rdev, 0);
-- atomic_set(&rdev->fence_drv.seq, 0);
-+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
- INIT_LIST_HEAD(&rdev->fence_drv.created);
- INIT_LIST_HEAD(&rdev->fence_drv.emited);
- INIT_LIST_HEAD(&rdev->fence_drv.signaled);
-diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
-index 48b7cea..17695c1 100644
---- a/drivers/gpu/drm/radeon/radeon_ioc32.c
-+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
-@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.param, &request->param)
-- || __put_user((void __user *)(unsigned long)req32.value,
-+ || __put_user((unsigned long)req32.value,
- &request->value))
- return -EFAULT;
-
-@@ -369,7 +369,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
- #define compat_radeon_cp_setparam NULL
- #endif /* X86_64 || IA64 */
-
--drm_ioctl_compat_t *radeon_compat_ioctls[] = {
-+drm_ioctl_compat_t radeon_compat_ioctls[] = {
- [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
- [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
- [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
-@@ -394,17 +394,13 @@ drm_ioctl_compat_t *radeon_compat_ioctls[] = {
- long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
- {
- unsigned int nr = DRM_IOCTL_NR(cmd);
-- drm_ioctl_compat_t *fn = NULL;
- int ret;
-
- if (nr < DRM_COMMAND_BASE)
- return drm_compat_ioctl(filp, cmd, arg);
-
-- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
-- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
--
-- if (fn != NULL)
-- ret = (*fn) (filp, cmd, arg);
-+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls) && radeon_compat_ioctls[nr - DRM_COMMAND_BASE])
-+ ret = (*radeon_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
- else
- ret = drm_ioctl(filp, cmd, arg);
-
-diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
-index 00da384..32f972d 100644
---- a/drivers/gpu/drm/radeon/radeon_irq.c
-+++ b/drivers/gpu/drm/radeon/radeon_irq.c
-@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
- unsigned int ret;
- RING_LOCALS;
-
-- atomic_inc(&dev_priv->swi_emitted);
-- ret = atomic_read(&dev_priv->swi_emitted);
-+ atomic_inc_unchecked(&dev_priv->swi_emitted);
-+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
-
- BEGIN_RING(4);
- OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
-@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
- drm_radeon_private_t *dev_priv =
- (drm_radeon_private_t *) dev->dev_private;
-
-- atomic_set(&dev_priv->swi_emitted, 0);
-+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
- DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
-
- dev->max_vblank_count = 0x001fffff;
-diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
-index 65be5e8..578b3c2 100644
---- a/drivers/gpu/drm/radeon/radeon_ring.c
-+++ b/drivers/gpu/drm/radeon/radeon_ring.c
-@@ -487,16 +487,20 @@ int radeon_debugfs_ib_init(struct radeon_device *rdev)
- unsigned i;
- int r;
-
-- radeon_debugfs_ib_bogus_info_list[0].data = rdev;
-+ pax_open_kernel();
-+ *(void **)&radeon_debugfs_ib_bogus_info_list[0].data = rdev;
-+ pax_close_kernel();
- r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
- if (r)
- return r;
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
-- radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
-- radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
-- radeon_debugfs_ib_list[i].driver_features = 0;
-- radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
-+ pax_open_kernel();
-+ *(void **)&radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
-+ *(void **)&radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
-+ *(u32 *)&radeon_debugfs_ib_list[i].driver_features = 0;
-+ *(void **)&radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
-+ pax_close_kernel();
- }
- return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
- RADEON_IB_POOL_SIZE);
-diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
-index e8422ae..d22d4a8 100644
---- a/drivers/gpu/drm/radeon/radeon_state.c
-+++ b/drivers/gpu/drm/radeon/radeon_state.c
-@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
- if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
- sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
-
-- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
-+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
- sarea_priv->nbox * sizeof(depth_boxes[0])))
- return -EFAULT;
-
-@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
- {
- drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_getparam_t *param = data;
-- int value;
-+ int value = 0;
-
- DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
-
-diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
-index 0b5468b..74cfb87 100644
---- a/drivers/gpu/drm/radeon/radeon_ttm.c
-+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
-@@ -631,7 +631,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
- man->size = size >> PAGE_SHIFT;
- }
-
--static struct vm_operations_struct radeon_ttm_vm_ops;
-+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
- static const struct vm_operations_struct *ttm_vm_ops = NULL;
-
- static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-@@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
- }
- if (unlikely(ttm_vm_ops == NULL)) {
- ttm_vm_ops = vma->vm_ops;
-+ pax_open_kernel();
- radeon_ttm_vm_ops = *ttm_vm_ops;
- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
-+ pax_close_kernel();
- }
- vma->vm_ops = &radeon_ttm_vm_ops;
- return 0;
-@@ -820,30 +822,25 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
- static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
- {
- #if defined(CONFIG_DEBUG_FS)
-- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
-- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
-- unsigned i;
-+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1] = {
-+ {
-+ .name = "radeon_vram_mm",
-+ .show = &radeon_mm_dump_table,
-+ },
-+ {
-+ .name = "radeon_gtt_mm",
-+ .show = &radeon_mm_dump_table,
-+ },
-+ {
-+ .name = "ttm_page_pool",
-+ .show = &ttm_page_alloc_debugfs,
-+ },
-+ };
-
-- for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
-- if (i == 0)
-- sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
-- else
-- sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
-- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
-- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
-- radeon_mem_types_list[i].driver_features = 0;
-- if (i == 0)
-- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
-- else
-- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
--
-- }
-- /* Add ttm page pool to debugfs */
-- sprintf(radeon_mem_types_names[i], "ttm_page_pool");
-- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
-- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
-- radeon_mem_types_list[i].driver_features = 0;
-- radeon_mem_types_list[i].data = NULL;
-+ pax_open_kernel();
-+ *(void **)&radeon_mem_types_list[0].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
-+ *(void **)&radeon_mem_types_list[1].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
-+ pax_close_kernel();
- return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
-
- #endif
-diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
-index 414a681..2b17775 100644
---- a/drivers/gpu/drm/radeon/rs690.c
-+++ b/drivers/gpu/drm/radeon/rs690.c
-@@ -314,9 +314,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
- if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
- rdev->pm.sideport_bandwidth.full)
- rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
-- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
-+ read_delay_latency.full = dfixed_const(800 * 1000);
- read_delay_latency.full = dfixed_div(read_delay_latency,
- rdev->pm.igp_sideport_mclk);
-+ a.full = dfixed_const(370);
-+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
- } else {
- if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
- rdev->pm.k8_bandwidth.full)
-diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
-index 3e72074..9fbe45b 100644
---- a/drivers/gpu/drm/radeon/rv770.c
-+++ b/drivers/gpu/drm/radeon/rv770.c
-@@ -1083,7 +1083,9 @@ static int rv770_startup(struct radeon_device *rdev)
- r = r600_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
-- rdev->asic->copy = NULL;
-+ pax_open_kernel();
-+ *(void **)&rdev->asic->copy = NULL;
-+ pax_close_kernel();
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
-
-diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
-index 038e947..4ae87f0 100644
---- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
-+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
-@@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
- }
-
- const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
-- ttm_bo_man_init,
-- ttm_bo_man_takedown,
-- ttm_bo_man_get_node,
-- ttm_bo_man_put_node,
-- ttm_bo_man_debug
-+ .init = ttm_bo_man_init,
-+ .takedown = ttm_bo_man_takedown,
-+ .get_node = ttm_bo_man_get_node,
-+ .put_node = ttm_bo_man_put_node,
-+ .debug = ttm_bo_man_debug
- };
- EXPORT_SYMBOL(ttm_bo_manager_func);
-diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
-index e70ddd8..ddfa1cd 100644
---- a/drivers/gpu/drm/ttm/ttm_memory.c
-+++ b/drivers/gpu/drm/ttm/ttm_memory.c
-@@ -263,7 +263,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
- zone->glob = glob;
- glob->zone_kernel = zone;
- ret = kobject_init_and_add(
-- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
-+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
- if (unlikely(ret != 0)) {
- kobject_put(&zone->kobj);
- return ret;
-@@ -346,7 +346,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
- zone->glob = glob;
- glob->zone_dma32 = zone;
- ret = kobject_init_and_add(
-- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
-+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
- if (unlikely(ret != 0)) {
- kobject_put(&zone->kobj);
- return ret;
-diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
-index 508c64c..03018ec 100644
---- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
-+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
-@@ -51,7 +51,7 @@
-
- #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
- #define SMALL_ALLOCATION 16
--#define FREE_ALL_PAGES (~0U)
-+#define FREE_ALL_PAGES (~0UL)
- /* times are in msecs */
- #define PAGE_FREE_INTERVAL 1000
-
-@@ -301,13 +301,12 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
- * @pool: to free the pages from
- * @free_all: If set to true will free all pages in pool
- **/
--static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
-+static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free)
- {
- unsigned long irq_flags;
- struct page *p;
- struct page **pages_to_free;
-- unsigned freed_pages = 0,
-- npages_to_free = nr_free;
-+ unsigned long freed_pages = 0, npages_to_free = nr_free;
-
- if (NUM_PAGES_TO_ALLOC < nr_free)
- npages_to_free = NUM_PAGES_TO_ALLOC;
-@@ -369,7 +368,8 @@ restart:
- __list_del(&p->lru, &pool->list);
-
- ttm_pool_update_free_locked(pool, freed_pages);
-- nr_free -= freed_pages;
-+ if (likely(nr_free != FREE_ALL_PAGES))
-+ nr_free -= freed_pages;
- }
-
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-@@ -403,7 +403,7 @@ static int ttm_pool_mm_shrink(struct shrinker *shrink,
- unsigned i;
- unsigned pool_offset;
- struct ttm_page_pool *pool;
-- int shrink_pages = sc->nr_to_scan;
-+ unsigned long shrink_pages = sc->nr_to_scan;
-
- if (shrink_pages == 0)
- goto out;
-@@ -412,7 +412,7 @@ static int ttm_pool_mm_shrink(struct shrinker *shrink,
- pool_offset = ++start_pool % NUM_POOLS;
- /* select start pool in round robin fashion */
- for (i = 0; i < NUM_POOLS; ++i) {
-- unsigned nr_free = shrink_pages;
-+ unsigned long nr_free = shrink_pages;
- if (shrink_pages == 0)
- break;
- pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
-@@ -744,7 +744,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
- }
-
- /* Put all pages in pages list to correct pool to wait for reuse */
--void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
-+void ttm_put_pages(struct list_head *pages, unsigned long page_count, int flags,
- enum ttm_caching_state cstate, dma_addr_t *dma_address)
- {
- unsigned long irq_flags;
-diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
-index f9cc548..92bad48 100644
---- a/drivers/gpu/drm/ttm/ttm_tt.c
-+++ b/drivers/gpu/drm/ttm/ttm_tt.c
-@@ -281,7 +281,7 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
- static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
- {
- int i;
-- unsigned count = 0;
-+ unsigned long count = 0;
- struct list_head h;
- struct page *cur_page;
- struct ttm_backend *be = ttm->be;
-diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
-index 9cf87d9..2000b7d 100644
---- a/drivers/gpu/drm/via/via_drv.h
-+++ b/drivers/gpu/drm/via/via_drv.h
-@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
- typedef uint32_t maskarray_t[5];
-
- typedef struct drm_via_irq {
-- atomic_t irq_received;
-+ atomic_unchecked_t irq_received;
- uint32_t pending_mask;
- uint32_t enable_mask;
- wait_queue_head_t irq_queue;
-@@ -75,7 +75,7 @@ typedef struct drm_via_private {
- struct timeval last_vblank;
- int last_vblank_valid;
- unsigned usec_per_vblank;
-- atomic_t vbl_received;
-+ atomic_unchecked_t vbl_received;
- drm_via_state_t hc_state;
- char pci_buf[VIA_PCI_BUF_SIZE];
- const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
-diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
-index d391f48..10c8ca3 100644
---- a/drivers/gpu/drm/via/via_irq.c
-+++ b/drivers/gpu/drm/via/via_irq.c
-@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
- if (crtc != 0)
- return 0;
-
-- return atomic_read(&dev_priv->vbl_received);
-+ return atomic_read_unchecked(&dev_priv->vbl_received);
- }
-
- irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
-@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
-
- status = VIA_READ(VIA_REG_INTERRUPT);
- if (status & VIA_IRQ_VBLANK_PENDING) {
-- atomic_inc(&dev_priv->vbl_received);
-- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
-+ atomic_inc_unchecked(&dev_priv->vbl_received);
-+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
- do_gettimeofday(&cur_vblank);
- if (dev_priv->last_vblank_valid) {
- dev_priv->usec_per_vblank =
-@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
- dev_priv->last_vblank = cur_vblank;
- dev_priv->last_vblank_valid = 1;
- }
-- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
-+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
- DRM_DEBUG("US per vblank is: %u\n",
- dev_priv->usec_per_vblank);
- }
-@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
-
- for (i = 0; i < dev_priv->num_irqs; ++i) {
- if (status & cur_irq->pending_mask) {
-- atomic_inc(&cur_irq->irq_received);
-+ atomic_inc_unchecked(&cur_irq->irq_received);
- DRM_WAKEUP(&cur_irq->irq_queue);
- handled = 1;
- if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
-@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
- DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
- ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
- masks[irq][4]));
-- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
-+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
- } else {
- DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
- (((cur_irq_sequence =
-- atomic_read(&cur_irq->irq_received)) -
-+ atomic_read_unchecked(&cur_irq->irq_received)) -
- *sequence) <= (1 << 23)));
- }
- *sequence = cur_irq_sequence;
-@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
- }
-
- for (i = 0; i < dev_priv->num_irqs; ++i) {
-- atomic_set(&cur_irq->irq_received, 0);
-+ atomic_set_unchecked(&cur_irq->irq_received, 0);
- cur_irq->enable_mask = dev_priv->irq_masks[i][0];
- cur_irq->pending_mask = dev_priv->irq_masks[i][1];
- DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
-@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
- switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
- case VIA_IRQ_RELATIVE:
- irqwait->request.sequence +=
-- atomic_read(&cur_irq->irq_received);
-+ atomic_read_unchecked(&cur_irq->irq_received);
- irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
- case VIA_IRQ_ABSOLUTE:
- break;
-diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
-index 0e3fa7d..35f9ed6 100644
---- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
-+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
-@@ -260,7 +260,7 @@ struct vmw_private {
- * Fencing and IRQs.
- */
-
-- atomic_t marker_seq;
-+ atomic_unchecked_t marker_seq;
- wait_queue_head_t fence_queue;
- wait_queue_head_t fifo_queue;
- int fence_queue_waiters; /* Protected by hw_mutex */
-diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
-index decca82..7968bc5 100644
---- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
-+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
-@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
- (unsigned int) min,
- (unsigned int) fifo->capabilities);
-
-- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
-+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
- iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
- vmw_marker_queue_init(&fifo->marker_queue);
- return vmw_fifo_send_fence(dev_priv, &dummy);
-@@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
- if (reserveable)
- iowrite32(bytes, fifo_mem +
- SVGA_FIFO_RESERVED);
-- return fifo_mem + (next_cmd >> 2);
-+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
- } else {
- need_bounce = true;
- }
-@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
-
- fm = vmw_fifo_reserve(dev_priv, bytes);
- if (unlikely(fm == NULL)) {
-- *seqno = atomic_read(&dev_priv->marker_seq);
-+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
- ret = -ENOMEM;
- (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
- false, 3*HZ);
-@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
- }
-
- do {
-- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
-+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
- } while (*seqno == 0);
-
- if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
-diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
-index 5f71715..e40c528 100644
---- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
-+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
-@@ -153,9 +153,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
- }
-
- const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
-- vmw_gmrid_man_init,
-- vmw_gmrid_man_takedown,
-- vmw_gmrid_man_get_node,
-- vmw_gmrid_man_put_node,
-- vmw_gmrid_man_debug
-+ .init = vmw_gmrid_man_init,
-+ .takedown = vmw_gmrid_man_takedown,
-+ .get_node = vmw_gmrid_man_get_node,
-+ .put_node = vmw_gmrid_man_put_node,
-+ .debug = vmw_gmrid_man_debug
- };
-diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
-index 66917c6..2dcc8ae 100644
---- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
-+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
-@@ -135,7 +135,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
- int ret;
-
- num_clips = arg->num_clips;
-- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
-+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
-
- if (unlikely(num_clips == 0))
- return 0;
-@@ -221,7 +221,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
- int ret;
-
- num_clips = arg->num_clips;
-- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
-+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
-
- if (unlikely(num_clips == 0))
- return 0;
-diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
-index cabc95f..14b3d77 100644
---- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
-+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
-@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
- * emitted. Then the fence is stale and signaled.
- */
-
-- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
-+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
- > VMW_FENCE_WRAP);
-
- return ret;
-@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
-
- if (fifo_idle)
- down_read(&fifo_state->rwsem);
-- signal_seq = atomic_read(&dev_priv->marker_seq);
-+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
- ret = 0;
-
- for (;;) {
-diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
-index 8a8725c2..afed796 100644
---- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
-+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
-@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
- while (!vmw_lag_lt(queue, us)) {
- spin_lock(&queue->lock);
- if (list_empty(&queue->head))
-- seqno = atomic_read(&dev_priv->marker_seq);
-+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
- else {
- marker = list_first_entry(&queue->head,
- struct vmw_marker, head);
-diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
-index 64d79d2..6c83020 100644
---- a/drivers/hid/hid-core.c
-+++ b/drivers/hid/hid-core.c
-@@ -2112,7 +2112,7 @@ static bool hid_ignore(struct hid_device *hdev)
-
- int hid_add_device(struct hid_device *hdev)
- {
-- static atomic_t id = ATOMIC_INIT(0);
-+ static atomic_unchecked_t id = ATOMIC_INIT(0);
- int ret;
-
- if (WARN_ON(hdev->status & HID_STAT_ADDED))
-@@ -2127,7 +2127,7 @@ int hid_add_device(struct hid_device *hdev)
- /* XXX hack, any other cleaner solution after the driver core
- * is converted to allow more than 20 bytes as the device name? */
- dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
-- hdev->vendor, hdev->product, atomic_inc_return(&id));
-+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
-
- hid_debug_register(hdev, dev_name(&hdev->dev));
- ret = device_add(&hdev->dev);
-diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
-index 488a21a..ab0a415 100644
---- a/drivers/hid/hid-logitech-dj.c
-+++ b/drivers/hid/hid-logitech-dj.c
-@@ -705,6 +705,12 @@ static int logi_dj_raw_event(struct hid_device *hdev,
- * device (via hid_input_report() ) and return 1 so hid-core does not do
- * anything else with it.
- */
-+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
-+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
-+ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
-+ __func__, dj_report->device_index);
-+ return false;
-+ }
-
- /* case 1) */
- if (data[0] != REPORT_ID_DJ_SHORT)
-diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
-index a605ba1..fb91952 100644
---- a/drivers/hid/hid-multitouch.c
-+++ b/drivers/hid/hid-multitouch.c
-@@ -195,6 +195,9 @@ static void mt_feature_mapping(struct hid_device *hdev,
- td->inputmode = field->report->id;
- break;
- case HID_DG_CONTACTMAX:
-+ /* Ignore if value count is out of bounds. */
-+ if (field->report_count < 1)
-+ break;
- td->maxcontacts = field->value[0];
- if (td->mtclass->maxcontacts)
- /* check if the maxcontacts is given by the class */
-@@ -506,7 +509,6 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
- if (field->index == td->last_field_index
- && td->num_received >= td->num_expected)
- mt_emit_event(td, field->hidinput->input);
--
- }
-
- /* we have handled the hidinput part, now remains hiddev */
-diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
-index 4ef02b2..8a96831 100644
---- a/drivers/hid/usbhid/hiddev.c
-+++ b/drivers/hid/usbhid/hiddev.c
-@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
- break;
-
- case HIDIOCAPPLICATION:
-- if (arg < 0 || arg >= hid->maxapplication)
-+ if (arg >= hid->maxapplication)
- break;
-
- for (i = 0; i < hid->maxcollection; i++)
-diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
-index 44a1ea4..21cce84 100644
---- a/drivers/hv/channel.c
-+++ b/drivers/hv/channel.c
-@@ -403,8 +403,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
- unsigned long flags;
- int ret = 0;
-
-- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
-- atomic_inc(&vmbus_connection.next_gpadl_handle);
-+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
-+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
-
- ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
- if (ret)
-diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
-index 17ed6fb..82e91a7 100644
---- a/drivers/hv/hv.c
-+++ b/drivers/hv/hv.c
-@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
- u64 output_address = (output) ? virt_to_phys(output) : 0;
- u32 output_address_hi = output_address >> 32;
- u32 output_address_lo = output_address & 0xFFFFFFFF;
-- void *hypercall_page = hv_context.hypercall_page;
-+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
-
- __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
- "=a"(hv_status_lo) : "d" (control_hi),
-@@ -178,7 +178,7 @@ int hv_init(void)
- /* See if the hypercall page is already set */
- rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
-
-- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
-+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
-
- if (!virtaddr)
- goto cleanup;
-diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
-index be2f3af..9911b09 100644
---- a/drivers/hv/hyperv_vmbus.h
-+++ b/drivers/hv/hyperv_vmbus.h
-@@ -560,7 +560,7 @@ enum vmbus_connect_state {
- struct vmbus_connection {
- enum vmbus_connect_state conn_state;
-
-- atomic_t next_gpadl_handle;
-+ atomic_unchecked_t next_gpadl_handle;
-
- /*
- * Represents channel interrupts. Each bit position represents a
-diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
-index f58067f..ed59814 100644
---- a/drivers/hv/vmbus_drv.c
-+++ b/drivers/hv/vmbus_drv.c
-@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
- {
- int ret = 0;
-
-- static atomic_t device_num = ATOMIC_INIT(0);
-+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
-
- dev_set_name(&child_device_obj->device, "vmbus_0_%d",
-- atomic_inc_return(&device_num));
-+ atomic_inc_return_unchecked(&device_num));
-
- child_device_obj->device.bus = &hv_bus;
- child_device_obj->device.parent = &hv_acpi_dev->dev;
-diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
-index 66f6729..7b6cb19 100644
---- a/drivers/hwmon/acpi_power_meter.c
-+++ b/drivers/hwmon/acpi_power_meter.c
-@@ -124,7 +124,7 @@ struct rw_sensor_template {
- struct device_attribute *devattr,
- const char *buf, size_t count);
- int index;
--};
-+} __do_const;
-
- /* Averaging interval */
- static int update_avg_interval(struct acpi_power_meter_resource *resource)
-@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
- return res;
-
- temp /= 1000;
-- if (temp < 0)
-- return -EINVAL;
-
- mutex_lock(&resource->lock);
- resource->trip[attr->index - 7] = temp;
-@@ -622,7 +620,7 @@ static int register_ro_attrs(struct acpi_power_meter_resource *resource,
- struct ro_sensor_template *ro)
- {
- struct device *dev = &resource->acpi_dev->dev;
-- struct sensor_device_attribute *sensors =
-+ sensor_device_attribute_no_const *sensors =
- &resource->sensors[resource->num_sensors];
- int res = 0;
-
-@@ -650,7 +648,7 @@ static int register_rw_attrs(struct acpi_power_meter_resource *resource,
- struct rw_sensor_template *rw)
- {
- struct device *dev = &resource->acpi_dev->dev;
-- struct sensor_device_attribute *sensors =
-+ sensor_device_attribute_no_const *sensors =
- &resource->sensors[resource->num_sensors];
- int res = 0;
-
-@@ -981,7 +979,7 @@ static int __init enable_cap_knobs(const struct dmi_system_id *d)
- return 0;
- }
-
--static struct dmi_system_id __initdata pm_dmi_table[] = {
-+static const struct dmi_system_id __initconst pm_dmi_table[] = {
- {
- enable_cap_knobs, "IBM Active Energy Manager",
- {
-diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
-index 0b86d47..8066c3f 100644
---- a/drivers/hwmon/applesmc.c
-+++ b/drivers/hwmon/applesmc.c
-@@ -1082,7 +1082,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
- {
- struct applesmc_node_group *grp;
- struct applesmc_dev_attr *node;
-- struct attribute *attr;
-+ attribute_no_const *attr;
- int ret, i;
-
- for (grp = groups; grp->format; grp++) {
-diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
-index 83d2fbd6..93017f7 100644
---- a/drivers/hwmon/asus_atk0110.c
-+++ b/drivers/hwmon/asus_atk0110.c
-@@ -149,10 +149,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
- struct atk_sensor_data {
- struct list_head list;
- struct atk_data *data;
-- struct device_attribute label_attr;
-- struct device_attribute input_attr;
-- struct device_attribute limit1_attr;
-- struct device_attribute limit2_attr;
-+ device_attribute_no_const label_attr;
-+ device_attribute_no_const input_attr;
-+ device_attribute_no_const limit1_attr;
-+ device_attribute_no_const limit2_attr;
- char label_attr_name[ATTR_NAME_SIZE];
- char input_attr_name[ATTR_NAME_SIZE];
- char limit1_attr_name[ATTR_NAME_SIZE];
-@@ -271,7 +271,7 @@ static ssize_t atk_name_show(struct device *dev,
- static struct device_attribute atk_name_attr =
- __ATTR(name, 0444, atk_name_show, NULL);
-
--static void atk_init_attribute(struct device_attribute *attr, char *name,
-+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
- sysfs_show_func show)
- {
- sysfs_attr_init(&attr->attr);
-diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
-index e6ec920..7664a6b 100644
---- a/drivers/hwmon/coretemp.c
-+++ b/drivers/hwmon/coretemp.c
-@@ -787,7 +787,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
- return NOTIFY_OK;
- }
-
--static struct notifier_block coretemp_cpu_notifier __refdata = {
-+static struct notifier_block coretemp_cpu_notifier = {
- .notifier_call = coretemp_cpu_callback,
- };
-
-diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
-index 6a967d7..7f0f923 100644
---- a/drivers/hwmon/ibmaem.c
-+++ b/drivers/hwmon/ibmaem.c
-@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
- struct aem_rw_sensor_template *rw)
- {
- struct device *dev = &data->pdev->dev;
-- struct sensor_device_attribute *sensors = data->sensors;
-+ sensor_device_attribute_no_const *sensors = data->sensors;
- int err;
-
- /* Set up read-only sensors */
-diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
-index d89b339..fdb124c 100644
---- a/drivers/hwmon/pmbus/pmbus_core.c
-+++ b/drivers/hwmon/pmbus/pmbus_core.c
-@@ -809,7 +809,7 @@ static ssize_t pmbus_show_label(struct device *dev,
-
- #define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set) \
- do { \
-- struct sensor_device_attribute *a \
-+ sensor_device_attribute_no_const *a \
- = &data->_type##s[data->num_##_type##s].attribute; \
- BUG_ON(data->num_attributes >= data->max_attributes); \
- sysfs_attr_init(&a->dev_attr.attr); \
-diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
-index 3e3153e..d445962 100644
---- a/drivers/hwmon/sht15.c
-+++ b/drivers/hwmon/sht15.c
-@@ -166,7 +166,7 @@ struct sht15_data {
- int supply_uV;
- bool supply_uV_valid;
- struct work_struct update_supply_work;
-- atomic_t interrupt_handled;
-+ atomic_unchecked_t interrupt_handled;
- };
-
- /**
-@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
- return ret;
-
- gpio_direction_input(data->pdata->gpio_data);
-- atomic_set(&data->interrupt_handled, 0);
-+ atomic_set_unchecked(&data->interrupt_handled, 0);
-
- enable_irq(gpio_to_irq(data->pdata->gpio_data));
- if (gpio_get_value(data->pdata->gpio_data) == 0) {
- disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
- /* Only relevant if the interrupt hasn't occurred. */
-- if (!atomic_read(&data->interrupt_handled))
-+ if (!atomic_read_unchecked(&data->interrupt_handled))
- schedule_work(&data->read_work);
- }
- ret = wait_event_timeout(data->wait_queue,
-@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
-
- /* First disable the interrupt */
- disable_irq_nosync(irq);
-- atomic_inc(&data->interrupt_handled);
-+ atomic_inc_unchecked(&data->interrupt_handled);
- /* Then schedule a reading work struct */
- if (data->state != SHT15_READING_NOTHING)
- schedule_work(&data->read_work);
-@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
- * If not, then start the interrupt again - care here as could
- * have gone low in meantime so verify it hasn't!
- */
-- atomic_set(&data->interrupt_handled, 0);
-+ atomic_set_unchecked(&data->interrupt_handled, 0);
- enable_irq(gpio_to_irq(data->pdata->gpio_data));
- /* If still not occurred or another handler has been scheduled */
- if (gpio_get_value(data->pdata->gpio_data)
-- || atomic_read(&data->interrupt_handled))
-+ || atomic_read_unchecked(&data->interrupt_handled))
- return;
- }
-
-diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
-index 8eac67d..d7b2fa5 100644
---- a/drivers/hwmon/via-cputemp.c
-+++ b/drivers/hwmon/via-cputemp.c
-@@ -304,7 +304,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
- return NOTIFY_OK;
- }
-
--static struct notifier_block via_cputemp_cpu_notifier __refdata = {
-+static struct notifier_block via_cputemp_cpu_notifier = {
- .notifier_call = via_cputemp_cpu_callback,
- };
-
-diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
-index 378fcb5..5e91fa8 100644
---- a/drivers/i2c/busses/i2c-amd756-s4882.c
-+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
-@@ -43,7 +43,7 @@
- extern struct i2c_adapter amd756_smbus;
-
- static struct i2c_adapter *s4882_adapter;
--static struct i2c_algorithm *s4882_algo;
-+static i2c_algorithm_no_const *s4882_algo;
-
- /* Wrapper access functions for multiplexed SMBus */
- static DEFINE_MUTEX(amd756_lock);
-diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
-index 7636671..53a2cab 100644
---- a/drivers/i2c/busses/i2c-diolan-u2c.c
-+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
-@@ -99,7 +99,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
- /* usb layer */
-
- /* Send command to device, and get response. */
--static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
-+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
- {
- int ret = 0;
- int actual;
-diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
-index 29015eb..af2d8e9 100644
---- a/drivers/i2c/busses/i2c-nforce2-s4985.c
-+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
-@@ -41,7 +41,7 @@
- extern struct i2c_adapter *nforce2_smbus;
-
- static struct i2c_adapter *s4985_adapter;
--static struct i2c_algorithm *s4985_algo;
-+static i2c_algorithm_no_const *s4985_algo;
-
- /* Wrapper access functions for multiplexed SMBus */
- static DEFINE_MUTEX(nforce2_lock);
-diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
-index 57a45ce8..51bd6c1 100644
---- a/drivers/i2c/i2c-dev.c
-+++ b/drivers/i2c/i2c-dev.c
-@@ -276,7 +276,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
- res = -EINVAL;
- break;
- }
-- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
-+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
- rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
- if (IS_ERR(rdwr_pa[i].buf)) {
- res = PTR_ERR(rdwr_pa[i].buf);
-diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
-index 57d00ca..0145194 100644
---- a/drivers/ide/aec62xx.c
-+++ b/drivers/ide/aec62xx.c
-@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
- .cable_detect = atp86x_cable_detect,
- };
-
--static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
-+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
- { /* 0: AEC6210 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_aec62xx,
-diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
-index 2c8016a..911a27c 100644
---- a/drivers/ide/alim15x3.c
-+++ b/drivers/ide/alim15x3.c
-@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
- .dma_sff_read_status = ide_dma_sff_read_status,
- };
-
--static const struct ide_port_info ali15x3_chipset __devinitdata = {
-+static const struct ide_port_info ali15x3_chipset __devinitconst = {
- .name = DRV_NAME,
- .init_chipset = init_chipset_ali15x3,
- .init_hwif = init_hwif_ali15x3,
-diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
-index 3747b25..56fc995 100644
---- a/drivers/ide/amd74xx.c
-+++ b/drivers/ide/amd74xx.c
-@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
- .udma_mask = udma, \
- }
-
--static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
-+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
- /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
- /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
- /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
-diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
-index 15f0ead..cb43480 100644
---- a/drivers/ide/atiixp.c
-+++ b/drivers/ide/atiixp.c
-@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
- .cable_detect = atiixp_cable_detect,
- };
-
--static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
-+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
- { /* 0: IXP200/300/400/700 */
- .name = DRV_NAME,
- .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
-diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
-index 5f80312..d1fc438 100644
---- a/drivers/ide/cmd64x.c
-+++ b/drivers/ide/cmd64x.c
-@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
- .dma_sff_read_status = ide_dma_sff_read_status,
- };
-
--static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
-+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
- { /* 0: CMD643 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_cmd64x,
-diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
-index 2c1e5f7..1444762 100644
---- a/drivers/ide/cs5520.c
-+++ b/drivers/ide/cs5520.c
-@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
- .set_dma_mode = cs5520_set_dma_mode,
- };
-
--static const struct ide_port_info cyrix_chipset __devinitdata = {
-+static const struct ide_port_info cyrix_chipset __devinitconst = {
- .name = DRV_NAME,
- .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
- .port_ops = &cs5520_port_ops,
-diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
-index 4dc4eb9..49b40ad 100644
---- a/drivers/ide/cs5530.c
-+++ b/drivers/ide/cs5530.c
-@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
- .udma_filter = cs5530_udma_filter,
- };
-
--static const struct ide_port_info cs5530_chipset __devinitdata = {
-+static const struct ide_port_info cs5530_chipset __devinitconst = {
- .name = DRV_NAME,
- .init_chipset = init_chipset_cs5530,
- .init_hwif = init_hwif_cs5530,
-diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
-index 5059faf..18d4c85 100644
---- a/drivers/ide/cs5535.c
-+++ b/drivers/ide/cs5535.c
-@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
- .cable_detect = cs5535_cable_detect,
- };
-
--static const struct ide_port_info cs5535_chipset __devinitdata = {
-+static const struct ide_port_info cs5535_chipset __devinitconst = {
- .name = DRV_NAME,
- .port_ops = &cs5535_port_ops,
- .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
-diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
-index 847553f..3ffb49d 100644
---- a/drivers/ide/cy82c693.c
-+++ b/drivers/ide/cy82c693.c
-@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
- .set_dma_mode = cy82c693_set_dma_mode,
- };
-
--static const struct ide_port_info cy82c693_chipset __devinitdata = {
-+static const struct ide_port_info cy82c693_chipset __devinitconst = {
- .name = DRV_NAME,
- .init_iops = init_iops_cy82c693,
- .port_ops = &cy82c693_port_ops,
-diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
-index 58c51cd..4aec3b8 100644
---- a/drivers/ide/hpt366.c
-+++ b/drivers/ide/hpt366.c
-@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
- }
- };
-
--static const struct hpt_info hpt36x __devinitdata = {
-+static const struct hpt_info hpt36x __devinitconst = {
- .chip_name = "HPT36x",
- .chip_type = HPT36x,
- .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
-@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
- .timings = &hpt36x_timings
- };
-
--static const struct hpt_info hpt370 __devinitdata = {
-+static const struct hpt_info hpt370 __devinitconst = {
- .chip_name = "HPT370",
- .chip_type = HPT370,
- .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
-@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
- .timings = &hpt37x_timings
- };
-
--static const struct hpt_info hpt370a __devinitdata = {
-+static const struct hpt_info hpt370a __devinitconst = {
- .chip_name = "HPT370A",
- .chip_type = HPT370A,
- .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
-@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
- .timings = &hpt37x_timings
- };
-
--static const struct hpt_info hpt374 __devinitdata = {
-+static const struct hpt_info hpt374 __devinitconst = {
- .chip_name = "HPT374",
- .chip_type = HPT374,
- .udma_mask = ATA_UDMA5,
-@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
- .timings = &hpt37x_timings
- };
-
--static const struct hpt_info hpt372 __devinitdata = {
-+static const struct hpt_info hpt372 __devinitconst = {
- .chip_name = "HPT372",
- .chip_type = HPT372,
- .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
-@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
- .timings = &hpt37x_timings
- };
-
--static const struct hpt_info hpt372a __devinitdata = {
-+static const struct hpt_info hpt372a __devinitconst = {
- .chip_name = "HPT372A",
- .chip_type = HPT372A,
- .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
-@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
- .timings = &hpt37x_timings
- };
-
--static const struct hpt_info hpt302 __devinitdata = {
-+static const struct hpt_info hpt302 __devinitconst = {
- .chip_name = "HPT302",
- .chip_type = HPT302,
- .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
-@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
- .timings = &hpt37x_timings
- };
-
--static const struct hpt_info hpt371 __devinitdata = {
-+static const struct hpt_info hpt371 __devinitconst = {
- .chip_name = "HPT371",
- .chip_type = HPT371,
- .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
-@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
- .timings = &hpt37x_timings
- };
-
--static const struct hpt_info hpt372n __devinitdata = {
-+static const struct hpt_info hpt372n __devinitconst = {
- .chip_name = "HPT372N",
- .chip_type = HPT372N,
- .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
-@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
- .timings = &hpt37x_timings
- };
-
--static const struct hpt_info hpt302n __devinitdata = {
-+static const struct hpt_info hpt302n __devinitconst = {
- .chip_name = "HPT302N",
- .chip_type = HPT302N,
- .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
-@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
- .timings = &hpt37x_timings
- };
-
--static const struct hpt_info hpt371n __devinitdata = {
-+static const struct hpt_info hpt371n __devinitconst = {
- .chip_name = "HPT371N",
- .chip_type = HPT371N,
- .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
-@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
- .dma_sff_read_status = ide_dma_sff_read_status,
- };
-
--static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
-+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
- { /* 0: HPT36x */
- .name = DRV_NAME,
- .init_chipset = init_chipset_hpt366,
-diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
-index 8126824..55a2798 100644
---- a/drivers/ide/ide-cd.c
-+++ b/drivers/ide/ide-cd.c
-@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
- alignment = queue_dma_alignment(q) | q->dma_pad_mask;
- if ((unsigned long)buf & alignment
- || blk_rq_bytes(rq) & q->dma_pad_mask
-- || object_is_on_stack(buf))
-+ || object_starts_on_stack(buf))
- drive->dma = 0;
- }
- }
-diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
-index a743e68..1cfd674 100644
---- a/drivers/ide/ide-pci-generic.c
-+++ b/drivers/ide/ide-pci-generic.c
-@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
- .udma_mask = ATA_UDMA6, \
- }
-
--static const struct ide_port_info generic_chipsets[] __devinitdata = {
-+static const struct ide_port_info generic_chipsets[] __devinitconst = {
- /* 0: Unknown */
- DECLARE_GENERIC_PCI_DEV(0),
-
-diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
-index 560e66d..d5dd180 100644
---- a/drivers/ide/it8172.c
-+++ b/drivers/ide/it8172.c
-@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
- .set_dma_mode = it8172_set_dma_mode,
- };
-
--static const struct ide_port_info it8172_port_info __devinitdata = {
-+static const struct ide_port_info it8172_port_info __devinitconst = {
- .name = DRV_NAME,
- .port_ops = &it8172_port_ops,
- .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
-diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
-index 46816ba..1847aeb 100644
---- a/drivers/ide/it8213.c
-+++ b/drivers/ide/it8213.c
-@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
- .cable_detect = it8213_cable_detect,
- };
-
--static const struct ide_port_info it8213_chipset __devinitdata = {
-+static const struct ide_port_info it8213_chipset __devinitconst = {
- .name = DRV_NAME,
- .enablebits = { {0x41, 0x80, 0x80} },
- .port_ops = &it8213_port_ops,
-diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
-index 2e3169f..c5611db 100644
---- a/drivers/ide/it821x.c
-+++ b/drivers/ide/it821x.c
-@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
- .cable_detect = it821x_cable_detect,
- };
-
--static const struct ide_port_info it821x_chipset __devinitdata = {
-+static const struct ide_port_info it821x_chipset __devinitconst = {
- .name = DRV_NAME,
- .init_chipset = init_chipset_it821x,
- .init_hwif = init_hwif_it821x,
-diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
-index 74c2c4a..efddd7d 100644
---- a/drivers/ide/jmicron.c
-+++ b/drivers/ide/jmicron.c
-@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
- .cable_detect = jmicron_cable_detect,
- };
-
--static const struct ide_port_info jmicron_chipset __devinitdata = {
-+static const struct ide_port_info jmicron_chipset __devinitconst = {
- .name = DRV_NAME,
- .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
- .port_ops = &jmicron_port_ops,
-diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
-index 95327a2..73f78d8 100644
---- a/drivers/ide/ns87415.c
-+++ b/drivers/ide/ns87415.c
-@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
- .dma_sff_read_status = superio_dma_sff_read_status,
- };
-
--static const struct ide_port_info ns87415_chipset __devinitdata = {
-+static const struct ide_port_info ns87415_chipset __devinitconst = {
- .name = DRV_NAME,
- .init_hwif = init_hwif_ns87415,
- .tp_ops = &ns87415_tp_ops,
-diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
-index 1a53a4c..39edc66 100644
---- a/drivers/ide/opti621.c
-+++ b/drivers/ide/opti621.c
-@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
- .set_pio_mode = opti621_set_pio_mode,
- };
-
--static const struct ide_port_info opti621_chipset __devinitdata = {
-+static const struct ide_port_info opti621_chipset __devinitconst = {
- .name = DRV_NAME,
- .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
- .port_ops = &opti621_port_ops,
-diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
-index 9546fe2..2e5ceb6 100644
---- a/drivers/ide/pdc202xx_new.c
-+++ b/drivers/ide/pdc202xx_new.c
-@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
- .udma_mask = udma, \
- }
-
--static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
-+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
- /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
- /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
- };
-diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
-index 3a35ec6..5634510 100644
---- a/drivers/ide/pdc202xx_old.c
-+++ b/drivers/ide/pdc202xx_old.c
-@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
- .max_sectors = sectors, \
- }
-
--static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
-+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
- { /* 0: PDC20246 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_pdc202xx,
-diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
-index 1892e81..fe0fd60 100644
---- a/drivers/ide/piix.c
-+++ b/drivers/ide/piix.c
-@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
- .udma_mask = udma, \
- }
-
--static const struct ide_port_info piix_pci_info[] __devinitdata = {
-+static const struct ide_port_info piix_pci_info[] __devinitconst = {
- /* 0: MPIIX */
- { /*
- * MPIIX actually has only a single IDE channel mapped to
-diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
-index a6414a8..c04173e 100644
---- a/drivers/ide/rz1000.c
-+++ b/drivers/ide/rz1000.c
-@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
- }
- }
-
--static const struct ide_port_info rz1000_chipset __devinitdata = {
-+static const struct ide_port_info rz1000_chipset __devinitconst = {
- .name = DRV_NAME,
- .host_flags = IDE_HFLAG_NO_DMA,
- };
-diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
-index 356b9b5..d4758eb 100644
---- a/drivers/ide/sc1200.c
-+++ b/drivers/ide/sc1200.c
-@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
- .dma_sff_read_status = ide_dma_sff_read_status,
- };
-
--static const struct ide_port_info sc1200_chipset __devinitdata = {
-+static const struct ide_port_info sc1200_chipset __devinitconst = {
- .name = DRV_NAME,
- .port_ops = &sc1200_port_ops,
- .dma_ops = &sc1200_dma_ops,
-diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
-index b7f5b0c..9701038 100644
---- a/drivers/ide/scc_pata.c
-+++ b/drivers/ide/scc_pata.c
-@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
- .dma_sff_read_status = scc_dma_sff_read_status,
- };
-
--static const struct ide_port_info scc_chipset __devinitdata = {
-+static const struct ide_port_info scc_chipset __devinitconst = {
- .name = "sccIDE",
- .init_iops = init_iops_scc,
- .init_dma = scc_init_dma,
-diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
-index 35fb8da..24d72ef 100644
---- a/drivers/ide/serverworks.c
-+++ b/drivers/ide/serverworks.c
-@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
- .cable_detect = svwks_cable_detect,
- };
-
--static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
-+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
- { /* 0: OSB4 */
- .name = DRV_NAME,
- .init_chipset = init_chipset_svwks,
-diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
-index ddeda44..46f7e30 100644
---- a/drivers/ide/siimage.c
-+++ b/drivers/ide/siimage.c
-@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
- .udma_mask = ATA_UDMA6, \
- }
-
--static const struct ide_port_info siimage_chipsets[] __devinitdata = {
-+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
- /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
- /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
- };
-diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
-index 4a00225..09e61b4 100644
---- a/drivers/ide/sis5513.c
-+++ b/drivers/ide/sis5513.c
-@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
- .cable_detect = sis_cable_detect,
- };
-
--static const struct ide_port_info sis5513_chipset __devinitdata = {
-+static const struct ide_port_info sis5513_chipset __devinitconst = {
- .name = DRV_NAME,
- .init_chipset = init_chipset_sis5513,
- .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
-diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
-index f21dc2a..d051cd2 100644
---- a/drivers/ide/sl82c105.c
-+++ b/drivers/ide/sl82c105.c
-@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
- .dma_sff_read_status = ide_dma_sff_read_status,
- };
-
--static const struct ide_port_info sl82c105_chipset __devinitdata = {
-+static const struct ide_port_info sl82c105_chipset __devinitconst = {
- .name = DRV_NAME,
- .init_chipset = init_chipset_sl82c105,
- .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
-diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
-index 864ffe0..863a5e92 100644
---- a/drivers/ide/slc90e66.c
-+++ b/drivers/ide/slc90e66.c
-@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
- .cable_detect = slc90e66_cable_detect,
- };
-
--static const struct ide_port_info slc90e66_chipset __devinitdata = {
-+static const struct ide_port_info slc90e66_chipset __devinitconst = {
- .name = DRV_NAME,
- .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
- .port_ops = &slc90e66_port_ops,
-diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
-index 4799d5c..1794678 100644
---- a/drivers/ide/tc86c001.c
-+++ b/drivers/ide/tc86c001.c
-@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
- .dma_sff_read_status = ide_dma_sff_read_status,
- };
-
--static const struct ide_port_info tc86c001_chipset __devinitdata = {
-+static const struct ide_port_info tc86c001_chipset __devinitconst = {
- .name = DRV_NAME,
- .init_hwif = init_hwif_tc86c001,
- .port_ops = &tc86c001_port_ops,
-diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
-index 281c914..55ce1b8 100644
---- a/drivers/ide/triflex.c
-+++ b/drivers/ide/triflex.c
-@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
- .set_dma_mode = triflex_set_mode,
- };
-
--static const struct ide_port_info triflex_device __devinitdata = {
-+static const struct ide_port_info triflex_device __devinitconst = {
- .name = DRV_NAME,
- .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
- .port_ops = &triflex_port_ops,
-diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
-index 4b42ca0..e494a98 100644
---- a/drivers/ide/trm290.c
-+++ b/drivers/ide/trm290.c
-@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
- .dma_check = trm290_dma_check,
- };
-
--static const struct ide_port_info trm290_chipset __devinitdata = {
-+static const struct ide_port_info trm290_chipset __devinitconst = {
- .name = DRV_NAME,
- .init_hwif = init_hwif_trm290,
- .tp_ops = &trm290_tp_ops,
-diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
-index f46f49c..eb77678 100644
---- a/drivers/ide/via82cxxx.c
-+++ b/drivers/ide/via82cxxx.c
-@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
- .cable_detect = via82cxxx_cable_detect,
- };
-
--static const struct ide_port_info via82cxxx_chipset __devinitdata = {
-+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
- .name = DRV_NAME,
- .init_chipset = init_chipset_via82cxxx,
- .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
-diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
-index eb0e2cc..14241c7 100644
---- a/drivers/ieee802154/fakehard.c
-+++ b/drivers/ieee802154/fakehard.c
-@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
- phy->transmit_power = 0xbf;
-
- dev->netdev_ops = &fake_ops;
-- dev->ml_priv = &fake_mlme;
-+ dev->ml_priv = (void *)&fake_mlme;
-
- priv = netdev_priv(dev);
- priv->phy = phy;
-diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
-index 8b72f39..55df4c8 100644
---- a/drivers/infiniband/core/cm.c
-+++ b/drivers/infiniband/core/cm.c
-@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
-
- struct cm_counter_group {
- struct kobject obj;
-- atomic_long_t counter[CM_ATTR_COUNT];
-+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
- };
-
- struct cm_counter_attribute {
-@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
- struct ib_mad_send_buf *msg = NULL;
- int ret;
-
-- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_REQ_COUNTER]);
-
- /* Quick state check to discard duplicate REQs. */
-@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
- if (!cm_id_priv)
- return;
-
-- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_REP_COUNTER]);
- ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
- if (ret)
-@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
- if (cm_id_priv->id.state != IB_CM_REP_SENT &&
- cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
- spin_unlock_irq(&cm_id_priv->lock);
-- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_RTU_COUNTER]);
- goto out;
- }
-@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
- cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
- dreq_msg->local_comm_id);
- if (!cm_id_priv) {
-- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_DREQ_COUNTER]);
- cm_issue_drep(work->port, work->mad_recv_wc);
- return -EINVAL;
-@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
- case IB_CM_MRA_REP_RCVD:
- break;
- case IB_CM_TIMEWAIT:
-- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_DREQ_COUNTER]);
- if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
- goto unlock;
-@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
- cm_free_msg(msg);
- goto deref;
- case IB_CM_DREQ_RCVD:
-- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_DREQ_COUNTER]);
- goto unlock;
- default:
-@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
- ib_modify_mad(cm_id_priv->av.port->mad_agent,
- cm_id_priv->msg, timeout)) {
- if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
-- atomic_long_inc(&work->port->
-+ atomic_long_inc_unchecked(&work->port->
- counter_group[CM_RECV_DUPLICATES].
- counter[CM_MRA_COUNTER]);
- goto out;
-@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
- break;
- case IB_CM_MRA_REQ_RCVD:
- case IB_CM_MRA_REP_RCVD:
-- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_MRA_COUNTER]);
- /* fall through */
- default:
-@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
- case IB_CM_LAP_IDLE:
- break;
- case IB_CM_MRA_LAP_SENT:
-- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_LAP_COUNTER]);
- if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
- goto unlock;
-@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
- cm_free_msg(msg);
- goto deref;
- case IB_CM_LAP_RCVD:
-- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_LAP_COUNTER]);
- goto unlock;
- default:
-@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
- cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
- if (cur_cm_id_priv) {
- spin_unlock_irq(&cm.lock);
-- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
-+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
- counter[CM_SIDR_REQ_COUNTER]);
- goto out; /* Duplicate message. */
- }
-@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
- if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
- msg->retries = 1;
-
-- atomic_long_add(1 + msg->retries,
-+ atomic_long_add_unchecked(1 + msg->retries,
- &port->counter_group[CM_XMIT].counter[attr_index]);
- if (msg->retries)
-- atomic_long_add(msg->retries,
-+ atomic_long_add_unchecked(msg->retries,
- &port->counter_group[CM_XMIT_RETRIES].
- counter[attr_index]);
-
-@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
- }
-
- attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
-- atomic_long_inc(&port->counter_group[CM_RECV].
-+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
- counter[attr_id - CM_ATTR_ID_OFFSET]);
-
- work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
-@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
- cm_attr = container_of(attr, struct cm_counter_attribute, attr);
-
- return sprintf(buf, "%ld\n",
-- atomic_long_read(&group->counter[cm_attr->index]));
-+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
- }
-
- static const struct sysfs_ops cm_counter_ops = {
-diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
-index 176c8f9..2627b62 100644
---- a/drivers/infiniband/core/fmr_pool.c
-+++ b/drivers/infiniband/core/fmr_pool.c
-@@ -98,8 +98,8 @@ struct ib_fmr_pool {
-
- struct task_struct *thread;
-
-- atomic_t req_ser;
-- atomic_t flush_ser;
-+ atomic_unchecked_t req_ser;
-+ atomic_unchecked_t flush_ser;
-
- wait_queue_head_t force_wait;
- };
-@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
- struct ib_fmr_pool *pool = pool_ptr;
-
- do {
-- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
-+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
- ib_fmr_batch_release(pool);
-
-- atomic_inc(&pool->flush_ser);
-+ atomic_inc_unchecked(&pool->flush_ser);
- wake_up_interruptible(&pool->force_wait);
-
- if (pool->flush_function)
-@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
- }
-
- set_current_state(TASK_INTERRUPTIBLE);
-- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
-+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
- !kthread_should_stop())
- schedule();
- __set_current_state(TASK_RUNNING);
-@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
- pool->dirty_watermark = params->dirty_watermark;
- pool->dirty_len = 0;
- spin_lock_init(&pool->pool_lock);
-- atomic_set(&pool->req_ser, 0);
-- atomic_set(&pool->flush_ser, 0);
-+ atomic_set_unchecked(&pool->req_ser, 0);
-+ atomic_set_unchecked(&pool->flush_ser, 0);
- init_waitqueue_head(&pool->force_wait);
-
- pool->thread = kthread_run(ib_fmr_cleanup_thread,
-@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
- }
- spin_unlock_irq(&pool->pool_lock);
-
-- serial = atomic_inc_return(&pool->req_ser);
-+ serial = atomic_inc_return_unchecked(&pool->req_ser);
- wake_up_process(pool->thread);
-
- if (wait_event_interruptible(pool->force_wait,
-- atomic_read(&pool->flush_ser) - serial >= 0))
-+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
- return -EINTR;
-
- return 0;
-@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
- } else {
- list_add_tail(&fmr->list, &pool->dirty_list);
- if (++pool->dirty_len >= pool->dirty_watermark) {
-- atomic_inc(&pool->req_ser);
-+ atomic_inc_unchecked(&pool->req_ser);
- wake_up_process(pool->thread);
- }
- }
-diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
-index a8445b8..e43f9b9 100644
---- a/drivers/infiniband/core/uverbs_cmd.c
-+++ b/drivers/infiniband/core/uverbs_cmd.c
-@@ -928,6 +928,9 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
- if (copy_from_user(&cmd, buf, sizeof cmd))
- return -EFAULT;
-
-+ if (!access_ok_noprefault(VERIFY_READ, cmd.start, cmd.length))
-+ return -EFAULT;
-+
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
-diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
-index 40c8353..946b0e4 100644
---- a/drivers/infiniband/hw/cxgb4/mem.c
-+++ b/drivers/infiniband/hw/cxgb4/mem.c
-@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
- int err;
- struct fw_ri_tpte tpt;
- u32 stag_idx;
-- static atomic_t key;
-+ static atomic_unchecked_t key;
-
- if (c4iw_fatal_error(rdev))
- return -EIO;
-@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
- &rdev->resource.tpt_fifo_lock);
- if (!stag_idx)
- return -ENOMEM;
-- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
-+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
- }
- PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
- __func__, stag_state, type, pdid, stag_idx);
-diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
-index e571e60..523c505 100644
---- a/drivers/infiniband/hw/ehca/ehca_irq.c
-+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
-@@ -883,7 +883,7 @@ static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
- return NOTIFY_OK;
- }
-
--static struct notifier_block comp_pool_callback_nb __cpuinitdata = {
-+static struct notifier_block comp_pool_callback_nb = {
- .notifier_call = comp_pool_callback,
- .priority = 0,
- };
-diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
-index 644c2c7..ecf0879 100644
---- a/drivers/infiniband/hw/ipath/ipath_dma.c
-+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
-@@ -176,17 +176,17 @@ static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
- }
-
- struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
-- ipath_mapping_error,
-- ipath_dma_map_single,
-- ipath_dma_unmap_single,
-- ipath_dma_map_page,
-- ipath_dma_unmap_page,
-- ipath_map_sg,
-- ipath_unmap_sg,
-- ipath_sg_dma_address,
-- ipath_sg_dma_len,
-- ipath_sync_single_for_cpu,
-- ipath_sync_single_for_device,
-- ipath_dma_alloc_coherent,
-- ipath_dma_free_coherent
-+ .mapping_error = ipath_mapping_error,
-+ .map_single = ipath_dma_map_single,
-+ .unmap_single = ipath_dma_unmap_single,
-+ .map_page = ipath_dma_map_page,
-+ .unmap_page = ipath_dma_unmap_page,
-+ .map_sg = ipath_map_sg,
-+ .unmap_sg = ipath_unmap_sg,
-+ .dma_address = ipath_sg_dma_address,
-+ .dma_len = ipath_sg_dma_len,
-+ .sync_single_for_cpu = ipath_sync_single_for_cpu,
-+ .sync_single_for_device = ipath_sync_single_for_device,
-+ .alloc_coherent = ipath_dma_alloc_coherent,
-+ .free_coherent = ipath_dma_free_coherent
- };
-diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
-index 31ae1b1..fe606ac 100644
---- a/drivers/infiniband/hw/ipath/ipath_fs.c
-+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
-@@ -410,6 +410,7 @@ static struct file_system_type ipathfs_fs_type = {
- .mount = ipathfs_mount,
- .kill_sb = ipathfs_kill_super,
- };
-+MODULE_ALIAS_FS("ipathfs");
-
- int __init ipath_init_ipathfs(void)
- {
-diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
-index 79b3dbc..96e5fcc 100644
---- a/drivers/infiniband/hw/ipath/ipath_rc.c
-+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
-@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
- struct ib_atomic_eth *ateth;
- struct ipath_ack_entry *e;
- u64 vaddr;
-- atomic64_t *maddr;
-+ atomic64_unchecked_t *maddr;
- u64 sdata;
- u32 rkey;
- u8 next;
-@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
- IB_ACCESS_REMOTE_ATOMIC)))
- goto nack_acc_unlck;
- /* Perform atomic OP and save result. */
-- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
-+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
- sdata = be64_to_cpu(ateth->swap_data);
- e = &qp->s_ack_queue[qp->r_head_ack_queue];
- e->atomic_data = (opcode == OP(FETCH_ADD)) ?
-- (u64) atomic64_add_return(sdata, maddr) - sdata :
-+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
- (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
- be64_to_cpu(ateth->compare_data),
- sdata);
-diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
-index 1f95bba..9530f87 100644
---- a/drivers/infiniband/hw/ipath/ipath_ruc.c
-+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
-@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
- unsigned long flags;
- struct ib_wc wc;
- u64 sdata;
-- atomic64_t *maddr;
-+ atomic64_unchecked_t *maddr;
- enum ib_wc_status send_status;
-
- /*
-@@ -382,11 +382,11 @@ again:
- IB_ACCESS_REMOTE_ATOMIC)))
- goto acc_err;
- /* Perform atomic OP and save result. */
-- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
-+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
- sdata = wqe->wr.wr.atomic.compare_add;
- *(u64 *) sqp->s_sge.sge.vaddr =
- (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
-- (u64) atomic64_add_return(sdata, maddr) - sdata :
-+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
- (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
- sdata, wqe->wr.wr.atomic.swap);
- goto send_comp;
-diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
-index 9d3e5c1..6f166df 100644
---- a/drivers/infiniband/hw/mthca/mthca_cmd.c
-+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
-@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
- mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
- }
-
--int mthca_QUERY_FW(struct mthca_dev *dev)
-+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
- {
- struct mthca_mailbox *mailbox;
- u32 *outbox;
-@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- CMD_TIME_CLASS_B);
- }
-
--int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int num_mtt)
- {
- return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
-@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
- 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
- }
-
--int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int eq_num)
- {
- return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
-@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
- CMD_TIME_CLASS_B);
- }
-
--int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
-+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
- int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
- void *in_mad, void *response_mad)
- {
-diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
-index aa12a53..c145bc2 100644
---- a/drivers/infiniband/hw/mthca/mthca_main.c
-+++ b/drivers/infiniband/hw/mthca/mthca_main.c
-@@ -692,7 +692,7 @@ err_close:
- return err;
- }
-
--static int mthca_setup_hca(struct mthca_dev *dev)
-+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
- {
- int err;
-
-diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
-index ed9a989..6aa5dc2 100644
---- a/drivers/infiniband/hw/mthca/mthca_mr.c
-+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
-@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
- * through the bitmaps)
- */
-
--static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
-+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
- {
- int o;
- int m;
-@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
- return key;
- }
-
--int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
-+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
- u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
- {
- struct mthca_mailbox *mailbox;
-@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
- return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
- }
-
--int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
-+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
- u64 *buffer_list, int buffer_size_shift,
- int list_len, u64 iova, u64 total_size,
- u32 access, struct mthca_mr *mr)
-diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
-index 42dde06..1257310 100644
---- a/drivers/infiniband/hw/mthca/mthca_provider.c
-+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
-@@ -764,7 +764,7 @@ unlock:
- return 0;
- }
-
--static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
-+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
- {
- struct mthca_dev *dev = to_mdev(ibcq->device);
- struct mthca_cq *cq = to_mcq(ibcq);
-diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
-index 5965b3d..16817fb 100644
---- a/drivers/infiniband/hw/nes/nes.c
-+++ b/drivers/infiniband/hw/nes/nes.c
-@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
- LIST_HEAD(nes_adapter_list);
- static LIST_HEAD(nes_dev_list);
-
--atomic_t qps_destroyed;
-+atomic_unchecked_t qps_destroyed;
-
- static unsigned int ee_flsh_adapter;
- static unsigned int sysfs_nonidx_addr;
-@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
- struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
-
-- atomic_inc(&qps_destroyed);
-+ atomic_inc_unchecked(&qps_destroyed);
-
- /* Free the control structures */
-
-diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
-index 3ade373..f3b68b7 100644
---- a/drivers/infiniband/hw/nes/nes.h
-+++ b/drivers/infiniband/hw/nes/nes.h
-@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
- extern unsigned int wqm_quanta;
- extern struct list_head nes_adapter_list;
-
--extern atomic_t cm_connects;
--extern atomic_t cm_accepts;
--extern atomic_t cm_disconnects;
--extern atomic_t cm_closes;
--extern atomic_t cm_connecteds;
--extern atomic_t cm_connect_reqs;
--extern atomic_t cm_rejects;
--extern atomic_t mod_qp_timouts;
--extern atomic_t qps_created;
--extern atomic_t qps_destroyed;
--extern atomic_t sw_qps_destroyed;
-+extern atomic_unchecked_t cm_connects;
-+extern atomic_unchecked_t cm_accepts;
-+extern atomic_unchecked_t cm_disconnects;
-+extern atomic_unchecked_t cm_closes;
-+extern atomic_unchecked_t cm_connecteds;
-+extern atomic_unchecked_t cm_connect_reqs;
-+extern atomic_unchecked_t cm_rejects;
-+extern atomic_unchecked_t mod_qp_timouts;
-+extern atomic_unchecked_t qps_created;
-+extern atomic_unchecked_t qps_destroyed;
-+extern atomic_unchecked_t sw_qps_destroyed;
- extern u32 mh_detected;
- extern u32 mh_pauses_sent;
- extern u32 cm_packets_sent;
-@@ -197,16 +197,16 @@ extern u32 cm_packets_created;
- extern u32 cm_packets_received;
- extern u32 cm_packets_dropped;
- extern u32 cm_packets_retrans;
--extern atomic_t cm_listens_created;
--extern atomic_t cm_listens_destroyed;
-+extern atomic_unchecked_t cm_listens_created;
-+extern atomic_unchecked_t cm_listens_destroyed;
- extern u32 cm_backlog_drops;
--extern atomic_t cm_loopbacks;
--extern atomic_t cm_nodes_created;
--extern atomic_t cm_nodes_destroyed;
--extern atomic_t cm_accel_dropped_pkts;
--extern atomic_t cm_resets_recvd;
--extern atomic_t pau_qps_created;
--extern atomic_t pau_qps_destroyed;
-+extern atomic_unchecked_t cm_loopbacks;
-+extern atomic_unchecked_t cm_nodes_created;
-+extern atomic_unchecked_t cm_nodes_destroyed;
-+extern atomic_unchecked_t cm_accel_dropped_pkts;
-+extern atomic_unchecked_t cm_resets_recvd;
-+extern atomic_unchecked_t pau_qps_created;
-+extern atomic_unchecked_t pau_qps_destroyed;
-
- extern u32 int_mod_timer_init;
- extern u32 int_mod_cq_depth_256;
-diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
-index 0a52d72..23edf25 100644
---- a/drivers/infiniband/hw/nes/nes_cm.c
-+++ b/drivers/infiniband/hw/nes/nes_cm.c
-@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
- u32 cm_packets_retrans;
- u32 cm_packets_created;
- u32 cm_packets_received;
--atomic_t cm_listens_created;
--atomic_t cm_listens_destroyed;
-+atomic_unchecked_t cm_listens_created;
-+atomic_unchecked_t cm_listens_destroyed;
- u32 cm_backlog_drops;
--atomic_t cm_loopbacks;
--atomic_t cm_nodes_created;
--atomic_t cm_nodes_destroyed;
--atomic_t cm_accel_dropped_pkts;
--atomic_t cm_resets_recvd;
-+atomic_unchecked_t cm_loopbacks;
-+atomic_unchecked_t cm_nodes_created;
-+atomic_unchecked_t cm_nodes_destroyed;
-+atomic_unchecked_t cm_accel_dropped_pkts;
-+atomic_unchecked_t cm_resets_recvd;
-
- static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
- static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
-@@ -133,28 +133,28 @@ static void print_core(struct nes_cm_core *core);
- /* instance of function pointers for client API */
- /* set address of this instance to cm_core->cm_ops at cm_core alloc */
- static struct nes_cm_ops nes_cm_api = {
-- mini_cm_accelerated,
-- mini_cm_listen,
-- mini_cm_del_listen,
-- mini_cm_connect,
-- mini_cm_close,
-- mini_cm_accept,
-- mini_cm_reject,
-- mini_cm_recv_pkt,
-- mini_cm_dealloc_core,
-- mini_cm_get,
-- mini_cm_set
-+ .accelerated = mini_cm_accelerated,
-+ .listen = mini_cm_listen,
-+ .stop_listener = mini_cm_del_listen,
-+ .connect = mini_cm_connect,
-+ .close = mini_cm_close,
-+ .accept = mini_cm_accept,
-+ .reject = mini_cm_reject,
-+ .recv_pkt = mini_cm_recv_pkt,
-+ .destroy_cm_core = mini_cm_dealloc_core,
-+ .get = mini_cm_get,
-+ .set = mini_cm_set
- };
-
- static struct nes_cm_core *g_cm_core;
-
--atomic_t cm_connects;
--atomic_t cm_accepts;
--atomic_t cm_disconnects;
--atomic_t cm_closes;
--atomic_t cm_connecteds;
--atomic_t cm_connect_reqs;
--atomic_t cm_rejects;
-+atomic_unchecked_t cm_connects;
-+atomic_unchecked_t cm_accepts;
-+atomic_unchecked_t cm_disconnects;
-+atomic_unchecked_t cm_closes;
-+atomic_unchecked_t cm_connecteds;
-+atomic_unchecked_t cm_connect_reqs;
-+atomic_unchecked_t cm_rejects;
-
- int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
- {
-@@ -1271,7 +1271,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
- kfree(listener);
- listener = NULL;
- ret = 0;
-- atomic_inc(&cm_listens_destroyed);
-+ atomic_inc_unchecked(&cm_listens_destroyed);
- } else {
- spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
- }
-@@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
- cm_node->rem_mac);
-
- add_hte_node(cm_core, cm_node);
-- atomic_inc(&cm_nodes_created);
-+ atomic_inc_unchecked(&cm_nodes_created);
-
- return cm_node;
- }
-@@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
- }
-
- atomic_dec(&cm_core->node_cnt);
-- atomic_inc(&cm_nodes_destroyed);
-+ atomic_inc_unchecked(&cm_nodes_destroyed);
- nesqp = cm_node->nesqp;
- if (nesqp) {
- nesqp->cm_node = NULL;
-@@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
-
- static void drop_packet(struct sk_buff *skb)
- {
-- atomic_inc(&cm_accel_dropped_pkts);
-+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
- dev_kfree_skb_any(skb);
- }
-
-@@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
- {
-
- int reset = 0; /* whether to send reset in case of err.. */
-- atomic_inc(&cm_resets_recvd);
-+ atomic_inc_unchecked(&cm_resets_recvd);
- nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
- " refcnt=%d\n", cm_node, cm_node->state,
- atomic_read(&cm_node->ref_count));
-@@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
- rem_ref_cm_node(cm_node->cm_core, cm_node);
- return NULL;
- }
-- atomic_inc(&cm_loopbacks);
-+ atomic_inc_unchecked(&cm_loopbacks);
- loopbackremotenode->loopbackpartner = cm_node;
- loopbackremotenode->tcp_cntxt.rcv_wscale =
- NES_CM_DEFAULT_RCV_WND_SCALE;
-@@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
- nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
- else {
- rem_ref_cm_node(cm_core, cm_node);
-- atomic_inc(&cm_accel_dropped_pkts);
-+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
- dev_kfree_skb_any(skb);
- }
- break;
-@@ -2880,7 +2880,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
-
- if ((cm_id) && (cm_id->event_handler)) {
- if (issue_disconn) {
-- atomic_inc(&cm_disconnects);
-+ atomic_inc_unchecked(&cm_disconnects);
- cm_event.event = IW_CM_EVENT_DISCONNECT;
- cm_event.status = disconn_status;
- cm_event.local_addr = cm_id->local_addr;
-@@ -2902,7 +2902,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
- }
-
- if (issue_close) {
-- atomic_inc(&cm_closes);
-+ atomic_inc_unchecked(&cm_closes);
- nes_disconnect(nesqp, 1);
-
- cm_id->provider_data = nesqp;
-@@ -3038,7 +3038,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
-
- nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
- nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
-- atomic_inc(&cm_accepts);
-+ atomic_inc_unchecked(&cm_accepts);
-
- nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
- netdev_refcnt_read(nesvnic->netdev));
-@@ -3240,7 +3240,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
- struct nes_cm_core *cm_core;
- u8 *start_buff;
-
-- atomic_inc(&cm_rejects);
-+ atomic_inc_unchecked(&cm_rejects);
- cm_node = (struct nes_cm_node *)cm_id->provider_data;
- loopback = cm_node->loopbackpartner;
- cm_core = cm_node->cm_core;
-@@ -3300,7 +3300,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
- ntohl(cm_id->local_addr.sin_addr.s_addr),
- ntohs(cm_id->local_addr.sin_port));
-
-- atomic_inc(&cm_connects);
-+ atomic_inc_unchecked(&cm_connects);
- nesqp->active_conn = 1;
-
- /* cache the cm_id in the qp */
-@@ -3406,7 +3406,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
- g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
- return err;
- }
-- atomic_inc(&cm_listens_created);
-+ atomic_inc_unchecked(&cm_listens_created);
- }
-
- cm_id->add_ref(cm_id);
-@@ -3507,7 +3507,7 @@ static void cm_event_connected(struct nes_cm_event *event)
-
- if (nesqp->destroyed)
- return;
-- atomic_inc(&cm_connecteds);
-+ atomic_inc_unchecked(&cm_connecteds);
- nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
- " local port 0x%04X. jiffies = %lu.\n",
- nesqp->hwqp.qp_id,
-@@ -3694,7 +3694,7 @@ static void cm_event_reset(struct nes_cm_event *event)
-
- cm_id->add_ref(cm_id);
- ret = cm_id->event_handler(cm_id, &cm_event);
-- atomic_inc(&cm_closes);
-+ atomic_inc_unchecked(&cm_closes);
- cm_event.event = IW_CM_EVENT_CLOSE;
- cm_event.status = 0;
- cm_event.provider_data = cm_id->provider_data;
-@@ -3730,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
- return;
- cm_id = cm_node->cm_id;
-
-- atomic_inc(&cm_connect_reqs);
-+ atomic_inc_unchecked(&cm_connect_reqs);
- nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
- cm_node, cm_id, jiffies);
-
-@@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
- return;
- cm_id = cm_node->cm_id;
-
-- atomic_inc(&cm_connect_reqs);
-+ atomic_inc_unchecked(&cm_connect_reqs);
- nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
- cm_node, cm_id, jiffies);
-
-diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
-index b3b2a24..7bfaf1e 100644
---- a/drivers/infiniband/hw/nes/nes_mgt.c
-+++ b/drivers/infiniband/hw/nes/nes_mgt.c
-@@ -40,8 +40,8 @@
- #include "nes.h"
- #include "nes_mgt.h"
-
--atomic_t pau_qps_created;
--atomic_t pau_qps_destroyed;
-+atomic_unchecked_t pau_qps_created;
-+atomic_unchecked_t pau_qps_destroyed;
-
- static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
- {
-@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
- {
- struct sk_buff *skb;
- unsigned long flags;
-- atomic_inc(&pau_qps_destroyed);
-+ atomic_inc_unchecked(&pau_qps_destroyed);
-
- /* Free packets that have not yet been forwarded */
- /* Lock is acquired by skb_dequeue when removing the skb */
-@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
- cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
- skb_queue_head_init(&nesqp->pau_list);
- spin_lock_init(&nesqp->pau_lock);
-- atomic_inc(&pau_qps_created);
-+ atomic_inc_unchecked(&pau_qps_created);
- nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
- }
-
-diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
-index c00d2f3..8834298 100644
---- a/drivers/infiniband/hw/nes/nes_nic.c
-+++ b/drivers/infiniband/hw/nes/nes_nic.c
-@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
- target_stat_values[++index] = mh_detected;
- target_stat_values[++index] = mh_pauses_sent;
- target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
-- target_stat_values[++index] = atomic_read(&cm_connects);
-- target_stat_values[++index] = atomic_read(&cm_accepts);
-- target_stat_values[++index] = atomic_read(&cm_disconnects);
-- target_stat_values[++index] = atomic_read(&cm_connecteds);
-- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
-- target_stat_values[++index] = atomic_read(&cm_rejects);
-- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
-- target_stat_values[++index] = atomic_read(&qps_created);
-- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
-- target_stat_values[++index] = atomic_read(&qps_destroyed);
-- target_stat_values[++index] = atomic_read(&cm_closes);
-+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
-+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
-+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
-+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
-+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
-+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
-+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
-+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
-+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
-+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
-+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
- target_stat_values[++index] = cm_packets_sent;
- target_stat_values[++index] = cm_packets_bounced;
- target_stat_values[++index] = cm_packets_created;
- target_stat_values[++index] = cm_packets_received;
- target_stat_values[++index] = cm_packets_dropped;
- target_stat_values[++index] = cm_packets_retrans;
-- target_stat_values[++index] = atomic_read(&cm_listens_created);
-- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
-+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
-+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
- target_stat_values[++index] = cm_backlog_drops;
-- target_stat_values[++index] = atomic_read(&cm_loopbacks);
-- target_stat_values[++index] = atomic_read(&cm_nodes_created);
-- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
-- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
-- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
-+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
-+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
-+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
-+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
-+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
- target_stat_values[++index] = nesadapter->free_4kpbl;
- target_stat_values[++index] = nesadapter->free_256pbl;
- target_stat_values[++index] = int_mod_timer_init;
- target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
- target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
- target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
-- target_stat_values[++index] = atomic_read(&pau_qps_created);
-- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
-+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
-+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
- }
-
- /**
-diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
-index 330eb6e..a4bc52b 100644
---- a/drivers/infiniband/hw/nes/nes_verbs.c
-+++ b/drivers/infiniband/hw/nes/nes_verbs.c
-@@ -46,9 +46,9 @@
-
- #include <rdma/ib_umem.h>
-
--atomic_t mod_qp_timouts;
--atomic_t qps_created;
--atomic_t sw_qps_destroyed;
-+atomic_unchecked_t mod_qp_timouts;
-+atomic_unchecked_t qps_created;
-+atomic_unchecked_t sw_qps_destroyed;
-
- static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
-
-@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
- if (init_attr->create_flags)
- return ERR_PTR(-EINVAL);
-
-- atomic_inc(&qps_created);
-+ atomic_inc_unchecked(&qps_created);
- switch (init_attr->qp_type) {
- case IB_QPT_RC:
- if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
-@@ -1462,7 +1462,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
- struct iw_cm_event cm_event;
- int ret = 0;
-
-- atomic_inc(&sw_qps_destroyed);
-+ atomic_inc_unchecked(&sw_qps_destroyed);
- nesqp->destroyed = 1;
-
- /* Blow away the connection if it exists. */
-diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
-index 4c2b079..76a0feb 100644
---- a/drivers/infiniband/hw/qib/qib.h
-+++ b/drivers/infiniband/hw/qib/qib.h
-@@ -51,6 +51,7 @@
- #include <linux/completion.h>
- #include <linux/kref.h>
- #include <linux/sched.h>
-+#include <linux/slab.h>
-
- #include "qib_common.h"
- #include "qib_verbs.h"
-diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
-index df7fa25..f11b448 100644
---- a/drivers/infiniband/hw/qib/qib_fs.c
-+++ b/drivers/infiniband/hw/qib/qib_fs.c
-@@ -603,6 +603,7 @@ static struct file_system_type qibfs_fs_type = {
- .mount = qibfs_mount,
- .kill_sb = qibfs_kill_super,
- };
-+MODULE_ALIAS_FS("ipathfs");
-
- int __init qib_init_qibfs(void)
- {
-diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
-index c351aa4..e6967c2 100644
---- a/drivers/input/gameport/gameport.c
-+++ b/drivers/input/gameport/gameport.c
-@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
- */
- static void gameport_init_port(struct gameport *gameport)
- {
-- static atomic_t gameport_no = ATOMIC_INIT(0);
-+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
-
- __module_get(THIS_MODULE);
-
- mutex_init(&gameport->drv_mutex);
- device_initialize(&gameport->dev);
- dev_set_name(&gameport->dev, "gameport%lu",
-- (unsigned long)atomic_inc_return(&gameport_no) - 1);
-+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
- gameport->dev.bus = &gameport_bus;
- gameport->dev.release = gameport_release_port;
- if (gameport->parent)
-diff --git a/drivers/input/input.c b/drivers/input/input.c
-index da38d97..2aa0b79 100644
---- a/drivers/input/input.c
-+++ b/drivers/input/input.c
-@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
- */
- int input_register_device(struct input_dev *dev)
- {
-- static atomic_t input_no = ATOMIC_INIT(0);
-+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
- struct input_handler *handler;
- const char *path;
- int error;
-@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
- dev->setkeycode = input_default_setkeycode;
-
- dev_set_name(&dev->dev, "input%ld",
-- (unsigned long) atomic_inc_return(&input_no) - 1);
-+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
-
- error = device_add(&dev->dev);
- if (error)
-diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
-index b8d8611..7a4a04b 100644
---- a/drivers/input/joystick/sidewinder.c
-+++ b/drivers/input/joystick/sidewinder.c
-@@ -30,6 +30,7 @@
- #include <linux/kernel.h>
- #include <linux/module.h>
- #include <linux/slab.h>
-+#include <linux/sched.h>
- #include <linux/init.h>
- #include <linux/input.h>
- #include <linux/gameport.h>
-diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
-index 0c4c556..759171c 100644
---- a/drivers/input/joystick/xpad.c
-+++ b/drivers/input/joystick/xpad.c
-@@ -714,7 +714,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
-
- static int xpad_led_probe(struct usb_xpad *xpad)
- {
-- static atomic_t led_seq = ATOMIC_INIT(0);
-+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
- long led_no;
- struct xpad_led *led;
- struct led_classdev *led_cdev;
-@@ -727,7 +727,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
- if (!led)
- return -ENOMEM;
-
-- led_no = (long)atomic_inc_return(&led_seq) - 1;
-+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
-
- snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
- led->xpad = xpad;
-diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
-index 9b84b0c..027158f 100644
---- a/drivers/input/mouse/psmouse.h
-+++ b/drivers/input/mouse/psmouse.h
-@@ -110,7 +110,7 @@ struct psmouse_attribute {
- ssize_t (*set)(struct psmouse *psmouse, void *data,
- const char *buf, size_t count);
- bool protect;
--};
-+} __do_const;
- #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
-
- ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
-diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
-index 0110b5a..d3ad144 100644
---- a/drivers/input/mousedev.c
-+++ b/drivers/input/mousedev.c
-@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
-
- spin_unlock_irq(&client->packet_lock);
-
-- if (copy_to_user(buffer, data, count))
-+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
- return -EFAULT;
-
- return count;
-diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
-index ba70058..571d25d 100644
---- a/drivers/input/serio/serio.c
-+++ b/drivers/input/serio/serio.c
-@@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
- */
- static void serio_init_port(struct serio *serio)
- {
-- static atomic_t serio_no = ATOMIC_INIT(0);
-+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
-
- __module_get(THIS_MODULE);
-
-@@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
- mutex_init(&serio->drv_mutex);
- device_initialize(&serio->dev);
- dev_set_name(&serio->dev, "serio%ld",
-- (long)atomic_inc_return(&serio_no) - 1);
-+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
- serio->dev.bus = &serio_bus;
- serio->dev.release = serio_release_port;
- serio->dev.groups = serio_device_attr_groups;
-diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
-index 4d4cd14..d6fdd87 100644
---- a/drivers/input/serio/serio_raw.c
-+++ b/drivers/input/serio/serio_raw.c
-@@ -280,7 +280,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
-
- static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
- {
-- static atomic_t serio_raw_no = ATOMIC_INIT(0);
-+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0);
- struct serio_raw *serio_raw;
- int err;
-
-@@ -291,7 +291,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
- }
-
- snprintf(serio_raw->name, sizeof(serio_raw->name),
-- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
-+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1);
- kref_init(&serio_raw->kref);
- INIT_LIST_HEAD(&serio_raw->client_list);
- init_waitqueue_head(&serio_raw->wait);
-diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
-index 62811de..2e1bd7f 100644
---- a/drivers/input/touchscreen/htcpen.c
-+++ b/drivers/input/touchscreen/htcpen.c
-@@ -227,7 +227,7 @@ static struct isa_driver htcpen_isa_driver = {
- }
- };
-
--static struct dmi_system_id __initdata htcshift_dmi_table[] = {
-+static const struct dmi_system_id __initconst htcshift_dmi_table[] = {
- {
- .ident = "Shift",
- .matches = {
-diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
-index 486982f..1e8058b 100644
---- a/drivers/iommu/amd_iommu.c
-+++ b/drivers/iommu/amd_iommu.c
-@@ -536,11 +536,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
-
- static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
- {
-+ phys_addr_t physaddr;
- WARN_ON(address & 0x7ULL);
-
- memset(cmd, 0, sizeof(*cmd));
-- cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
-- cmd->data[1] = upper_32_bits(__pa(address));
-+
-+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
-+ if (object_starts_on_stack((void *)address)) {
-+ void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
-+ physaddr = __pa((u64)adjbuf);
-+ } else
-+#endif
-+ physaddr = __pa(address);
-+
-+ cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
-+ cmd->data[1] = upper_32_bits(physaddr);
- cmd->data[2] = 1;
- CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
- }
-diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
-index e44933d..9ba484a 100644
---- a/drivers/isdn/capi/capi.c
-+++ b/drivers/isdn/capi/capi.c
-@@ -83,8 +83,8 @@ struct capiminor {
-
- struct capi20_appl *ap;
- u32 ncci;
-- atomic_t datahandle;
-- atomic_t msgid;
-+ atomic_unchecked_t datahandle;
-+ atomic_unchecked_t msgid;
-
- struct tty_port port;
- int ttyinstop;
-@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
- capimsg_setu16(s, 2, mp->ap->applid);
- capimsg_setu8 (s, 4, CAPI_DATA_B3);
- capimsg_setu8 (s, 5, CAPI_RESP);
-- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
-+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
- capimsg_setu32(s, 8, mp->ncci);
- capimsg_setu16(s, 12, datahandle);
- }
-@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
- mp->outbytes -= len;
- spin_unlock_bh(&mp->outlock);
-
-- datahandle = atomic_inc_return(&mp->datahandle);
-+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
- skb_push(skb, CAPI_DATA_B3_REQ_LEN);
- memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
- capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
- capimsg_setu16(skb->data, 2, mp->ap->applid);
- capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
- capimsg_setu8 (skb->data, 5, CAPI_REQ);
-- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
-+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
- capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
- capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
- capimsg_setu16(skb->data, 16, len); /* Data length */
-diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
-index 2b33b26..a9c638b 100644
---- a/drivers/isdn/capi/kcapi.c
-+++ b/drivers/isdn/capi/kcapi.c
-@@ -93,7 +93,7 @@ capi_ctr_put(struct capi_ctr *ctr)
-
- static inline struct capi_ctr *get_capi_ctr_by_nr(u16 contr)
- {
-- if (contr - 1 >= CAPI_MAXCONTR)
-+ if (contr < 1 || contr - 1 >= CAPI_MAXCONTR)
- return NULL;
-
- return capi_controller[contr - 1];
-@@ -103,7 +103,7 @@ static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid)
- {
- lockdep_assert_held(&capi_controller_lock);
-
-- if (applid - 1 >= CAPI_MAXAPPL)
-+ if (applid < 1 || applid - 1 >= CAPI_MAXAPPL)
- return NULL;
-
- return capi_applications[applid - 1];
-@@ -111,7 +111,7 @@ static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid)
-
- static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid)
- {
-- if (applid - 1 >= CAPI_MAXAPPL)
-+ if (applid < 1 || applid - 1 >= CAPI_MAXAPPL)
- return NULL;
-
- return rcu_dereference(capi_applications[applid - 1]);
-diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
-index 492aa52..fe79ae4 100644
---- a/drivers/isdn/gigaset/bas-gigaset.c
-+++ b/drivers/isdn/gigaset/bas-gigaset.c
-@@ -2559,22 +2559,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
-
-
- static const struct gigaset_ops gigops = {
-- gigaset_write_cmd,
-- gigaset_write_room,
-- gigaset_chars_in_buffer,
-- gigaset_brkchars,
-- gigaset_init_bchannel,
-- gigaset_close_bchannel,
-- gigaset_initbcshw,
-- gigaset_freebcshw,
-- gigaset_reinitbcshw,
-- gigaset_initcshw,
-- gigaset_freecshw,
-- gigaset_set_modem_ctrl,
-- gigaset_baud_rate,
-- gigaset_set_line_ctrl,
-- gigaset_isoc_send_skb,
-- gigaset_isoc_input,
-+ .write_cmd = gigaset_write_cmd,
-+ .write_room = gigaset_write_room,
-+ .chars_in_buffer = gigaset_chars_in_buffer,
-+ .brkchars = gigaset_brkchars,
-+ .init_bchannel = gigaset_init_bchannel,
-+ .close_bchannel = gigaset_close_bchannel,
-+ .initbcshw = gigaset_initbcshw,
-+ .freebcshw = gigaset_freebcshw,
-+ .reinitbcshw = gigaset_reinitbcshw,
-+ .initcshw = gigaset_initcshw,
-+ .freecshw = gigaset_freecshw,
-+ .set_modem_ctrl = gigaset_set_modem_ctrl,
-+ .baud_rate = gigaset_baud_rate,
-+ .set_line_ctrl = gigaset_set_line_ctrl,
-+ .send_skb = gigaset_isoc_send_skb,
-+ .handle_input = gigaset_isoc_input,
- };
-
- /* bas_gigaset_init
-diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
-index db621db..825ea1a 100644
---- a/drivers/isdn/gigaset/common.c
-+++ b/drivers/isdn/gigaset/common.c
-@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
- cs->commands_pending = 0;
- cs->cur_at_seq = 0;
- cs->gotfwver = -1;
-- cs->open_count = 0;
-+ local_set(&cs->open_count, 0);
- cs->dev = NULL;
- cs->tty = NULL;
- cs->tty_dev = NULL;
-diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
-index 212efaf..f187c6b 100644
---- a/drivers/isdn/gigaset/gigaset.h
-+++ b/drivers/isdn/gigaset/gigaset.h
-@@ -35,6 +35,7 @@
- #include <linux/tty_driver.h>
- #include <linux/list.h>
- #include <linux/atomic.h>
-+#include <asm/local.h>
-
- #define GIG_VERSION {0, 5, 0, 0}
- #define GIG_COMPAT {0, 4, 0, 0}
-@@ -433,7 +434,7 @@ struct cardstate {
- spinlock_t cmdlock;
- unsigned curlen, cmdbytes;
-
-- unsigned open_count;
-+ local_t open_count;
- struct tty_struct *tty;
- struct tasklet_struct if_wake_tasklet;
- unsigned control_state;
-diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
-index ee0a549..a7c9798 100644
---- a/drivers/isdn/gigaset/interface.c
-+++ b/drivers/isdn/gigaset/interface.c
-@@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
- }
- tty->driver_data = cs;
-
-- ++cs->open_count;
--
-- if (cs->open_count == 1) {
-+ if (local_inc_return(&cs->open_count) == 1) {
- spin_lock_irqsave(&cs->lock, flags);
- cs->tty = tty;
- spin_unlock_irqrestore(&cs->lock, flags);
-@@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
-
- if (!cs->connected)
- gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
-- else if (!cs->open_count)
-+ else if (!local_read(&cs->open_count))
- dev_warn(cs->dev, "%s: device not opened\n", __func__);
- else {
-- if (!--cs->open_count) {
-+ if (!local_dec_return(&cs->open_count)) {
- spin_lock_irqsave(&cs->lock, flags);
- cs->tty = NULL;
- spin_unlock_irqrestore(&cs->lock, flags);
-@@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
- if (!cs->connected) {
- gig_dbg(DEBUG_IF, "not connected");
- retval = -ENODEV;
-- } else if (!cs->open_count)
-+ } else if (!local_read(&cs->open_count))
- dev_warn(cs->dev, "%s: device not opened\n", __func__);
- else {
- retval = 0;
-@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
- retval = -ENODEV;
- goto done;
- }
-- if (!cs->open_count) {
-+ if (!local_read(&cs->open_count)) {
- dev_warn(cs->dev, "%s: device not opened\n", __func__);
- retval = -ENODEV;
- goto done;
-@@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
- if (!cs->connected) {
- gig_dbg(DEBUG_IF, "not connected");
- retval = -ENODEV;
-- } else if (!cs->open_count)
-+ } else if (!local_read(&cs->open_count))
- dev_warn(cs->dev, "%s: device not opened\n", __func__);
- else if (cs->mstate != MS_LOCKED) {
- dev_warn(cs->dev, "can't write to unlocked device\n");
-@@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
-
- if (!cs->connected)
- gig_dbg(DEBUG_IF, "not connected");
-- else if (!cs->open_count)
-+ else if (!local_read(&cs->open_count))
- dev_warn(cs->dev, "%s: device not opened\n", __func__);
- else if (cs->mstate != MS_LOCKED)
- dev_warn(cs->dev, "can't write to unlocked device\n");
-@@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
-
- if (!cs->connected)
- gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
-- else if (!cs->open_count)
-+ else if (!local_read(&cs->open_count))
- dev_warn(cs->dev, "%s: device not opened\n", __func__);
- else
- gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
-@@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
-
- if (!cs->connected)
- gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
-- else if (!cs->open_count)
-+ else if (!local_read(&cs->open_count))
- dev_warn(cs->dev, "%s: device not opened\n", __func__);
- else
- gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
-@@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
- goto out;
- }
-
-- if (!cs->open_count) {
-+ if (!local_read(&cs->open_count)) {
- dev_warn(cs->dev, "%s: device not opened\n", __func__);
- goto out;
- }
-diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
-index 86a5c4f..5a564c2 100644
---- a/drivers/isdn/gigaset/ser-gigaset.c
-+++ b/drivers/isdn/gigaset/ser-gigaset.c
-@@ -454,22 +454,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
- }
-
- static const struct gigaset_ops ops = {
-- gigaset_write_cmd,
-- gigaset_write_room,
-- gigaset_chars_in_buffer,
-- gigaset_brkchars,
-- gigaset_init_bchannel,
-- gigaset_close_bchannel,
-- gigaset_initbcshw,
-- gigaset_freebcshw,
-- gigaset_reinitbcshw,
-- gigaset_initcshw,
-- gigaset_freecshw,
-- gigaset_set_modem_ctrl,
-- gigaset_baud_rate,
-- gigaset_set_line_ctrl,
-- gigaset_m10x_send_skb, /* asyncdata.c */
-- gigaset_m10x_input, /* asyncdata.c */
-+ .write_cmd = gigaset_write_cmd,
-+ .write_room = gigaset_write_room,
-+ .chars_in_buffer = gigaset_chars_in_buffer,
-+ .brkchars = gigaset_brkchars,
-+ .init_bchannel = gigaset_init_bchannel,
-+ .close_bchannel = gigaset_close_bchannel,
-+ .initbcshw = gigaset_initbcshw,
-+ .freebcshw = gigaset_freebcshw,
-+ .reinitbcshw = gigaset_reinitbcshw,
-+ .initcshw = gigaset_initcshw,
-+ .freecshw = gigaset_freecshw,
-+ .set_modem_ctrl = gigaset_set_modem_ctrl,
-+ .baud_rate = gigaset_baud_rate,
-+ .set_line_ctrl = gigaset_set_line_ctrl,
-+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
-+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
- };
-
-
-diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
-index 5e3300d..b187acf 100644
---- a/drivers/isdn/gigaset/usb-gigaset.c
-+++ b/drivers/isdn/gigaset/usb-gigaset.c
-@@ -546,7 +546,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
- gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
- memcpy(cs->hw.usb->bchars, buf, 6);
- return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
-- 0, 0, &buf, 6, 2000);
-+ 0, 0, buf, 6, 2000);
- }
-
- static int gigaset_freebcshw(struct bc_state *bcs)
-@@ -869,22 +869,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
- }
-
- static const struct gigaset_ops ops = {
-- gigaset_write_cmd,
-- gigaset_write_room,
-- gigaset_chars_in_buffer,
-- gigaset_brkchars,
-- gigaset_init_bchannel,
-- gigaset_close_bchannel,
-- gigaset_initbcshw,
-- gigaset_freebcshw,
-- gigaset_reinitbcshw,
-- gigaset_initcshw,
-- gigaset_freecshw,
-- gigaset_set_modem_ctrl,
-- gigaset_baud_rate,
-- gigaset_set_line_ctrl,
-- gigaset_m10x_send_skb,
-- gigaset_m10x_input,
-+ .write_cmd = gigaset_write_cmd,
-+ .write_room = gigaset_write_room,
-+ .chars_in_buffer = gigaset_chars_in_buffer,
-+ .brkchars = gigaset_brkchars,
-+ .init_bchannel = gigaset_init_bchannel,
-+ .close_bchannel = gigaset_close_bchannel,
-+ .initbcshw = gigaset_initbcshw,
-+ .freebcshw = gigaset_freebcshw,
-+ .reinitbcshw = gigaset_reinitbcshw,
-+ .initcshw = gigaset_initcshw,
-+ .freecshw = gigaset_freecshw,
-+ .set_modem_ctrl = gigaset_set_modem_ctrl,
-+ .baud_rate = gigaset_baud_rate,
-+ .set_line_ctrl = gigaset_set_line_ctrl,
-+ .send_skb = gigaset_m10x_send_skb,
-+ .handle_input = gigaset_m10x_input,
- };
-
- /*
-diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
-index 2a57da59..e7a12ed 100644
---- a/drivers/isdn/hardware/avm/b1.c
-+++ b/drivers/isdn/hardware/avm/b1.c
-@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
- }
- if (left) {
- if (t4file->user) {
-- if (copy_from_user(buf, dp, left))
-+ if (left > sizeof buf || copy_from_user(buf, dp, left))
- return -EFAULT;
- } else {
- memcpy(buf, dp, left);
-@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
- }
- if (left) {
- if (config->user) {
-- if (copy_from_user(buf, dp, left))
-+ if (left > sizeof buf || copy_from_user(buf, dp, left))
- return -EFAULT;
- } else {
- memcpy(buf, dp, left);
-diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
-index 6ddb795e..bd2e875 100644
---- a/drivers/isdn/i4l/isdn_common.c
-+++ b/drivers/isdn/i4l/isdn_common.c
-@@ -1656,6 +1656,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
- } else
- return -EINVAL;
- case IIOCDBGVAR:
-+ if (!capable(CAP_SYS_RAWIO))
-+ return -EPERM;
- if (arg) {
- if (copy_to_user(argp, &dev, sizeof(ulong)))
- return -EFAULT;
-diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
-index d568689..a53b90a 100644
---- a/drivers/isdn/i4l/isdn_concap.c
-+++ b/drivers/isdn/i4l/isdn_concap.c
-@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
- }
-
- struct concap_device_ops isdn_concap_reliable_dl_dops = {
-- &isdn_concap_dl_data_req,
-- &isdn_concap_dl_connect_req,
-- &isdn_concap_dl_disconn_req
-+ .data_req = &isdn_concap_dl_data_req,
-+ .connect_req = &isdn_concap_dl_connect_req,
-+ .disconn_req = &isdn_concap_dl_disconn_req
- };
-
- /* The following should better go into a dedicated source file such that
-diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
-index 2339d73..802ab87a 100644
---- a/drivers/isdn/i4l/isdn_net.c
-+++ b/drivers/isdn/i4l/isdn_net.c
-@@ -1901,7 +1901,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
- {
- isdn_net_local *lp = netdev_priv(dev);
- unsigned char *p;
-- ushort len = 0;
-+ int len = 0;
-
- switch (lp->p_encap) {
- case ISDN_NET_ENCAP_ETHER:
-diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
-index fd10d7c..1eaf1f4 100644
---- a/drivers/isdn/i4l/isdn_x25iface.c
-+++ b/drivers/isdn/i4l/isdn_x25iface.c
-@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind( struct concap_proto * );
-
-
- static struct concap_proto_ops ix25_pops = {
-- &isdn_x25iface_proto_new,
-- &isdn_x25iface_proto_del,
-- &isdn_x25iface_proto_restart,
-- &isdn_x25iface_proto_close,
-- &isdn_x25iface_xmit,
-- &isdn_x25iface_receive,
-- &isdn_x25iface_connect_ind,
-- &isdn_x25iface_disconn_ind
-+ .proto_new = &isdn_x25iface_proto_new,
-+ .proto_del = &isdn_x25iface_proto_del,
-+ .restart = &isdn_x25iface_proto_restart,
-+ .close = &isdn_x25iface_proto_close,
-+ .encap_and_xmit = &isdn_x25iface_xmit,
-+ .data_ind = &isdn_x25iface_receive,
-+ .connect_ind = &isdn_x25iface_connect_ind,
-+ .disconn_ind = &isdn_x25iface_disconn_ind
- };
-
- /* error message helper function */
-diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
-index 1f355bb..3efedbb 100644
---- a/drivers/isdn/icn/icn.c
-+++ b/drivers/isdn/icn/icn.c
-@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
- if (count > len)
- count = len;
- if (user) {
-- if (copy_from_user(msg, buf, count))
-+ if (count > sizeof msg || copy_from_user(msg, buf, count))
- return -EFAULT;
- } else
- memcpy(msg, buf, count);
-@@ -1611,7 +1611,7 @@ icn_setup(char *line)
- if (ints[0] > 1)
- membase = (unsigned long)ints[2];
- if (str && *str) {
-- strcpy(sid, str);
-+ strlcpy(sid, str, sizeof(sid));
- icn_id = sid;
- if ((p = strchr(sid, ','))) {
- *p++ = 0;
-diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
-index 4d395de..c504763 100644
---- a/drivers/isdn/mISDN/dsp_cmx.c
-+++ b/drivers/isdn/mISDN/dsp_cmx.c
-@@ -1623,7 +1623,7 @@ u32 dsp_spl_jiffies; /* calculate the next time to fire */
- static u16 dsp_count; /* last sample count */
- static int dsp_count_valid ; /* if we have last sample count */
-
--void
-+void __intentional_overflow(-1)
- dsp_cmx_send(void *arg)
- {
- struct dsp_conf *conf;
-diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
-index a498135..6a39f48 100644
---- a/drivers/leds/leds-clevo-mail.c
-+++ b/drivers/leds/leds-clevo-mail.c
-@@ -39,7 +39,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
- * detected as working, but in reality it is not) as low as
- * possible.
- */
--static struct dmi_system_id __initdata mail_led_whitelist[] = {
-+static const struct dmi_system_id __initconst mail_led_whitelist[] = {
- {
- .callback = clevo_mail_led_dmi_callback,
- .ident = "Clevo D410J",
-diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
-index b3393a9..33f6979 100644
---- a/drivers/leds/leds-mc13783.c
-+++ b/drivers/leds/leds-mc13783.c
-@@ -280,7 +280,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
- return -EINVAL;
- }
-
-- led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
-+ led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
- if (led == NULL) {
- dev_err(&pdev->dev, "failed to alloc memory\n");
- return -ENOMEM;
-diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
-index 614ebeb..ce439fd 100644
---- a/drivers/leds/leds-ss4200.c
-+++ b/drivers/leds/leds-ss4200.c
-@@ -92,7 +92,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
- * detected as working, but in reality it is not) as low as
- * possible.
- */
--static struct dmi_system_id __initdata nas_led_whitelist[] = {
-+static const struct dmi_system_id __initconst nas_led_whitelist[] = {
- {
- .callback = ss4200_led_dmi_callback,
- .ident = "Intel SS4200-E",
-diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
-index b5fdcb7..3cb34b8 100644
---- a/drivers/lguest/core.c
-+++ b/drivers/lguest/core.c
-@@ -92,9 +92,17 @@ static __init int map_switcher(void)
- * it's worked so far. The end address needs +1 because __get_vm_area
- * allocates an extra guard page, so we need space for that.
- */
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
-+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
-+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
-+#else
- switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
- VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
- + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
-+#endif
-+
- if (!switcher_vma) {
- err = -ENOMEM;
- printk("lguest: could not map switcher pages high\n");
-@@ -119,7 +127,7 @@ static __init int map_switcher(void)
- * Now the Switcher is mapped at the right address, we can't fail!
- * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
- */
-- memcpy(switcher_vma->addr, start_switcher_text,
-+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
- end_switcher_text - start_switcher_text);
-
- printk(KERN_INFO "lguest: mapped switcher at %p\n",
-@@ -171,7 +179,7 @@ static void unmap_switcher(void)
- bool lguest_address_ok(const struct lguest *lg,
- unsigned long addr, unsigned long len)
- {
-- return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
-+ return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr);
- }
-
- /*
-diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
-index 3b62be16..e33134a 100644
---- a/drivers/lguest/page_tables.c
-+++ b/drivers/lguest/page_tables.c
-@@ -532,7 +532,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
- /*:*/
-
- #ifdef CONFIG_X86_PAE
--static void release_pmd(pmd_t *spmd)
-+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
- {
- /* If the entry's not present, there's nothing to release. */
- if (pmd_flags(*spmd) & _PAGE_PRESENT) {
-diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
-index 65af42f..530c87a 100644
---- a/drivers/lguest/x86/core.c
-+++ b/drivers/lguest/x86/core.c
-@@ -59,7 +59,7 @@ static struct {
- /* Offset from where switcher.S was compiled to where we've copied it */
- static unsigned long switcher_offset(void)
- {
-- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
-+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
- }
-
- /* This cpu's struct lguest_pages. */
-@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
- * These copies are pretty cheap, so we do them unconditionally: */
- /* Save the current Host top-level page directory.
- */
-+
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ pages->state.host_cr3 = read_cr3();
-+#else
- pages->state.host_cr3 = __pa(current->mm->pgd);
-+#endif
-+
- /*
- * Set up the Guest's page tables to see this CPU's pages (and no
- * other CPU's pages).
-@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
- * compiled-in switcher code and the high-mapped copy we just made.
- */
- for (i = 0; i < IDT_ENTRIES; i++)
-- default_idt_entries[i] += switcher_offset();
-+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
-
- /*
- * Set up the Switcher's per-cpu areas.
-@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
- * it will be undisturbed when we switch. To change %cs and jump we
- * need this structure to feed to Intel's "lcall" instruction.
- */
-- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
-+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
- lguest_entry.segment = LGUEST_CS;
-
- /*
-diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
-index 40634b0..4f5855e 100644
---- a/drivers/lguest/x86/switcher_32.S
-+++ b/drivers/lguest/x86/switcher_32.S
-@@ -87,6 +87,7 @@
- #include <asm/page.h>
- #include <asm/segment.h>
- #include <asm/lguest.h>
-+#include <asm/processor-flags.h>
-
- // We mark the start of the code to copy
- // It's placed in .text tho it's never run here
-@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
- // Changes type when we load it: damn Intel!
- // For after we switch over our page tables
- // That entry will be read-only: we'd crash.
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ mov %cr0, %edx
-+ xor $X86_CR0_WP, %edx
-+ mov %edx, %cr0
-+#endif
-+
- movl $(GDT_ENTRY_TSS*8), %edx
- ltr %dx
-
-@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
- // Let's clear it again for our return.
- // The GDT descriptor of the Host
- // Points to the table after two "size" bytes
-- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
-+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
- // Clear "used" from type field (byte 5, bit 2)
-- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
-+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ mov %cr0, %eax
-+ xor $X86_CR0_WP, %eax
-+ mov %eax, %cr0
-+#endif
-
- // Once our page table's switched, the Guest is live!
- // The Host fades as we run this final step.
-@@ -295,13 +309,12 @@ deliver_to_host:
- // I consulted gcc, and it gave
- // These instructions, which I gladly credit:
- leal (%edx,%ebx,8), %eax
-- movzwl (%eax),%edx
-- movl 4(%eax), %eax
-- xorw %ax, %ax
-- orl %eax, %edx
-+ movl 4(%eax), %edx
-+ movw (%eax), %dx
- // Now the address of the handler's in %edx
- // We call it now: its "iret" drops us home.
-- jmp *%edx
-+ ljmp $__KERNEL_CS, $1f
-+1: jmp *%edx
-
- // Every interrupt can come to us here
- // But we must truly tell each apart.
-diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
-index 4daf9e5..b8d1d0f 100644
---- a/drivers/macintosh/macio_asic.c
-+++ b/drivers/macintosh/macio_asic.c
-@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
- * MacIO is matched against any Apple ID, its probe() function
- * will then decide whether it applies or not
- */
--static const struct pci_device_id __devinitdata pci_ids [] = { {
-+static const struct pci_device_id __devinitconst pci_ids [] = { {
- .vendor = PCI_VENDOR_ID_APPLE,
- .device = PCI_ANY_ID,
- .subvendor = PCI_ANY_ID,
-diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
-index e6a300c..cc9c96c 100644
---- a/drivers/md/dm-ioctl.c
-+++ b/drivers/md/dm-ioctl.c
-@@ -1601,7 +1601,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
- cmd == DM_LIST_VERSIONS_CMD)
- return 0;
-
-- if ((cmd == DM_DEV_CREATE_CMD)) {
-+ if (cmd == DM_DEV_CREATE_CMD) {
- if (!*param->name) {
- DMWARN("name not supplied when creating device");
- return -EINVAL;
-diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
-index e5bd3ef..c69d0b7 100644
---- a/drivers/md/dm-log-userspace-transfer.c
-+++ b/drivers/md/dm-log-userspace-transfer.c
-@@ -134,7 +134,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
- {
- struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
-
-- if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
-+ if (!capable(CAP_SYS_ADMIN))
- return;
-
- spin_lock(&receiving_list_lock);
-diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
-index b4361eb..4ad13a0 100644
---- a/drivers/md/dm-raid1.c
-+++ b/drivers/md/dm-raid1.c
-@@ -40,7 +40,7 @@ enum dm_raid1_error {
-
- struct mirror {
- struct mirror_set *ms;
-- atomic_t error_count;
-+ atomic_unchecked_t error_count;
- unsigned long error_type;
- struct dm_dev *dev;
- sector_t offset;
-@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
- struct mirror *m;
-
- for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
-- if (!atomic_read(&m->error_count))
-+ if (!atomic_read_unchecked(&m->error_count))
- return m;
-
- return NULL;
-@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
- * simple way to tell if a device has encountered
- * errors.
- */
-- atomic_inc(&m->error_count);
-+ atomic_inc_unchecked(&m->error_count);
-
- if (test_and_set_bit(error_type, &m->error_type))
- return;
-@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
- struct mirror *m = get_default_mirror(ms);
-
- do {
-- if (likely(!atomic_read(&m->error_count)))
-+ if (likely(!atomic_read_unchecked(&m->error_count)))
- return m;
-
- if (m-- == ms->mirror)
-@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
- {
- struct mirror *default_mirror = get_default_mirror(m->ms);
-
-- return !atomic_read(&default_mirror->error_count);
-+ return !atomic_read_unchecked(&default_mirror->error_count);
- }
-
- static int mirror_available(struct mirror_set *ms, struct bio *bio)
-@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
- */
- if (likely(region_in_sync(ms, region, 1)))
- m = choose_mirror(ms, bio->bi_sector);
-- else if (m && atomic_read(&m->error_count))
-+ else if (m && atomic_read_unchecked(&m->error_count))
- m = NULL;
-
- if (likely(m))
-@@ -946,7 +946,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
- }
-
- ms->mirror[mirror].ms = ms;
-- atomic_set(&(ms->mirror[mirror].error_count), 0);
-+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
- ms->mirror[mirror].error_type = 0;
- ms->mirror[mirror].offset = offset;
-
-@@ -1357,7 +1357,7 @@ static void mirror_resume(struct dm_target *ti)
- */
- static char device_status_char(struct mirror *m)
- {
-- if (!atomic_read(&(m->error_count)))
-+ if (!atomic_read_unchecked(&(m->error_count)))
- return 'A';
-
- return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
-diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
-index cbd41d2..1717044 100644
---- a/drivers/md/dm-stripe.c
-+++ b/drivers/md/dm-stripe.c
-@@ -20,7 +20,7 @@ struct stripe {
- struct dm_dev *dev;
- sector_t physical_start;
-
-- atomic_t error_count;
-+ atomic_unchecked_t error_count;
- };
-
- struct stripe_c {
-@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
- kfree(sc);
- return r;
- }
-- atomic_set(&(sc->stripe[i].error_count), 0);
-+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
- }
-
- ti->private = sc;
-@@ -314,7 +314,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
- DMEMIT("%d ", sc->stripes);
- for (i = 0; i < sc->stripes; i++) {
- DMEMIT("%s ", sc->stripe[i].dev->name);
-- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
-+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
- 'D' : 'A';
- }
- buffer[i] = '\0';
-@@ -360,8 +360,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
- */
- for (i = 0; i < sc->stripes; i++)
- if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
-- atomic_inc(&(sc->stripe[i].error_count));
-- if (atomic_read(&(sc->stripe[i].error_count)) <
-+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
-+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
- DM_IO_ERROR_THRESHOLD)
- schedule_work(&sc->trigger_event);
- }
-diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
-index 5c52582..91793db 100644
---- a/drivers/md/dm-table.c
-+++ b/drivers/md/dm-table.c
-@@ -328,7 +328,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
- static int open_dev(struct dm_dev_internal *d, dev_t dev,
- struct mapped_device *md)
- {
-- static char *_claim_ptr = "I belong to device-mapper";
-+ static char _claim_ptr[] = "I belong to device-mapper";
- struct block_device *bdev;
-
- int r;
-@@ -396,7 +396,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
- if (!dev_size)
- return 0;
-
-- if ((start >= dev_size) || (start + len > dev_size)) {
-+ if ((start >= dev_size) || (len > dev_size - start)) {
- DMWARN("%s: %s too small for target: "
- "start=%llu, len=%llu, dev_size=%llu",
- dm_device_name(ti->table->md), bdevname(bdev, b),
-diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
-index 237571a..fb6d19b 100644
---- a/drivers/md/dm-thin-metadata.c
-+++ b/drivers/md/dm-thin-metadata.c
-@@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
-
- pmd->info.tm = tm;
- pmd->info.levels = 2;
-- pmd->info.value_type.context = pmd->data_sm;
-+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
- pmd->info.value_type.size = sizeof(__le64);
- pmd->info.value_type.inc = data_block_inc;
- pmd->info.value_type.dec = data_block_dec;
-@@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
-
- pmd->bl_info.tm = tm;
- pmd->bl_info.levels = 1;
-- pmd->bl_info.value_type.context = pmd->data_sm;
-+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
- pmd->bl_info.value_type.size = sizeof(__le64);
- pmd->bl_info.value_type.inc = data_block_inc;
- pmd->bl_info.value_type.dec = data_block_dec;
-diff --git a/drivers/md/dm.c b/drivers/md/dm.c
-index c00bcdc..bb5792b 100644
---- a/drivers/md/dm.c
-+++ b/drivers/md/dm.c
-@@ -177,9 +177,9 @@ struct mapped_device {
- /*
- * Event handling.
- */
-- atomic_t event_nr;
-+ atomic_unchecked_t event_nr;
- wait_queue_head_t eventq;
-- atomic_t uevent_seq;
-+ atomic_unchecked_t uevent_seq;
- struct list_head uevent_list;
- spinlock_t uevent_lock; /* Protect access to uevent_list */
-
-@@ -1871,8 +1871,8 @@ static struct mapped_device *alloc_dev(int minor)
- rwlock_init(&md->map_lock);
- atomic_set(&md->holders, 1);
- atomic_set(&md->open_count, 0);
-- atomic_set(&md->event_nr, 0);
-- atomic_set(&md->uevent_seq, 0);
-+ atomic_set_unchecked(&md->event_nr, 0);
-+ atomic_set_unchecked(&md->uevent_seq, 0);
- INIT_LIST_HEAD(&md->uevent_list);
- spin_lock_init(&md->uevent_lock);
-
-@@ -2007,7 +2007,7 @@ static void event_callback(void *context)
-
- dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
-
-- atomic_inc(&md->event_nr);
-+ atomic_inc_unchecked(&md->event_nr);
- wake_up(&md->eventq);
- }
-
-@@ -2648,18 +2648,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
-
- uint32_t dm_next_uevent_seq(struct mapped_device *md)
- {
-- return atomic_add_return(1, &md->uevent_seq);
-+ return atomic_add_return_unchecked(1, &md->uevent_seq);
- }
-
- uint32_t dm_get_event_nr(struct mapped_device *md)
- {
-- return atomic_read(&md->event_nr);
-+ return atomic_read_unchecked(&md->event_nr);
- }
-
- int dm_wait_event(struct mapped_device *md, int event_nr)
- {
- return wait_event_interruptible(md->eventq,
-- (event_nr != atomic_read(&md->event_nr)));
-+ (event_nr != atomic_read_unchecked(&md->event_nr)));
- }
-
- void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
-diff --git a/drivers/md/md.c b/drivers/md/md.c
-index ea8a181..4d3faed 100644
---- a/drivers/md/md.c
-+++ b/drivers/md/md.c
-@@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
- * start build, activate spare
- */
- static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
--static atomic_t md_event_count;
-+static atomic_unchecked_t md_event_count;
- void md_new_event(struct mddev *mddev)
- {
-- atomic_inc(&md_event_count);
-+ atomic_inc_unchecked(&md_event_count);
- wake_up(&md_event_waiters);
- }
- EXPORT_SYMBOL_GPL(md_new_event);
-@@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
- */
- static void md_new_event_inintr(struct mddev *mddev)
- {
-- atomic_inc(&md_event_count);
-+ atomic_inc_unchecked(&md_event_count);
- wake_up(&md_event_waiters);
- }
-
-@@ -1534,7 +1534,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
-
- rdev->preferred_minor = 0xffff;
- rdev->data_offset = le64_to_cpu(sb->data_offset);
-- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
-+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
-
- rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
- bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
-@@ -1751,7 +1751,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
- else
- sb->resync_offset = cpu_to_le64(0);
-
-- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
-+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
-
- sb->raid_disks = cpu_to_le32(mddev->raid_disks);
- sb->size = cpu_to_le64(mddev->dev_sectors);
-@@ -2649,7 +2649,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
- static ssize_t
- errors_show(struct md_rdev *rdev, char *page)
- {
-- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
-+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
- }
-
- static ssize_t
-@@ -2658,7 +2658,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
- char *e;
- unsigned long n = simple_strtoul(buf, &e, 10);
- if (*buf && (*e == 0 || *e == '\n')) {
-- atomic_set(&rdev->corrected_errors, n);
-+ atomic_set_unchecked(&rdev->corrected_errors, n);
- return len;
- }
- return -EINVAL;
-@@ -3052,8 +3052,8 @@ int md_rdev_init(struct md_rdev *rdev)
- rdev->sb_loaded = 0;
- rdev->bb_page = NULL;
- atomic_set(&rdev->nr_pending, 0);
-- atomic_set(&rdev->read_errors, 0);
-- atomic_set(&rdev->corrected_errors, 0);
-+ atomic_set_unchecked(&rdev->read_errors, 0);
-+ atomic_set_unchecked(&rdev->corrected_errors, 0);
-
- INIT_LIST_HEAD(&rdev->same_set);
- init_waitqueue_head(&rdev->blocked_wait);
-@@ -6703,7 +6703,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
-
- spin_unlock(&pers_lock);
- seq_printf(seq, "\n");
-- seq->poll_event = atomic_read(&md_event_count);
-+ seq->poll_event = atomic_read_unchecked(&md_event_count);
- return 0;
- }
- if (v == (void*)2) {
-@@ -6792,7 +6792,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
- chunk_kb ? "KB" : "B");
- if (bitmap->file) {
- seq_printf(seq, ", file: ");
-- seq_path(seq, &bitmap->file->f_path, " \t\n");
-+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
- }
-
- seq_printf(seq, "\n");
-@@ -6823,7 +6823,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
- return error;
-
- seq = file->private_data;
-- seq->poll_event = atomic_read(&md_event_count);
-+ seq->poll_event = atomic_read_unchecked(&md_event_count);
- return error;
- }
-
-@@ -6837,7 +6837,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
- /* always allow read */
- mask = POLLIN | POLLRDNORM;
-
-- if (seq->poll_event != atomic_read(&md_event_count))
-+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
- mask |= POLLERR | POLLPRI;
- return mask;
- }
-@@ -6881,7 +6881,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
- struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
- curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
- (int)part_stat_read(&disk->part0, sectors[1]) -
-- atomic_read(&disk->sync_io);
-+ atomic_read_unchecked(&disk->sync_io);
- /* sync IO will cause sync_io to increase before the disk_stats
- * as sync_io is counted when a request starts, and
- * disk_stats is counted when it completes.
-diff --git a/drivers/md/md.h b/drivers/md/md.h
-index cf742d9..7c7c745 100644
---- a/drivers/md/md.h
-+++ b/drivers/md/md.h
-@@ -120,13 +120,13 @@ struct md_rdev {
- * only maintained for arrays that
- * support hot removal
- */
-- atomic_t read_errors; /* number of consecutive read errors that
-+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
- * we have tried to ignore.
- */
- struct timespec last_read_error; /* monotonic time since our
- * last read error
- */
-- atomic_t corrected_errors; /* number of corrected read errors,
-+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
- * for reporting to userspace and storing
- * in superblock.
- */
-@@ -410,7 +410,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
-
- static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
- {
-- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
-+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
- }
-
- struct md_personality
-diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
-index fc90c11..c8cd9a9 100644
---- a/drivers/md/persistent-data/dm-space-map-checker.c
-+++ b/drivers/md/persistent-data/dm-space-map-checker.c
-@@ -167,7 +167,7 @@ static int ca_commit(struct count_array *old, struct count_array *new)
- /*----------------------------------------------------------------*/
-
- struct sm_checker {
-- struct dm_space_map sm;
-+ dm_space_map_no_const sm;
-
- struct count_array old_counts;
- struct count_array counts;
-diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
-index 1cbfc6b..56e1dbb 100644
---- a/drivers/md/persistent-data/dm-space-map.h
-+++ b/drivers/md/persistent-data/dm-space-map.h
-@@ -60,6 +60,7 @@ struct dm_space_map {
- int (*root_size)(struct dm_space_map *sm, size_t *result);
- int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
- };
-+typedef struct dm_space_map __no_const dm_space_map_no_const;
-
- /*----------------------------------------------------------------*/
-
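The dm-space-map changes show the companion idiom for function-pointer tables: the constify plugin in this patchset makes ops structures read-only, so an ops struct that is embedded in another object and filled in at run time gets a __no_const typedef to opt that one instance out. A rough sketch with hypothetical names, assuming the __no_const attribute supplied by the plugin:

struct frob_ops {
        int (*start)(void *ctx);
        int (*stop)(void *ctx);
};
typedef struct frob_ops __no_const frob_ops_no_const;

struct frob_device {
        frob_ops_no_const ops;  /* copied from a template, then patched at runtime */
        void *ctx;
};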
-diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
-index aec029a..d2c133e 100644
---- a/drivers/md/raid1.c
-+++ b/drivers/md/raid1.c
-@@ -1591,7 +1591,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
- if (r1_sync_page_io(rdev, sect, s,
- bio->bi_io_vec[idx].bv_page,
- READ) != 0)
-- atomic_add(s, &rdev->corrected_errors);
-+ atomic_add_unchecked(s, &rdev->corrected_errors);
- }
- sectors -= s;
- sect += s;
-@@ -1810,7 +1810,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
- test_bit(In_sync, &rdev->flags)) {
- if (r1_sync_page_io(rdev, sect, s,
- conf->tmppage, READ)) {
-- atomic_add(s, &rdev->corrected_errors);
-+ atomic_add_unchecked(s, &rdev->corrected_errors);
- printk(KERN_INFO
- "md/raid1:%s: read error corrected "
- "(%d sectors at %llu on %s)\n",
-diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
-index abac83a..3652f35 100644
---- a/drivers/md/raid10.c
-+++ b/drivers/md/raid10.c
-@@ -1465,7 +1465,7 @@ static void end_sync_read(struct bio *bio, int error)
- /* The write handler will notice the lack of
- * R10BIO_Uptodate and record any errors etc
- */
-- atomic_add(r10_bio->sectors,
-+ atomic_add_unchecked(r10_bio->sectors,
- &conf->mirrors[d].rdev->corrected_errors);
-
- /* for reconstruct, we always reschedule after a read.
-@@ -1765,7 +1765,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
- {
- struct timespec cur_time_mon;
- unsigned long hours_since_last;
-- unsigned int read_errors = atomic_read(&rdev->read_errors);
-+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
-
- ktime_get_ts(&cur_time_mon);
-
-@@ -1787,9 +1787,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
- * overflowing the shift of read_errors by hours_since_last.
- */
- if (hours_since_last >= 8 * sizeof(read_errors))
-- atomic_set(&rdev->read_errors, 0);
-+ atomic_set_unchecked(&rdev->read_errors, 0);
- else
-- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
-+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
- }
-
- static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
-@@ -1839,8 +1839,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
- return;
-
- check_decay_read_errors(mddev, rdev);
-- atomic_inc(&rdev->read_errors);
-- if (atomic_read(&rdev->read_errors) > max_read_errors) {
-+ atomic_inc_unchecked(&rdev->read_errors);
-+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
- char b[BDEVNAME_SIZE];
- bdevname(rdev->bdev, b);
-
-@@ -1848,7 +1848,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
- "md/raid10:%s: %s: Raid device exceeded "
- "read_error threshold [cur %d:max %d]\n",
- mdname(mddev), b,
-- atomic_read(&rdev->read_errors), max_read_errors);
-+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
- printk(KERN_NOTICE
- "md/raid10:%s: %s: Failing raid device\n",
- mdname(mddev), b);
-@@ -1993,7 +1993,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
- (unsigned long long)(
- sect + rdev->data_offset),
- bdevname(rdev->bdev, b));
-- atomic_add(s, &rdev->corrected_errors);
-+ atomic_add_unchecked(s, &rdev->corrected_errors);
- }
-
- rdev_dec_pending(rdev, mddev);
-diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
-index c293d9c..808ae97 100644
---- a/drivers/md/raid5.c
-+++ b/drivers/md/raid5.c
-@@ -598,23 +598,23 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
- struct bio_vec *bvl;
- struct page *bio_page;
- int i;
-- int page_offset;
-+ s64 page_offset;
- struct async_submit_ctl submit;
- enum async_tx_flags flags = 0;
-
- if (bio->bi_sector >= sector)
-- page_offset = (signed)(bio->bi_sector - sector) * 512;
-+ page_offset = (s64)(bio->bi_sector - sector) * 512;
- else
-- page_offset = (signed)(sector - bio->bi_sector) * -512;
-+ page_offset = (s64)(sector - bio->bi_sector) * -512;
-
- if (frombio)
- flags |= ASYNC_TX_FENCE;
- init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
-
- bio_for_each_segment(bvl, bio, i) {
-- int len = bvl->bv_len;
-- int clen;
-- int b_offset = 0;
-+ s64 len = bvl->bv_len;
-+ s64 clen;
-+ s64 b_offset = 0;
-
- if (page_offset < 0) {
- b_offset = -page_offset;
-@@ -1364,6 +1364,10 @@ static int grow_one_stripe(struct r5conf *conf)
- return 1;
- }
-
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
-+#endif
-+
- static int grow_stripes(struct r5conf *conf, int num)
- {
- struct kmem_cache *sc;
-@@ -1374,7 +1378,11 @@ static int grow_stripes(struct r5conf *conf, int num)
- "raid%d-%s", conf->level, mdname(conf->mddev));
- else
- sprintf(conf->cache_name[0],
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
-+#else
- "raid%d-%p", conf->level, conf->mddev);
-+#endif
- sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
-
- conf->active_name = 0;
-@@ -1618,19 +1626,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
- (unsigned long long)(sh->sector
- + rdev->data_offset),
- bdevname(rdev->bdev, b));
-- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
-+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
- clear_bit(R5_ReadError, &sh->dev[i].flags);
- clear_bit(R5_ReWrite, &sh->dev[i].flags);
- }
-- if (atomic_read(&conf->disks[i].rdev->read_errors))
-- atomic_set(&conf->disks[i].rdev->read_errors, 0);
-+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
-+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
- } else {
- const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
- int retry = 0;
- rdev = conf->disks[i].rdev;
-
- clear_bit(R5_UPTODATE, &sh->dev[i].flags);
-- atomic_inc(&rdev->read_errors);
-+ atomic_inc_unchecked(&rdev->read_errors);
- if (conf->mddev->degraded >= conf->max_degraded)
- printk_ratelimited(
- KERN_WARNING
-@@ -1650,7 +1658,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
- (unsigned long long)(sh->sector
- + rdev->data_offset),
- bdn);
-- else if (atomic_read(&rdev->read_errors)
-+ else if (atomic_read_unchecked(&rdev->read_errors)
- > conf->max_nr_stripes)
- printk(KERN_WARNING
- "md/raid:%s: Too many read errors, failing device %s.\n",
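The raid5.c hunk combines two changes: the page_offset arithmetic is widened to s64 so the sector-difference byte computation cannot overflow a 32-bit int, and under CONFIG_GRKERNSEC_HIDESYM the slab cache name is derived from a wrapping counter instead of the "%p" kernel pointer, so the mddev address does not leak through the cache name. A sketch of the naming part with hypothetical identifiers:

#include <linux/kernel.h>
#include <linux/atomic.h>

static atomic_unchecked_t frob_cache_id = ATOMIC_INIT(0);

static void frob_cache_name(char *name, size_t len, int level, void *owner)
{
#ifdef CONFIG_GRKERNSEC_HIDESYM
        /* print a monotonic id, never the kernel pointer */
        snprintf(name, len, "frob%d-%08lx", level,
                 (unsigned long)atomic_inc_return_unchecked(&frob_cache_id));
#else
        snprintf(name, len, "frob%d-%p", level, owner);
#endif
}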
-diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
-index ba9a643..e474ab5 100644
---- a/drivers/media/dvb/ddbridge/ddbridge-core.c
-+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
-@@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
- .subvendor = _subvend, .subdevice = _subdev, \
- .driver_data = (unsigned long)&_driverdata }
-
--static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
-+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
- DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
- DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
- DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
-diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
-index d5cda35..017af46 100644
---- a/drivers/media/dvb/dvb-core/dvbdev.c
-+++ b/drivers/media/dvb/dvb-core/dvbdev.c
-@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
- const struct dvb_device *template, void *priv, int type)
- {
- struct dvb_device *dvbdev;
-- struct file_operations *dvbdevfops;
-+ file_operations_no_const *dvbdevfops;
- struct device *clsdev;
- int minor;
- int id;
-diff --git a/drivers/media/dvb/dvb-usb/cinergyT2-core.c b/drivers/media/dvb/dvb-usb/cinergyT2-core.c
-index f9d9050..d7a9d4e 100644
---- a/drivers/media/dvb/dvb-usb/cinergyT2-core.c
-+++ b/drivers/media/dvb/dvb-usb/cinergyT2-core.c
-@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
-
- static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
- {
-- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
-- char result[64];
-- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
-- sizeof(result), 0);
-+ char *buf;
-+ char *result;
-+ int retval;
-+
-+ buf = kmalloc(2, GFP_KERNEL);
-+ if (buf == NULL)
-+ return -ENOMEM;
-+ result = kmalloc(64, GFP_KERNEL);
-+ if (result == NULL) {
-+ kfree(buf);
-+ return -ENOMEM;
-+ }
-+
-+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
-+ buf[1] = enable ? 1 : 0;
-+
-+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
-+
-+ kfree(buf);
-+ kfree(result);
-+ return retval;
- }
-
- static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
- {
-- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
-- char state[3];
-- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
-+ char *buf;
-+ char *state;
-+ int retval;
-+
-+ buf = kmalloc(2, GFP_KERNEL);
-+ if (buf == NULL)
-+ return -ENOMEM;
-+ state = kmalloc(3, GFP_KERNEL);
-+ if (state == NULL) {
-+ kfree(buf);
-+ return -ENOMEM;
-+ }
-+
-+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
-+ buf[1] = enable ? 1 : 0;
-+
-+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
-+
-+ kfree(buf);
-+ kfree(state);
-+ return retval;
- }
-
- static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
- {
-- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
-- char state[3];
-+ char *query;
-+ char *state;
- int ret;
-+ query = kmalloc(1, GFP_KERNEL);
-+ if (query == NULL)
-+ return -ENOMEM;
-+ state = kmalloc(3, GFP_KERNEL);
-+ if (state == NULL) {
-+ kfree(query);
-+ return -ENOMEM;
-+ }
-+
-+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
-
- adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
-
-- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
-- sizeof(state), 0);
-+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
- if (ret < 0) {
- deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
- "state info\n");
-@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
-
- /* Copy this pointer as we are gonna need it in the release phase */
- cinergyt2_usb_device = adap->dev;
--
-+ kfree(query);
-+ kfree(state);
- return 0;
- }
-
-@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
- static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
- {
- struct cinergyt2_state *st = d->priv;
-- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
-+ u8 *key, *cmd;
- int i;
-
-+ cmd = kmalloc(1, GFP_KERNEL);
-+ if (cmd == NULL)
-+ return -EINVAL;
-+ key = kzalloc(5, GFP_KERNEL);
-+ if (key == NULL) {
-+ kfree(cmd);
-+ return -EINVAL;
-+ }
-+
-+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
-+
- *state = REMOTE_NO_KEY_PRESSED;
-
-- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
-+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
- if (key[4] == 0xff) {
- /* key repeat */
- st->rc_counter++;
-@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
- *event = d->last_event;
- deb_rc("repeat key, event %x\n",
- *event);
-- return 0;
-+ goto out;
- }
- }
- deb_rc("repeated key (non repeatable)\n");
- }
-- return 0;
-+ goto out;
- }
-
- /* hack to pass checksum on the custom field */
-@@ -175,6 +231,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
- deb_rc("key: %x %x %x %x %x\n",
- key[0], key[1], key[2], key[3], key[4]);
- }
-+out:
-+ kfree(cmd);
-+ kfree(key);
- return 0;
- }
-
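The cinergyT2-core.c hunk replaces on-stack command/response buffers with kmalloc'd ones before handing them to dvb_usb_generic_rw(); USB transfer buffers must be DMA-capable, which stack memory is not guaranteed to be, and the hardened kernel is stricter about it. Note that in cinergyt2_power_ctrl() the rewritten code sets buf[1] = enable ? 1 : 0 where the original initializer used enable ? 0 : 1, so the sleep-mode argument appears to be inverted by this conversion. The general shape of the conversion, with a hypothetical command byte and function name (dvb_usb_generic_rw() and its argument order are taken from the calls above):

#include <linux/slab.h>
#include "dvb-usb.h"    /* driver-local header, as in the files above */

static int frob_usb_cmd(struct dvb_usb_device *d, u8 cmd_byte)
{
        u8 *cmd, *resp;
        int ret;

        cmd = kmalloc(1, GFP_KERNEL);
        if (cmd == NULL)
                return -ENOMEM;
        resp = kmalloc(3, GFP_KERNEL);
        if (resp == NULL) {
                kfree(cmd);
                return -ENOMEM;
        }

        cmd[0] = cmd_byte;
        ret = dvb_usb_generic_rw(d, cmd, 1, resp, 3, 0);

        kfree(resp);
        kfree(cmd);
        return ret;
}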
-diff --git a/drivers/media/dvb/dvb-usb/cinergyT2-fe.c b/drivers/media/dvb/dvb-usb/cinergyT2-fe.c
-index 9cd51ac..0967e20 100644
---- a/drivers/media/dvb/dvb-usb/cinergyT2-fe.c
-+++ b/drivers/media/dvb/dvb-usb/cinergyT2-fe.c
-@@ -146,103 +146,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
- fe_status_t *status)
- {
- struct cinergyt2_fe_state *state = fe->demodulator_priv;
-- struct dvbt_get_status_msg result;
-- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
-+ struct dvbt_get_status_msg *result;
-+ u8 *cmd;
- int ret;
-
-- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
-- sizeof(result), 0);
-+ cmd = kmalloc(1, GFP_KERNEL);
-+ if (cmd == NULL)
-+ return -ENOMEM;
-+ result = kmalloc(sizeof(*result), GFP_KERNEL);
-+ if (result == NULL) {
-+ kfree(cmd);
-+ return -ENOMEM;
-+ }
-+
-+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
-+
-+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
-+ sizeof(*result), 0);
- if (ret < 0)
-- return ret;
-+ goto out;
-
- *status = 0;
-
-- if (0xffff - le16_to_cpu(result.gain) > 30)
-+ if (0xffff - le16_to_cpu(result->gain) > 30)
- *status |= FE_HAS_SIGNAL;
-- if (result.lock_bits & (1 << 6))
-+ if (result->lock_bits & (1 << 6))
- *status |= FE_HAS_LOCK;
-- if (result.lock_bits & (1 << 5))
-+ if (result->lock_bits & (1 << 5))
- *status |= FE_HAS_SYNC;
-- if (result.lock_bits & (1 << 4))
-+ if (result->lock_bits & (1 << 4))
- *status |= FE_HAS_CARRIER;
-- if (result.lock_bits & (1 << 1))
-+ if (result->lock_bits & (1 << 1))
- *status |= FE_HAS_VITERBI;
-
- if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
- (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
- *status &= ~FE_HAS_LOCK;
-
-- return 0;
-+out:
-+ kfree(cmd);
-+ kfree(result);
-+ return ret;
- }
-
- static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
- {
- struct cinergyt2_fe_state *state = fe->demodulator_priv;
-- struct dvbt_get_status_msg status;
-- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
-+ struct dvbt_get_status_msg *status;
-+ char *cmd;
- int ret;
-
-- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
-- sizeof(status), 0);
-+ cmd = kmalloc(1, GFP_KERNEL);
-+ if (cmd == NULL)
-+ return -ENOMEM;
-+ status = kmalloc(sizeof(*status), GFP_KERNEL);
-+ if (status == NULL) {
-+ kfree(cmd);
-+ return -ENOMEM;
-+ }
-+
-+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
-+
-+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
-+ sizeof(*status), 0);
- if (ret < 0)
-- return ret;
-+ goto out;
-
-- *ber = le32_to_cpu(status.viterbi_error_rate);
-+ *ber = le32_to_cpu(status->viterbi_error_rate);
-+out:
-+ kfree(cmd);
-+ kfree(status);
- return 0;
- }
-
- static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
- {
- struct cinergyt2_fe_state *state = fe->demodulator_priv;
-- struct dvbt_get_status_msg status;
-- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
-+ struct dvbt_get_status_msg *status;
-+ u8 *cmd;
- int ret;
-
-- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
-- sizeof(status), 0);
-+ cmd = kmalloc(1, GFP_KERNEL);
-+ if (cmd == NULL)
-+ return -ENOMEM;
-+ status = kmalloc(sizeof(*status), GFP_KERNEL);
-+ if (status == NULL) {
-+ kfree(cmd);
-+ return -ENOMEM;
-+ }
-+
-+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
-+
-+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
-+ sizeof(*status), 0);
- if (ret < 0) {
- err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
- ret);
-- return ret;
-+ goto out;
- }
-- *unc = le32_to_cpu(status.uncorrected_block_count);
-- return 0;
-+ *unc = le32_to_cpu(status->uncorrected_block_count);
-+
-+out:
-+ kfree(cmd);
-+ kfree(status);
-+ return ret;
- }
-
- static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
- u16 *strength)
- {
- struct cinergyt2_fe_state *state = fe->demodulator_priv;
-- struct dvbt_get_status_msg status;
-- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
-+ struct dvbt_get_status_msg *status;
-+ char *cmd;
- int ret;
-
-- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
-- sizeof(status), 0);
-+ cmd = kmalloc(1, GFP_KERNEL);
-+ if (cmd == NULL)
-+ return -ENOMEM;
-+ status = kmalloc(sizeof(*status), GFP_KERNEL);
-+ if (status == NULL) {
-+ kfree(cmd);
-+ return -ENOMEM;
-+ }
-+
-+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
-+
-+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
-+ sizeof(*status), 0);
- if (ret < 0) {
- err("cinergyt2_fe_read_signal_strength() Failed!"
- " (Error=%d)\n", ret);
-- return ret;
-+ goto out;
- }
-- *strength = (0xffff - le16_to_cpu(status.gain));
-+ *strength = (0xffff - le16_to_cpu(status->gain));
-+
-+out:
-+ kfree(cmd);
-+ kfree(status);
- return 0;
- }
-
- static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
- {
- struct cinergyt2_fe_state *state = fe->demodulator_priv;
-- struct dvbt_get_status_msg status;
-- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
-+ struct dvbt_get_status_msg *status;
-+ char *cmd;
- int ret;
-
-- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
-- sizeof(status), 0);
-+ cmd = kmalloc(1, GFP_KERNEL);
-+ if (cmd == NULL)
-+ return -ENOMEM;
-+ status = kmalloc(sizeof(*status), GFP_KERNEL);
-+ if (status == NULL) {
-+ kfree(cmd);
-+ return -ENOMEM;
-+ }
-+
-+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
-+
-+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
-+ sizeof(*status), 0);
- if (ret < 0) {
- err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
-- return ret;
-+ goto out;
- }
-- *snr = (status.snr << 8) | status.snr;
-- return 0;
-+ *snr = (status->snr << 8) | status->snr;
-+
-+out:
-+ kfree(cmd);
-+ kfree(status);
-+ return ret;
- }
-
- static int cinergyt2_fe_init(struct dvb_frontend *fe)
-@@ -267,23 +340,34 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *fep)
- {
- struct cinergyt2_fe_state *state = fe->demodulator_priv;
-- struct dvbt_set_parameters_msg param;
-- char result[2];
-+ struct dvbt_set_parameters_msg *param;
-+ char *result;
- int err;
-
-- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
-- param.tps = cpu_to_le16(compute_tps(fep));
-- param.freq = cpu_to_le32(fep->frequency / 1000);
-- param.bandwidth = 8 - fep->u.ofdm.bandwidth - BANDWIDTH_8_MHZ;
-- param.flags = 0;
-+ result = kmalloc(2, GFP_KERNEL);
-+ if (result == NULL)
-+ return -ENOMEM;
-+ param = kmalloc(sizeof(*param), GFP_KERNEL);
-+ if (param == NULL) {
-+ kfree(result);
-+ return -ENOMEM;
-+ }
-+
-+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
-+ param->tps = cpu_to_le16(compute_tps(fep));
-+ param->freq = cpu_to_le32(fep->frequency / 1000);
-+ param->bandwidth = 8 - fep->u.ofdm.bandwidth - BANDWIDTH_8_MHZ;
-+ param->flags = 0;
-
- err = dvb_usb_generic_rw(state->d,
-- (char *)&param, sizeof(param),
-- result, sizeof(result), 0);
-+ (char *)param, sizeof(*param),
-+ result, 2, 0);
- if (err < 0)
- err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
-
-- return (err < 0) ? err : 0;
-+ kfree(result);
-+ kfree(param);
-+ return err;
- }
-
- static int cinergyt2_fe_get_frontend(struct dvb_frontend *fe,
-diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
-index 9f2a02c..5920f88 100644
---- a/drivers/media/dvb/dvb-usb/cxusb.c
-+++ b/drivers/media/dvb/dvb-usb/cxusb.c
-@@ -1069,7 +1069,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
- struct dib0700_adapter_state {
- int (*set_param_save) (struct dvb_frontend *,
- struct dvb_frontend_parameters *);
--};
-+} __no_const;
-
- static int dib7070_set_param_override(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *fep)
-diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-firmware.c b/drivers/media/dvb/dvb-usb/dvb-usb-firmware.c
-index 733a7ff..f8b52e3 100644
---- a/drivers/media/dvb/dvb-usb/dvb-usb-firmware.c
-+++ b/drivers/media/dvb/dvb-usb/dvb-usb-firmware.c
-@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
-
- int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
- {
-- struct hexline hx;
-- u8 reset;
-+ struct hexline *hx;
-+ u8 *reset;
- int ret,pos=0;
-
-+ reset = kmalloc(1, GFP_KERNEL);
-+ if (reset == NULL)
-+ return -ENOMEM;
-+
-+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
-+ if (hx == NULL) {
-+ kfree(reset);
-+ return -ENOMEM;
-+ }
-+
- /* stop the CPU */
-- reset = 1;
-- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
-+ reset[0] = 1;
-+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
- err("could not stop the USB controller CPU.");
-
-- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
-- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
-- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
-+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
-+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
-+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
-
-- if (ret != hx.len) {
-+ if (ret != hx->len) {
- err("error while transferring firmware "
- "(transferred size: %d, block size: %d)",
-- ret,hx.len);
-+ ret,hx->len);
- ret = -EINVAL;
- break;
- }
- }
- if (ret < 0) {
- err("firmware download failed at %d with %d",pos,ret);
-+ kfree(reset);
-+ kfree(hx);
- return ret;
- }
-
- if (ret == 0) {
- /* restart the CPU */
-- reset = 0;
-- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
-+ reset[0] = 0;
-+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
- err("could not restart the USB controller CPU.");
- ret = -EINVAL;
- }
- } else
- ret = -EIO;
-
-+ kfree(reset);
-+ kfree(hx);
-+
- return ret;
- }
- EXPORT_SYMBOL(usb_cypress_load_firmware);
-diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
-index f103ec1..5e8968b 100644
---- a/drivers/media/dvb/dvb-usb/dw2102.c
-+++ b/drivers/media/dvb/dvb-usb/dw2102.c
-@@ -95,7 +95,7 @@ struct su3000_state {
-
- struct s6x0_state {
- int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
--};
-+} __no_const;
-
- /* debug */
- static int dvb_usb_dw2102_debug;
-diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
-index 404f63a..4796533 100644
---- a/drivers/media/dvb/frontends/dib3000.h
-+++ b/drivers/media/dvb/frontends/dib3000.h
-@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
- int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
- int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
- int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
--};
-+} __no_const;
-
- #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
- extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
-diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
-index 2151c99..a4bf818 100644
---- a/drivers/media/dvb/frontends/ds3000.c
-+++ b/drivers/media/dvb/frontends/ds3000.c
-@@ -1217,7 +1217,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
-
- for (i = 0; i < 30 ; i++) {
- ds3000_read_status(fe, &status);
-- if (status && FE_HAS_LOCK)
-+ if (status & FE_HAS_LOCK)
- break;
-
- msleep(10);
-diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
-index 0564192..75b16f5 100644
---- a/drivers/media/dvb/ngene/ngene-cards.c
-+++ b/drivers/media/dvb/ngene/ngene-cards.c
-@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
-
- /****************************************************************************/
-
--static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
-+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
- NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
- NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
- NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
-diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
-index 16a089f..1661b11 100644
---- a/drivers/media/radio/radio-cadet.c
-+++ b/drivers/media/radio/radio-cadet.c
-@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
- unsigned char readbuf[RDS_BUFFER];
- int i = 0;
-
-+ if (count > RDS_BUFFER)
-+ return -EFAULT;
- mutex_lock(&dev->lock);
- if (dev->rdsstat == 0) {
- dev->rdsstat = 1;
-@@ -347,7 +349,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
- readbuf[i++] = dev->rdsbuf[dev->rdsout++];
- mutex_unlock(&dev->lock);
-
-- if (copy_to_user(data, readbuf, i))
-+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
- return -EFAULT;
- return i;
- }
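The radio-cadet.c hunk guards a fixed-size on-stack buffer: the read size requested by user space is checked against RDS_BUFFER before any data is gathered, and the number of gathered bytes is checked against the buffer again before copy_to_user(), so a large count can no longer overrun readbuf. The hunk rejects oversized requests outright (returning -EFAULT); a more conventional shape, sketched below with hypothetical names, clamps the count to the buffer instead:

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#define FROB_BUFSZ 256  /* size of the fixed kernel-side buffer (hypothetical) */

static ssize_t frob_read(struct file *file, char __user *data,
                         size_t count, loff_t *ppos)
{
        unsigned char kbuf[FROB_BUFSZ];

        if (count > sizeof(kbuf))       /* clamp instead of trusting the caller */
                count = sizeof(kbuf);

        memset(kbuf, 0, count);         /* stands in for filling kbuf with real data */

        if (copy_to_user(data, kbuf, count))
                return -EFAULT;
        return count;
}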
-diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
-index 5991ab6..049f4ac 100644
---- a/drivers/media/radio/wl128x/fmdrv_common.c
-+++ b/drivers/media/radio/wl128x/fmdrv_common.c
-@@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444);
- MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
-
- /* Radio Nr */
--static u32 radio_nr = -1;
-+static int radio_nr = -1;
- module_param(radio_nr, int, 0444);
- MODULE_PARM_DESC(radio_nr, "Radio Nr");
-
-diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
-index a47ba33..deafb02 100644
---- a/drivers/media/rc/rc-main.c
-+++ b/drivers/media/rc/rc-main.c
-@@ -1031,7 +1031,7 @@ EXPORT_SYMBOL_GPL(rc_free_device);
-
- int rc_register_device(struct rc_dev *dev)
- {
-- static atomic_t devno = ATOMIC_INIT(0);
-+ static atomic_unchecked_t devno = ATOMIC_INIT(0);
- struct rc_map *rc_map;
- const char *path;
- int rc;
-@@ -1063,7 +1063,7 @@ int rc_register_device(struct rc_dev *dev)
- */
- mutex_lock(&dev->lock);
-
-- dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
-+ dev->devno = (unsigned long)(atomic_inc_return_unchecked(&devno) - 1);
- dev_set_name(&dev->dev, "rc%ld", dev->devno);
- dev_set_drvdata(&dev->dev, dev);
- rc = device_add(&dev->dev);
-diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
-index 61287fc..8b08712 100644
---- a/drivers/media/rc/redrat3.c
-+++ b/drivers/media/rc/redrat3.c
-@@ -905,7 +905,7 @@ static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier)
- return carrier;
- }
-
--static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
-+static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, u32 n)
- {
- struct redrat3_dev *rr3 = rcdev->priv;
- struct device *dev = rr3->dev;
-diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
-index 68d1240..46b32eb 100644
---- a/drivers/media/video/cx88/cx88-alsa.c
-+++ b/drivers/media/video/cx88/cx88-alsa.c
-@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
- * Only boards with eeprom and byte 1 at eeprom=1 have it
- */
-
--static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
-+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
- {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
- {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
- {0, }
-diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
-index 921c56d..7e6c4b2 100644
---- a/drivers/media/video/cx88/cx88-video.c
-+++ b/drivers/media/video/cx88/cx88-video.c
-@@ -49,9 +49,9 @@ MODULE_VERSION(CX88_VERSION);
-
- /* ------------------------------------------------------------------ */
-
--static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
--static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
--static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
-+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
-+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
-+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
-
- module_param_array(video_nr, int, NULL, 0444);
- module_param_array(vbi_nr, int, NULL, 0444);
-diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
-index 41108a9a..8ad2437 100644
---- a/drivers/media/video/ivtv/ivtv-driver.c
-+++ b/drivers/media/video/ivtv/ivtv-driver.c
-@@ -80,7 +80,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
- MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
-
- /* ivtv instance counter */
--static atomic_t ivtv_instance = ATOMIC_INIT(0);
-+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
-
- /* Parameter declarations */
- static int cardtype[IVTV_MAX_CARDS];
-diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
-index d345215..b607565 100644
---- a/drivers/media/video/omap/omap_vout.c
-+++ b/drivers/media/video/omap/omap_vout.c
-@@ -64,7 +64,6 @@ enum omap_vout_channels {
- OMAP_VIDEO2,
- };
-
--static struct videobuf_queue_ops video_vbq_ops;
- /* Variables configurable through module params*/
- static u32 video1_numbuffers = 3;
- static u32 video2_numbuffers = 3;
-@@ -1001,6 +1000,12 @@ static int omap_vout_open(struct file *file)
- {
- struct videobuf_queue *q;
- struct omap_vout_device *vout = NULL;
-+ static struct videobuf_queue_ops video_vbq_ops = {
-+ .buf_setup = omap_vout_buffer_setup,
-+ .buf_prepare = omap_vout_buffer_prepare,
-+ .buf_release = omap_vout_buffer_release,
-+ .buf_queue = omap_vout_buffer_queue,
-+ };
-
- vout = video_drvdata(file);
- v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
-@@ -1018,10 +1023,6 @@ static int omap_vout_open(struct file *file)
- vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
-
- q = &vout->vbq;
-- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
-- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
-- video_vbq_ops.buf_release = omap_vout_buffer_release;
-- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
- spin_lock_init(&vout->vbq_lock);
-
- videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
-diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
-index a0895bf..b451f5b 100644
---- a/drivers/media/video/timblogiw.c
-+++ b/drivers/media/video/timblogiw.c
-@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
-
- /* Platform device functions */
-
--static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
-+static struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
- .vidioc_querycap = timblogiw_querycap,
- .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
- .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
-@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
- .vidioc_enum_framesizes = timblogiw_enum_framesizes,
- };
-
--static __devinitconst struct v4l2_file_operations timblogiw_fops = {
-+static struct v4l2_file_operations timblogiw_fops = {
- .owner = THIS_MODULE,
- .open = timblogiw_open,
- .release = timblogiw_close,
-diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
-index 2671959..fc2af92 100644
---- a/drivers/media/video/v4l2-compat-ioctl32.c
-+++ b/drivers/media/video/v4l2-compat-ioctl32.c
-@@ -334,7 +334,7 @@ struct v4l2_buffer32 {
- __u32 reserved;
- };
-
--static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
-+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
- enum v4l2_memory memory)
- {
- void __user *up_pln;
-@@ -360,7 +360,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
- return 0;
- }
-
--static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
-+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
- enum v4l2_memory memory)
- {
- if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
-@@ -426,7 +426,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
- * by passing a very big num_planes value */
- uplane = compat_alloc_user_space(num_planes *
- sizeof(struct v4l2_plane));
-- kp->m.planes = uplane;
-+ kp->m.planes = (struct v4l2_plane __force_kernel *)uplane;
-
- while (--num_planes >= 0) {
- ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
-@@ -493,7 +493,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
- if (num_planes == 0)
- return 0;
-
-- uplane = kp->m.planes;
-+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
- if (get_user(p, &up->m.planes))
- return -EFAULT;
- uplane32 = compat_ptr(p);
-@@ -543,7 +543,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
- get_user(kp->capability, &up->capability) ||
- get_user(kp->flags, &up->flags))
- return -EFAULT;
-- kp->base = compat_ptr(tmp);
-+ kp->base = (void __force_kernel *)compat_ptr(tmp);
- get_v4l2_pix_format(&kp->fmt, &up->fmt);
- return 0;
- }
-@@ -649,7 +649,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
- n * sizeof(struct v4l2_ext_control32)))
- return -EFAULT;
- kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
-- kp->controls = kcontrols;
-+ kp->controls = (struct v4l2_ext_control __force_kernel *)kcontrols;
- while (--n >= 0) {
- if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
- return -EFAULT;
-@@ -671,7 +671,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
- static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
- {
- struct v4l2_ext_control32 __user *ucontrols;
-- struct v4l2_ext_control __user *kcontrols = kp->controls;
-+ struct v4l2_ext_control __user *kcontrols = (struct v4l2_ext_control __force_user *)kp->controls;
- int n = kp->count;
- compat_caddr_t p;
-
-diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
-index 0f415da..5dc98ef 100644
---- a/drivers/media/video/v4l2-ctrls.c
-+++ b/drivers/media/video/v4l2-ctrls.c
-@@ -1075,8 +1075,8 @@ static int validate_new(const struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c
- return 0;
-
- case V4L2_CTRL_TYPE_STRING:
-- len = strlen(s);
-- if (len < ctrl->minimum)
-+ len = strlen_user(s);
-+ if (!len || len < ctrl->minimum)
- return -ERANGE;
- if ((len - ctrl->minimum) % ctrl->step)
- return -ERANGE;
-diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
-index 8b0777f..e29f31e 100644
---- a/drivers/media/video/v4l2-device.c
-+++ b/drivers/media/video/v4l2-device.c
-@@ -74,9 +74,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
- EXPORT_SYMBOL_GPL(v4l2_device_put);
-
- int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
-- atomic_t *instance)
-+ atomic_unchecked_t *instance)
- {
-- int num = atomic_inc_return(instance) - 1;
-+ int num = atomic_inc_return_unchecked(instance) - 1;
- int len = strlen(basename);
-
- if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
-diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
-index 639abee..a027e4f 100644
---- a/drivers/media/video/v4l2-ioctl.c
-+++ b/drivers/media/video/v4l2-ioctl.c
-@@ -2197,7 +2197,7 @@ static unsigned long cmd_input_size(unsigned int cmd)
- }
-
- static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
-- void * __user *user_ptr, void ***kernel_ptr)
-+ void __user **user_ptr, void ***kernel_ptr)
- {
- int ret = 0;
-
-@@ -2212,7 +2212,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
- ret = -EINVAL;
- break;
- }
-- *user_ptr = (void __user *)buf->m.planes;
-+ *user_ptr = (void __force_user *)buf->m.planes;
- *kernel_ptr = (void *)&buf->m.planes;
- *array_size = sizeof(struct v4l2_plane) * buf->length;
- ret = 1;
-@@ -2230,7 +2230,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
- ret = -EINVAL;
- break;
- }
-- *user_ptr = (void __user *)ctrls->controls;
-+ *user_ptr = (void __force_user *)ctrls->controls;
- *kernel_ptr = (void *)&ctrls->controls;
- *array_size = sizeof(struct v4l2_ext_control)
- * ctrls->count;
-@@ -2312,7 +2312,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
- err = -EINVAL;
-
- if (has_array_args) {
-- *kernel_ptr = user_ptr;
-+ *kernel_ptr = (void __force_kernel *)user_ptr;
- if (copy_to_user(user_ptr, mbuf, array_size))
- err = -EFAULT;
- goto out_array_args;
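The v4l2 compat/ioctl hunks tighten the sparse address-space annotations: parameters that actually hold user-space pointers gain __user, and the places where a kernel-side struct field briefly carries a pointer obtained from compat_alloc_user_space() use the __force_user/__force_kernel casts defined by this patchset to document the intentional crossing. A tiny sketch with a hypothetical structure:

#include <linux/compiler.h>

struct frob_request {
        void *planes;   /* normally a kernel pointer */
};

static void frob_stash_user_planes(struct frob_request *req, void __user *uplanes)
{
        /* the field deliberately carries a user pointer on the compat path; */
        /* the __force_kernel cast (from this patchset) keeps sparse quiet   */
        req->planes = (void __force_kernel *)uplanes;
}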
-diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
-index 7d754fb..474273b 100644
---- a/drivers/media/video/vivi.c
-+++ b/drivers/media/video/vivi.c
-@@ -51,8 +51,8 @@ MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
- MODULE_LICENSE("Dual BSD/GPL");
- MODULE_VERSION(VIVI_VERSION);
-
--static unsigned video_nr = -1;
--module_param(video_nr, uint, 0644);
-+static int video_nr = -1;
-+module_param(video_nr, int, 0644);
- MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
-
- static unsigned n_devs = 1;
-diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
-index 668f5c6..65df5f2 100644
---- a/drivers/memstick/host/r592.c
-+++ b/drivers/memstick/host/r592.c
-@@ -454,7 +454,7 @@ static int r592_transfer_fifo_pio(struct r592_device *dev)
- /* Executes one TPC (data is read/written from small or large fifo) */
- static void r592_execute_tpc(struct r592_device *dev)
- {
-- bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
-+ bool is_write;
- int len, error;
- u32 status, reg;
-
-@@ -463,6 +463,7 @@ static void r592_execute_tpc(struct r592_device *dev)
- return;
- }
-
-+ is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
- len = dev->req->long_data ?
- dev->req->sg.length : dev->req->data_len;
-
-diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
-index e9c6a60..a1d04d6 100644
---- a/drivers/message/fusion/mptbase.c
-+++ b/drivers/message/fusion/mptbase.c
-@@ -6753,8 +6753,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
- seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
- seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
-
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
-+#else
- seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
- (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
-+#endif
-+
- /*
- * Rounding UP to nearest 4-kB boundary here...
- */
-@@ -6767,7 +6772,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
- ioc->facts.GlobalCredits);
-
- seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ NULL, NULL);
-+#else
- (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
-+#endif
- sz = (ioc->reply_sz * ioc->reply_depth) + 128;
- seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
- ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
-diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
-index 9d950429..b808101 100644
---- a/drivers/message/fusion/mptsas.c
-+++ b/drivers/message/fusion/mptsas.c
-@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
- return 0;
- }
-
-+static inline void
-+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
-+{
-+ if (phy_info->port_details) {
-+ phy_info->port_details->rphy = rphy;
-+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
-+ ioc->name, rphy));
-+ }
-+
-+ if (rphy) {
-+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
-+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
-+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
-+ ioc->name, rphy, rphy->dev.release));
-+ }
-+}
-+
- /* no mutex */
- static void
- mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
-@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
- return NULL;
- }
-
--static inline void
--mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
--{
-- if (phy_info->port_details) {
-- phy_info->port_details->rphy = rphy;
-- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
-- ioc->name, rphy));
-- }
--
-- if (rphy) {
-- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
-- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
-- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
-- ioc->name, rphy, rphy->dev.release));
-- }
--}
--
- static inline struct sas_port *
- mptsas_get_port(struct mptsas_phyinfo *phy_info)
- {
-diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
-index 0c3ced7..1fe34ec 100644
---- a/drivers/message/fusion/mptscsih.c
-+++ b/drivers/message/fusion/mptscsih.c
-@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
-
- h = shost_priv(SChost);
-
-- if (h) {
-- if (h->info_kbuf == NULL)
-- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
-- return h->info_kbuf;
-- h->info_kbuf[0] = '\0';
-+ if (!h)
-+ return NULL;
-
-- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
-- h->info_kbuf[size-1] = '\0';
-- }
-+ if (h->info_kbuf == NULL)
-+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
-+ return h->info_kbuf;
-+ h->info_kbuf[0] = '\0';
-+
-+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
-+ h->info_kbuf[size-1] = '\0';
-
- return h->info_kbuf;
- }
-diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
-index 07dbeaf..59a658c 100644
---- a/drivers/message/i2o/i2o_proc.c
-+++ b/drivers/message/i2o/i2o_proc.c
-@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
- "Array Controller Device"
- };
-
--static char *chtostr(u8 * chars, int n)
--{
-- char tmp[256];
-- tmp[0] = 0;
-- return strncat(tmp, (char *)chars, n);
--}
--
- static int i2o_report_query_status(struct seq_file *seq, int block_status,
- char *group)
- {
-@@ -721,9 +714,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
- static int i2o_seq_show_hw(struct seq_file *seq, void *v)
- {
- struct i2o_controller *c = (struct i2o_controller *)seq->private;
-- static u32 work32[5];
-- static u8 *work8 = (u8 *) work32;
-- static u16 *work16 = (u16 *) work32;
-+ u32 work32[5];
-+ u8 *work8 = (u8 *) work32;
-+ u16 *work16 = (u16 *) work32;
- int token;
- u32 hwcap;
-
-@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
-
- seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
- seq_printf(seq, "%-#8x", ddm_table.module_id);
-- seq_printf(seq, "%-29s",
-- chtostr(ddm_table.module_name_version, 28));
-+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
- seq_printf(seq, "%9d ", ddm_table.data_size);
- seq_printf(seq, "%8d", ddm_table.code_size);
-
-@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
-
- seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
- seq_printf(seq, "%-#8x", dst->module_id);
-- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
-- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
-+ seq_printf(seq, "%-.28s", dst->module_name_version);
-+ seq_printf(seq, "%-.8s", dst->date);
- seq_printf(seq, "%8d ", dst->module_size);
- seq_printf(seq, "%8d ", dst->mpb_size);
- seq_printf(seq, "0x%04x", dst->module_flags);
-@@ -1257,9 +1249,9 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
- static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
- {
- struct i2o_device *d = (struct i2o_device *)seq->private;
-- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
-+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
- // == (allow) 512d bytes (max)
-- static u16 *work16 = (u16 *) work32;
-+ u16 *work16 = (u16 *) work32;
- int token;
-
- token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
-@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
- seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
- seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
- seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
-- seq_printf(seq, "Vendor info : %s\n",
-- chtostr((u8 *) (work32 + 2), 16));
-- seq_printf(seq, "Product info : %s\n",
-- chtostr((u8 *) (work32 + 6), 16));
-- seq_printf(seq, "Description : %s\n",
-- chtostr((u8 *) (work32 + 10), 16));
-- seq_printf(seq, "Product rev. : %s\n",
-- chtostr((u8 *) (work32 + 14), 8));
-+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
-+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
-+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
-+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
-
- seq_printf(seq, "Serial number : ");
- print_serial_number(seq, (u8 *) (work32 + 16),
-@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
- }
-
- seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
-- seq_printf(seq, "Module name : %s\n",
-- chtostr(result.module_name, 24));
-- seq_printf(seq, "Module revision : %s\n",
-- chtostr(result.module_rev, 8));
-+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
-+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
-
- seq_printf(seq, "Serial number : ");
- print_serial_number(seq, result.serial_number, sizeof(result) - 36);
-@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
- return 0;
- }
-
-- seq_printf(seq, "Device name : %s\n",
-- chtostr(result.device_name, 64));
-- seq_printf(seq, "Service name : %s\n",
-- chtostr(result.service_name, 64));
-- seq_printf(seq, "Physical name : %s\n",
-- chtostr(result.physical_location, 64));
-- seq_printf(seq, "Instance number : %s\n",
-- chtostr(result.instance_number, 4));
-+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
-+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
-+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
-+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
-
- return 0;
- }
-@@ -1374,9 +1356,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
- static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
- {
- struct i2o_device *d = (struct i2o_device *)seq->private;
-- static u32 work32[12];
-- static u16 *work16 = (u16 *) work32;
-- static u8 *work8 = (u8 *) work32;
-+ u32 work32[12];
-+ u16 *work16 = (u16 *) work32;
-+ u8 *work8 = (u8 *) work32;
- int token;
-
- token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
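The i2o_proc.c hunk removes chtostr(), which returned the address of a 256-byte stack-local buffer that is invalid once the function returns; the callers now rely on a printf precision such as "%.24s", which stops after a fixed number of bytes even when the source field is not NUL-terminated. It also turns several static scratch arrays into ordinary locals so concurrent readers do not share them. A small sketch of the precision-format replacement, with a hypothetical field name:

#include <linux/seq_file.h>

static void frob_show_module_name(struct seq_file *seq, const u8 *name24)
{
        /* print at most 24 bytes; no temporary buffer, no missing terminator problem */
        seq_printf(seq, "Module name : %.24s\n", name24);
}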
-diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
-index a8c08f3..155fe3d 100644
---- a/drivers/message/i2o/iop.c
-+++ b/drivers/message/i2o/iop.c
-@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
-
- spin_lock_irqsave(&c->context_list_lock, flags);
-
-- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
-- atomic_inc(&c->context_list_counter);
-+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
-+ atomic_inc_unchecked(&c->context_list_counter);
-
-- entry->context = atomic_read(&c->context_list_counter);
-+ entry->context = atomic_read_unchecked(&c->context_list_counter);
-
- list_add(&entry->list, &c->context_list);
-
-@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
-
- #if BITS_PER_LONG == 64
- spin_lock_init(&c->context_list_lock);
-- atomic_set(&c->context_list_counter, 0);
-+ atomic_set_unchecked(&c->context_list_counter, 0);
- INIT_LIST_HEAD(&c->context_list);
- #endif
-
-diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
-index 4eec7b7..f468a4e 100644
---- a/drivers/mfd/ab3100-core.c
-+++ b/drivers/mfd/ab3100-core.c
-@@ -937,9 +937,6 @@ static int __devinit ab3100_probe(struct i2c_client *client,
-
- err = request_threaded_irq(client->irq, NULL, ab3100_irq_handler,
- IRQF_ONESHOT, "ab3100-core", ab3100);
-- /* This real unpredictable IRQ is of course sampled for entropy */
-- rand_initialize_irq(client->irq);
--
- if (err)
- goto exit_no_irq;
-
-diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
-index 90b450c..7a52413 100644
---- a/drivers/mfd/max8925-i2c.c
-+++ b/drivers/mfd/max8925-i2c.c
-@@ -139,7 +139,7 @@ static int __devinit max8925_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
- {
- struct max8925_platform_data *pdata = client->dev.platform_data;
-- static struct max8925_chip *chip;
-+ struct max8925_chip *chip;
-
- if (!pdata) {
- pr_info("%s: platform data is missing\n", __func__);
-diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
-index 6dad2ef..ef80da6 100644
---- a/drivers/mfd/mfd-core.c
-+++ b/drivers/mfd/mfd-core.c
-@@ -167,7 +167,7 @@ int mfd_add_devices(struct device *parent, int id,
- atomic_t *cnts;
-
- /* initialize reference counting for all cells */
-- cnts = kcalloc(sizeof(*cnts), n_devs, GFP_KERNEL);
-+ cnts = kcalloc(n_devs, sizeof(*cnts), GFP_KERNEL);
- if (!cnts)
- return -ENOMEM;
-
-diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
-index 29f11e0..89f0c3d 100644
---- a/drivers/mfd/twl4030-irq.c
-+++ b/drivers/mfd/twl4030-irq.c
-@@ -33,6 +33,7 @@
- #include <linux/slab.h>
-
- #include <linux/i2c/twl.h>
-+#include <asm/pgtable.h>
-
- #include "twl-core.h"
-
-@@ -713,10 +714,12 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
- /* install an irq handler for each of the SIH modules;
- * clone dummy irq_chip since PIH can't *do* anything
- */
-- twl4030_irq_chip = dummy_irq_chip;
-- twl4030_irq_chip.name = "twl4030";
-+ pax_open_kernel();
-+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
-+ *(const char **)&twl4030_irq_chip.name = "twl4030";
-
-- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
-+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
-+ pax_close_kernel();
-
- for (i = irq_base; i < irq_end; i++) {
- irq_set_chip_and_handler(i, &twl4030_irq_chip,
-diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
-index 83f4988..80f7a01 100644
---- a/drivers/mfd/twl6030-irq.c
-+++ b/drivers/mfd/twl6030-irq.c
-@@ -376,10 +376,12 @@ int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
- /* install an irq handler for each of the modules;
- * clone dummy irq_chip since PIH can't *do* anything
- */
-- twl6030_irq_chip = dummy_irq_chip;
-- twl6030_irq_chip.name = "twl6030";
-- twl6030_irq_chip.irq_set_type = NULL;
-- twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
-+ pax_open_kernel();
-+ memcpy((void *)&twl6030_irq_chip, &dummy_irq_chip, sizeof twl6030_irq_chip);
-+ *(const char **)&twl6030_irq_chip.name = "twl6030";
-+ *(void **)&twl6030_irq_chip.irq_set_type = NULL;
-+ *(void **)&twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
-+ pax_close_kernel();
-
- for (i = irq_base; i < irq_end; i++) {
- irq_set_chip_and_handler(i, &twl6030_irq_chip,
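The twl4030/twl6030 IRQ hunks show how constified data is still modified during init: the one-time writes to the irq_chip template are bracketed with pax_open_kernel()/pax_close_kernel(), which temporarily lift the write protection that the PaX side of this patchset places on such objects. A sketch of the bracket, assuming those helpers and using the in-kernel dummy_irq_chip as in the hunks above; the chip variable and its name string are hypothetical:

#include <linux/irq.h>
#include <linux/string.h>

static struct irq_chip frob_irq_chip;

static void frob_irq_chip_init(void)
{
        pax_open_kernel();
        memcpy((void *)&frob_irq_chip, &dummy_irq_chip, sizeof(frob_irq_chip));
        *(const char **)&frob_irq_chip.name = "frob";
        pax_close_kernel();
}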
-diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
-index 19fc7c1..09a4d26 100644
---- a/drivers/misc/c2port/core.c
-+++ b/drivers/misc/c2port/core.c
-@@ -924,7 +924,9 @@ struct c2port_device *c2port_device_register(char *name,
- mutex_init(&c2dev->mutex);
-
- /* Create binary file */
-- c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
-+ pax_open_kernel();
-+ *(size_t *)&c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
-+ pax_close_kernel();
- ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
- if (unlikely(ret))
- goto error_device_create_bin_file;
-diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
-index 8994772..f8453cc 100644
---- a/drivers/misc/ibmasm/ibmasmfs.c
-+++ b/drivers/misc/ibmasm/ibmasmfs.c
-@@ -110,6 +110,7 @@ static struct file_system_type ibmasmfs_type = {
- .mount = ibmasmfs_mount,
- .kill_sb = kill_litter_super,
- };
-+MODULE_ALIAS_FS("ibmasmfs");
-
- static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent)
- {
-diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
-index 3aa9a96..59cf685 100644
---- a/drivers/misc/kgdbts.c
-+++ b/drivers/misc/kgdbts.c
-@@ -832,7 +832,7 @@ static void run_plant_and_detach_test(int is_early)
- char before[BREAK_INSTR_SIZE];
- char after[BREAK_INSTR_SIZE];
-
-- probe_kernel_read(before, (char *)kgdbts_break_test,
-+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
- BREAK_INSTR_SIZE);
- init_simple_test();
- ts.tst = plant_and_detach_test;
-@@ -840,7 +840,7 @@ static void run_plant_and_detach_test(int is_early)
- /* Activate test with initial breakpoint */
- if (!is_early)
- kgdb_breakpoint();
-- probe_kernel_read(after, (char *)kgdbts_break_test,
-+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
- BREAK_INSTR_SIZE);
- if (memcmp(before, after, BREAK_INSTR_SIZE)) {
- printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
-diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
-index 29d12a7..f900ba4 100644
---- a/drivers/misc/lis3lv02d/lis3lv02d.c
-+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
-@@ -464,7 +464,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
- * the lid is closed. This leads to interrupts as soon as a little move
- * is done.
- */
-- atomic_inc(&lis3->count);
-+ atomic_inc_unchecked(&lis3->count);
-
- wake_up_interruptible(&lis3->misc_wait);
- kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
-@@ -550,7 +550,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
- if (lis3->pm_dev)
- pm_runtime_get_sync(lis3->pm_dev);
-
-- atomic_set(&lis3->count, 0);
-+ atomic_set_unchecked(&lis3->count, 0);
- return 0;
- }
-
-@@ -583,7 +583,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
- add_wait_queue(&lis3->misc_wait, &wait);
- while (true) {
- set_current_state(TASK_INTERRUPTIBLE);
-- data = atomic_xchg(&lis3->count, 0);
-+ data = atomic_xchg_unchecked(&lis3->count, 0);
- if (data)
- break;
-
-@@ -624,7 +624,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
- struct lis3lv02d, miscdev);
-
- poll_wait(file, &lis3->misc_wait, wait);
-- if (atomic_read(&lis3->count))
-+ if (atomic_read_unchecked(&lis3->count))
- return POLLIN | POLLRDNORM;
- return 0;
- }
-diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
-index 2b1482a..5d33616 100644
---- a/drivers/misc/lis3lv02d/lis3lv02d.h
-+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
-@@ -266,7 +266,7 @@ struct lis3lv02d {
- struct input_polled_dev *idev; /* input device */
- struct platform_device *pdev; /* platform device */
- struct regulator_bulk_data regulators[2];
-- atomic_t count; /* interrupt count after last read */
-+ atomic_unchecked_t count; /* interrupt count after last read */
- union axis_conversion ac; /* hw -> logical axis */
- int mapped_btns[3];
-
-diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
-index 150cd70..1d5d99b 100644
---- a/drivers/misc/lkdtm.c
-+++ b/drivers/misc/lkdtm.c
-@@ -473,6 +473,8 @@ static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
- int i, n, out;
-
- buf = (char *)__get_free_page(GFP_KERNEL);
-+ if (buf == NULL)
-+ return -ENOMEM;
-
- n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
- for (i = 0; i < ARRAY_SIZE(cp_type); i++)
-diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
-index 2f30bad..c4c13d0 100644
---- a/drivers/misc/sgi-gru/gruhandles.c
-+++ b/drivers/misc/sgi-gru/gruhandles.c
-@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
- unsigned long nsec;
-
- nsec = CLKS2NSEC(clks);
-- atomic_long_inc(&mcs_op_statistics[op].count);
-- atomic_long_add(nsec, &mcs_op_statistics[op].total);
-+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
-+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
- if (mcs_op_statistics[op].max < nsec)
- mcs_op_statistics[op].max = nsec;
- }
-diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
-index 7768b87..f8aac38 100644
---- a/drivers/misc/sgi-gru/gruprocfs.c
-+++ b/drivers/misc/sgi-gru/gruprocfs.c
-@@ -32,9 +32,9 @@
-
- #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
-
--static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
-+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
- {
-- unsigned long val = atomic_long_read(v);
-+ unsigned long val = atomic_long_read_unchecked(v);
-
- seq_printf(s, "%16lu %s\n", val, id);
- }
-@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
-
- seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
- for (op = 0; op < mcsop_last; op++) {
-- count = atomic_long_read(&mcs_op_statistics[op].count);
-- total = atomic_long_read(&mcs_op_statistics[op].total);
-+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
-+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
- max = mcs_op_statistics[op].max;
- seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
- count ? total / count : 0, max);
-diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
-index 5c3ce24..4915ccb 100644
---- a/drivers/misc/sgi-gru/grutables.h
-+++ b/drivers/misc/sgi-gru/grutables.h
-@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
- * GRU statistics.
- */
- struct gru_stats_s {
-- atomic_long_t vdata_alloc;
-- atomic_long_t vdata_free;
-- atomic_long_t gts_alloc;
-- atomic_long_t gts_free;
-- atomic_long_t gms_alloc;
-- atomic_long_t gms_free;
-- atomic_long_t gts_double_allocate;
-- atomic_long_t assign_context;
-- atomic_long_t assign_context_failed;
-- atomic_long_t free_context;
-- atomic_long_t load_user_context;
-- atomic_long_t load_kernel_context;
-- atomic_long_t lock_kernel_context;
-- atomic_long_t unlock_kernel_context;
-- atomic_long_t steal_user_context;
-- atomic_long_t steal_kernel_context;
-- atomic_long_t steal_context_failed;
-- atomic_long_t nopfn;
-- atomic_long_t asid_new;
-- atomic_long_t asid_next;
-- atomic_long_t asid_wrap;
-- atomic_long_t asid_reuse;
-- atomic_long_t intr;
-- atomic_long_t intr_cbr;
-- atomic_long_t intr_tfh;
-- atomic_long_t intr_spurious;
-- atomic_long_t intr_mm_lock_failed;
-- atomic_long_t call_os;
-- atomic_long_t call_os_wait_queue;
-- atomic_long_t user_flush_tlb;
-- atomic_long_t user_unload_context;
-- atomic_long_t user_exception;
-- atomic_long_t set_context_option;
-- atomic_long_t check_context_retarget_intr;
-- atomic_long_t check_context_unload;
-- atomic_long_t tlb_dropin;
-- atomic_long_t tlb_preload_page;
-- atomic_long_t tlb_dropin_fail_no_asid;
-- atomic_long_t tlb_dropin_fail_upm;
-- atomic_long_t tlb_dropin_fail_invalid;
-- atomic_long_t tlb_dropin_fail_range_active;
-- atomic_long_t tlb_dropin_fail_idle;
-- atomic_long_t tlb_dropin_fail_fmm;
-- atomic_long_t tlb_dropin_fail_no_exception;
-- atomic_long_t tfh_stale_on_fault;
-- atomic_long_t mmu_invalidate_range;
-- atomic_long_t mmu_invalidate_page;
-- atomic_long_t flush_tlb;
-- atomic_long_t flush_tlb_gru;
-- atomic_long_t flush_tlb_gru_tgh;
-- atomic_long_t flush_tlb_gru_zero_asid;
-+ atomic_long_unchecked_t vdata_alloc;
-+ atomic_long_unchecked_t vdata_free;
-+ atomic_long_unchecked_t gts_alloc;
-+ atomic_long_unchecked_t gts_free;
-+ atomic_long_unchecked_t gms_alloc;
-+ atomic_long_unchecked_t gms_free;
-+ atomic_long_unchecked_t gts_double_allocate;
-+ atomic_long_unchecked_t assign_context;
-+ atomic_long_unchecked_t assign_context_failed;
-+ atomic_long_unchecked_t free_context;
-+ atomic_long_unchecked_t load_user_context;
-+ atomic_long_unchecked_t load_kernel_context;
-+ atomic_long_unchecked_t lock_kernel_context;
-+ atomic_long_unchecked_t unlock_kernel_context;
-+ atomic_long_unchecked_t steal_user_context;
-+ atomic_long_unchecked_t steal_kernel_context;
-+ atomic_long_unchecked_t steal_context_failed;
-+ atomic_long_unchecked_t nopfn;
-+ atomic_long_unchecked_t asid_new;
-+ atomic_long_unchecked_t asid_next;
-+ atomic_long_unchecked_t asid_wrap;
-+ atomic_long_unchecked_t asid_reuse;
-+ atomic_long_unchecked_t intr;
-+ atomic_long_unchecked_t intr_cbr;
-+ atomic_long_unchecked_t intr_tfh;
-+ atomic_long_unchecked_t intr_spurious;
-+ atomic_long_unchecked_t intr_mm_lock_failed;
-+ atomic_long_unchecked_t call_os;
-+ atomic_long_unchecked_t call_os_wait_queue;
-+ atomic_long_unchecked_t user_flush_tlb;
-+ atomic_long_unchecked_t user_unload_context;
-+ atomic_long_unchecked_t user_exception;
-+ atomic_long_unchecked_t set_context_option;
-+ atomic_long_unchecked_t check_context_retarget_intr;
-+ atomic_long_unchecked_t check_context_unload;
-+ atomic_long_unchecked_t tlb_dropin;
-+ atomic_long_unchecked_t tlb_preload_page;
-+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
-+ atomic_long_unchecked_t tlb_dropin_fail_upm;
-+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
-+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
-+ atomic_long_unchecked_t tlb_dropin_fail_idle;
-+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
-+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
-+ atomic_long_unchecked_t tfh_stale_on_fault;
-+ atomic_long_unchecked_t mmu_invalidate_range;
-+ atomic_long_unchecked_t mmu_invalidate_page;
-+ atomic_long_unchecked_t flush_tlb;
-+ atomic_long_unchecked_t flush_tlb_gru;
-+ atomic_long_unchecked_t flush_tlb_gru_tgh;
-+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
-
-- atomic_long_t copy_gpa;
-- atomic_long_t read_gpa;
-+ atomic_long_unchecked_t copy_gpa;
-+ atomic_long_unchecked_t read_gpa;
-
-- atomic_long_t mesq_receive;
-- atomic_long_t mesq_receive_none;
-- atomic_long_t mesq_send;
-- atomic_long_t mesq_send_failed;
-- atomic_long_t mesq_noop;
-- atomic_long_t mesq_send_unexpected_error;
-- atomic_long_t mesq_send_lb_overflow;
-- atomic_long_t mesq_send_qlimit_reached;
-- atomic_long_t mesq_send_amo_nacked;
-- atomic_long_t mesq_send_put_nacked;
-- atomic_long_t mesq_page_overflow;
-- atomic_long_t mesq_qf_locked;
-- atomic_long_t mesq_qf_noop_not_full;
-- atomic_long_t mesq_qf_switch_head_failed;
-- atomic_long_t mesq_qf_unexpected_error;
-- atomic_long_t mesq_noop_unexpected_error;
-- atomic_long_t mesq_noop_lb_overflow;
-- atomic_long_t mesq_noop_qlimit_reached;
-- atomic_long_t mesq_noop_amo_nacked;
-- atomic_long_t mesq_noop_put_nacked;
-- atomic_long_t mesq_noop_page_overflow;
-+ atomic_long_unchecked_t mesq_receive;
-+ atomic_long_unchecked_t mesq_receive_none;
-+ atomic_long_unchecked_t mesq_send;
-+ atomic_long_unchecked_t mesq_send_failed;
-+ atomic_long_unchecked_t mesq_noop;
-+ atomic_long_unchecked_t mesq_send_unexpected_error;
-+ atomic_long_unchecked_t mesq_send_lb_overflow;
-+ atomic_long_unchecked_t mesq_send_qlimit_reached;
-+ atomic_long_unchecked_t mesq_send_amo_nacked;
-+ atomic_long_unchecked_t mesq_send_put_nacked;
-+ atomic_long_unchecked_t mesq_page_overflow;
-+ atomic_long_unchecked_t mesq_qf_locked;
-+ atomic_long_unchecked_t mesq_qf_noop_not_full;
-+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
-+ atomic_long_unchecked_t mesq_qf_unexpected_error;
-+ atomic_long_unchecked_t mesq_noop_unexpected_error;
-+ atomic_long_unchecked_t mesq_noop_lb_overflow;
-+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
-+ atomic_long_unchecked_t mesq_noop_amo_nacked;
-+ atomic_long_unchecked_t mesq_noop_put_nacked;
-+ atomic_long_unchecked_t mesq_noop_page_overflow;
-
- };
-
-@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
- tghop_invalidate, mcsop_last};
-
- struct mcs_op_statistic {
-- atomic_long_t count;
-- atomic_long_t total;
-+ atomic_long_unchecked_t count;
-+ atomic_long_unchecked_t total;
- unsigned long max;
- };
-
-@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
-
- #define STAT(id) do { \
- if (gru_options & OPT_STATS) \
-- atomic_long_inc(&gru_stats.id); \
-+ atomic_long_inc_unchecked(&gru_stats.id); \
- } while (0)
-
- #ifdef CONFIG_SGI_GRU_DEBUG
-diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
-index 851b2f2..a4ec097 100644
---- a/drivers/misc/sgi-xp/xp.h
-+++ b/drivers/misc/sgi-xp/xp.h
-@@ -289,7 +289,7 @@ struct xpc_interface {
- xpc_notify_func, void *);
- void (*received) (short, int, void *);
- enum xp_retval (*partid_to_nasids) (short, void *);
--};
-+} __no_const;
-
- extern struct xpc_interface xpc_interface;
-
-diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
-index 01be66d..e3a0c7e 100644
---- a/drivers/misc/sgi-xp/xp_main.c
-+++ b/drivers/misc/sgi-xp/xp_main.c
-@@ -78,13 +78,13 @@ xpc_notloaded(void)
- }
-
- struct xpc_interface xpc_interface = {
-- (void (*)(int))xpc_notloaded,
-- (void (*)(int))xpc_notloaded,
-- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
-- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
-+ .connect = (void (*)(int))xpc_notloaded,
-+ .disconnect = (void (*)(int))xpc_notloaded,
-+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
-+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
- void *))xpc_notloaded,
-- (void (*)(short, int, void *))xpc_notloaded,
-- (enum xp_retval(*)(short, void *))xpc_notloaded
-+ .received = (void (*)(short, int, void *))xpc_notloaded,
-+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
- };
- EXPORT_SYMBOL_GPL(xpc_interface);
-
-diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
-index b94d5f7..7f494c5 100644
---- a/drivers/misc/sgi-xp/xpc.h
-+++ b/drivers/misc/sgi-xp/xpc.h
-@@ -835,6 +835,7 @@ struct xpc_arch_operations {
- void (*received_payload) (struct xpc_channel *, void *);
- void (*notify_senders_of_disconnect) (struct xpc_channel *);
- };
-+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
-
- /* struct xpc_partition act_state values (for XPC HB) */
-
-@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
- /* found in xpc_main.c */
- extern struct device *xpc_part;
- extern struct device *xpc_chan;
--extern struct xpc_arch_operations xpc_arch_ops;
-+extern xpc_arch_operations_no_const xpc_arch_ops;
- extern int xpc_disengage_timelimit;
- extern int xpc_disengage_timedout;
- extern int xpc_activate_IRQ_rcvd;
-diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
-index d971817..33bdca5 100644
---- a/drivers/misc/sgi-xp/xpc_main.c
-+++ b/drivers/misc/sgi-xp/xpc_main.c
-@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
- .notifier_call = xpc_system_die,
- };
-
--struct xpc_arch_operations xpc_arch_ops;
-+xpc_arch_operations_no_const xpc_arch_ops;
-
- /*
- * Timer function to enforce the timelimit on the partition disengage.
-@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
-
- if (((die_args->trapnr == X86_TRAP_MF) ||
- (die_args->trapnr == X86_TRAP_XF)) &&
-- !user_mode_vm(die_args->regs))
-+ !user_mode(die_args->regs))
- xpc_die_deactivate();
-
- break;
-diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
-index ba168a7..399925d6 100644
---- a/drivers/misc/ti-st/st_core.c
-+++ b/drivers/misc/ti-st/st_core.c
-@@ -347,6 +347,11 @@ void st_int_recv(void *disc_data,
- st_gdata->rx_skb = alloc_skb(
- st_gdata->list[type]->max_frame_size,
- GFP_ATOMIC);
-+ if (st_gdata->rx_skb == NULL) {
-+ pr_err("out of memory: dropping\n");
-+ goto done;
-+ }
-+
- skb_reserve(st_gdata->rx_skb,
- st_gdata->list[type]->reserve);
- /* next 2 required for BT only */
-diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
-index 4802f7f..5ae431e 100644
---- a/drivers/mmc/card/block.c
-+++ b/drivers/mmc/card/block.c
-@@ -399,7 +399,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
- if (idata->ic.postsleep_min_us)
- usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
-
-- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
-+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
- err = -EFAULT;
- goto cmd_rel_host;
- }
-diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
-index 83b51b5..ec2396c 100644
---- a/drivers/mmc/host/sdhci-pci.c
-+++ b/drivers/mmc/host/sdhci-pci.c
-@@ -674,7 +674,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
- .probe = via_probe,
- };
-
--static const struct pci_device_id pci_ids[] __devinitdata = {
-+static const struct pci_device_id pci_ids[] __devinitconst = {
- {
- .vendor = PCI_VENDOR_ID_RICOH,
- .device = PCI_DEVICE_ID_RICOH_R5C822,
-diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
-index 179814a..01cb750 100644
---- a/drivers/mtd/chips/cfi_cmdset_0020.c
-+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
-@@ -674,7 +674,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
- size_t totlen = 0, thislen;
- int ret = 0;
- size_t buflen = 0;
-- static char *buffer;
-+ char *buffer;
-
- if (!ECCBUF_SIZE) {
- /* We should fall back to a general writev implementation.
-diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
-index e9fad91..0a7a16a 100644
---- a/drivers/mtd/devices/doc2000.c
-+++ b/drivers/mtd/devices/doc2000.c
-@@ -773,7 +773,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
-
- /* The ECC will not be calculated correctly if less than 512 is written */
- /* DBB-
-- if (len != 0x200 && eccbuf)
-+ if (len != 0x200)
- printk(KERN_WARNING
- "ECC needs a full sector write (adr: %lx size %lx)\n",
- (long) to, (long) len);
-diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
-index a3f7a27..234016e 100644
---- a/drivers/mtd/devices/doc2001.c
-+++ b/drivers/mtd/devices/doc2001.c
-@@ -392,7 +392,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
- struct Nand *mychip = &this->chips[from >> (this->chipshift)];
-
- /* Don't allow read past end of device */
-- if (from >= this->totlen)
-+ if (from >= this->totlen || !len)
- return -EINVAL;
-
- /* Don't allow a single read to cross a 512-byte block boundary */
-diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
-index 1d90e26..865d439 100644
---- a/drivers/mtd/mtdchar.c
-+++ b/drivers/mtd/mtdchar.c
-@@ -1215,6 +1215,7 @@ static struct file_system_type mtd_inodefs_type = {
- .mount = mtd_inodefs_mount,
- .kill_sb = kill_anon_super,
- };
-+MODULE_ALIAS_FS("mtd_inodefs");
-
- static void mtdchar_notify_add(struct mtd_info *mtd)
- {
-diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
-index 3984d48..28aa897 100644
---- a/drivers/mtd/nand/denali.c
-+++ b/drivers/mtd/nand/denali.c
-@@ -26,6 +26,7 @@
- #include <linux/pci.h>
- #include <linux/mtd/mtd.h>
- #include <linux/module.h>
-+#include <linux/slab.h>
-
- #include "denali.h"
-
-diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
-index ac40925..483b753 100644
---- a/drivers/mtd/nftlmount.c
-+++ b/drivers/mtd/nftlmount.c
-@@ -24,6 +24,7 @@
- #include <asm/errno.h>
- #include <linux/delay.h>
- #include <linux/slab.h>
-+#include <linux/sched.h>
- #include <linux/mtd/mtd.h>
- #include <linux/mtd/nand.h>
- #include <linux/mtd/nftl.h>
-diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
-index a9ff89ff..461d313 100644
---- a/drivers/mtd/sm_ftl.c
-+++ b/drivers/mtd/sm_ftl.c
-@@ -56,7 +56,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
- #define SM_CIS_VENDOR_OFFSET 0x59
- struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
- {
-- struct attribute_group *attr_group;
-+ attribute_group_no_const *attr_group;
- struct attribute **attributes;
- struct sm_sysfs_attribute *vendor_attribute;
-
-diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
-index 5af2a8f..728995b 100644
---- a/drivers/net/bonding/bond_main.c
-+++ b/drivers/net/bonding/bond_main.c
-@@ -4803,7 +4803,7 @@ static int bond_get_tx_queues(struct net *net, struct nlattr *tb[],
- return 0;
- }
-
--static struct rtnl_link_ops bond_link_ops __read_mostly = {
-+static struct rtnl_link_ops bond_link_ops = {
- .kind = "bond",
- .priv_size = sizeof(struct bonding),
- .setup = bond_setup,
-diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
-index cf95bd8d..f61f675 100644
---- a/drivers/net/bonding/bond_sysfs.c
-+++ b/drivers/net/bonding/bond_sysfs.c
-@@ -1063,7 +1063,7 @@ static ssize_t bonding_store_primary(struct device *d,
- goto out;
- }
-
-- sscanf(buf, "%16s", ifname); /* IFNAMSIZ */
-+ sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
-
- /* check to see if we are clearing primary */
- if (!strlen(ifname) || buf[0] == '\n') {
-@@ -1236,7 +1236,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
- goto out;
- }
-
-- sscanf(buf, "%16s", ifname); /* IFNAMSIZ */
-+ sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
-
- /* check to see if we are clearing active */
- if (!strlen(ifname) || buf[0] == '\n') {
-diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
-index db9100f..612019d 100644
---- a/drivers/net/can/dev.c
-+++ b/drivers/net/can/dev.c
-@@ -730,7 +730,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
- return -EOPNOTSUPP;
- }
-
--static struct rtnl_link_ops can_link_ops __read_mostly = {
-+static struct rtnl_link_ops can_link_ops = {
- .kind = "can",
- .maxtype = IFLA_CAN_MAX,
- .policy = can_policy,
-diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
-index f93e2d6..f1cbbc2 100644
---- a/drivers/net/can/vcan.c
-+++ b/drivers/net/can/vcan.c
-@@ -154,7 +154,7 @@ static void vcan_setup(struct net_device *dev)
- dev->destructor = free_netdev;
- }
-
--static struct rtnl_link_ops vcan_link_ops __read_mostly = {
-+static struct rtnl_link_ops vcan_link_ops = {
- .kind = "vcan",
- .setup = vcan_setup,
- };
-diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
-index 99b1145..54ce102 100644
---- a/drivers/net/dummy.c
-+++ b/drivers/net/dummy.c
-@@ -150,7 +150,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
- return 0;
- }
-
--static struct rtnl_link_ops dummy_link_ops __read_mostly = {
-+static struct rtnl_link_ops dummy_link_ops = {
- .kind = "dummy",
- .setup = dummy_setup,
- .validate = dummy_validate,
-diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
-index e9f8432..45308e6 100644
---- a/drivers/net/ethernet/8390/ax88796.c
-+++ b/drivers/net/ethernet/8390/ax88796.c
-@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
- if (ax->plat->reg_offsets)
- ei_local->reg_offset = ax->plat->reg_offsets;
- else {
-+ resource_size_t _mem_size = mem_size;
-+ do_div(_mem_size, 0x18);
- ei_local->reg_offset = ax->reg_offsets;
- for (ret = 0; ret < 0x18; ret++)
-- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
-+ ax->reg_offsets[ret] = _mem_size * ret;
- }
-
- if (!request_mem_region(mem->start, mem_size, pdev->name)) {
-diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
-index 1feae59..c2a61d2 100644
---- a/drivers/net/ethernet/atheros/atlx/atl2.c
-+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
-@@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
- */
-
- #define ATL2_PARAM(X, desc) \
-- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
-+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
- MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
- MODULE_PARM_DESC(X, desc);
- #else
-diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
-index 283d663..4373534 100644
---- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
-+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
-@@ -1240,7 +1240,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
- static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
- {
- /* RX_MODE controlling object */
-- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
-+ bnx2x_init_rx_mode_obj(bp);
-
- /* multicast configuration controlling object */
- bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
-diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
-index 1451769..0275580 100644
---- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
-+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
-@@ -2290,15 +2290,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
- return rc;
- }
-
--void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
-- struct bnx2x_rx_mode_obj *o)
-+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
- {
- if (CHIP_IS_E1x(bp)) {
-- o->wait_comp = bnx2x_empty_rx_mode_wait;
-- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
-+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
-+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
- } else {
-- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
-- o->config_rx_mode = bnx2x_set_rx_mode_e2;
-+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
-+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
- }
- }
-
-diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
-index 9a517c2..6d245e1 100644
---- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
-+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
-@@ -1207,8 +1207,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
-
- /********************* RX MODE ****************/
-
--void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
-- struct bnx2x_rx_mode_obj *o);
-+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
-
- /**
- * Send and RX_MODE ramrod according to the provided parameters.
-diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
-index a398a6f..4b6f6df 100644
---- a/drivers/net/ethernet/broadcom/tg3.h
-+++ b/drivers/net/ethernet/broadcom/tg3.h
-@@ -134,6 +134,7 @@
- #define CHIPREV_ID_5750_A0 0x4000
- #define CHIPREV_ID_5750_A1 0x4001
- #define CHIPREV_ID_5750_A3 0x4003
-+#define CHIPREV_ID_5750_C1 0x4201
- #define CHIPREV_ID_5750_C2 0x4202
- #define CHIPREV_ID_5752_A0_HW 0x5000
- #define CHIPREV_ID_5752_A0 0x6000
-diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
-index 26f5c5a..9482b63 100644
---- a/drivers/net/ethernet/brocade/bna/bna_enet.c
-+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
-@@ -1688,10 +1688,10 @@ bna_cb_ioceth_reset(void *arg)
- }
-
- static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
-- bna_cb_ioceth_enable,
-- bna_cb_ioceth_disable,
-- bna_cb_ioceth_hbfail,
-- bna_cb_ioceth_reset
-+ .enable_cbfn = bna_cb_ioceth_enable,
-+ .disable_cbfn = bna_cb_ioceth_disable,
-+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
-+ .reset_cbfn = bna_cb_ioceth_reset
- };
-
- static void bna_attr_init(struct bna_ioceth *ioceth)
-diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
-index 4d15c8f..1bc7689 100644
---- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
-+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
-@@ -3031,7 +3031,9 @@ static void t3_io_resume(struct pci_dev *pdev)
- CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
- t3_read_reg(adapter, A_PCIE_PEX_ERR));
-
-+ rtnl_lock();
- t3_resume_ports(adapter);
-+ rtnl_unlock();
- }
-
- static struct pci_error_handlers t3_err_handler = {
-diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
-index c5f5479..2e8c260 100644
---- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
-+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
-@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
- */
- struct l2t_skb_cb {
- arp_failure_handler_func arp_failure_handler;
--};
-+} __no_const;
-
- #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
-
-diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
-index cfb60e1..f0fe46f 100644
---- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
-+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
-@@ -1537,9 +1537,9 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
- dui = (struct deferred_unmap_info *)skb->head;
- p = dui->addr;
-
-- if (skb->tail - skb->transport_header)
-+ if (skb_tail_pointer(skb) - skb_transport_header(skb))
- pci_unmap_single(dui->pdev, *p++,
-- skb->tail - skb->transport_header,
-+ skb_tail_pointer(skb) - skb_transport_header(skb),
- PCI_DMA_TODEVICE);
-
- si = skb_shinfo(skb);
-@@ -1600,7 +1600,7 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
- flits = skb_transport_offset(skb) / 8;
- sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
- sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
-- skb->tail - skb->transport_header,
-+ skb_tail_pointer(skb) - skb_transport_header(skb),
- adap->pdev);
- if (need_skb_unmap()) {
- setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
-diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
-index 871bcaa..4043505 100644
---- a/drivers/net/ethernet/dec/tulip/de4x5.c
-+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
-@@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
- for (i=0; i<ETH_ALEN; i++) {
- tmp.addr[i] = dev->dev_addr[i];
- }
-- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
-+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
- break;
-
- case DE4X5_SET_HWADDR: /* Set the hardware address */
-@@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
- spin_lock_irqsave(&lp->lock, flags);
- memcpy(&statbuf, &lp->pktStats, ioc->len);
- spin_unlock_irqrestore(&lp->lock, flags);
-- if (copy_to_user(ioc->data, &statbuf, ioc->len))
-+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
- return -EFAULT;
- break;
- }
-diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
-index 14d5b61..1398636 100644
---- a/drivers/net/ethernet/dec/tulip/eeprom.c
-+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
-@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
- {NULL}};
-
-
--static const char *block_name[] __devinitdata = {
-+static const char *block_name[] __devinitconst = {
- "21140 non-MII",
- "21140 MII PHY",
- "21142 Serial PHY",
-diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
-index 4d01219..b58d26d 100644
---- a/drivers/net/ethernet/dec/tulip/winbond-840.c
-+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
-@@ -236,7 +236,7 @@ struct pci_id_info {
- int drv_flags; /* Driver use, intended as capability flags. */
- };
-
--static const struct pci_id_info pci_id_tbl[] __devinitdata = {
-+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
- { /* Sometime a Level-One switch card. */
- "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
- { "Winbond W89c840", CanHaveMII | HasBrokenTx},
-diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
-index dcd7f7a..ecb7fb3 100644
---- a/drivers/net/ethernet/dlink/sundance.c
-+++ b/drivers/net/ethernet/dlink/sundance.c
-@@ -218,7 +218,7 @@ enum {
- struct pci_id_info {
- const char *name;
- };
--static const struct pci_id_info pci_id_tbl[] __devinitdata = {
-+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
- {"D-Link DFE-550TX FAST Ethernet Adapter"},
- {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
- {"D-Link DFE-580TX 4 port Server Adapter"},
-diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
-index 10c9acf..d1a0b0f 100644
---- a/drivers/net/ethernet/emulex/benet/be_main.c
-+++ b/drivers/net/ethernet/emulex/benet/be_main.c
-@@ -397,7 +397,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
-
- if (wrapped)
- newacc += 65536;
-- ACCESS_ONCE(*acc) = newacc;
-+ ACCESS_ONCE_RW(*acc) = newacc;
- }
-
- void be_parse_stats(struct be_adapter *adapter)
-diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
-index fb5579a..debdffa 100644
---- a/drivers/net/ethernet/faraday/ftgmac100.c
-+++ b/drivers/net/ethernet/faraday/ftgmac100.c
-@@ -30,6 +30,8 @@
- #include <linux/netdevice.h>
- #include <linux/phy.h>
- #include <linux/platform_device.h>
-+#include <linux/interrupt.h>
-+#include <linux/irqreturn.h>
- #include <net/ip.h>
-
- #include "ftgmac100.h"
-diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
-index a127cb2..0d043cd 100644
---- a/drivers/net/ethernet/faraday/ftmac100.c
-+++ b/drivers/net/ethernet/faraday/ftmac100.c
-@@ -30,6 +30,8 @@
- #include <linux/module.h>
- #include <linux/netdevice.h>
- #include <linux/platform_device.h>
-+#include <linux/interrupt.h>
-+#include <linux/irqreturn.h>
-
- #include "ftmac100.h"
-
-diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
-index 61d2bdd..7f1154a 100644
---- a/drivers/net/ethernet/fealnx.c
-+++ b/drivers/net/ethernet/fealnx.c
-@@ -150,7 +150,7 @@ struct chip_info {
- int flags;
- };
-
--static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
-+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
- { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
- { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
- { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
-diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
-index ed79b2d..b17b19d 100644
---- a/drivers/net/ethernet/ibm/emac/core.c
-+++ b/drivers/net/ethernet/ibm/emac/core.c
-@@ -2309,7 +2309,7 @@ static int __devinit emac_of_bus_notify(struct notifier_block *nb,
- return 0;
- }
-
--static struct notifier_block emac_of_bus_notifier __devinitdata = {
-+static struct notifier_block emac_of_bus_notifier = {
- .notifier_call = emac_of_bus_notify
- };
-
-diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
-index e1159e5..34efe3e 100644
---- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
-+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
-@@ -205,7 +205,6 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
- {
- struct e1000_hw *hw = &adapter->hw;
- struct e1000_mac_info *mac = &hw->mac;
-- struct e1000_mac_operations *func = &mac->ops;
-
- /* Set media type */
- switch (adapter->pdev->device) {
-@@ -233,16 +232,16 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
- /* check for link */
- switch (hw->phy.media_type) {
- case e1000_media_type_copper:
-- func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
-- func->check_for_link = e1000e_check_for_copper_link;
-+ mac->ops.setup_physical_interface = e1000_setup_copper_link_80003es2lan;
-+ mac->ops.check_for_link = e1000e_check_for_copper_link;
- break;
- case e1000_media_type_fiber:
-- func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
-- func->check_for_link = e1000e_check_for_fiber_link;
-+ mac->ops.setup_physical_interface = e1000e_setup_fiber_serdes_link;
-+ mac->ops.check_for_link = e1000e_check_for_fiber_link;
- break;
- case e1000_media_type_internal_serdes:
-- func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
-- func->check_for_link = e1000e_check_for_serdes_link;
-+ mac->ops.setup_physical_interface = e1000e_setup_fiber_serdes_link;
-+ mac->ops.check_for_link = e1000e_check_for_serdes_link;
- break;
- default:
- return -E1000_ERR_CONFIG;
-diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
-index 4f4d52a..faf0fa4 100644
---- a/drivers/net/ethernet/intel/e1000e/82571.c
-+++ b/drivers/net/ethernet/intel/e1000e/82571.c
-@@ -239,7 +239,6 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
- {
- struct e1000_hw *hw = &adapter->hw;
- struct e1000_mac_info *mac = &hw->mac;
-- struct e1000_mac_operations *func = &mac->ops;
- u32 swsm = 0;
- u32 swsm2 = 0;
- bool force_clear_smbi = false;
-@@ -272,22 +271,22 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
- /* check for link */
- switch (hw->phy.media_type) {
- case e1000_media_type_copper:
-- func->setup_physical_interface = e1000_setup_copper_link_82571;
-- func->check_for_link = e1000e_check_for_copper_link;
-- func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
-+ mac->ops.setup_physical_interface = e1000_setup_copper_link_82571;
-+ mac->ops.check_for_link = e1000e_check_for_copper_link;
-+ mac->ops.get_link_up_info = e1000e_get_speed_and_duplex_copper;
- break;
- case e1000_media_type_fiber:
-- func->setup_physical_interface =
-+ mac->ops.setup_physical_interface =
- e1000_setup_fiber_serdes_link_82571;
-- func->check_for_link = e1000e_check_for_fiber_link;
-- func->get_link_up_info =
-+ mac->ops.check_for_link = e1000e_check_for_fiber_link;
-+ mac->ops.get_link_up_info =
- e1000e_get_speed_and_duplex_fiber_serdes;
- break;
- case e1000_media_type_internal_serdes:
-- func->setup_physical_interface =
-+ mac->ops.setup_physical_interface =
- e1000_setup_fiber_serdes_link_82571;
-- func->check_for_link = e1000_check_for_serdes_link_82571;
-- func->get_link_up_info =
-+ mac->ops.check_for_link = e1000_check_for_serdes_link_82571;
-+ mac->ops.get_link_up_info =
- e1000e_get_speed_and_duplex_fiber_serdes;
- break;
- default:
-@@ -297,10 +296,10 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
-
- switch (hw->mac.type) {
- case e1000_82573:
-- func->set_lan_id = e1000_set_lan_id_single_port;
-- func->check_mng_mode = e1000e_check_mng_mode_generic;
-- func->led_on = e1000e_led_on_generic;
-- func->blink_led = e1000e_blink_led_generic;
-+ mac->ops.set_lan_id = e1000_set_lan_id_single_port;
-+ mac->ops.check_mng_mode = e1000e_check_mng_mode_generic;
-+ mac->ops.led_on = e1000e_led_on_generic;
-+ mac->ops.blink_led = e1000e_blink_led_generic;
-
- /* FWSM register */
- mac->has_fwsm = true;
-@@ -314,14 +313,14 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
- break;
- case e1000_82574:
- case e1000_82583:
-- func->set_lan_id = e1000_set_lan_id_single_port;
-- func->check_mng_mode = e1000_check_mng_mode_82574;
-- func->led_on = e1000_led_on_82574;
-+ mac->ops.set_lan_id = e1000_set_lan_id_single_port;
-+ mac->ops.check_mng_mode = e1000_check_mng_mode_82574;
-+ mac->ops.led_on = e1000_led_on_82574;
- break;
- default:
-- func->check_mng_mode = e1000e_check_mng_mode_generic;
-- func->led_on = e1000e_led_on_generic;
-- func->blink_led = e1000e_blink_led_generic;
-+ mac->ops.check_mng_mode = e1000e_check_mng_mode_generic;
-+ mac->ops.led_on = e1000e_led_on_generic;
-+ mac->ops.blink_led = e1000e_blink_led_generic;
-
- /* FWSM register */
- mac->has_fwsm = true;
-diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
-index 8e362bb..679d9da 100644
---- a/drivers/net/ethernet/intel/e1000e/e1000.h
-+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
-@@ -175,7 +175,7 @@ struct e1000_info;
- #define E1000_TXDCTL_DMA_BURST_ENABLE \
- (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
- E1000_TXDCTL_COUNT_DESC | \
-- (5 << 16) | /* wthresh must be +1 more than desired */\
-+ (1 << 16) | /* wthresh must be +1 more than desired */\
- (1 << 8) | /* hthresh */ \
- 0x1f) /* pthresh */
-
-diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
-index e571356..088ad8d 100644
---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
-+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
-@@ -895,6 +895,9 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
- struct ixgbe_hw *hw = &adapter->hw;
- u32 regval;
-
-+ if (vf >= adapter->num_vfs)
-+ return -EINVAL;
-+
- adapter->vfinfo[vf].spoofchk_enabled = setting;
-
- regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
-diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
-index 4c8e199..f7f5587 100644
---- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
-+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
-@@ -956,8 +956,6 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
-- tx_ring->total_bytes = 0;
-- tx_ring->total_packets = 0;
- ixgbevf_clean_tx_irq(adapter, tx_ring);
- r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
-@@ -981,16 +979,6 @@ static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbevf_ring *rx_ring;
- int r_idx;
-- int i;
--
-- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-- for (i = 0; i < q_vector->rxr_count; i++) {
-- rx_ring = &(adapter->rx_ring[r_idx]);
-- rx_ring->total_bytes = 0;
-- rx_ring->total_packets = 0;
-- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-- r_idx + 1);
-- }
-
- if (!q_vector->rxr_count)
- return IRQ_HANDLED;
-diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
-index 0b3567a..49bc4bc 100644
---- a/drivers/net/ethernet/lantiq_etop.c
-+++ b/drivers/net/ethernet/lantiq_etop.c
-@@ -756,7 +756,7 @@ ltq_etop_probe(struct platform_device *pdev)
- return 0;
-
- err_free:
-- kfree(dev);
-+ free_netdev(dev);
- err_out:
- return err;
- }
-diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
-index 24ee967..9a07e41 100644
---- a/drivers/net/ethernet/mellanox/mlx4/eq.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
-@@ -570,8 +570,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
- int err;
- int i;
-
-- priv->eq_table.uar_map = kcalloc(sizeof *priv->eq_table.uar_map,
-- mlx4_num_eq_uar(dev), GFP_KERNEL);
-+ priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
-+ sizeof *priv->eq_table.uar_map, GFP_KERNEL);
- if (!priv->eq_table.uar_map) {
- err = -ENOMEM;
- goto err_out_free;
-diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
-index b02adbc..4285b65 100644
---- a/drivers/net/ethernet/mellanox/mlx4/main.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
-@@ -40,6 +40,7 @@
- #include <linux/dma-mapping.h>
- #include <linux/slab.h>
- #include <linux/io-mapping.h>
-+#include <linux/sched.h>
-
- #include <linux/mlx4/device.h>
- #include <linux/mlx4/doorbell.h>
-diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
-index c27fb3d..c54df57 100644
---- a/drivers/net/ethernet/neterion/s2io.c
-+++ b/drivers/net/ethernet/neterion/s2io.c
-@@ -6994,7 +6994,9 @@ static int s2io_add_isr(struct s2io_nic *sp)
- if (sp->s2io_entries[i].in_use == MSIX_FLG) {
- if (sp->s2io_entries[i].type ==
- MSIX_RING_TYPE) {
-- sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
-+ snprintf(sp->desc[i],
-+ sizeof(sp->desc[i]),
-+ "%s:MSI-X-%d-RX",
- dev->name, i);
- err = request_irq(sp->entries[i].vector,
- s2io_msix_ring_handle,
-@@ -7003,7 +7005,9 @@ static int s2io_add_isr(struct s2io_nic *sp)
- sp->s2io_entries[i].arg);
- } else if (sp->s2io_entries[i].type ==
- MSIX_ALARM_TYPE) {
-- sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
-+ snprintf(sp->desc[i],
-+ sizeof(sp->desc[i]),
-+ "%s:MSI-X-%d-TX",
- dev->name, i);
- err = request_irq(sp->entries[i].vector,
- s2io_msix_fifo_handle,
-@@ -8166,7 +8170,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
- "%s: UDP Fragmentation Offload(UFO) enabled\n",
- dev->name);
- /* Initialize device name */
-- sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
-+ snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
-+ sp->product_name);
-
- if (vlan_tag_strip)
- sp->vlan_strip_flag = 1;
-diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
-index 98e2c10..79af7f8 100644
---- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
-+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
-@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
- struct __vxge_hw_fifo *fifo;
- struct vxge_hw_fifo_config *config;
- u32 txdl_size, txdl_per_memblock;
-- struct vxge_hw_mempool_cbs fifo_mp_callback;
-+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
-+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
-+ };
-+
- struct __vxge_hw_virtualpath *vpath;
-
- if ((vp == NULL) || (attr == NULL)) {
-@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
- goto exit;
- }
-
-- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
--
- fifo->mempool =
- __vxge_hw_mempool_create(vpath->hldev,
- fifo->config->memblock_size,
-diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
-index 212f43b..fb31b51 100644
---- a/drivers/net/ethernet/octeon/octeon_mgmt.c
-+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
-@@ -683,10 +683,8 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
- p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
- PHY_INTERFACE_MODE_MII);
-
-- if (IS_ERR(p->phydev)) {
-- p->phydev = NULL;
-+ if (!p->phydev)
- return -1;
-- }
-
- phy_start_aneg(p->phydev);
-
-diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
-index 49b549f..13d648c 100644
---- a/drivers/net/ethernet/pasemi/pasemi_mac.c
-+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
-@@ -1101,9 +1101,9 @@ static int pasemi_mac_phy_init(struct net_device *dev)
- phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0,
- PHY_INTERFACE_MODE_SGMII);
-
-- if (IS_ERR(phydev)) {
-+ if (!phydev) {
- printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
-- return PTR_ERR(phydev);
-+ return -ENODEV;
- }
-
- mac->phydev = phydev;
-diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
-index 0c26787..f847e66 100644
---- a/drivers/net/ethernet/realtek/r8169.c
-+++ b/drivers/net/ethernet/realtek/r8169.c
-@@ -704,17 +704,17 @@ struct rtl8169_private {
- struct mdio_ops {
- void (*write)(void __iomem *, int, int);
- int (*read)(void __iomem *, int);
-- } mdio_ops;
-+ } __no_const mdio_ops;
-
- struct pll_power_ops {
- void (*down)(struct rtl8169_private *);
- void (*up)(struct rtl8169_private *);
-- } pll_power_ops;
-+ } __no_const pll_power_ops;
-
- struct jumbo_ops {
- void (*enable)(struct rtl8169_private *);
- void (*disable)(struct rtl8169_private *);
-- } jumbo_ops;
-+ } __no_const jumbo_ops;
-
- int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
- int (*get_settings)(struct net_device *, struct ethtool_cmd *);
-diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
-index 4907885..af3d197 100644
---- a/drivers/net/ethernet/sfc/selftest.c
-+++ b/drivers/net/ethernet/sfc/selftest.c
-@@ -37,7 +37,7 @@ struct efx_loopback_payload {
- struct iphdr ip;
- struct udphdr udp;
- __be16 iteration;
-- const char msg[64];
-+ char msg[64];
- } __packed;
-
- /* Loopback test source MAC address */
-diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
-index 1b4658c..a30dabb 100644
---- a/drivers/net/ethernet/sis/sis190.c
-+++ b/drivers/net/ethernet/sis/sis190.c
-@@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
- static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
- struct net_device *dev)
- {
-- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
-+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
- struct sis190_private *tp = netdev_priv(dev);
- struct pci_dev *isa_bridge;
- u8 reg, tmp8;
-diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
-index c07cfe9..81cbf7e 100644
---- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
-+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
-@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
-
- writel(value, ioaddr + MMC_CNTRL);
-
-- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
-- MMC_CNTRL, value);
-+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
-+// MMC_CNTRL, value);
- }
-
- /* To mask all all interrupts.*/
-diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
-index d4d2bc1..14b8672 100644
---- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
-+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
-@@ -1602,7 +1602,7 @@ static const struct file_operations stmmac_rings_status_fops = {
- .open = stmmac_sysfs_ring_open,
- .read = seq_read,
- .llseek = seq_lseek,
-- .release = seq_release,
-+ .release = single_release,
- };
-
- static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
-@@ -1674,7 +1674,7 @@ static const struct file_operations stmmac_dma_cap_fops = {
- .open = stmmac_sysfs_dma_cap_open,
- .read = seq_read,
- .llseek = seq_lseek,
-- .release = seq_release,
-+ .release = single_release,
- };
-
- static int stmmac_init_fs(struct net_device *dev)
-diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
-index f37e0ae..a273c4a 100644
---- a/drivers/net/ethernet/via/via-rhine.c
-+++ b/drivers/net/ethernet/via/via-rhine.c
-@@ -2299,7 +2299,7 @@ static struct pci_driver rhine_driver = {
- .shutdown = rhine_shutdown,
- };
-
--static struct dmi_system_id __initdata rhine_dmi_table[] = {
-+static const struct dmi_system_id __initconst rhine_dmi_table[] = {
- {
- .ident = "EPIA-M",
- .matches = {
-diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
-index 00f1367..bfcb2f6 100644
---- a/drivers/net/ifb.c
-+++ b/drivers/net/ifb.c
-@@ -251,7 +251,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
- return 0;
- }
-
--static struct rtnl_link_ops ifb_link_ops __read_mostly = {
-+static struct rtnl_link_ops ifb_link_ops = {
- .kind = "ifb",
- .priv_size = sizeof(struct ifb_private),
- .setup = ifb_setup,
-diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
-index d0893e4..14b0d44 100644
---- a/drivers/net/loopback.c
-+++ b/drivers/net/loopback.c
-@@ -216,6 +216,6 @@ out:
- }
-
- /* Registered in net/core/dev.c */
--struct pernet_operations __net_initdata loopback_net_ops = {
-+struct pernet_operations __net_initconst loopback_net_ops = {
- .init = loopback_net_init,
- };
-diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
-index fed39de..8adf3152 100644
---- a/drivers/net/macvlan.c
-+++ b/drivers/net/macvlan.c
-@@ -790,13 +790,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
- int macvlan_link_register(struct rtnl_link_ops *ops)
- {
- /* common fields */
-- ops->priv_size = sizeof(struct macvlan_dev);
-- ops->validate = macvlan_validate;
-- ops->maxtype = IFLA_MACVLAN_MAX;
-- ops->policy = macvlan_policy;
-- ops->changelink = macvlan_changelink;
-- ops->get_size = macvlan_get_size;
-- ops->fill_info = macvlan_fill_info;
-+ pax_open_kernel();
-+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
-+ *(void **)&ops->validate = macvlan_validate;
-+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
-+ *(const void **)&ops->policy = macvlan_policy;
-+ *(void **)&ops->changelink = macvlan_changelink;
-+ *(void **)&ops->get_size = macvlan_get_size;
-+ *(void **)&ops->fill_info = macvlan_fill_info;
-+ pax_close_kernel();
-
- return rtnl_link_register(ops);
- };
-@@ -852,7 +854,7 @@ static int macvlan_device_event(struct notifier_block *unused,
- return NOTIFY_DONE;
- }
-
--static struct notifier_block macvlan_notifier_block __read_mostly = {
-+static struct notifier_block macvlan_notifier_block = {
- .notifier_call = macvlan_device_event,
- };
-
-diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
-index 7300447..cb83d3e 100644
---- a/drivers/net/macvtap.c
-+++ b/drivers/net/macvtap.c
-@@ -351,7 +351,7 @@ static void macvtap_setup(struct net_device *dev)
- dev->tx_queue_len = TUN_READQ_SIZE;
- }
-
--static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
-+static struct rtnl_link_ops macvtap_link_ops = {
- .kind = "macvtap",
- .setup = macvtap_setup,
- .newlink = macvtap_newlink,
-@@ -936,7 +936,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
- return -ENOLINK;
-
- ret = 0;
-- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
-+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
- put_user(q->flags, &ifr->ifr_flags))
- ret = -EFAULT;
- dev_put(vlan->dev);
-@@ -1097,7 +1097,7 @@ static int macvtap_device_event(struct notifier_block *unused,
- return NOTIFY_DONE;
- }
-
--static struct notifier_block macvtap_notifier_block __read_mostly = {
-+static struct notifier_block macvtap_notifier_block = {
- .notifier_call = macvtap_device_event,
- };
-
-@@ -1151,6 +1151,7 @@ static void macvtap_exit(void)
- class_unregister(macvtap_class);
- cdev_del(&macvtap_cdev);
- unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
-+ idr_destroy(&minor_idr);
- }
- module_exit(macvtap_exit);
-
-diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
-index 83a5a5a..9a9d0ae 100644
---- a/drivers/net/phy/phy_device.c
-+++ b/drivers/net/phy/phy_device.c
-@@ -207,7 +207,7 @@ static struct phy_device* phy_device_create(struct mii_bus *bus,
- * Description: Reads the ID registers of the PHY at @addr on the
- * @bus, stores it in @phy_id and returns zero on success.
- */
--int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
-+int get_phy_id(struct mii_bus *bus, int addr, int *phy_id)
- {
- int phy_reg;
-
-@@ -243,7 +243,7 @@ EXPORT_SYMBOL(get_phy_id);
- struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
- {
- struct phy_device *dev = NULL;
-- u32 phy_id;
-+ int phy_id;
- int r;
-
- r = get_phy_id(bus, addr, &phy_id);
-diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
-index 4782d79..359f1b9 100644
---- a/drivers/net/ppp/ppp_generic.c
-+++ b/drivers/net/ppp/ppp_generic.c
-@@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
- void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
- struct ppp_stats stats;
- struct ppp_comp_stats cstats;
-- char *vers;
-
- switch (cmd) {
- case SIOCGPPPSTATS:
-@@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
- break;
-
- case SIOCGPPPVER:
-- vers = PPP_VERSION;
-- if (copy_to_user(addr, vers, strlen(vers) + 1))
-+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
- break;
- err = 0;
- break;
-diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
-index c974581..9556f92 100644
---- a/drivers/net/ppp/pptp.c
-+++ b/drivers/net/ppp/pptp.c
-@@ -506,7 +506,9 @@ static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
- int len = sizeof(struct sockaddr_pppox);
- struct sockaddr_pppox sp;
-
-- sp.sa_family = AF_PPPOX;
-+ memset(&sp.sa_addr, 0, sizeof(sp.sa_addr));
-+
-+ sp.sa_family = AF_PPPOX;
- sp.sa_protocol = PX_PROTO_PPTP;
- sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;
-
-diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
-index 0a0a664..7e7deef 100644
---- a/drivers/net/slip/slhc.c
-+++ b/drivers/net/slip/slhc.c
-@@ -489,7 +489,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
- register struct tcphdr *thp;
- register struct iphdr *ip;
- register struct cstate *cs;
-- int len, hdrlen;
-+ long len, hdrlen;
- unsigned char *cp = icp;
-
- /* We've got a compressed packet; read the change byte */
-diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
-index 515f122..41dd273 100644
---- a/drivers/net/tokenring/abyss.c
-+++ b/drivers/net/tokenring/abyss.c
-@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
-
- static int __init abyss_init (void)
- {
-- abyss_netdev_ops = tms380tr_netdev_ops;
-+ pax_open_kernel();
-+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
-
-- abyss_netdev_ops.ndo_open = abyss_open;
-- abyss_netdev_ops.ndo_stop = abyss_close;
-+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
-+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
-+ pax_close_kernel();
-
- return pci_register_driver(&abyss_driver);
- }
-diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
-index 6153cfd..cf69c1c 100644
---- a/drivers/net/tokenring/madgemc.c
-+++ b/drivers/net/tokenring/madgemc.c
-@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
-
- static int __init madgemc_init (void)
- {
-- madgemc_netdev_ops = tms380tr_netdev_ops;
-- madgemc_netdev_ops.ndo_open = madgemc_open;
-- madgemc_netdev_ops.ndo_stop = madgemc_close;
-+ pax_open_kernel();
-+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
-+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
-+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
-+ pax_close_kernel();
-
- return mca_register_driver (&madgemc_driver);
- }
-diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
-index 8d362e6..f91cc52 100644
---- a/drivers/net/tokenring/proteon.c
-+++ b/drivers/net/tokenring/proteon.c
-@@ -353,9 +353,11 @@ static int __init proteon_init(void)
- struct platform_device *pdev;
- int i, num = 0, err = 0;
-
-- proteon_netdev_ops = tms380tr_netdev_ops;
-- proteon_netdev_ops.ndo_open = proteon_open;
-- proteon_netdev_ops.ndo_stop = tms380tr_close;
-+ pax_open_kernel();
-+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
-+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
-+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
-+ pax_close_kernel();
-
- err = platform_driver_register(&proteon_driver);
- if (err)
-diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
-index 46db5c5..37c1536 100644
---- a/drivers/net/tokenring/skisa.c
-+++ b/drivers/net/tokenring/skisa.c
-@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
- struct platform_device *pdev;
- int i, num = 0, err = 0;
-
-- sk_isa_netdev_ops = tms380tr_netdev_ops;
-- sk_isa_netdev_ops.ndo_open = sk_isa_open;
-- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
-+ pax_open_kernel();
-+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
-+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
-+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
-+ pax_close_kernel();
-
- err = platform_driver_register(&sk_isa_driver);
- if (err)
-diff --git a/drivers/net/tun.c b/drivers/net/tun.c
-index 2fbbca6..761d265 100644
---- a/drivers/net/tun.c
-+++ b/drivers/net/tun.c
-@@ -187,7 +187,6 @@ static void __tun_detach(struct tun_struct *tun)
- netif_tx_lock_bh(tun->dev);
- netif_carrier_off(tun->dev);
- tun->tfile = NULL;
-- tun->socket.file = NULL;
- netif_tx_unlock_bh(tun->dev);
-
- /* Drop read queue */
-@@ -360,7 +359,7 @@ static void tun_free_netdev(struct net_device *dev)
- {
- struct tun_struct *tun = netdev_priv(dev);
-
-- sock_put(tun->socket.sk);
-+ sk_release_kernel(tun->socket.sk);
- }
-
- /* Net device open. */
-@@ -931,7 +930,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
- return -EINVAL;
- }
-
--static struct rtnl_link_ops tun_link_ops __read_mostly = {
-+static struct rtnl_link_ops tun_link_ops = {
- .kind = DRV_NAME,
- .priv_size = sizeof(struct tun_struct),
- .setup = tun_setup,
-@@ -988,10 +987,18 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
- return ret;
- }
-
-+static int tun_release(struct socket *sock)
-+{
-+ if (sock->sk)
-+ sock_put(sock->sk);
-+ return 0;
-+}
-+
- /* Ops structure to mimic raw sockets with tun */
- static const struct proto_ops tun_socket_ops = {
- .sendmsg = tun_sendmsg,
- .recvmsg = tun_recvmsg,
-+ .release = tun_release,
- };
-
- static struct proto tun_proto = {
-@@ -1118,10 +1125,11 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
- tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
-
- err = -ENOMEM;
-- sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
-+ sk = sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
- if (!sk)
- goto err_free_dev;
-
-+ sk_change_net(sk, net);
- tun->socket.wq = &tun->wq;
- init_waitqueue_head(&tun->wq.wait);
- tun->socket.ops = &tun_socket_ops;
-@@ -1182,7 +1190,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
- return 0;
-
- err_free_sk:
-- sock_put(sk);
-+ tun_free_netdev(dev);
- err_free_dev:
- free_netdev(dev);
- failed:
-@@ -1241,7 +1249,7 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
- }
-
- static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
-- unsigned long arg, int ifreq_len)
-+ unsigned long arg, size_t ifreq_len)
- {
- struct tun_file *tfile = file->private_data;
- struct tun_struct *tun;
-@@ -1252,6 +1260,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
- int vnet_hdr_sz;
- int ret;
-
-+ if (ifreq_len > sizeof ifr)
-+ return -EFAULT;
-+
- if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
- if (copy_from_user(&ifr, argp, ifreq_len))
- return -EFAULT;
-diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
-index 304fe78..e505bdd 100644
---- a/drivers/net/usb/hso.c
-+++ b/drivers/net/usb/hso.c
-@@ -71,7 +71,7 @@
- #include <asm/byteorder.h>
- #include <linux/serial_core.h>
- #include <linux/serial.h>
--
-+#include <asm/local.h>
-
- #define MOD_AUTHOR "Option Wireless"
- #define MOD_DESCRIPTION "USB High Speed Option driver"
-@@ -257,7 +257,7 @@ struct hso_serial {
-
- /* from usb_serial_port */
- struct tty_struct *tty;
-- int open_count;
-+ local_t open_count;
- spinlock_t serial_lock;
-
- int (*write_data) (struct hso_serial *serial);
-@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
- struct urb *urb;
-
- urb = serial->rx_urb[0];
-- if (serial->open_count > 0) {
-+ if (local_read(&serial->open_count) > 0) {
- count = put_rxbuf_data(urb, serial);
- if (count == -1)
- return;
-@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
- DUMP1(urb->transfer_buffer, urb->actual_length);
-
- /* Anyone listening? */
-- if (serial->open_count == 0)
-+ if (local_read(&serial->open_count) == 0)
- return;
-
- if (status == 0) {
-@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
- spin_unlock_irq(&serial->serial_lock);
-
- /* check for port already opened, if not set the termios */
-- serial->open_count++;
-- if (serial->open_count == 1) {
-+ if (local_inc_return(&serial->open_count) == 1) {
- serial->rx_state = RX_IDLE;
- /* Force default termio settings */
- _hso_serial_set_termios(tty, NULL);
-@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
- result = hso_start_serial_device(serial->parent, GFP_KERNEL);
- if (result) {
- hso_stop_serial_device(serial->parent);
-- serial->open_count--;
-+ local_dec(&serial->open_count);
- kref_put(&serial->parent->ref, hso_serial_ref_free);
- }
- } else {
-@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
-
- /* reset the rts and dtr */
- /* do the actual close */
-- serial->open_count--;
-+ local_dec(&serial->open_count);
-
-- if (serial->open_count <= 0) {
-- serial->open_count = 0;
-+ if (local_read(&serial->open_count) <= 0) {
-+ local_set(&serial->open_count, 0);
- spin_lock_irq(&serial->serial_lock);
- if (serial->tty == tty) {
- serial->tty->driver_data = NULL;
-@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
-
- /* the actual setup */
- spin_lock_irqsave(&serial->serial_lock, flags);
-- if (serial->open_count)
-+ if (local_read(&serial->open_count))
- _hso_serial_set_termios(tty, old);
- else
- tty->termios = old;
-@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
- D1("Pending read interrupt on port %d\n", i);
- spin_lock(&serial->serial_lock);
- if (serial->rx_state == RX_IDLE &&
-- serial->open_count > 0) {
-+ local_read(&serial->open_count) > 0) {
- /* Setup and send a ctrl req read on
- * port i */
- if (!serial->rx_urb_filled[0]) {
-@@ -2857,13 +2856,16 @@ exit:
- static int hso_get_config_data(struct usb_interface *interface)
- {
- struct usb_device *usbdev = interface_to_usbdev(interface);
-- u8 config_data[17];
-+ u8 *config_data = kmalloc(17, GFP_KERNEL);
- u32 if_num = interface->altsetting->desc.bInterfaceNumber;
- s32 result;
-
-+ if (!config_data)
-+ return -ENOMEM;
- if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
- 0x86, 0xC0, 0, 0, config_data, 17,
- USB_CTRL_SET_TIMEOUT) != 0x11) {
-+ kfree(config_data);
- return -EIO;
- }
-
-@@ -2914,6 +2916,7 @@ static int hso_get_config_data(struct usb_interface *interface)
- if (config_data[16] & 0x1)
- result |= HSO_INFO_CRC_BUG;
-
-+ kfree(config_data);
- return result;
- }
-
-@@ -3098,7 +3101,7 @@ static int hso_resume(struct usb_interface *iface)
- /* Start all serial ports */
- for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
- if (serial_table[i] && (serial_table[i]->interface == iface)) {
-- if (dev2ser(serial_table[i])->open_count) {
-+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
- result =
- hso_start_serial_device(serial_table[i], GFP_NOIO);
- hso_kick_transmit(dev2ser(serial_table[i]));
-diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
-index e773250..9ee61ab 100644
---- a/drivers/net/usb/sierra_net.c
-+++ b/drivers/net/usb/sierra_net.c
-@@ -52,7 +52,7 @@ static const char driver_name[] = "sierra_net";
- /* atomic counter partially included in MAC address to make sure 2 devices
- * do not end up with the same MAC - concept breaks in case of > 255 ifaces
- */
--static atomic_t iface_counter = ATOMIC_INIT(0);
-+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
-
- /*
- * SYNC Timer Delay definition used to set the expiry time
-@@ -738,7 +738,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
- dev->net->netdev_ops = &sierra_net_device_ops;
-
- /* change MAC addr to include, ifacenum, and to be unique */
-- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
-+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
- dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
-
- /* we will have to manufacture ethernet headers, prepare template */
-diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
-index 3d21742..b8e03e7 100644
---- a/drivers/net/usb/usbnet.c
-+++ b/drivers/net/usb/usbnet.c
-@@ -344,6 +344,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
- unsigned long lockflags;
- size_t size = dev->rx_urb_size;
-
-+ /* prevent rx skb allocation when error ratio is high */
-+ if (test_bit(EVENT_RX_KILL, &dev->flags)) {
-+ usb_free_urb(urb);
-+ return -ENOLINK;
-+ }
-+
- if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
- netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
- usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
-@@ -498,6 +504,17 @@ block:
- break;
- }
-
-+ /* stop rx if packet error rate is high */
-+ if (++dev->pkt_cnt > 30) {
-+ dev->pkt_cnt = 0;
-+ dev->pkt_err = 0;
-+ } else {
-+ if (state == rx_cleanup)
-+ dev->pkt_err++;
-+ if (dev->pkt_err > 20)
-+ set_bit(EVENT_RX_KILL, &dev->flags);
-+ }
-+
- state = defer_bh(dev, skb, &dev->rxq, state);
-
- if (urb) {
-@@ -784,6 +801,11 @@ int usbnet_open (struct net_device *net)
- (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
- "simple");
-
-+ /* reset rx error state */
-+ dev->pkt_cnt = 0;
-+ dev->pkt_err = 0;
-+ clear_bit(EVENT_RX_KILL, &dev->flags);
-+
- // delay posting reads until we're fully open
- tasklet_schedule (&dev->bh);
- if (info->manage_power) {
-@@ -1078,7 +1100,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
- struct net_device *net)
- {
- struct usbnet *dev = netdev_priv(net);
-- int length;
-+ unsigned int length;
- struct urb *urb = NULL;
- struct skb_data *entry;
- struct driver_info *info = dev->driver_info;
-@@ -1187,7 +1209,7 @@ not_drop:
- usb_free_urb (urb);
- } else
- netif_dbg(dev, tx_queued, dev->net,
-- "> tx, len %d, type 0x%x\n", length, skb->protocol);
-+ "> tx, len %u, type 0x%x\n", length, skb->protocol);
- #ifdef CONFIG_PM
- deferred:
- #endif
-@@ -1222,6 +1244,9 @@ static void usbnet_bh (unsigned long param)
- }
- }
-
-+ /* restart RX again after disabling due to high error rate */
-+ clear_bit(EVENT_RX_KILL, &dev->flags);
-+
- // waiting for all pending urbs to complete?
- if (dev->wait) {
- if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
-diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
-index 28ceef2..655b059 100644
---- a/drivers/net/vmxnet3/vmxnet3_drv.c
-+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
-@@ -1140,7 +1140,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
- static const u32 rxprod_reg[2] = {
- VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
- };
-- u32 num_rxd = 0;
-+ u32 num_pkts = 0;
- bool skip_page_frags = false;
- struct Vmxnet3_RxCompDesc *rcd;
- struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
-@@ -1158,13 +1158,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
- struct Vmxnet3_RxDesc *rxd;
- u32 idx, ring_idx;
- struct vmxnet3_cmd_ring *ring = NULL;
-- if (num_rxd >= quota) {
-+ if (num_pkts >= quota) {
- /* we may stop even before we see the EOP desc of
- * the current pkt
- */
- break;
- }
-- num_rxd++;
- BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
- idx = rcd->rxdIdx;
- ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
-@@ -1288,6 +1287,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
- napi_gro_receive(&rq->napi, skb);
-
- ctx->skb = NULL;
-+ num_pkts++;
- }
-
- rcd_done:
-@@ -1319,7 +1319,7 @@ rcd_done:
- &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
- }
-
-- return num_rxd;
-+ return num_pkts;
- }
-
-
-diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
-index e662cbc..8d4a102 100644
---- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
-+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
-@@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
- * Return with error code if any of the queue indices
- * is out of range
- */
-- if (p->ring_index[i] < 0 ||
-- p->ring_index[i] >= adapter->num_rx_queues)
-+ if (p->ring_index[i] >= adapter->num_rx_queues)
- return -EINVAL;
- }
-
-diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
-index 5920c99..ff2e4a5 100644
---- a/drivers/net/wan/lmc/lmc_media.c
-+++ b/drivers/net/wan/lmc/lmc_media.c
-@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
- static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
-
- lmc_media_t lmc_ds3_media = {
-- lmc_ds3_init, /* special media init stuff */
-- lmc_ds3_default, /* reset to default state */
-- lmc_ds3_set_status, /* reset status to state provided */
-- lmc_dummy_set_1, /* set clock source */
-- lmc_dummy_set2_1, /* set line speed */
-- lmc_ds3_set_100ft, /* set cable length */
-- lmc_ds3_set_scram, /* set scrambler */
-- lmc_ds3_get_link_status, /* get link status */
-- lmc_dummy_set_1, /* set link status */
-- lmc_ds3_set_crc_length, /* set CRC length */
-- lmc_dummy_set_1, /* set T1 or E1 circuit type */
-- lmc_ds3_watchdog
-+ .init = lmc_ds3_init, /* special media init stuff */
-+ .defaults = lmc_ds3_default, /* reset to default state */
-+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
-+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
-+ .set_speed = lmc_dummy_set2_1, /* set line speed */
-+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
-+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
-+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
-+ .set_link_status = lmc_dummy_set_1, /* set link status */
-+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
-+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
-+ .watchdog = lmc_ds3_watchdog
- };
-
- lmc_media_t lmc_hssi_media = {
-- lmc_hssi_init, /* special media init stuff */
-- lmc_hssi_default, /* reset to default state */
-- lmc_hssi_set_status, /* reset status to state provided */
-- lmc_hssi_set_clock, /* set clock source */
-- lmc_dummy_set2_1, /* set line speed */
-- lmc_dummy_set_1, /* set cable length */
-- lmc_dummy_set_1, /* set scrambler */
-- lmc_hssi_get_link_status, /* get link status */
-- lmc_hssi_set_link_status, /* set link status */
-- lmc_hssi_set_crc_length, /* set CRC length */
-- lmc_dummy_set_1, /* set T1 or E1 circuit type */
-- lmc_hssi_watchdog
-+ .init = lmc_hssi_init, /* special media init stuff */
-+ .defaults = lmc_hssi_default, /* reset to default state */
-+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
-+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
-+ .set_speed = lmc_dummy_set2_1, /* set line speed */
-+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
-+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
-+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
-+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
-+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
-+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
-+ .watchdog = lmc_hssi_watchdog
- };
-
--lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
-- lmc_ssi_default, /* reset to default state */
-- lmc_ssi_set_status, /* reset status to state provided */
-- lmc_ssi_set_clock, /* set clock source */
-- lmc_ssi_set_speed, /* set line speed */
-- lmc_dummy_set_1, /* set cable length */
-- lmc_dummy_set_1, /* set scrambler */
-- lmc_ssi_get_link_status, /* get link status */
-- lmc_ssi_set_link_status, /* set link status */
-- lmc_ssi_set_crc_length, /* set CRC length */
-- lmc_dummy_set_1, /* set T1 or E1 circuit type */
-- lmc_ssi_watchdog
-+lmc_media_t lmc_ssi_media = {
-+ .init = lmc_ssi_init, /* special media init stuff */
-+ .defaults = lmc_ssi_default, /* reset to default state */
-+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
-+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
-+ .set_speed = lmc_ssi_set_speed, /* set line speed */
-+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
-+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
-+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
-+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
-+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
-+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
-+ .watchdog = lmc_ssi_watchdog
- };
-
- lmc_media_t lmc_t1_media = {
-- lmc_t1_init, /* special media init stuff */
-- lmc_t1_default, /* reset to default state */
-- lmc_t1_set_status, /* reset status to state provided */
-- lmc_t1_set_clock, /* set clock source */
-- lmc_dummy_set2_1, /* set line speed */
-- lmc_dummy_set_1, /* set cable length */
-- lmc_dummy_set_1, /* set scrambler */
-- lmc_t1_get_link_status, /* get link status */
-- lmc_dummy_set_1, /* set link status */
-- lmc_t1_set_crc_length, /* set CRC length */
-- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
-- lmc_t1_watchdog
-+ .init = lmc_t1_init, /* special media init stuff */
-+ .defaults = lmc_t1_default, /* reset to default state */
-+ .set_status = lmc_t1_set_status, /* reset status to state provided */
-+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
-+ .set_speed = lmc_dummy_set2_1, /* set line speed */
-+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
-+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
-+ .get_link_status = lmc_t1_get_link_status, /* get link status */
-+ .set_link_status = lmc_dummy_set_1, /* set link status */
-+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
-+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
-+ .watchdog = lmc_t1_watchdog
- };
-
- static void
-diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
-index 8a10bb7..7560422 100644
---- a/drivers/net/wan/x25_asy.c
-+++ b/drivers/net/wan/x25_asy.c
-@@ -123,8 +123,12 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
- {
- struct x25_asy *sl = netdev_priv(dev);
- unsigned char *xbuff, *rbuff;
-- int len = 2 * newmtu;
-+ int len;
-
-+ if (newmtu > 65534)
-+ return -EINVAL;
-+
-+ len = 2 * newmtu;
- xbuff = kmalloc(len + 4, GFP_ATOMIC);
- rbuff = kmalloc(len + 4, GFP_ATOMIC);
-
-diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
-index 0e57690..ad698bb 100644
---- a/drivers/net/wan/z85230.c
-+++ b/drivers/net/wan/z85230.c
-@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
-
- struct z8530_irqhandler z8530_sync =
- {
-- z8530_rx,
-- z8530_tx,
-- z8530_status
-+ .rx = z8530_rx,
-+ .tx = z8530_tx,
-+ .status = z8530_status
- };
-
- EXPORT_SYMBOL(z8530_sync);
-@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
- }
-
- static struct z8530_irqhandler z8530_dma_sync = {
-- z8530_dma_rx,
-- z8530_dma_tx,
-- z8530_dma_status
-+ .rx = z8530_dma_rx,
-+ .tx = z8530_dma_tx,
-+ .status = z8530_dma_status
- };
-
- static struct z8530_irqhandler z8530_txdma_sync = {
-- z8530_rx,
-- z8530_dma_tx,
-- z8530_dma_status
-+ .rx = z8530_rx,
-+ .tx = z8530_dma_tx,
-+ .status = z8530_dma_status
- };
-
- /**
-@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
-
- struct z8530_irqhandler z8530_nop=
- {
-- z8530_rx_clear,
-- z8530_tx_clear,
-- z8530_status_clear
-+ .rx = z8530_rx_clear,
-+ .tx = z8530_tx_clear,
-+ .status = z8530_status_clear
- };
-
-
-diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
-index ac1176a..79e93d4 100644
---- a/drivers/net/wireless/airo.c
-+++ b/drivers/net/wireless/airo.c
-@@ -7885,7 +7885,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
- struct airo_info *ai = dev->ml_priv;
- int ridcode;
- int enabled;
-- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
-+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
- unsigned char *iobuf;
-
- /* Only super-user can write RIDs */
-diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
-index 4045e5a..506f1cfd 100644
---- a/drivers/net/wireless/at76c50x-usb.c
-+++ b/drivers/net/wireless/at76c50x-usb.c
-@@ -353,7 +353,7 @@ static u8 at76_dfu_get_state(struct usb_device *udev, u8 *state)
- }
-
- /* Convert timeout from the DFU status to jiffies */
--static inline unsigned long at76_get_timeout(struct dfu_status *s)
-+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
- {
- return msecs_to_jiffies((s->poll_timeout[2] << 16)
- | (s->poll_timeout[1] << 8)
-diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
-index b346d04..04436fa 100644
---- a/drivers/net/wireless/ath/ath5k/base.c
-+++ b/drivers/net/wireless/ath/ath5k/base.c
-@@ -1791,7 +1791,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
- {
- int ret;
- struct ath5k_hw *ah = hw->priv;
-- struct ath5k_vif *avf = (void *)vif->drv_priv;
-+ struct ath5k_vif *avf;
- struct sk_buff *skb;
-
- if (WARN_ON(!vif)) {
-@@ -1806,6 +1806,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
- goto out;
- }
-
-+ avf = (void *)vif->drv_priv;
- ath5k_txbuf_free_skb(ah, avf->bbuf);
- avf->bbuf->skb = skb;
- ret = ath5k_beacon_setup(ah, avf->bbuf);
-diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
-index f4caeb3..8da6f5d 100644
---- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
-+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
-@@ -217,8 +217,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
- ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
- ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
-
-- ACCESS_ONCE(ads->ds_link) = i->link;
-- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
-+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
-+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
-
- ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
- ctl6 = SM(i->keytype, AR_EncrType);
-@@ -232,26 +232,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
-
- if ((i->is_first || i->is_last) &&
- i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
-- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
-+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
- | set11nTries(i->rates, 1)
- | set11nTries(i->rates, 2)
- | set11nTries(i->rates, 3)
- | (i->dur_update ? AR_DurUpdateEna : 0)
- | SM(0, AR_BurstDur);
-
-- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
-+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
- | set11nRate(i->rates, 1)
- | set11nRate(i->rates, 2)
- | set11nRate(i->rates, 3);
- } else {
-- ACCESS_ONCE(ads->ds_ctl2) = 0;
-- ACCESS_ONCE(ads->ds_ctl3) = 0;
-+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
-+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
- }
-
- if (!i->is_first) {
-- ACCESS_ONCE(ads->ds_ctl0) = 0;
-- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
-- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
-+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
-+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
-+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
- return;
- }
-
-@@ -276,7 +276,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
- break;
- }
-
-- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
-+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
- | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
- | SM(i->txpower, AR_XmitPower)
- | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
-@@ -286,19 +286,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
- | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
- (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
-
-- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
-- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
-+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
-+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
-
- if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
- return;
-
-- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
-+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
- | set11nPktDurRTSCTS(i->rates, 1);
-
-- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
-+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
- | set11nPktDurRTSCTS(i->rates, 3);
-
-- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
-+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
- | set11nRateFlags(i->rates, 1)
- | set11nRateFlags(i->rates, 2)
- | set11nRateFlags(i->rates, 3)
-diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
-index f5ae3c67..7936af3 100644
---- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
-+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
-@@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
- (i->qcu << AR_TxQcuNum_S) | 0x17;
-
- checksum += val;
-- ACCESS_ONCE(ads->info) = val;
-+ ACCESS_ONCE_RW(ads->info) = val;
-
- checksum += i->link;
-- ACCESS_ONCE(ads->link) = i->link;
-+ ACCESS_ONCE_RW(ads->link) = i->link;
-
- checksum += i->buf_addr[0];
-- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
-+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
- checksum += i->buf_addr[1];
-- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
-+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
- checksum += i->buf_addr[2];
-- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
-+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
- checksum += i->buf_addr[3];
-- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
-+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
-
- checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
-- ACCESS_ONCE(ads->ctl3) = val;
-+ ACCESS_ONCE_RW(ads->ctl3) = val;
- checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
-- ACCESS_ONCE(ads->ctl5) = val;
-+ ACCESS_ONCE_RW(ads->ctl5) = val;
- checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
-- ACCESS_ONCE(ads->ctl7) = val;
-+ ACCESS_ONCE_RW(ads->ctl7) = val;
- checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
-- ACCESS_ONCE(ads->ctl9) = val;
-+ ACCESS_ONCE_RW(ads->ctl9) = val;
-
- checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
-- ACCESS_ONCE(ads->ctl10) = checksum;
-+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
-
- if (i->is_first || i->is_last) {
-- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
-+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
- | set11nTries(i->rates, 1)
- | set11nTries(i->rates, 2)
- | set11nTries(i->rates, 3)
- | (i->dur_update ? AR_DurUpdateEna : 0)
- | SM(0, AR_BurstDur);
-
-- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
-+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
- | set11nRate(i->rates, 1)
- | set11nRate(i->rates, 2)
- | set11nRate(i->rates, 3);
- } else {
-- ACCESS_ONCE(ads->ctl13) = 0;
-- ACCESS_ONCE(ads->ctl14) = 0;
-+ ACCESS_ONCE_RW(ads->ctl13) = 0;
-+ ACCESS_ONCE_RW(ads->ctl14) = 0;
- }
-
- ads->ctl20 = 0;
-@@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
-
- ctl17 = SM(i->keytype, AR_EncrType);
- if (!i->is_first) {
-- ACCESS_ONCE(ads->ctl11) = 0;
-- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
-- ACCESS_ONCE(ads->ctl15) = 0;
-- ACCESS_ONCE(ads->ctl16) = 0;
-- ACCESS_ONCE(ads->ctl17) = ctl17;
-- ACCESS_ONCE(ads->ctl18) = 0;
-- ACCESS_ONCE(ads->ctl19) = 0;
-+ ACCESS_ONCE_RW(ads->ctl11) = 0;
-+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
-+ ACCESS_ONCE_RW(ads->ctl15) = 0;
-+ ACCESS_ONCE_RW(ads->ctl16) = 0;
-+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
-+ ACCESS_ONCE_RW(ads->ctl18) = 0;
-+ ACCESS_ONCE_RW(ads->ctl19) = 0;
- return;
- }
-
-- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
-+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
- | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
- | SM(i->txpower, AR_XmitPower)
- | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
-@@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
- val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
- ctl12 |= SM(val, AR_PAPRDChainMask);
-
-- ACCESS_ONCE(ads->ctl12) = ctl12;
-- ACCESS_ONCE(ads->ctl17) = ctl17;
-+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
-+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
-
-- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
-+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
- | set11nPktDurRTSCTS(i->rates, 1);
-
-- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
-+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
- | set11nPktDurRTSCTS(i->rates, 3);
-
-- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
-+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
- | set11nRateFlags(i->rates, 1)
- | set11nRateFlags(i->rates, 2)
- | set11nRateFlags(i->rates, 3)
- | SM(i->rtscts_rate, AR_RTSCTSRate);
-
-- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
-+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
- }
-
- static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
-diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
-index 8b1123d..20a54e9 100644
---- a/drivers/net/wireless/ath/ath9k/hw.h
-+++ b/drivers/net/wireless/ath/ath9k/hw.h
-@@ -607,7 +607,7 @@ struct ath_hw_private_ops {
-
- /* ANI */
- void (*ani_cache_ini_regs)(struct ath_hw *ah);
--};
-+} __no_const;
-
- /**
- * struct ath_hw_ops - callbacks used by hardware code and driver code
-@@ -637,7 +637,7 @@ struct ath_hw_ops {
- void (*antdiv_comb_conf_set)(struct ath_hw *ah,
- struct ath_hw_antcomb_conf *antconf);
-
--};
-+} __no_const;
-
- struct ath_nf_limits {
- s16 max;
-diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
-index f93d66b..a6c7765 100644
---- a/drivers/net/wireless/b43/phy_lp.c
-+++ b/drivers/net/wireless/b43/phy_lp.c
-@@ -2520,7 +2520,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
- {
- struct ssb_bus *bus = dev->dev->sdev->bus;
-
-- static const struct b206x_channel *chandata = NULL;
-+ const struct b206x_channel *chandata = NULL;
- u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
- u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
- u16 old_comm15, scale;
-diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
-index 62dc461..5250f0b 100644
---- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
-+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
-@@ -175,7 +175,7 @@ struct brcmf_cfg80211_event_loop {
- struct net_device *ndev,
- const struct brcmf_event_msg *e,
- void *data);
--};
-+} __no_const;
-
- /* representing interface of cfg80211 plane */
- struct brcmf_cfg80211_iface {
-@@ -239,7 +239,7 @@ struct brcmf_cfg80211_profile {
- struct brcmf_cfg80211_iscan_eloop {
- s32 (*handler[WL_SCAN_ERSULTS_LAST])
- (struct brcmf_cfg80211_priv *cfg_priv);
--};
-+} __no_const;
-
- /* dongle iscan controller */
- struct brcmf_cfg80211_iscan_ctrl {
-diff --git a/drivers/net/wireless/brcm80211/brcmsmac/otp.c b/drivers/net/wireless/brcm80211/brcmsmac/otp.c
-index edf5515..91033e1 100644
---- a/drivers/net/wireless/brcm80211/brcmsmac/otp.c
-+++ b/drivers/net/wireless/brcm80211/brcmsmac/otp.c
-@@ -378,8 +378,8 @@ ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen)
- }
-
- static const struct otp_fn_s ipxotp_fn = {
-- (int (*)(struct si_pub *, struct otpinfo *)) ipxotp_init,
-- (int (*)(struct otpinfo *, int, u16 *, uint *)) ipxotp_read_region,
-+ .init = ipxotp_init,
-+ .read_region = ipxotp_read_region,
- };
-
- static int otp_init(struct si_pub *sih, struct otpinfo *oi)
-diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
-index b3d9f3f..9931f58 100644
---- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
-+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
-@@ -3685,7 +3685,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
- */
- if (iwl3945_mod_params.disable_hw_scan) {
- IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
-- iwl3945_hw_ops.hw_scan = NULL;
-+ pax_open_kernel();
-+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
-+ pax_close_kernel();
- }
-
- IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
-diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
-index 69a77e24..552b42c 100644
---- a/drivers/net/wireless/iwlwifi/iwl-debug.h
-+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
-@@ -71,8 +71,8 @@ do { \
- } while (0)
-
- #else
--#define IWL_DEBUG(m, level, fmt, args...)
--#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
-+#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
-+#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
- #define iwl_print_hex_dump(m, level, p, len)
- #endif /* CONFIG_IWLWIFI_DEBUG */
-
-diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
-index 93e6179..b221e4f 100644
---- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
-+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
-@@ -163,7 +163,7 @@ static ssize_t iwl_dbgfs_clear_traffic_statistics_write(struct file *file,
- struct iwl_priv *priv = file->private_data;
- u32 clear_flag;
- char buf[8];
-- int buf_size;
-+ size_t buf_size;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
-@@ -311,7 +311,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
- {
- struct iwl_priv *priv = file->private_data;
- char buf[64];
-- int buf_size;
-+ size_t buf_size;
- u32 offset, len;
-
- memset(buf, 0, sizeof(buf));
-@@ -601,7 +601,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
- struct iwl_priv *priv = file->private_data;
-
- char buf[8];
-- int buf_size;
-+ size_t buf_size;
- u32 reset_flag;
-
- memset(buf, 0, sizeof(buf));
-@@ -682,7 +682,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
- {
- struct iwl_priv *priv = file->private_data;
- char buf[8];
-- int buf_size;
-+ size_t buf_size;
- int ht40;
-
- memset(buf, 0, sizeof(buf));
-@@ -737,7 +737,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
- {
- struct iwl_priv *priv = file->private_data;
- char buf[8];
-- int buf_size;
-+ size_t buf_size;
- int value;
-
- memset(buf, 0, sizeof(buf));
-@@ -897,7 +897,7 @@ static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
- {
- struct iwl_priv *priv = file->private_data;
- char buf[8];
-- int buf_size;
-+ size_t buf_size;
- int traffic_log;
-
- memset(buf, 0, sizeof(buf));
-@@ -912,10 +912,10 @@ static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
- return count;
- }
-
--static const char *fmt_value = " %-30s %10u\n";
--static const char *fmt_hex = " %-30s 0x%02X\n";
--static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
--static const char *fmt_header =
-+static const char fmt_value[] = " %-30s %10u\n";
-+static const char fmt_hex[] = " %-30s 0x%02X\n";
-+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
-+static const char fmt_header[] =
- "%-32s current cumulative delta max\n";
-
- static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
-@@ -2078,7 +2078,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
- {
- struct iwl_priv *priv = file->private_data;
- char buf[8];
-- int buf_size;
-+ size_t buf_size;
- int clear;
-
- memset(buf, 0, sizeof(buf));
-@@ -2123,7 +2123,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
- {
- struct iwl_priv *priv = file->private_data;
- char buf[8];
-- int buf_size;
-+ size_t buf_size;
- int trace;
-
- memset(buf, 0, sizeof(buf));
-@@ -2193,7 +2193,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
- {
- struct iwl_priv *priv = file->private_data;
- char buf[8];
-- int buf_size;
-+ size_t buf_size;
- int missed;
-
- memset(buf, 0, sizeof(buf));
-@@ -2234,7 +2234,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
-
- struct iwl_priv *priv = file->private_data;
- char buf[8];
-- int buf_size;
-+ size_t buf_size;
- int plcp;
-
- memset(buf, 0, sizeof(buf));
-@@ -2288,7 +2288,7 @@ static ssize_t iwl_dbgfs_force_reset_write(struct file *file,
-
- struct iwl_priv *priv = file->private_data;
- char buf[8];
-- int buf_size;
-+ size_t buf_size;
- int reset, ret;
-
- memset(buf, 0, sizeof(buf));
-@@ -2314,7 +2314,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
-
- struct iwl_priv *priv = file->private_data;
- char buf[8];
-- int buf_size;
-+ size_t buf_size;
- int flush;
-
- memset(buf, 0, sizeof(buf));
-@@ -2338,7 +2338,7 @@ static ssize_t iwl_dbgfs_wd_timeout_write(struct file *file,
- {
- struct iwl_priv *priv = file->private_data;
- char buf[8];
-- int buf_size;
-+ size_t buf_size;
- int timeout;
-
- memset(buf, 0, sizeof(buf));
-@@ -2427,7 +2427,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
-
- struct iwl_priv *priv = file->private_data;
- char buf[8];
-- int buf_size;
-+ size_t buf_size;
- int rts;
-
- if (!priv->cfg->ht_params)
-@@ -2452,7 +2452,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
- {
- struct iwl_priv *priv = file->private_data;
- char buf[8];
-- int buf_size;
-+ size_t buf_size;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
-diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
-index 75da4bc..7737dff 100644
---- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
-+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
-@@ -1890,7 +1890,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
- struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
-
- char buf[8];
-- int buf_size;
-+ size_t buf_size;
- u32 reset_flag;
-
- memset(buf, 0, sizeof(buf));
-@@ -1911,7 +1911,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
- {
- struct iwl_trans *trans = file->private_data;
- char buf[8];
-- int buf_size;
-+ size_t buf_size;
- int csr;
-
- memset(buf, 0, sizeof(buf));
-diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
-index 523ad55..f8c5dc5 100644
---- a/drivers/net/wireless/mac80211_hwsim.c
-+++ b/drivers/net/wireless/mac80211_hwsim.c
-@@ -1678,9 +1678,11 @@ static int __init init_mac80211_hwsim(void)
- return -EINVAL;
-
- if (fake_hw_scan) {
-- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
-- mac80211_hwsim_ops.sw_scan_start = NULL;
-- mac80211_hwsim_ops.sw_scan_complete = NULL;
-+ pax_open_kernel();
-+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
-+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
-+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
-+ pax_close_kernel();
- }
-
- spin_lock_init(&hwsim_radio_lock);
-diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
-index d26a78b..156ad04 100644
---- a/drivers/net/wireless/mwifiex/debugfs.c
-+++ b/drivers/net/wireless/mwifiex/debugfs.c
-@@ -26,10 +26,17 @@
- static struct dentry *mwifiex_dfs_dir;
-
- static char *bss_modes[] = {
-- "Unknown",
-- "Ad-hoc",
-- "Managed",
-- "Auto"
-+ "UNSPECIFIED",
-+ "ADHOC",
-+ "STATION",
-+ "AP",
-+ "AP_VLAN",
-+ "WDS",
-+ "MONITOR",
-+ "MESH_POINT",
-+ "P2P_CLIENT",
-+ "P2P_GO",
-+ "P2P_DEVICE",
- };
-
- /* size/addr for mwifiex_debug_info */
-@@ -213,7 +220,12 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
- p += sprintf(p, "driver_version = %s", fmt);
- p += sprintf(p, "\nverext = %s", priv->version_str);
- p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name);
-- p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]);
-+
-+ if (info.bss_mode >= ARRAY_SIZE(bss_modes))
-+ p += sprintf(p, "bss_mode=\"%d\"\n", info.bss_mode);
-+ else
-+ p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]);
-+
- p += sprintf(p, "media_state=\"%s\"\n",
- (!priv->media_connected ? "Disconnected" : "Connected"));
- p += sprintf(p, "mac_address=\"%pM\"\n", netdev->dev_addr);
-diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
-index 0c13840..a5c3ed6 100644
---- a/drivers/net/wireless/rndis_wlan.c
-+++ b/drivers/net/wireless/rndis_wlan.c
-@@ -1275,7 +1275,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
-
- netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
-
-- if (rts_threshold < 0 || rts_threshold > 2347)
-+ if (rts_threshold > 2347)
- rts_threshold = 2347;
-
- tmp = cpu_to_le32(rts_threshold);
-diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c
-index e2750a1..797e179 100644
---- a/drivers/net/wireless/wl1251/sdio.c
-+++ b/drivers/net/wireless/wl1251/sdio.c
-@@ -269,13 +269,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
-
- irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
-
-- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
-- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
-+ pax_open_kernel();
-+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
-+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
-+ pax_close_kernel();
-
- wl1251_info("using dedicated interrupt line");
- } else {
-- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
-- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
-+ pax_open_kernel();
-+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
-+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
-+ pax_close_kernel();
-
- wl1251_info("using SDIO interrupt");
- }
-diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
-index 785bdbe..ddde2d1 100644
---- a/drivers/net/wireless/zd1211rw/zd_usb.c
-+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
-@@ -387,7 +387,7 @@ static inline void handle_regs_int(struct urb *urb)
- {
- struct zd_usb *usb = urb->context;
- struct zd_usb_interrupt *intr = &usb->intr;
-- int len;
-+ unsigned int len;
- u16 int_num;
-
- ZD_ASSERT(in_interrupt());
-diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
-index 06c3642..c4ee5f2 100644
---- a/drivers/nfc/nfcwilink.c
-+++ b/drivers/nfc/nfcwilink.c
-@@ -237,7 +237,7 @@ static struct nci_ops nfcwilink_ops = {
-
- static int nfcwilink_probe(struct platform_device *pdev)
- {
-- static struct nfcwilink *drv;
-+ struct nfcwilink *drv;
- int rc;
- u32 protocols;
-
-diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
-index f34b5b2..b5abb9f 100644
---- a/drivers/oprofile/buffer_sync.c
-+++ b/drivers/oprofile/buffer_sync.c
-@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
- if (cookie == NO_COOKIE)
- offset = pc;
- if (cookie == INVALID_COOKIE) {
-- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
-+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
- offset = pc;
- }
- if (cookie != last_cookie) {
-@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
- /* add userspace sample */
-
- if (!mm) {
-- atomic_inc(&oprofile_stats.sample_lost_no_mm);
-+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
- return 0;
- }
-
- cookie = lookup_dcookie(mm, s->eip, &offset);
-
- if (cookie == INVALID_COOKIE) {
-- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
-+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
- return 0;
- }
-
-@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
- /* ignore backtraces if failed to add a sample */
- if (state == sb_bt_start) {
- state = sb_bt_ignore;
-- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
-+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
- }
- }
- release_mm(mm);
-diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
-index c0cc4e7..44d4e54 100644
---- a/drivers/oprofile/event_buffer.c
-+++ b/drivers/oprofile/event_buffer.c
-@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
- }
-
- if (buffer_pos == buffer_size) {
-- atomic_inc(&oprofile_stats.event_lost_overflow);
-+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
- return;
- }
-
-diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
-index f8c752e..28bf4fc 100644
---- a/drivers/oprofile/oprof.c
-+++ b/drivers/oprofile/oprof.c
-@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
- if (oprofile_ops.switch_events())
- return;
-
-- atomic_inc(&oprofile_stats.multiplex_counter);
-+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
- start_switch_worker();
- }
-
-diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
-index 84a208d..d61b0a1 100644
---- a/drivers/oprofile/oprofile_files.c
-+++ b/drivers/oprofile/oprofile_files.c
-@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
-
- #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-
--static ssize_t timeout_read(struct file *file, char __user *buf,
-+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
- size_t count, loff_t *offset)
- {
- return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
-diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
-index 917d28e..d62d981 100644
---- a/drivers/oprofile/oprofile_stats.c
-+++ b/drivers/oprofile/oprofile_stats.c
-@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
- cpu_buf->sample_invalid_eip = 0;
- }
-
-- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
-- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
-- atomic_set(&oprofile_stats.event_lost_overflow, 0);
-- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
-- atomic_set(&oprofile_stats.multiplex_counter, 0);
-+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
-+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
-+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
-+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
-+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
- }
-
-
-diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
-index 38b6fc0..b5cbfce 100644
---- a/drivers/oprofile/oprofile_stats.h
-+++ b/drivers/oprofile/oprofile_stats.h
-@@ -13,11 +13,11 @@
- #include <linux/atomic.h>
-
- struct oprofile_stat_struct {
-- atomic_t sample_lost_no_mm;
-- atomic_t sample_lost_no_mapping;
-- atomic_t bt_lost_no_mapping;
-- atomic_t event_lost_overflow;
-- atomic_t multiplex_counter;
-+ atomic_unchecked_t sample_lost_no_mm;
-+ atomic_unchecked_t sample_lost_no_mapping;
-+ atomic_unchecked_t bt_lost_no_mapping;
-+ atomic_unchecked_t event_lost_overflow;
-+ atomic_unchecked_t multiplex_counter;
- };
-
- extern struct oprofile_stat_struct oprofile_stats;
-diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
-index 2f0aa0f..1ba4404 100644
---- a/drivers/oprofile/oprofilefs.c
-+++ b/drivers/oprofile/oprofilefs.c
-@@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
-
-
- int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
-- char const *name, atomic_t *val)
-+ char const *name, atomic_unchecked_t *val)
- {
- return __oprofilefs_create_file(sb, root, name,
- &atomic_ro_fops, 0444, val);
-@@ -279,6 +279,7 @@ static struct file_system_type oprofilefs_type = {
- .mount = oprofilefs_mount,
- .kill_sb = kill_litter_super,
- };
-+MODULE_ALIAS_FS("oprofilefs");
-
-
- int __init oprofilefs_register(void)
-diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
-index 878fba1..2084bcf 100644
---- a/drivers/oprofile/timer_int.c
-+++ b/drivers/oprofile/timer_int.c
-@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __refdata oprofile_cpu_notifier = {
-+static struct notifier_block oprofile_cpu_notifier = {
- .notifier_call = oprofile_cpu_notify,
- };
-
-diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
-index 3f56bc0..707d642 100644
---- a/drivers/parport/procfs.c
-+++ b/drivers/parport/procfs.c
-@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
-
- *ppos += len;
-
-- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
-+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
- }
-
- #ifdef CONFIG_PARPORT_1284
-@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
-
- *ppos += len;
-
-- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
-+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
- }
- #endif /* IEEE1284.3 support. */
-
-diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
-index e525263..ebde92e 100644
---- a/drivers/pci/hotplug/acpiphp_ibm.c
-+++ b/drivers/pci/hotplug/acpiphp_ibm.c
-@@ -464,7 +464,9 @@ static int __init ibm_acpiphp_init(void)
- goto init_cleanup;
- }
-
-- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
-+ pax_open_kernel();
-+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
-+ pax_close_kernel();
- retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
-
- return retval;
-diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
-index fb3f846..792d643 100644
---- a/drivers/pci/hotplug/cpcihp_generic.c
-+++ b/drivers/pci/hotplug/cpcihp_generic.c
-@@ -73,7 +73,6 @@ static u16 port;
- static unsigned int enum_bit;
- static u8 enum_mask;
-
--static struct cpci_hp_controller_ops generic_hpc_ops;
- static struct cpci_hp_controller generic_hpc;
-
- static int __init validate_parameters(void)
-@@ -139,6 +138,10 @@ static int query_enum(void)
- return ((value & enum_mask) == enum_mask);
- }
-
-+static struct cpci_hp_controller_ops generic_hpc_ops = {
-+ .query_enum = query_enum,
-+};
-+
- static int __init cpcihp_generic_init(void)
- {
- int status;
-@@ -169,7 +172,6 @@ static int __init cpcihp_generic_init(void)
- pci_dev_put(dev);
-
- memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
-- generic_hpc_ops.query_enum = query_enum;
- generic_hpc.ops = &generic_hpc_ops;
-
- status = cpci_hp_register_controller(&generic_hpc);
-diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
-index 41f6a8d..da73050 100644
---- a/drivers/pci/hotplug/cpcihp_zt5550.c
-+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
-@@ -59,7 +59,6 @@
- /* local variables */
- static int debug;
- static int poll;
--static struct cpci_hp_controller_ops zt5550_hpc_ops;
- static struct cpci_hp_controller zt5550_hpc;
-
- /* Primary cPCI bus bridge device */
-@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
- return 0;
- }
-
-+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
-+ .query_enum = zt5550_hc_query_enum,
-+};
-+
- static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
- {
- int status;
-@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
- dbg("returned from zt5550_hc_config");
-
- memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
-- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
- zt5550_hpc.ops = &zt5550_hpc_ops;
- if(!poll) {
- zt5550_hpc.irq = hc_dev->irq;
- zt5550_hpc.irq_flags = IRQF_SHARED;
- zt5550_hpc.dev_id = hc_dev;
-
-- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
-- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
-- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
-+ pax_open_kernel();
-+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
-+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
-+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
-+ pax_open_kernel();
- } else {
- info("using ENUM# polling mode");
- }
-diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
-index 76ba8a1..20ca857 100644
---- a/drivers/pci/hotplug/cpqphp_nvram.c
-+++ b/drivers/pci/hotplug/cpqphp_nvram.c
-@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
-
- void compaq_nvram_init (void __iomem *rom_start)
- {
-+
-+#ifndef CONFIG_PAX_KERNEXEC
- if (rom_start) {
- compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
- }
-+#endif
-+
- dbg("int15 entry = %p\n", compaq_int15_entry_point);
-
- /* initialize our int15 lock */
-diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
-index 6d2eea9..4bf3318 100644
---- a/drivers/pci/hotplug/pci_hotplug_core.c
-+++ b/drivers/pci/hotplug/pci_hotplug_core.c
-@@ -448,8 +448,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
- return -EINVAL;
- }
-
-- slot->ops->owner = owner;
-- slot->ops->mod_name = mod_name;
-+ pax_open_kernel();
-+ *(struct module **)&slot->ops->owner = owner;
-+ *(const char **)&slot->ops->mod_name = mod_name;
-+ pax_close_kernel();
-
- mutex_lock(&pci_hp_mutex);
- /*
-diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
-index dc126a2..46fecf8 100644
---- a/drivers/pci/hotplug/pciehp_core.c
-+++ b/drivers/pci/hotplug/pciehp_core.c
-@@ -91,7 +91,7 @@ static int init_slot(struct controller *ctrl)
- struct slot *slot = ctrl->slot;
- struct hotplug_slot *hotplug = NULL;
- struct hotplug_slot_info *info = NULL;
-- struct hotplug_slot_ops *ops = NULL;
-+ hotplug_slot_ops_no_const *ops = NULL;
- char name[SLOT_NAME_SIZE];
- int retval = -ENOMEM;
-
-diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
-index 1e6be19..3bcdd51 100644
---- a/drivers/pci/pci-sysfs.c
-+++ b/drivers/pci/pci-sysfs.c
-@@ -950,7 +950,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
- {
- /* allocate attribute structure, piggyback attribute name */
- int name_len = write_combine ? 13 : 10;
-- struct bin_attribute *res_attr;
-+ bin_attribute_no_const *res_attr;
- int retval;
-
- res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
-@@ -1135,7 +1135,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
- static int pci_create_capabilities_sysfs(struct pci_dev *dev)
- {
- int retval;
-- struct bin_attribute *attr;
-+ bin_attribute_no_const *attr;
-
- /* If the device has VPD, try to expose it in sysfs. */
- if (dev->vpd) {
-@@ -1182,7 +1182,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
- {
- int retval;
- int rom_size = 0;
-- struct bin_attribute *attr;
-+ bin_attribute_no_const *attr;
-
- if (!sysfs_initialized)
- return -EACCES;
-diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
-index b74084e..a9c2922 100644
---- a/drivers/pci/pci.h
-+++ b/drivers/pci/pci.h
-@@ -101,7 +101,7 @@ struct pci_vpd_ops {
- struct pci_vpd {
- unsigned int len;
- const struct pci_vpd_ops *ops;
-- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
-+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
- };
-
- extern int pci_vpd_pci22_init(struct pci_dev *dev);
-diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
-index c73ed00..cc3edec 100644
---- a/drivers/pci/pcie/aspm.c
-+++ b/drivers/pci/pcie/aspm.c
-@@ -27,9 +27,9 @@
- #define MODULE_PARAM_PREFIX "pcie_aspm."
-
- /* Note: those are not register definitions */
--#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
--#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
--#define ASPM_STATE_L1 (4) /* L1 state */
-+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
-+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
-+#define ASPM_STATE_L1 (4U) /* L1 state */
- #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
- #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
-
-diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
-index 7e41b70..6e34f30 100644
---- a/drivers/pci/pcie/portdrv_pci.c
-+++ b/drivers/pci/pcie/portdrv_pci.c
-@@ -337,7 +337,7 @@ static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
- return 0;
- }
-
--static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = {
-+static const struct dmi_system_id __initconst pcie_portdrv_dmi_table[] = {
- /*
- * Boxes that should not use MSI for PCIe PME signaling.
- */
-diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
-index bc92c47..47e01d7 100644
---- a/drivers/pci/probe.c
-+++ b/drivers/pci/probe.c
-@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
- u32 l, sz, mask;
- u16 orig_cmd;
-
-- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
-+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
-
- if (!dev->mmio_always_on) {
- pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
-diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
-index 27911b5..5b6db88 100644
---- a/drivers/pci/proc.c
-+++ b/drivers/pci/proc.c
-@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
- static int __init pci_proc_init(void)
- {
- struct pci_dev *dev = NULL;
-+
-+#ifdef CONFIG_GRKERNSEC_PROC_ADD
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
-+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
-+#endif
-+#else
- proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
-+#endif
- proc_create("devices", 0, proc_bus_pci_dir,
- &proc_bus_pci_dev_operations);
- proc_initialized = 1;
-diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
-index 26fba2d..693b4d3 100644
---- a/drivers/platform/x86/asus-wmi.c
-+++ b/drivers/platform/x86/asus-wmi.c
-@@ -1463,6 +1463,10 @@ static int show_dsts(struct seq_file *m, void *data)
- int err;
- u32 retval = -1;
-
-+#ifdef CONFIG_GRKERNSEC_KMEM
-+ return -EPERM;
-+#endif
-+
- err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
-
- if (err < 0)
-@@ -1479,6 +1483,10 @@ static int show_devs(struct seq_file *m, void *data)
- int err;
- u32 retval = -1;
-
-+#ifdef CONFIG_GRKERNSEC_KMEM
-+ return -EPERM;
-+#endif
-+
- err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
- &retval);
-
-@@ -1503,6 +1511,10 @@ static int show_call(struct seq_file *m, void *data)
- union acpi_object *obj;
- acpi_status status;
-
-+#ifdef CONFIG_GRKERNSEC_KMEM
-+ return -EPERM;
-+#endif
-+
- status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
- 1, asus->debug.method_id,
- &input, &output);
-diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
-index 8877b83..024cf2c 100644
---- a/drivers/platform/x86/compal-laptop.c
-+++ b/drivers/platform/x86/compal-laptop.c
-@@ -775,7 +775,7 @@ static int dmi_check_cb_extra(const struct dmi_system_id *id)
- return 1;
- }
-
--static struct dmi_system_id __initdata compal_dmi_table[] = {
-+static const struct dmi_system_id __initconst compal_dmi_table[] = {
- {
- .ident = "FL90/IFL90",
- .matches = {
-diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
-index 5a34973..bb006d3 100644
---- a/drivers/platform/x86/hdaps.c
-+++ b/drivers/platform/x86/hdaps.c
-@@ -511,7 +511,7 @@ static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id)
- "ThinkPad T42p", so the order of the entries matters.
- If your ThinkPad is not recognized, please update to latest
- BIOS. This is especially the case for some R52 ThinkPads. */
--static struct dmi_system_id __initdata hdaps_whitelist[] = {
-+static const struct dmi_system_id __initconst hdaps_whitelist[] = {
- HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad R50p", HDAPS_BOTH_AXES),
- HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"),
- HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"),
-diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
-index 2704386..2229492 100644
---- a/drivers/platform/x86/ibm_rtl.c
-+++ b/drivers/platform/x86/ibm_rtl.c
-@@ -238,7 +238,7 @@ static void rtl_teardown_sysfs(void) {
- }
-
-
--static struct dmi_system_id __initdata ibm_rtl_dmi_table[] = {
-+static const struct dmi_system_id __initconst ibm_rtl_dmi_table[] = {
- { \
- .matches = { \
- DMI_MATCH(DMI_SYS_VENDOR, "IBM"), \
-diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
-index 7f88c79..3b5a7a2 100644
---- a/drivers/platform/x86/intel_oaktrail.c
-+++ b/drivers/platform/x86/intel_oaktrail.c
-@@ -303,7 +303,7 @@ static int dmi_check_cb(const struct dmi_system_id *id)
- return 0;
- }
-
--static struct dmi_system_id __initdata oaktrail_dmi_table[] = {
-+static const struct dmi_system_id __initconst oaktrail_dmi_table[] = {
- {
- .ident = "OakTrail platform",
- .matches = {
-diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
-index f204643..df8cb2e 100644
---- a/drivers/platform/x86/msi-laptop.c
-+++ b/drivers/platform/x86/msi-laptop.c
-@@ -451,7 +451,7 @@ static int dmi_check_cb(const struct dmi_system_id *id)
- return 1;
- }
-
--static struct dmi_system_id __initdata msi_dmi_table[] = {
-+static const struct dmi_system_id __initconst msi_dmi_table[] = {
- {
- .ident = "MSI S270",
- .matches = {
-@@ -815,12 +815,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
- int result;
-
- /* allow userland write sysfs file */
-- dev_attr_bluetooth.store = store_bluetooth;
-- dev_attr_wlan.store = store_wlan;
-- dev_attr_threeg.store = store_threeg;
-- dev_attr_bluetooth.attr.mode |= S_IWUSR;
-- dev_attr_wlan.attr.mode |= S_IWUSR;
-- dev_attr_threeg.attr.mode |= S_IWUSR;
-+ pax_open_kernel();
-+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
-+ *(void **)&dev_attr_wlan.store = store_wlan;
-+ *(void **)&dev_attr_threeg.store = store_threeg;
-+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
-+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
-+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
-+ pax_close_kernel();
-
- /* disable hardware control by fn key */
- result = ec_read(MSI_STANDARD_EC_SCM_LOAD_ADDRESS, &data);
-diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
-index b96766b..909c5a0 100644
---- a/drivers/platform/x86/msi-wmi.c
-+++ b/drivers/platform/x86/msi-wmi.c
-@@ -147,7 +147,7 @@ static const struct backlight_ops msi_backlight_ops = {
- static void msi_wmi_notify(u32 value, void *context)
- {
- struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
-- static struct key_entry *key;
-+ struct key_entry *key;
- union acpi_object *obj;
- ktime_t cur;
- acpi_status status;
-diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
-index 64e1f2d..fbd2115 100644
---- a/drivers/platform/x86/samsung-laptop.c
-+++ b/drivers/platform/x86/samsung-laptop.c
-@@ -543,7 +543,7 @@ static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO,
- get_performance_level, set_performance_level);
-
-
--static struct dmi_system_id __initdata samsung_dmi_table[] = {
-+static const struct dmi_system_id __initconst samsung_dmi_table[] = {
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
-diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
-index 1e54ae7..bb6f7b2 100644
---- a/drivers/platform/x86/samsung-q10.c
-+++ b/drivers/platform/x86/samsung-q10.c
-@@ -130,7 +130,7 @@ static int __init dmi_check_callback(const struct dmi_system_id *id)
- return 1;
- }
-
--static struct dmi_system_id __initdata samsungq10_dmi_table[] = {
-+static const struct dmi_system_id __initconst samsungq10_dmi_table[] = {
- {
- .ident = "Samsung Q10",
- .matches = {
-diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
-index 40c4705..d4d41b70 100644
---- a/drivers/platform/x86/sony-laptop.c
-+++ b/drivers/platform/x86/sony-laptop.c
-@@ -3385,7 +3385,7 @@ static struct acpi_driver sony_pic_driver = {
- },
- };
-
--static struct dmi_system_id __initdata sonypi_dmi_table[] = {
-+static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
- {
- .ident = "Sony Vaio",
- .matches = {
-diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
-index 2a8d6aa..29b1bcb 100644
---- a/drivers/platform/x86/thinkpad_acpi.c
-+++ b/drivers/platform/x86/thinkpad_acpi.c
-@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
- return 0;
- }
-
--void static hotkey_mask_warn_incomplete_mask(void)
-+static void hotkey_mask_warn_incomplete_mask(void)
- {
- /* log only what the user can fix... */
- const u32 wantedmask = hotkey_driver_mask &
-@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
- }
- }
-
--static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
-- struct tp_nvram_state *newn,
-- const u32 event_mask)
--{
--
- #define TPACPI_COMPARE_KEY(__scancode, __member) \
- do { \
- if ((event_mask & (1 << __scancode)) && \
-@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
- tpacpi_hotkey_send_key(__scancode); \
- } while (0)
-
-- void issue_volchange(const unsigned int oldvol,
-- const unsigned int newvol)
-- {
-- unsigned int i = oldvol;
-+static void issue_volchange(const unsigned int oldvol,
-+ const unsigned int newvol,
-+ const u32 event_mask)
-+{
-+ unsigned int i = oldvol;
-
-- while (i > newvol) {
-- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
-- i--;
-- }
-- while (i < newvol) {
-- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
-- i++;
-- }
-+ while (i > newvol) {
-+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
-+ i--;
- }
-+ while (i < newvol) {
-+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
-+ i++;
-+ }
-+}
-
-- void issue_brightnesschange(const unsigned int oldbrt,
-- const unsigned int newbrt)
-- {
-- unsigned int i = oldbrt;
-+static void issue_brightnesschange(const unsigned int oldbrt,
-+ const unsigned int newbrt,
-+ const u32 event_mask)
-+{
-+ unsigned int i = oldbrt;
-
-- while (i > newbrt) {
-- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
-- i--;
-- }
-- while (i < newbrt) {
-- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
-- i++;
-- }
-+ while (i > newbrt) {
-+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
-+ i--;
-+ }
-+ while (i < newbrt) {
-+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
-+ i++;
- }
-+}
-
-+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
-+ struct tp_nvram_state *newn,
-+ const u32 event_mask)
-+{
- TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
- TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
- TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
-@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
- oldn->volume_level != newn->volume_level) {
- /* recently muted, or repeated mute keypress, or
- * multiple presses ending in mute */
-- issue_volchange(oldn->volume_level, newn->volume_level);
-+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
- }
- } else {
-@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
- }
- if (oldn->volume_level != newn->volume_level) {
-- issue_volchange(oldn->volume_level, newn->volume_level);
-+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
- } else if (oldn->volume_toggle != newn->volume_toggle) {
- /* repeated vol up/down keypress at end of scale ? */
- if (newn->volume_level == 0)
-@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
- /* handle brightness */
- if (oldn->brightness_level != newn->brightness_level) {
- issue_brightnesschange(oldn->brightness_level,
-- newn->brightness_level);
-+ newn->brightness_level,
-+ event_mask);
- } else if (oldn->brightness_toggle != newn->brightness_toggle) {
- /* repeated key presses that didn't change state */
- if (newn->brightness_level == 0)
-@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
- && !tp_features.bright_unkfw)
- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
- }
-+}
-
- #undef TPACPI_COMPARE_KEY
- #undef TPACPI_MAY_SEND_KEY
--}
-
- /*
- * Polling driver
-diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
-index a134c26..d024437 100644
---- a/drivers/platform/x86/wmi.c
-+++ b/drivers/platform/x86/wmi.c
-@@ -743,7 +743,7 @@ static int wmi_create_device(const struct guid_block *gblock,
- wblock->dev.class = &wmi_class;
-
- wmi_gtoa(gblock->guid, guid_string);
-- dev_set_name(&wblock->dev, guid_string);
-+ dev_set_name(&wblock->dev, "%s", guid_string);
-
- dev_set_drvdata(&wblock->dev, wblock);
-
-diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
-index b859d16..5cc6b1a 100644
---- a/drivers/pnp/pnpbios/bioscalls.c
-+++ b/drivers/pnp/pnpbios/bioscalls.c
-@@ -59,7 +59,7 @@ do { \
- set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
- } while(0)
-
--static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
-+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
- (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
-
- /*
-@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
-
- cpu = get_cpu();
- save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
-+
-+ pax_open_kernel();
- get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
-+ pax_close_kernel();
-
- /* On some boxes IRQ's during PnP BIOS calls are deadly. */
- spin_lock_irqsave(&pnp_bios_lock, flags);
-@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
- :"memory");
- spin_unlock_irqrestore(&pnp_bios_lock, flags);
-
-+ pax_open_kernel();
- get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
-+ pax_close_kernel();
-+
- put_cpu();
-
- /* If we get here and this is set then the PnP BIOS faulted on us. */
-@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
- return status;
- }
-
--void pnpbios_calls_init(union pnp_bios_install_struct *header)
-+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
- {
- int i;
-
-@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
- pnp_bios_callpoint.offset = header->fields.pm16offset;
- pnp_bios_callpoint.segment = PNP_CS16;
-
-+ pax_open_kernel();
-+
- for_each_possible_cpu(i) {
- struct desc_struct *gdt = get_cpu_gdt_table(i);
- if (!gdt)
-@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
- set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
- (unsigned long)__va(header->fields.pm16dseg));
- }
-+
-+ pax_close_kernel();
- }
-diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
-index cfe8685..524a913 100644
---- a/drivers/pnp/pnpbios/core.c
-+++ b/drivers/pnp/pnpbios/core.c
-@@ -492,7 +492,7 @@ static int __init exploding_pnp_bios(const struct dmi_system_id *d)
- return 0;
- }
-
--static struct dmi_system_id pnpbios_dmi_table[] __initdata = {
-+static const struct dmi_system_id pnpbios_dmi_table[] __initconst = {
- { /* PnPBIOS GPF on boot */
- .callback = exploding_pnp_bios,
- .ident = "Higraded P14H",
-diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
-index b0ecacb..7c9da2e 100644
---- a/drivers/pnp/resource.c
-+++ b/drivers/pnp/resource.c
-@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
- return 1;
-
- /* check if the resource is valid */
-- if (*irq < 0 || *irq > 15)
-+ if (*irq > 15)
- return 0;
-
- /* check if the resource is reserved */
-@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
- return 1;
-
- /* check if the resource is valid */
-- if (*dma < 0 || *dma == 4 || *dma > 7)
-+ if (*dma == 4 || *dma > 7)
- return 0;
-
- /* check if the resource is reserved */
-diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
-index 018de2b..bc8e317 100644
---- a/drivers/power/power_supply.h
-+++ b/drivers/power/power_supply.h
-@@ -12,12 +12,12 @@
-
- #ifdef CONFIG_SYSFS
-
--extern void power_supply_init_attrs(struct device_type *dev_type);
-+extern void power_supply_init_attrs(void);
- extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
-
- #else
-
--static inline void power_supply_init_attrs(struct device_type *dev_type) {}
-+static inline void power_supply_init_attrs(void) {}
- #define power_supply_uevent NULL
-
- #endif /* CONFIG_SYSFS */
-diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
-index 329b46b..1b17633 100644
---- a/drivers/power/power_supply_core.c
-+++ b/drivers/power/power_supply_core.c
-@@ -23,7 +23,10 @@
- struct class *power_supply_class;
- EXPORT_SYMBOL_GPL(power_supply_class);
-
--static struct device_type power_supply_dev_type;
-+extern const struct attribute_group *power_supply_attr_groups[];
-+static struct device_type power_supply_dev_type = {
-+ .groups = power_supply_attr_groups,
-+};
-
- static int __power_supply_changed_work(struct device *dev, void *data)
- {
-@@ -215,7 +218,7 @@ static int __init power_supply_class_init(void)
- return PTR_ERR(power_supply_class);
-
- power_supply_class->dev_uevent = power_supply_uevent;
-- power_supply_init_attrs(&power_supply_dev_type);
-+ power_supply_init_attrs();
-
- return 0;
- }
-diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
-index e15d4c9..83cd617 100644
---- a/drivers/power/power_supply_sysfs.c
-+++ b/drivers/power/power_supply_sysfs.c
-@@ -208,17 +208,15 @@ static struct attribute_group power_supply_attr_group = {
- .is_visible = power_supply_attr_is_visible,
- };
-
--static const struct attribute_group *power_supply_attr_groups[] = {
-+const struct attribute_group *power_supply_attr_groups[] = {
- &power_supply_attr_group,
- NULL,
- };
-
--void power_supply_init_attrs(struct device_type *dev_type)
-+void power_supply_init_attrs(void)
- {
- int i;
-
-- dev_type->groups = power_supply_attr_groups;
--
- for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
- __power_supply_attrs[i] = &power_supply_attrs[i].attr;
- }
-diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
-index adba3d6..7d7a5a6 100644
---- a/drivers/regulator/core.c
-+++ b/drivers/regulator/core.c
-@@ -2641,7 +2641,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
- struct device *dev, const struct regulator_init_data *init_data,
- void *driver_data)
- {
-- static atomic_t regulator_no = ATOMIC_INIT(0);
-+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
- struct regulator_dev *rdev;
- int ret, i;
-
-@@ -2700,7 +2700,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
- rdev->dev.class = &regulator_class;
- rdev->dev.parent = dev;
- dev_set_name(&rdev->dev, "regulator.%d",
-- atomic_inc_return(&regulator_no) - 1);
-+ atomic_inc_return_unchecked(&regulator_no) - 1);
- ret = device_register(&rdev->dev);
- if (ret != 0) {
- put_device(&rdev->dev);
-diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
-index 33f5d9a..d957d3f 100644
---- a/drivers/regulator/max8660.c
-+++ b/drivers/regulator/max8660.c
-@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
- max8660->shadow_regs[MAX8660_OVER1] = 5;
- } else {
- /* Otherwise devices can be toggled via software */
-- max8660_dcdc_ops.enable = max8660_dcdc_enable;
-- max8660_dcdc_ops.disable = max8660_dcdc_disable;
-+ pax_open_kernel();
-+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
-+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
-+ pax_close_kernel();
- }
-
- /*
-diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
-index 023d17d..74ef35b 100644
---- a/drivers/regulator/mc13892-regulator.c
-+++ b/drivers/regulator/mc13892-regulator.c
-@@ -565,10 +565,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
- }
- mc13xxx_unlock(mc13892);
-
-- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
-+ pax_open_kernel();
-+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
- = mc13892_vcam_set_mode;
-- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
-+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
- = mc13892_vcam_get_mode;
-+ pax_close_kernel();
- for (i = 0; i < pdata->num_regulators; i++) {
- init_data = &pdata->regulators[i];
- priv->regulators[i] = regulator_register(
-diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
-index cae8985..8027fa7 100644
---- a/drivers/rtc/rtc-cmos.c
-+++ b/drivers/rtc/rtc-cmos.c
-@@ -772,7 +772,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
- hpet_rtc_timer_init();
-
- /* export at least the first block of NVRAM */
-- nvram.size = address_space - NVRAM_OFFSET;
-+ pax_open_kernel();
-+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
-+ pax_close_kernel();
- retval = sysfs_create_bin_file(&dev->kobj, &nvram);
- if (retval < 0) {
- dev_dbg(dev, "can't create nvram file? %d\n", retval);
-diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
-index cace6d3..f623fda 100644
---- a/drivers/rtc/rtc-dev.c
-+++ b/drivers/rtc/rtc-dev.c
-@@ -14,6 +14,7 @@
- #include <linux/module.h>
- #include <linux/rtc.h>
- #include <linux/sched.h>
-+#include <linux/grsecurity.h>
- #include "rtc-core.h"
-
- static dev_t rtc_devt;
-@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
- if (copy_from_user(&tm, uarg, sizeof(tm)))
- return -EFAULT;
-
-+ gr_log_timechange();
-+
- return rtc_set_time(rtc, &tm);
-
- case RTC_PIE_ON:
-diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
-index 2836538..30edf9d 100644
---- a/drivers/rtc/rtc-m48t59.c
-+++ b/drivers/rtc/rtc-m48t59.c
-@@ -482,7 +482,9 @@ static int __devinit m48t59_rtc_probe(struct platform_device *pdev)
- goto out;
- }
-
-- m48t59_nvram_attr.size = pdata->offset;
-+ pax_open_kernel();
-+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
-+ pax_close_kernel();
-
- ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
- if (ret) {
-diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
-index 06ea3bc..3d9501f 100644
---- a/drivers/scsi/Kconfig
-+++ b/drivers/scsi/Kconfig
-@@ -1902,6 +1902,14 @@ config SCSI_BFA_FC
- To compile this driver as a module, choose M here. The module will
- be called bfa.
-
-+config SCSI_VIRTIO
-+ tristate "virtio-scsi support (EXPERIMENTAL)"
-+ depends on EXPERIMENTAL && VIRTIO
-+ help
-+ This is the virtual HBA driver for virtio. If the kernel will
-+ be used in a virtual machine, say Y or M.
-+
-+
- endif # SCSI_LOWLEVEL
-
- source "drivers/scsi/pcmcia/Kconfig"
-diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
-index 2b88749..351b28b 100644
---- a/drivers/scsi/Makefile
-+++ b/drivers/scsi/Makefile
-@@ -141,6 +141,7 @@ obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
- obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
- obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
- obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
-+obj-$(CONFIG_SCSI_VIRTIO) += virtio_scsi.o
- obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o
-
- obj-$(CONFIG_ARM) += arm/
-diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
-index 2e658d2..46f4afb 100644
---- a/drivers/scsi/aacraid/linit.c
-+++ b/drivers/scsi/aacraid/linit.c
-@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
- #elif defined(__devinitconst)
- static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
- #else
--static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
-+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
- #endif
- { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
- { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
-diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
-index bfd618a..bf049f8 100644
---- a/drivers/scsi/advansys.c
-+++ b/drivers/scsi/advansys.c
-@@ -8373,8 +8373,6 @@ static __le32 advansys_get_sense_buffer_dma(struct scsi_cmnd *scp)
- struct asc_board *board = shost_priv(scp->device->host);
- scp->SCp.dma_handle = dma_map_single(board->dev, scp->sense_buffer,
- SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
-- dma_cache_sync(board->dev, scp->sense_buffer,
-- SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
- return cpu_to_le32(scp->SCp.dma_handle);
- }
-
-diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
-index 14b5f8d..cc9bd26 100644
---- a/drivers/scsi/aic7xxx/aic79xx_pci.c
-+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
-@@ -827,7 +827,7 @@ ahd_pci_intr(struct ahd_softc *ahd)
- for (bit = 0; bit < 8; bit++) {
-
- if ((pci_status[i] & (0x1 << bit)) != 0) {
-- static const char *s;
-+ const char *s;
-
- s = pci_status_strings[bit];
- if (i == 7/*TARG*/ && bit == 3)
-@@ -887,23 +887,15 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
-
- for (bit = 0; bit < 8; bit++) {
-
-- if ((split_status[i] & (0x1 << bit)) != 0) {
-- static const char *s;
--
-- s = split_status_strings[bit];
-- printk(s, ahd_name(ahd),
-+ if ((split_status[i] & (0x1 << bit)) != 0)
-+ printk(split_status_strings[bit], ahd_name(ahd),
- split_status_source[i]);
-- }
-
- if (i > 1)
- continue;
-
-- if ((sg_split_status[i] & (0x1 << bit)) != 0) {
-- static const char *s;
--
-- s = split_status_strings[bit];
-- printk(s, ahd_name(ahd), "SG");
-- }
-+ if ((sg_split_status[i] & (0x1 << bit)) != 0)
-+ printk(split_status_strings[bit], ahd_name(ahd), "SG");
- }
- }
- /*
-diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
-index d5ff142..49c0ebb 100644
---- a/drivers/scsi/aic94xx/aic94xx_init.c
-+++ b/drivers/scsi/aic94xx/aic94xx_init.c
-@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
- .lldd_control_phy = asd_control_phy,
- };
-
--static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
-+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
- {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
- {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
- {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
-diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
-index 1080bcb..4a8ddd9 100644
---- a/drivers/scsi/bfa/bfa_fcpim.h
-+++ b/drivers/scsi/bfa/bfa_fcpim.h
-@@ -36,7 +36,7 @@ struct bfa_iotag_s {
-
- struct bfa_itn_s {
- bfa_isr_func_t isr;
--};
-+} __no_const;
-
- void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
- void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
-diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
-index eaac57e..4b3fc1c 100644
---- a/drivers/scsi/bfa/bfa_fcs.c
-+++ b/drivers/scsi/bfa/bfa_fcs.c
-@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
- #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
-
- static struct bfa_fcs_mod_s fcs_modules[] = {
-- { bfa_fcs_port_attach, NULL, NULL },
-- { bfa_fcs_uf_attach, NULL, NULL },
-- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
-- bfa_fcs_fabric_modexit },
-+ {
-+ .attach = bfa_fcs_port_attach,
-+ .modinit = NULL,
-+ .modexit = NULL
-+ },
-+ {
-+ .attach = bfa_fcs_uf_attach,
-+ .modinit = NULL,
-+ .modexit = NULL
-+ },
-+ {
-+ .attach = bfa_fcs_fabric_attach,
-+ .modinit = bfa_fcs_fabric_modinit,
-+ .modexit = bfa_fcs_fabric_modexit
-+ },
- };
-
- /*
-diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
-index d4f951f..6081753 100644
---- a/drivers/scsi/bfa/bfa_fcs_lport.c
-+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
-@@ -57,13 +57,21 @@ static struct {
- void (*offline) (struct bfa_fcs_lport_s *port);
- } __port_action[] = {
- {
-- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
-- bfa_fcs_lport_unknown_offline}, {
-- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
-- bfa_fcs_lport_fab_offline}, {
-- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
-- bfa_fcs_lport_n2n_offline},
-- };
-+ .init = bfa_fcs_lport_unknown_init,
-+ .online = bfa_fcs_lport_unknown_online,
-+ .offline = bfa_fcs_lport_unknown_offline
-+ },
-+ {
-+ .init = bfa_fcs_lport_fab_init,
-+ .online = bfa_fcs_lport_fab_online,
-+ .offline = bfa_fcs_lport_fab_offline
-+ },
-+ {
-+ .init = bfa_fcs_lport_n2n_init,
-+ .online = bfa_fcs_lport_n2n_online,
-+ .offline = bfa_fcs_lport_n2n_offline
-+ },
-+};
-
- /*
- * fcs_port_sm FCS logical port state machine
-diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
-index 506a3ee..4f85221 100644
---- a/drivers/scsi/bfa/bfa_ioc.h
-+++ b/drivers/scsi/bfa/bfa_ioc.h
-@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
- bfa_ioc_disable_cbfn_t disable_cbfn;
- bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
- bfa_ioc_reset_cbfn_t reset_cbfn;
--};
-+} __no_const;
-
- /*
- * IOC event notification mechanism.
-@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
- void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
- bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
- bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
--};
-+} __no_const;
-
- /*
- * Queue element to wait for room in request queue. FIFO order is
-diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
-index 2d36e48..5818d72 100644
---- a/drivers/scsi/bfa/bfa_modules.h
-+++ b/drivers/scsi/bfa/bfa_modules.h
-@@ -77,12 +77,12 @@ enum {
- \
- extern struct bfa_module_s hal_mod_ ## __mod; \
- struct bfa_module_s hal_mod_ ## __mod = { \
-- bfa_ ## __mod ## _meminfo, \
-- bfa_ ## __mod ## _attach, \
-- bfa_ ## __mod ## _detach, \
-- bfa_ ## __mod ## _start, \
-- bfa_ ## __mod ## _stop, \
-- bfa_ ## __mod ## _iocdisable, \
-+ .meminfo = bfa_ ## __mod ## _meminfo, \
-+ .attach = bfa_ ## __mod ## _attach, \
-+ .detach = bfa_ ## __mod ## _detach, \
-+ .start = bfa_ ## __mod ## _start, \
-+ .stop = bfa_ ## __mod ## _stop, \
-+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
- }
-
- #define BFA_CACHELINE_SZ (256)
-diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
-index dee1a09..24adab6 100644
---- a/drivers/scsi/bfa/bfad_debugfs.c
-+++ b/drivers/scsi/bfa/bfad_debugfs.c
-@@ -186,7 +186,7 @@ bfad_debugfs_lseek(struct file *file, loff_t offset, int orig)
- file->f_pos += offset;
- break;
- case 2:
-- file->f_pos = debug->buffer_len - offset;
-+ file->f_pos = debug->buffer_len + offset;
- break;
- default:
- return -EINVAL;
-diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
-index e7522dc..f585e84 100644
---- a/drivers/scsi/fcoe/fcoe_ctlr.c
-+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
-@@ -2030,7 +2030,7 @@ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
- */
- port_id = fip->port_id;
- if (fip->probe_tries)
-- port_id = prandom32(&fip->rnd_state) & 0xffff;
-+ port_id = prandom_u32_state(&fip->rnd_state) & 0xffff;
- else if (!port_id)
- port_id = fip->lp->wwpn & 0xffff;
- if (!port_id || port_id == 0xffff)
-@@ -2055,7 +2055,7 @@ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
- static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
- {
- fip->probe_tries = 0;
-- prandom32_seed(&fip->rnd_state, fip->lp->wwpn);
-+ prandom_seed_state(&fip->rnd_state, fip->lp->wwpn);
- fcoe_ctlr_vn_restart(fip);
- }
-
-diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
-index ee77a58..af9d518 100644
---- a/drivers/scsi/hosts.c
-+++ b/drivers/scsi/hosts.c
-@@ -42,7 +42,7 @@
- #include "scsi_logging.h"
-
-
--static atomic_t scsi_host_next_hn; /* host_no for next new host */
-+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
-
-
- static void scsi_host_cls_release(struct device *dev)
-@@ -358,7 +358,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
- * subtract one because we increment first then return, but we need to
- * know what the next host number was before increment
- */
-- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
-+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
- shost->dma_channel = 0xff;
-
- /* These three are default values which can be overridden */
-diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
-index 64c8a80..ba5263c 100644
---- a/drivers/scsi/hpsa.c
-+++ b/drivers/scsi/hpsa.c
-@@ -523,7 +523,7 @@ static inline u32 next_command(struct ctlr_info *h)
- u32 a;
-
- if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
-- return h->access.command_completed(h);
-+ return h->access->command_completed(h);
-
- if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
- a = *(h->reply_pool_head); /* Next cmd in ring buffer */
-@@ -3034,7 +3034,7 @@ static void start_io(struct ctlr_info *h)
- while (!list_empty(&h->reqQ)) {
- c = list_entry(h->reqQ.next, struct CommandList, list);
- /* can't do anything if fifo is full */
-- if ((h->access.fifo_full(h))) {
-+ if ((h->access->fifo_full(h))) {
- dev_warn(&h->pdev->dev, "fifo full\n");
- break;
- }
-@@ -3044,7 +3044,7 @@ static void start_io(struct ctlr_info *h)
- h->Qdepth--;
-
- /* Tell the controller execute command */
-- h->access.submit_command(h, c);
-+ h->access->submit_command(h, c);
-
- /* Put job onto the completed Q */
- addQ(&h->cmpQ, c);
-@@ -3053,17 +3053,17 @@ static void start_io(struct ctlr_info *h)
-
- static inline unsigned long get_next_completion(struct ctlr_info *h)
- {
-- return h->access.command_completed(h);
-+ return h->access->command_completed(h);
- }
-
- static inline bool interrupt_pending(struct ctlr_info *h)
- {
-- return h->access.intr_pending(h);
-+ return h->access->intr_pending(h);
- }
-
- static inline long interrupt_not_for_us(struct ctlr_info *h)
- {
-- return (h->access.intr_pending(h) == 0) ||
-+ return (h->access->intr_pending(h) == 0) ||
- (h->interrupts_enabled == 0);
- }
-
-@@ -3963,7 +3963,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
- if (prod_index < 0)
- return -ENODEV;
- h->product_name = products[prod_index].product_name;
-- h->access = *(products[prod_index].access);
-+ h->access = products[prod_index].access;
-
- if (hpsa_board_disabled(h->pdev)) {
- dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
-@@ -4208,7 +4208,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
-
- assert_spin_locked(&lockup_detector_lock);
- remove_ctlr_from_lockup_detector_list(h);
-- h->access.set_intr_mask(h, HPSA_INTR_OFF);
-+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
- spin_lock_irqsave(&h->lock, flags);
- h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
- spin_unlock_irqrestore(&h->lock, flags);
-@@ -4384,7 +4384,7 @@ reinit_after_soft_reset:
- }
-
- /* make sure the board interrupts are off */
-- h->access.set_intr_mask(h, HPSA_INTR_OFF);
-+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
-
- if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
- goto clean2;
-@@ -4418,7 +4418,7 @@ reinit_after_soft_reset:
- * fake ones to scoop up any residual completions.
- */
- spin_lock_irqsave(&h->lock, flags);
-- h->access.set_intr_mask(h, HPSA_INTR_OFF);
-+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
- spin_unlock_irqrestore(&h->lock, flags);
- free_irq(h->intr[h->intr_mode], h);
- rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
-@@ -4437,9 +4437,9 @@ reinit_after_soft_reset:
- dev_info(&h->pdev->dev, "Board READY.\n");
- dev_info(&h->pdev->dev,
- "Waiting for stale completions to drain.\n");
-- h->access.set_intr_mask(h, HPSA_INTR_ON);
-+ h->access->set_intr_mask(h, HPSA_INTR_ON);
- msleep(10000);
-- h->access.set_intr_mask(h, HPSA_INTR_OFF);
-+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
-
- rc = controller_reset_failed(h->cfgtable);
- if (rc)
-@@ -4460,7 +4460,7 @@ reinit_after_soft_reset:
- }
-
- /* Turn the interrupts on so we can service requests */
-- h->access.set_intr_mask(h, HPSA_INTR_ON);
-+ h->access->set_intr_mask(h, HPSA_INTR_ON);
-
- hpsa_hba_inquiry(h);
- hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
-@@ -4512,7 +4512,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
- * To write all data in the battery backed cache to disks
- */
- hpsa_flush_cache(h);
-- h->access.set_intr_mask(h, HPSA_INTR_OFF);
-+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
- free_irq(h->intr[h->intr_mode], h);
- #ifdef CONFIG_PCI_MSI
- if (h->msix_vector)
-@@ -4676,7 +4676,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
- return;
- }
- /* Change the access methods to the performant access methods */
-- h->access = SA5_performant_access;
-+ h->access = &SA5_performant_access;
- h->transMethod = CFGTBL_Trans_Performant;
- }
-
-diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
-index c721509..3a9df95 100644
---- a/drivers/scsi/hpsa.h
-+++ b/drivers/scsi/hpsa.h
-@@ -73,7 +73,7 @@ struct ctlr_info {
- unsigned int msix_vector;
- unsigned int msi_vector;
- int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
-- struct access_method access;
-+ struct access_method *access;
-
- /* queue and queue Info */
- struct list_head reqQ;
-@@ -351,19 +351,19 @@ static bool SA5_performant_intr_pending(struct ctlr_info *h)
- }
-
- static struct access_method SA5_access = {
-- SA5_submit_command,
-- SA5_intr_mask,
-- SA5_fifo_full,
-- SA5_intr_pending,
-- SA5_completed,
-+ .submit_command = SA5_submit_command,
-+ .set_intr_mask = SA5_intr_mask,
-+ .fifo_full = SA5_fifo_full,
-+ .intr_pending = SA5_intr_pending,
-+ .command_completed = SA5_completed,
- };
-
- static struct access_method SA5_performant_access = {
-- SA5_submit_command,
-- SA5_performant_intr_mask,
-- SA5_fifo_full,
-- SA5_performant_intr_pending,
-- SA5_performant_completed,
-+ .submit_command = SA5_submit_command,
-+ .set_intr_mask = SA5_performant_intr_mask,
-+ .fifo_full = SA5_fifo_full,
-+ .intr_pending = SA5_performant_intr_pending,
-+ .command_completed = SA5_performant_completed,
- };
-
- struct board_type {
-diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
-index 9de9db2..1e09660 100644
---- a/drivers/scsi/libfc/fc_exch.c
-+++ b/drivers/scsi/libfc/fc_exch.c
-@@ -105,12 +105,12 @@ struct fc_exch_mgr {
- * all together if not used XXX
- */
- struct {
-- atomic_t no_free_exch;
-- atomic_t no_free_exch_xid;
-- atomic_t xid_not_found;
-- atomic_t xid_busy;
-- atomic_t seq_not_found;
-- atomic_t non_bls_resp;
-+ atomic_unchecked_t no_free_exch;
-+ atomic_unchecked_t no_free_exch_xid;
-+ atomic_unchecked_t xid_not_found;
-+ atomic_unchecked_t xid_busy;
-+ atomic_unchecked_t seq_not_found;
-+ atomic_unchecked_t non_bls_resp;
- } stats;
- };
-
-@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
- /* allocate memory for exchange */
- ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
- if (!ep) {
-- atomic_inc(&mp->stats.no_free_exch);
-+ atomic_inc_unchecked(&mp->stats.no_free_exch);
- goto out;
- }
- memset(ep, 0, sizeof(*ep));
-@@ -780,7 +780,7 @@ out:
- return ep;
- err:
- spin_unlock_bh(&pool->lock);
-- atomic_inc(&mp->stats.no_free_exch_xid);
-+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
- mempool_free(ep, mp->ep_pool);
- return NULL;
- }
-@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
- xid = ntohs(fh->fh_ox_id); /* we originated exch */
- ep = fc_exch_find(mp, xid);
- if (!ep) {
-- atomic_inc(&mp->stats.xid_not_found);
-+ atomic_inc_unchecked(&mp->stats.xid_not_found);
- reject = FC_RJT_OX_ID;
- goto out;
- }
-@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
- ep = fc_exch_find(mp, xid);
- if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
- if (ep) {
-- atomic_inc(&mp->stats.xid_busy);
-+ atomic_inc_unchecked(&mp->stats.xid_busy);
- reject = FC_RJT_RX_ID;
- goto rel;
- }
-@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
- }
- xid = ep->xid; /* get our XID */
- } else if (!ep) {
-- atomic_inc(&mp->stats.xid_not_found);
-+ atomic_inc_unchecked(&mp->stats.xid_not_found);
- reject = FC_RJT_RX_ID; /* XID not found */
- goto out;
- }
-@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
- } else {
- sp = &ep->seq;
- if (sp->id != fh->fh_seq_id) {
-- atomic_inc(&mp->stats.seq_not_found);
-+ atomic_inc_unchecked(&mp->stats.seq_not_found);
- if (f_ctl & FC_FC_END_SEQ) {
- /*
- * Update sequence_id based on incoming last
-@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
-
- ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
- if (!ep) {
-- atomic_inc(&mp->stats.xid_not_found);
-+ atomic_inc_unchecked(&mp->stats.xid_not_found);
- goto out;
- }
- if (ep->esb_stat & ESB_ST_COMPLETE) {
-- atomic_inc(&mp->stats.xid_not_found);
-+ atomic_inc_unchecked(&mp->stats.xid_not_found);
- goto rel;
- }
- if (ep->rxid == FC_XID_UNKNOWN)
- ep->rxid = ntohs(fh->fh_rx_id);
- if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
-- atomic_inc(&mp->stats.xid_not_found);
-+ atomic_inc_unchecked(&mp->stats.xid_not_found);
- goto rel;
- }
- if (ep->did != ntoh24(fh->fh_s_id) &&
- ep->did != FC_FID_FLOGI) {
-- atomic_inc(&mp->stats.xid_not_found);
-+ atomic_inc_unchecked(&mp->stats.xid_not_found);
- goto rel;
- }
- sof = fr_sof(fp);
-@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
- sp->ssb_stat |= SSB_ST_RESP;
- sp->id = fh->fh_seq_id;
- } else if (sp->id != fh->fh_seq_id) {
-- atomic_inc(&mp->stats.seq_not_found);
-+ atomic_inc_unchecked(&mp->stats.seq_not_found);
- goto rel;
- }
-
-@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
- sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
-
- if (!sp)
-- atomic_inc(&mp->stats.xid_not_found);
-+ atomic_inc_unchecked(&mp->stats.xid_not_found);
- else
-- atomic_inc(&mp->stats.non_bls_resp);
-+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
-
- fc_frame_free(fp);
- }
-diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
-index 5e170e3..1e87efc 100644
---- a/drivers/scsi/libsas/sas_ata.c
-+++ b/drivers/scsi/libsas/sas_ata.c
-@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
- .postreset = ata_std_postreset,
- .error_handler = ata_std_error_handler,
- .post_internal_cmd = sas_ata_post_internal,
-- .qc_defer = ata_std_qc_defer,
-+ .qc_defer = ata_std_qc_defer,
- .qc_prep = ata_noop_qc_prep,
- .qc_issue = sas_ata_qc_issue,
- .qc_fill_rtf = sas_ata_qc_fill_rtf,
-diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
-index bb4c8e0..f33d849 100644
---- a/drivers/scsi/lpfc/lpfc.h
-+++ b/drivers/scsi/lpfc/lpfc.h
-@@ -425,7 +425,7 @@ struct lpfc_vport {
- struct dentry *debug_nodelist;
- struct dentry *vport_debugfs_root;
- struct lpfc_debugfs_trc *disc_trc;
-- atomic_t disc_trc_cnt;
-+ atomic_unchecked_t disc_trc_cnt;
- #endif
- uint8_t stat_data_enabled;
- uint8_t stat_data_blocked;
-@@ -835,8 +835,8 @@ struct lpfc_hba {
- struct timer_list fabric_block_timer;
- unsigned long bit_flags;
- #define FABRIC_COMANDS_BLOCKED 0
-- atomic_t num_rsrc_err;
-- atomic_t num_cmd_success;
-+ atomic_unchecked_t num_rsrc_err;
-+ atomic_unchecked_t num_cmd_success;
- unsigned long last_rsrc_error_time;
- unsigned long last_ramp_down_time;
- unsigned long last_ramp_up_time;
-@@ -866,7 +866,7 @@ struct lpfc_hba {
-
- struct dentry *debug_slow_ring_trc;
- struct lpfc_debugfs_trc *slow_ring_trc;
-- atomic_t slow_ring_trc_cnt;
-+ atomic_unchecked_t slow_ring_trc_cnt;
- /* iDiag debugfs sub-directory */
- struct dentry *idiag_root;
- struct dentry *idiag_pci_cfg;
-diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
-index 2838259..35b747a 100644
---- a/drivers/scsi/lpfc/lpfc_debugfs.c
-+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
-@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
-
- #include <linux/debugfs.h>
-
--static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
-+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
- static unsigned long lpfc_debugfs_start_time = 0L;
-
- /* iDiag */
-@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
- lpfc_debugfs_enable = 0;
-
- len = 0;
-- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
-+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
- (lpfc_debugfs_max_disc_trc - 1);
- for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
- dtp = vport->disc_trc + i;
-@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
- lpfc_debugfs_enable = 0;
-
- len = 0;
-- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
-+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
- (lpfc_debugfs_max_slow_ring_trc - 1);
- for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
- dtp = phba->slow_ring_trc + i;
-@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
- !vport || !vport->disc_trc)
- return;
-
-- index = atomic_inc_return(&vport->disc_trc_cnt) &
-+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
- (lpfc_debugfs_max_disc_trc - 1);
- dtp = vport->disc_trc + index;
- dtp->fmt = fmt;
- dtp->data1 = data1;
- dtp->data2 = data2;
- dtp->data3 = data3;
-- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
-+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
- dtp->jif = jiffies;
- #endif
- return;
-@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
- !phba || !phba->slow_ring_trc)
- return;
-
-- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
-+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
- (lpfc_debugfs_max_slow_ring_trc - 1);
- dtp = phba->slow_ring_trc + index;
- dtp->fmt = fmt;
- dtp->data1 = data1;
- dtp->data2 = data2;
- dtp->data3 = data3;
-- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
-+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
- dtp->jif = jiffies;
- #endif
- return;
-@@ -1151,7 +1151,7 @@ lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
- pos = file->f_pos + off;
- break;
- case 2:
-- pos = debug->len - off;
-+ pos = debug->len + off;
- }
- return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
- }
-@@ -3986,7 +3986,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
- "slow_ring buffer\n");
- goto debug_failed;
- }
-- atomic_set(&phba->slow_ring_trc_cnt, 0);
-+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
- memset(phba->slow_ring_trc, 0,
- (sizeof(struct lpfc_debugfs_trc) *
- lpfc_debugfs_max_slow_ring_trc));
-@@ -4032,7 +4032,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
- "buffer\n");
- goto debug_failed;
- }
-- atomic_set(&vport->disc_trc_cnt, 0);
-+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
-
- snprintf(name, sizeof(name), "discovery_trace");
- vport->debug_disc_trc =
-diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
-index 55bc4fc..a2a109c 100644
---- a/drivers/scsi/lpfc/lpfc_init.c
-+++ b/drivers/scsi/lpfc/lpfc_init.c
-@@ -10027,8 +10027,10 @@ lpfc_init(void)
- printk(LPFC_COPYRIGHT "\n");
-
- if (lpfc_enable_npiv) {
-- lpfc_transport_functions.vport_create = lpfc_vport_create;
-- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
-+ pax_open_kernel();
-+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
-+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
-+ pax_close_kernel();
- }
- lpfc_transport_template =
- fc_attach_transport(&lpfc_transport_functions);
-diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
-index 2e1e54e..1af0a0d 100644
---- a/drivers/scsi/lpfc/lpfc_scsi.c
-+++ b/drivers/scsi/lpfc/lpfc_scsi.c
-@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
- uint32_t evt_posted;
-
- spin_lock_irqsave(&phba->hbalock, flags);
-- atomic_inc(&phba->num_rsrc_err);
-+ atomic_inc_unchecked(&phba->num_rsrc_err);
- phba->last_rsrc_error_time = jiffies;
-
- if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
-@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
- unsigned long flags;
- struct lpfc_hba *phba = vport->phba;
- uint32_t evt_posted;
-- atomic_inc(&phba->num_cmd_success);
-+ atomic_inc_unchecked(&phba->num_cmd_success);
-
- if (vport->cfg_lun_queue_depth <= queue_depth)
- return;
-@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
- unsigned long num_rsrc_err, num_cmd_success;
- int i;
-
-- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
-- num_cmd_success = atomic_read(&phba->num_cmd_success);
-+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
-+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
-
- vports = lpfc_create_vport_work_array(phba);
- if (vports != NULL)
-@@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
- }
- }
- lpfc_destroy_vport_work_array(phba, vports);
-- atomic_set(&phba->num_rsrc_err, 0);
-- atomic_set(&phba->num_cmd_success, 0);
-+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
-+ atomic_set_unchecked(&phba->num_cmd_success, 0);
- }
-
- /**
-@@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
- }
- }
- lpfc_destroy_vport_work_array(phba, vports);
-- atomic_set(&phba->num_rsrc_err, 0);
-- atomic_set(&phba->num_cmd_success, 0);
-+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
-+ atomic_set_unchecked(&phba->num_cmd_success, 0);
- }
-
- /**
-diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
-index 01780a9..e756c24 100644
---- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
-+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
-@@ -1532,7 +1532,7 @@ _scsih_get_resync(struct device *dev)
- {
- struct scsi_device *sdev = to_scsi_device(dev);
- struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
-- static struct _raid_device *raid_device;
-+ struct _raid_device *raid_device;
- unsigned long flags;
- Mpi2RaidVolPage0_t vol_pg0;
- Mpi2ConfigReply_t mpi_reply;
-@@ -1571,7 +1571,7 @@ _scsih_get_state(struct device *dev)
- {
- struct scsi_device *sdev = to_scsi_device(dev);
- struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
-- static struct _raid_device *raid_device;
-+ struct _raid_device *raid_device;
- unsigned long flags;
- Mpi2RaidVolPage0_t vol_pg0;
- Mpi2ConfigReply_t mpi_reply;
-@@ -6532,7 +6532,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
- struct fw_event_work *fw_event)
- {
- Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
-- static struct _raid_device *raid_device;
-+ struct _raid_device *raid_device;
- unsigned long flags;
- u16 handle;
-
-@@ -7005,7 +7005,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
- u64 sas_address;
- struct _sas_device *sas_device;
- struct _sas_node *expander_device;
-- static struct _raid_device *raid_device;
-+ struct _raid_device *raid_device;
- u8 retry_count;
-
- printk(MPT2SAS_INFO_FMT "scan devices: start\n", ioc->name);
-diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
-index 5163edb..7b142bc 100644
---- a/drivers/scsi/pmcraid.c
-+++ b/drivers/scsi/pmcraid.c
-@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
- res->scsi_dev = scsi_dev;
- scsi_dev->hostdata = res;
- res->change_detected = 0;
-- atomic_set(&res->read_failures, 0);
-- atomic_set(&res->write_failures, 0);
-+ atomic_set_unchecked(&res->read_failures, 0);
-+ atomic_set_unchecked(&res->write_failures, 0);
- rc = 0;
- }
- spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
-@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
-
- /* If this was a SCSI read/write command keep count of errors */
- if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
-- atomic_inc(&res->read_failures);
-+ atomic_inc_unchecked(&res->read_failures);
- else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
-- atomic_inc(&res->write_failures);
-+ atomic_inc_unchecked(&res->write_failures);
-
- if (!RES_IS_GSCSI(res->cfg_entry) &&
- masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
-@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
- * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
- * hrrq_id assigned here in queuecommand
- */
-- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
-+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
- pinstance->num_hrrq;
- cmd->cmd_done = pmcraid_io_done;
-
-@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
- * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
- * hrrq_id assigned here in queuecommand
- */
-- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
-+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
- pinstance->num_hrrq;
-
- if (request_size) {
-@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
-
- pinstance = container_of(workp, struct pmcraid_instance, worker_q);
- /* add resources only after host is added into system */
-- if (!atomic_read(&pinstance->expose_resources))
-+ if (!atomic_read_unchecked(&pinstance->expose_resources))
- return;
-
- fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
-@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
- init_waitqueue_head(&pinstance->reset_wait_q);
-
- atomic_set(&pinstance->outstanding_cmds, 0);
-- atomic_set(&pinstance->last_message_id, 0);
-- atomic_set(&pinstance->expose_resources, 0);
-+ atomic_set_unchecked(&pinstance->last_message_id, 0);
-+ atomic_set_unchecked(&pinstance->expose_resources, 0);
-
- INIT_LIST_HEAD(&pinstance->free_res_q);
- INIT_LIST_HEAD(&pinstance->used_res_q);
-@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
- /* Schedule worker thread to handle CCN and take care of adding and
- * removing devices to OS
- */
-- atomic_set(&pinstance->expose_resources, 1);
-+ atomic_set_unchecked(&pinstance->expose_resources, 1);
- schedule_work(&pinstance->worker_q);
- return rc;
-
-diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
-index ca496c7..9c791d5 100644
---- a/drivers/scsi/pmcraid.h
-+++ b/drivers/scsi/pmcraid.h
-@@ -748,7 +748,7 @@ struct pmcraid_instance {
- struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
-
- /* Message id as filled in last fired IOARCB, used to identify HRRQ */
-- atomic_t last_message_id;
-+ atomic_unchecked_t last_message_id;
-
- /* configuration table */
- struct pmcraid_config_table *cfg_table;
-@@ -777,7 +777,7 @@ struct pmcraid_instance {
- atomic_t outstanding_cmds;
-
- /* should add/delete resources to mid-layer now ?*/
-- atomic_t expose_resources;
-+ atomic_unchecked_t expose_resources;
-
-
-
-@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
- struct pmcraid_config_table_entry_ext cfg_entry_ext;
- };
- struct scsi_device *scsi_dev; /* Link scsi_device structure */
-- atomic_t read_failures; /* count of failed READ commands */
-- atomic_t write_failures; /* count of failed WRITE commands */
-+ atomic_unchecked_t read_failures; /* count of failed READ commands */
-+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
-
- /* To indicate add/delete/modify during CCN */
- u8 change_detected;
-diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
-index 82a5ca6..97ace97 100644
---- a/drivers/scsi/qla2xxx/qla_os.c
-+++ b/drivers/scsi/qla2xxx/qla_os.c
-@@ -1429,8 +1429,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
- !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
- /* Ok, a 64bit DMA mask is applicable. */
- ha->flags.enable_64bit_addressing = 1;
-- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
-- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
-+ pax_open_kernel();
-+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
-+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
-+ pax_close_kernel();
- return;
- }
- }
-diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
-index fd5edc6..4906148 100644
---- a/drivers/scsi/qla4xxx/ql4_def.h
-+++ b/drivers/scsi/qla4xxx/ql4_def.h
-@@ -258,7 +258,7 @@ struct ddb_entry {
- * (4000 only) */
- atomic_t relogin_timer; /* Max Time to wait for
- * relogin to complete */
-- atomic_t relogin_retry_count; /* Num of times relogin has been
-+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
- * retried */
- uint32_t default_time2wait; /* Default Min time between
- * relogins (+aens) */
-diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
-index 4169c8b..a8b896b 100644
---- a/drivers/scsi/qla4xxx/ql4_os.c
-+++ b/drivers/scsi/qla4xxx/ql4_os.c
-@@ -2104,12 +2104,12 @@ void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
- */
- if (!iscsi_is_session_online(cls_sess)) {
- /* Reset retry relogin timer */
-- atomic_inc(&ddb_entry->relogin_retry_count);
-+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "%s: index[%d] relogin timed out-retrying"
- " relogin (%d), retry (%d)\n", __func__,
- ddb_entry->fw_ddb_index,
-- atomic_read(&ddb_entry->relogin_retry_count),
-+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
- ddb_entry->default_time2wait + 4));
- set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
- atomic_set(&ddb_entry->retry_relogin_timer,
-@@ -3835,7 +3835,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
-
- atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
- atomic_set(&ddb_entry->relogin_timer, 0);
-- atomic_set(&ddb_entry->relogin_retry_count, 0);
-+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
-
- ddb_entry->default_relogin_timeout =
- le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
-diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
-index 831db24..1b88f70 100644
---- a/drivers/scsi/scsi.c
-+++ b/drivers/scsi/scsi.c
-@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
- unsigned long timeout;
- int rtn = 0;
-
-- atomic_inc(&cmd->device->iorequest_cnt);
-+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
-
- /* check if the device is still usable */
- if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
-@@ -837,7 +837,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
-
- good_bytes = scsi_bufflen(cmd);
- if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
-- int old_good_bytes = good_bytes;
-+ unsigned int old_good_bytes = good_bytes;
- drv = scsi_cmd_to_driver(cmd);
- if (drv->done)
- good_bytes = drv->done(cmd);
-diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
-index f6a464a..797b84d 100644
---- a/drivers/scsi/scsi_lib.c
-+++ b/drivers/scsi/scsi_lib.c
-@@ -1437,7 +1437,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
- shost = sdev->host;
- scsi_init_cmd_errh(cmd);
- cmd->result = DID_NO_CONNECT << 16;
-- atomic_inc(&cmd->device->iorequest_cnt);
-+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
-
- /*
- * SCSI request completion path will do scsi_device_unbusy(),
-@@ -1463,9 +1463,9 @@ static void scsi_softirq_done(struct request *rq)
-
- INIT_LIST_HEAD(&cmd->eh_entry);
-
-- atomic_inc(&cmd->device->iodone_cnt);
-+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
- if (cmd->result)
-- atomic_inc(&cmd->device->ioerr_cnt);
-+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
-
- disposition = scsi_decide_disposition(cmd);
- if (disposition != SUCCESS &&
-diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
-index 88bc82e..a2aa1b0 100644
---- a/drivers/scsi/scsi_sysfs.c
-+++ b/drivers/scsi/scsi_sysfs.c
-@@ -652,7 +652,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
- char *buf) \
- { \
- struct scsi_device *sdev = to_scsi_device(dev); \
-- unsigned long long count = atomic_read(&sdev->field); \
-+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
- return snprintf(buf, 20, "0x%llx\n", count); \
- } \
- static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
-diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
-index 84a1fdf..693b0d6 100644
---- a/drivers/scsi/scsi_tgt_lib.c
-+++ b/drivers/scsi/scsi_tgt_lib.c
-@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
- int err;
-
- dprintk("%lx %u\n", uaddr, len);
-- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
-+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
- if (err) {
- /*
- * TODO: need to fixup sg_tablesize, max_segment_size,
-diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
-index 1b21491..1b7f60e 100644
---- a/drivers/scsi/scsi_transport_fc.c
-+++ b/drivers/scsi/scsi_transport_fc.c
-@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
- * Netlink Infrastructure
- */
-
--static atomic_t fc_event_seq;
-+static atomic_unchecked_t fc_event_seq;
-
- /**
- * fc_get_event_number - Obtain the next sequential FC event number
-@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
- u32
- fc_get_event_number(void)
- {
-- return atomic_add_return(1, &fc_event_seq);
-+ return atomic_add_return_unchecked(1, &fc_event_seq);
- }
- EXPORT_SYMBOL(fc_get_event_number);
-
-@@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
- {
- int error;
-
-- atomic_set(&fc_event_seq, 0);
-+ atomic_set_unchecked(&fc_event_seq, 0);
-
- error = transport_class_register(&fc_host_class);
- if (error)
-@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
- char *cp;
-
- *val = simple_strtoul(buf, &cp, 0);
-- if ((*cp && (*cp != '\n')) || (*val < 0))
-+ if (*cp && (*cp != '\n'))
- return -EINVAL;
- /*
- * Check for overflow; dev_loss_tmo is u32
-diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
-index c874458..568a977 100644
---- a/drivers/scsi/scsi_transport_iscsi.c
-+++ b/drivers/scsi/scsi_transport_iscsi.c
-@@ -79,7 +79,7 @@ struct iscsi_internal {
- struct transport_container session_cont;
- };
-
--static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
-+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
- static struct workqueue_struct *iscsi_eh_timer_workq;
-
- static DEFINE_IDA(iscsi_sess_ida);
-@@ -1062,7 +1062,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
- int err;
-
- ihost = shost->shost_data;
-- session->sid = atomic_add_return(1, &iscsi_session_nr);
-+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
-
- if (target_id == ISCSI_MAX_TARGET) {
- id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
-@@ -2663,7 +2663,7 @@ static __init int iscsi_transport_init(void)
- printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
- ISCSI_TRANSPORT_VERSION);
-
-- atomic_set(&iscsi_session_nr, 0);
-+ atomic_set_unchecked(&iscsi_session_nr, 0);
-
- err = class_register(&iscsi_transport_class);
- if (err)
-diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
-index 21a045e..ec89e03 100644
---- a/drivers/scsi/scsi_transport_srp.c
-+++ b/drivers/scsi/scsi_transport_srp.c
-@@ -33,7 +33,7 @@
- #include "scsi_transport_srp_internal.h"
-
- struct srp_host_attrs {
-- atomic_t next_port_id;
-+ atomic_unchecked_t next_port_id;
- };
- #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
-
-@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
- struct Scsi_Host *shost = dev_to_shost(dev);
- struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
-
-- atomic_set(&srp_host->next_port_id, 0);
-+ atomic_set_unchecked(&srp_host->next_port_id, 0);
- return 0;
- }
-
-@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
- memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
- rport->roles = ids->roles;
-
-- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
-+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
- dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
-
- transport_setup_device(&rport->dev);
-diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
-index 5c6b5f5..015ec9d 100644
---- a/drivers/scsi/sd.c
-+++ b/drivers/scsi/sd.c
-@@ -105,7 +105,7 @@ static void sd_shutdown(struct device *);
- static int sd_suspend(struct device *, pm_message_t state);
- static int sd_resume(struct device *);
- static void sd_rescan(struct device *);
--static int sd_done(struct scsi_cmnd *);
-+static unsigned int sd_done(struct scsi_cmnd *);
- static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
- static void scsi_disk_release(struct device *cdev);
- static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
-@@ -1390,7 +1390,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
- *
- * Note: potentially run from within an ISR. Must not block.
- **/
--static int sd_done(struct scsi_cmnd *SCpnt)
-+static unsigned int sd_done(struct scsi_cmnd *SCpnt)
- {
- int result = SCpnt->result;
- unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
-@@ -2635,7 +2635,7 @@ static int sd_probe(struct device *dev)
- device_initialize(&sdkp->dev);
- sdkp->dev.parent = dev;
- sdkp->dev.class = &sd_disk_class;
-- dev_set_name(&sdkp->dev, dev_name(dev));
-+ dev_set_name(&sdkp->dev, "%s", dev_name(dev));
-
- if (device_add(&sdkp->dev))
- goto out_free_index;
-diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
-index 2d25616..7502cde 100644
---- a/drivers/scsi/sg.c
-+++ b/drivers/scsi/sg.c
-@@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
- sdp->disk->disk_name,
- MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
- NULL,
-- (char *)arg);
-+ (char __user *)arg);
- case BLKTRACESTART:
- return blk_trace_startstop(sdp->device->request_queue, 1);
- case BLKTRACESTOP:
-@@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
- const struct file_operations * fops;
- };
-
--static struct sg_proc_leaf sg_proc_leaf_arr[] = {
-+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
- {"allow_dio", &adio_fops},
- {"debug", &debug_fops},
- {"def_reserved_size", &dressz_fops},
-@@ -2327,7 +2327,7 @@ sg_proc_init(void)
- {
- int k, mask;
- int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
-- struct sg_proc_leaf * leaf;
-+ const struct sg_proc_leaf * leaf;
-
- sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
- if (!sg_proc_sgp)
-diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
-index 5fc97d2..5f26ccd 100644
---- a/drivers/scsi/sr.c
-+++ b/drivers/scsi/sr.c
-@@ -78,7 +78,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM);
- static DEFINE_MUTEX(sr_mutex);
- static int sr_probe(struct device *);
- static int sr_remove(struct device *);
--static int sr_done(struct scsi_cmnd *);
-+static unsigned int sr_done(struct scsi_cmnd *);
-
- static struct scsi_driver sr_template = {
- .owner = THIS_MODULE,
-@@ -296,11 +296,11 @@ do_tur:
- * It will be notified on the end of a SCSI read / write, and will take one
- * of several actions based on success or failure.
- */
--static int sr_done(struct scsi_cmnd *SCpnt)
-+static unsigned int sr_done(struct scsi_cmnd *SCpnt)
- {
- int result = SCpnt->result;
-- int this_count = scsi_bufflen(SCpnt);
-- int good_bytes = (result == 0 ? this_count : 0);
-+ unsigned int this_count = scsi_bufflen(SCpnt);
-+ unsigned int good_bytes = (result == 0 ? this_count : 0);
- int block_sectors = 0;
- long error_sector;
- struct scsi_cd *cd = scsi_cd(SCpnt->request->rq_disk);
-diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
-new file mode 100644
-index 0000000..06c9d30
---- /dev/null
-+++ b/drivers/scsi/virtio_scsi.c
-@@ -0,0 +1,838 @@
-+/*
-+ * Virtio SCSI HBA driver
-+ *
-+ * Copyright IBM Corp. 2010
-+ * Copyright Red Hat, Inc. 2011
-+ *
-+ * Authors:
-+ * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
-+ * Paolo Bonzini <pbonzini@redhat.com>
-+ *
-+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
-+ * See the COPYING file in the top-level directory.
-+ *
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/mempool.h>
-+#include <linux/virtio.h>
-+#include <linux/virtio_ids.h>
-+#include <linux/virtio_config.h>
-+#include <linux/virtio_scsi.h>
-+#include <scsi/scsi_host.h>
-+#include <scsi/scsi_device.h>
-+#include <scsi/scsi_cmnd.h>
-+
-+#define VIRTIO_SCSI_MEMPOOL_SZ 64
-+#define VIRTIO_SCSI_EVENT_LEN 8
-+
-+/* Command queue element */
-+struct virtio_scsi_cmd {
-+ struct scsi_cmnd *sc;
-+ struct completion *comp;
-+ union {
-+ struct virtio_scsi_cmd_req cmd;
-+ struct virtio_scsi_ctrl_tmf_req tmf;
-+ struct virtio_scsi_ctrl_an_req an;
-+ } req;
-+ union {
-+ struct virtio_scsi_cmd_resp cmd;
-+ struct virtio_scsi_ctrl_tmf_resp tmf;
-+ struct virtio_scsi_ctrl_an_resp an;
-+ struct virtio_scsi_event evt;
-+ } resp;
-+} ____cacheline_aligned_in_smp;
-+
-+struct virtio_scsi_event_node {
-+ struct virtio_scsi *vscsi;
-+ struct virtio_scsi_event event;
-+ struct work_struct work;
-+};
-+
-+struct virtio_scsi_vq {
-+ /* Protects vq */
-+ spinlock_t vq_lock;
-+
-+ struct virtqueue *vq;
-+};
-+
-+/* Per-target queue state */
-+struct virtio_scsi_target_state {
-+ /* Protects sg. Lock hierarchy is tgt_lock -> vq_lock. */
-+ spinlock_t tgt_lock;
-+
-+ /* For sglist construction when adding commands to the virtqueue. */
-+ struct scatterlist sg[];
-+};
-+
-+/* Driver instance state */
-+struct virtio_scsi {
-+ struct virtio_device *vdev;
-+
-+ struct virtio_scsi_vq ctrl_vq;
-+ struct virtio_scsi_vq event_vq;
-+ struct virtio_scsi_vq req_vq;
-+
-+ /* Get some buffers ready for event vq */
-+ struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];
-+
-+ struct virtio_scsi_target_state *tgt[];
-+};
-+
-+static struct kmem_cache *virtscsi_cmd_cache;
-+static mempool_t *virtscsi_cmd_pool;
-+
-+static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
-+{
-+ return vdev->priv;
-+}
-+
-+static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
-+{
-+ if (!resid)
-+ return;
-+
-+ if (!scsi_bidi_cmnd(sc)) {
-+ scsi_set_resid(sc, resid);
-+ return;
-+ }
-+
-+ scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
-+ scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
-+}
-+
-+/**
-+ * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
-+ *
-+ * Called with vq_lock held.
-+ */
-+static void virtscsi_complete_cmd(void *buf)
-+{
-+ struct virtio_scsi_cmd *cmd = buf;
-+ struct scsi_cmnd *sc = cmd->sc;
-+ struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
-+
-+ dev_dbg(&sc->device->sdev_gendev,
-+ "cmd %p response %u status %#02x sense_len %u\n",
-+ sc, resp->response, resp->status, resp->sense_len);
-+
-+ sc->result = resp->status;
-+ virtscsi_compute_resid(sc, resp->resid);
-+ switch (resp->response) {
-+ case VIRTIO_SCSI_S_OK:
-+ set_host_byte(sc, DID_OK);
-+ break;
-+ case VIRTIO_SCSI_S_OVERRUN:
-+ set_host_byte(sc, DID_ERROR);
-+ break;
-+ case VIRTIO_SCSI_S_ABORTED:
-+ set_host_byte(sc, DID_ABORT);
-+ break;
-+ case VIRTIO_SCSI_S_BAD_TARGET:
-+ set_host_byte(sc, DID_BAD_TARGET);
-+ break;
-+ case VIRTIO_SCSI_S_RESET:
-+ set_host_byte(sc, DID_RESET);
-+ break;
-+ case VIRTIO_SCSI_S_BUSY:
-+ set_host_byte(sc, DID_BUS_BUSY);
-+ break;
-+ case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
-+ set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
-+ break;
-+ case VIRTIO_SCSI_S_TARGET_FAILURE:
-+ set_host_byte(sc, DID_TARGET_FAILURE);
-+ break;
-+ case VIRTIO_SCSI_S_NEXUS_FAILURE:
-+ set_host_byte(sc, DID_NEXUS_FAILURE);
-+ break;
-+ default:
-+ scmd_printk(KERN_WARNING, sc, "Unknown response %d",
-+ resp->response);
-+ /* fall through */
-+ case VIRTIO_SCSI_S_FAILURE:
-+ set_host_byte(sc, DID_ERROR);
-+ break;
-+ }
-+
-+ WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
-+ if (sc->sense_buffer) {
-+ memcpy(sc->sense_buffer, resp->sense,
-+ min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
-+ if (resp->sense_len)
-+ set_driver_byte(sc, DRIVER_SENSE);
-+ }
-+
-+ mempool_free(cmd, virtscsi_cmd_pool);
-+ sc->scsi_done(sc);
-+}
-+
-+static void virtscsi_vq_done(struct virtqueue *vq, void (*fn)(void *buf))
-+{
-+ void *buf;
-+ unsigned int len;
-+
-+ do {
-+ virtqueue_disable_cb(vq);
-+ while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
-+ fn(buf);
-+ } while (!virtqueue_enable_cb(vq));
-+}
-+
-+static void virtscsi_req_done(struct virtqueue *vq)
-+{
-+ struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
-+ struct virtio_scsi *vscsi = shost_priv(sh);
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&vscsi->req_vq.vq_lock, flags);
-+ virtscsi_vq_done(vq, virtscsi_complete_cmd);
-+ spin_unlock_irqrestore(&vscsi->req_vq.vq_lock, flags);
-+};
-+
-+static void virtscsi_complete_free(void *buf)
-+{
-+ struct virtio_scsi_cmd *cmd = buf;
-+
-+ if (cmd->comp)
-+ complete_all(cmd->comp);
-+ else
-+ mempool_free(cmd, virtscsi_cmd_pool);
-+}
-+
-+static void virtscsi_ctrl_done(struct virtqueue *vq)
-+{
-+ struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
-+ struct virtio_scsi *vscsi = shost_priv(sh);
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&vscsi->ctrl_vq.vq_lock, flags);
-+ virtscsi_vq_done(vq, virtscsi_complete_free);
-+ spin_unlock_irqrestore(&vscsi->ctrl_vq.vq_lock, flags);
-+};
-+
-+static int virtscsi_kick_event(struct virtio_scsi *vscsi,
-+ struct virtio_scsi_event_node *event_node)
-+{
-+ int ret;
-+ struct scatterlist sg;
-+ unsigned long flags;
-+
-+ sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
-+
-+ spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
-+
-+ ret = virtqueue_add_buf_gfp(vscsi->event_vq.vq, &sg, 0, 1, event_node, GFP_ATOMIC);
-+ if (ret >= 0)
-+ virtqueue_kick(vscsi->event_vq.vq);
-+
-+ spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
-+
-+ return ret;
-+}
-+
-+static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
-+{
-+ int i;
-+
-+ for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
-+ vscsi->event_list[i].vscsi = vscsi;
-+ virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
-+ }
-+
-+ return 0;
-+}
-+
-+static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
-+{
-+ int i;
-+
-+ for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
-+ cancel_work_sync(&vscsi->event_list[i].work);
-+}
-+
-+static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
-+ struct virtio_scsi_event *event)
-+{
-+ struct scsi_device *sdev;
-+ struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
-+ unsigned int target = event->lun[1];
-+ unsigned int lun = (event->lun[2] << 8) | event->lun[3];
-+
-+ switch (event->reason) {
-+ case VIRTIO_SCSI_EVT_RESET_RESCAN:
-+ scsi_add_device(shost, 0, target, lun);
-+ break;
-+ case VIRTIO_SCSI_EVT_RESET_REMOVED:
-+ sdev = scsi_device_lookup(shost, 0, target, lun);
-+ if (sdev) {
-+ scsi_remove_device(sdev);
-+ scsi_device_put(sdev);
-+ } else {
-+ pr_err("SCSI device %d 0 %d %d not found\n",
-+ shost->host_no, target, lun);
-+ }
-+ break;
-+ default:
-+ pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
-+ }
-+}
-+
-+static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
-+ struct virtio_scsi_event *event)
-+{
-+ struct scsi_device *sdev;
-+ struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
-+ unsigned int target = event->lun[1];
-+ unsigned int lun = (event->lun[2] << 8) | event->lun[3];
-+ u8 asc = event->reason & 255;
-+ u8 ascq = event->reason >> 8;
-+
-+ sdev = scsi_device_lookup(shost, 0, target, lun);
-+ if (!sdev) {
-+ pr_err("SCSI device %d 0 %d %d not found\n",
-+ shost->host_no, target, lun);
-+ return;
-+ }
-+
-+ /* Handle "Parameters changed", "Mode parameters changed", and
-+ "Capacity data has changed". */
-+ if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
-+ scsi_rescan_device(&sdev->sdev_gendev);
-+
-+ scsi_device_put(sdev);
-+}
-+
-+static void virtscsi_handle_event(struct work_struct *work)
-+{
-+ struct virtio_scsi_event_node *event_node =
-+ container_of(work, struct virtio_scsi_event_node, work);
-+ struct virtio_scsi *vscsi = event_node->vscsi;
-+ struct virtio_scsi_event *event = &event_node->event;
-+
-+ if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
-+ event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
-+ scsi_scan_host(virtio_scsi_host(vscsi->vdev));
-+ }
-+
-+ switch (event->event) {
-+ case VIRTIO_SCSI_T_NO_EVENT:
-+ break;
-+ case VIRTIO_SCSI_T_TRANSPORT_RESET:
-+ virtscsi_handle_transport_reset(vscsi, event);
-+ break;
-+ case VIRTIO_SCSI_T_PARAM_CHANGE:
-+ virtscsi_handle_param_change(vscsi, event);
-+ break;
-+ default:
-+ pr_err("Unsupport virtio scsi event %x\n", event->event);
-+ }
-+ virtscsi_kick_event(vscsi, event_node);
-+}
-+
-+static void virtscsi_complete_event(void *buf)
-+{
-+ struct virtio_scsi_event_node *event_node = buf;
-+
-+ INIT_WORK(&event_node->work, virtscsi_handle_event);
-+ schedule_work(&event_node->work);
-+}
-+
-+static void virtscsi_event_done(struct virtqueue *vq)
-+{
-+ struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
-+ struct virtio_scsi *vscsi = shost_priv(sh);
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
-+ virtscsi_vq_done(vq, virtscsi_complete_event);
-+ spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
-+};
-+
-+static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx,
-+ struct scsi_data_buffer *sdb)
-+{
-+ struct sg_table *table = &sdb->table;
-+ struct scatterlist *sg_elem;
-+ unsigned int idx = *p_idx;
-+ int i;
-+
-+ for_each_sg(table->sgl, sg_elem, table->nents, i)
-+ sg[idx++] = *sg_elem;
-+
-+ *p_idx = idx;
-+}
-+
-+/**
-+ * virtscsi_map_cmd - map a scsi_cmd to a virtqueue scatterlist
-+ * @vscsi : virtio_scsi state
-+ * @cmd : command structure
-+ * @out_num : number of read-only elements
-+ * @in_num : number of write-only elements
-+ * @req_size : size of the request buffer
-+ * @resp_size : size of the response buffer
-+ *
-+ * Called with tgt_lock held.
-+ */
-+static void virtscsi_map_cmd(struct virtio_scsi_target_state *tgt,
-+ struct virtio_scsi_cmd *cmd,
-+ unsigned *out_num, unsigned *in_num,
-+ size_t req_size, size_t resp_size)
-+{
-+ struct scsi_cmnd *sc = cmd->sc;
-+ struct scatterlist *sg = tgt->sg;
-+ unsigned int idx = 0;
-+
-+ /* Request header. */
-+ sg_set_buf(&sg[idx++], &cmd->req, req_size);
-+
-+ /* Data-out buffer. */
-+ if (sc && sc->sc_data_direction != DMA_FROM_DEVICE)
-+ virtscsi_map_sgl(sg, &idx, scsi_out(sc));
-+
-+ *out_num = idx;
-+
-+ /* Response header. */
-+ sg_set_buf(&sg[idx++], &cmd->resp, resp_size);
-+
-+ /* Data-in buffer */
-+ if (sc && sc->sc_data_direction != DMA_TO_DEVICE)
-+ virtscsi_map_sgl(sg, &idx, scsi_in(sc));
-+
-+ *in_num = idx - *out_num;
-+}
-+
-+static int virtscsi_kick_cmd(struct virtio_scsi_target_state *tgt,
-+ struct virtio_scsi_vq *vq,
-+ struct virtio_scsi_cmd *cmd,
-+ size_t req_size, size_t resp_size, gfp_t gfp)
-+{
-+ unsigned int out_num, in_num;
-+ unsigned long flags;
-+ int ret;
-+
-+ spin_lock_irqsave(&tgt->tgt_lock, flags);
-+ virtscsi_map_cmd(tgt, cmd, &out_num, &in_num, req_size, resp_size);
-+
-+ spin_lock(&vq->vq_lock);
-+ ret = virtqueue_add_buf_gfp(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
-+ spin_unlock(&tgt->tgt_lock);
-+ if (ret >= 0)
-+ ret = virtqueue_kick_prepare(vq->vq);
-+
-+ spin_unlock_irqrestore(&vq->vq_lock, flags);
-+
-+ if (ret > 0)
-+ virtqueue_notify(vq->vq);
-+ return ret;
-+}
-+
-+static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
-+{
-+ struct virtio_scsi *vscsi = shost_priv(sh);
-+ struct virtio_scsi_target_state *tgt = vscsi->tgt[sc->device->id];
-+ struct virtio_scsi_cmd *cmd;
-+ int ret;
-+
-+ struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
-+ BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
-+
-+ /* TODO: check feature bit and fail if unsupported? */
-+ BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
-+
-+ dev_dbg(&sc->device->sdev_gendev,
-+ "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
-+
-+ ret = SCSI_MLQUEUE_HOST_BUSY;
-+ cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC);
-+ if (!cmd)
-+ goto out;
-+
-+ memset(cmd, 0, sizeof(*cmd));
-+ cmd->sc = sc;
-+ cmd->req.cmd = (struct virtio_scsi_cmd_req){
-+ .lun[0] = 1,
-+ .lun[1] = sc->device->id,
-+ .lun[2] = (sc->device->lun >> 8) | 0x40,
-+ .lun[3] = sc->device->lun & 0xff,
-+ .tag = (unsigned long)sc,
-+ .task_attr = VIRTIO_SCSI_S_SIMPLE,
-+ .prio = 0,
-+ .crn = 0,
-+ };
-+
-+ BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
-+ memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
-+
-+ if (virtscsi_kick_cmd(tgt, &vscsi->req_vq, cmd,
-+ sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
-+ GFP_ATOMIC) >= 0)
-+ ret = 0;
-+ else
-+ mempool_free(cmd, virtscsi_cmd_pool);
-+
-+out:
-+ return ret;
-+}
-+
-+static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
-+{
-+ DECLARE_COMPLETION_ONSTACK(comp);
-+ struct virtio_scsi_target_state *tgt = vscsi->tgt[cmd->sc->device->id];
-+ int ret = FAILED;
-+
-+ cmd->comp = &comp;
-+ if (virtscsi_kick_cmd(tgt, &vscsi->ctrl_vq, cmd,
-+ sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
-+ GFP_NOIO) < 0)
-+ goto out;
-+
-+ wait_for_completion(&comp);
-+ if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
-+ cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
-+ ret = SUCCESS;
-+
-+out:
-+ mempool_free(cmd, virtscsi_cmd_pool);
-+ return ret;
-+}
-+
-+static int virtscsi_device_reset(struct scsi_cmnd *sc)
-+{
-+ struct virtio_scsi *vscsi = shost_priv(sc->device->host);
-+ struct virtio_scsi_cmd *cmd;
-+
-+ sdev_printk(KERN_INFO, sc->device, "device reset\n");
-+ cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
-+ if (!cmd)
-+ return FAILED;
-+
-+ memset(cmd, 0, sizeof(*cmd));
-+ cmd->sc = sc;
-+ cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
-+ .type = VIRTIO_SCSI_T_TMF,
-+ .subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
-+ .lun[0] = 1,
-+ .lun[1] = sc->device->id,
-+ .lun[2] = (sc->device->lun >> 8) | 0x40,
-+ .lun[3] = sc->device->lun & 0xff,
-+ };
-+ return virtscsi_tmf(vscsi, cmd);
-+}
-+
-+static int virtscsi_abort(struct scsi_cmnd *sc)
-+{
-+ struct virtio_scsi *vscsi = shost_priv(sc->device->host);
-+ struct virtio_scsi_cmd *cmd;
-+
-+ scmd_printk(KERN_INFO, sc, "abort\n");
-+ cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
-+ if (!cmd)
-+ return FAILED;
-+
-+ memset(cmd, 0, sizeof(*cmd));
-+ cmd->sc = sc;
-+ cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
-+ .type = VIRTIO_SCSI_T_TMF,
-+ .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
-+ .lun[0] = 1,
-+ .lun[1] = sc->device->id,
-+ .lun[2] = (sc->device->lun >> 8) | 0x40,
-+ .lun[3] = sc->device->lun & 0xff,
-+ .tag = (unsigned long)sc,
-+ };
-+ return virtscsi_tmf(vscsi, cmd);
-+}
-+
-+static struct scsi_host_template virtscsi_host_template = {
-+ .module = THIS_MODULE,
-+ .name = "Virtio SCSI HBA",
-+ .proc_name = "virtio_scsi",
-+ .queuecommand = virtscsi_queuecommand,
-+ .this_id = -1,
-+ .eh_abort_handler = virtscsi_abort,
-+ .eh_device_reset_handler = virtscsi_device_reset,
-+
-+ .can_queue = 1024,
-+ .dma_boundary = UINT_MAX,
-+ .use_clustering = ENABLE_CLUSTERING,
-+};
-+
-+#define virtscsi_config_get(vdev, fld) \
-+ ({ \
-+ typeof(((struct virtio_scsi_config *)0)->fld) __val; \
-+ vdev->config->get(vdev, \
-+ offsetof(struct virtio_scsi_config, fld), \
-+ &__val, sizeof(__val)); \
-+ __val; \
-+ })
-+
-+#define virtscsi_config_set(vdev, fld, val) \
-+ (void)({ \
-+ typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
-+ vdev->config->set(vdev, \
-+ offsetof(struct virtio_scsi_config, fld), \
-+ &__val, sizeof(__val)); \
-+ })
-+
-+static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
-+ struct virtqueue *vq)
-+{
-+ spin_lock_init(&virtscsi_vq->vq_lock);
-+ virtscsi_vq->vq = vq;
-+}
-+
-+static struct virtio_scsi_target_state *virtscsi_alloc_tgt(
-+ struct virtio_device *vdev, int sg_elems)
-+{
-+ struct virtio_scsi_target_state *tgt;
-+ gfp_t gfp_mask = GFP_KERNEL;
-+
-+ /* We need extra sg elements at head and tail. */
-+ tgt = kmalloc(sizeof(*tgt) + sizeof(tgt->sg[0]) * (sg_elems + 2),
-+ gfp_mask);
-+
-+ if (!tgt)
-+ return NULL;
-+
-+ spin_lock_init(&tgt->tgt_lock);
-+ sg_init_table(tgt->sg, sg_elems + 2);
-+ return tgt;
-+}
-+
-+static void virtscsi_scan(struct virtio_device *vdev)
-+{
-+ struct Scsi_Host *shost = (struct Scsi_Host *)vdev->priv;
-+
-+ scsi_scan_host(shost);
-+}
-+
-+static void virtscsi_remove_vqs(struct virtio_device *vdev)
-+{
-+ struct Scsi_Host *sh = virtio_scsi_host(vdev);
-+ struct virtio_scsi *vscsi = shost_priv(sh);
-+ u32 i, num_targets;
-+
-+ /* Stop all the virtqueues. */
-+ vdev->config->reset(vdev);
-+
-+ num_targets = sh->max_id;
-+ for (i = 0; i < num_targets; i++) {
-+ kfree(vscsi->tgt[i]);
-+ vscsi->tgt[i] = NULL;
-+ }
-+
-+ vdev->config->del_vqs(vdev);
-+}
-+
-+static int virtscsi_init(struct virtio_device *vdev,
-+ struct virtio_scsi *vscsi, int num_targets)
-+{
-+ int err;
-+ struct virtqueue *vqs[3];
-+ u32 i, sg_elems;
-+
-+ vq_callback_t *callbacks[] = {
-+ virtscsi_ctrl_done,
-+ virtscsi_event_done,
-+ virtscsi_req_done
-+ };
-+ const char *names[] = {
-+ "control",
-+ "event",
-+ "request"
-+ };
-+
-+ /* Discover virtqueues and write information to configuration. */
-+ err = vdev->config->find_vqs(vdev, 3, vqs, callbacks, names);
-+ if (err)
-+ return err;
-+
-+ virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
-+ virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
-+ virtscsi_init_vq(&vscsi->req_vq, vqs[2]);
-+
-+ virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
-+ virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
-+
-+ if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
-+ virtscsi_kick_event_all(vscsi);
-+
-+ /* We need to know how many segments before we allocate. */
-+ sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
-+
-+ for (i = 0; i < num_targets; i++) {
-+ vscsi->tgt[i] = virtscsi_alloc_tgt(vdev, sg_elems);
-+ if (!vscsi->tgt[i]) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+ }
-+ err = 0;
-+
-+out:
-+ if (err)
-+ virtscsi_remove_vqs(vdev);
-+ return err;
-+}
-+
-+static int __devinit virtscsi_probe(struct virtio_device *vdev)
-+{
-+ struct Scsi_Host *shost;
-+ struct virtio_scsi *vscsi;
-+ int err;
-+ u32 sg_elems, num_targets;
-+ u32 cmd_per_lun;
-+
-+ /* Allocate memory and link the structs together. */
-+ num_targets = virtscsi_config_get(vdev, max_target) + 1;
-+ shost = scsi_host_alloc(&virtscsi_host_template,
-+ sizeof(*vscsi)
-+ + num_targets * sizeof(struct virtio_scsi_target_state));
-+
-+ if (!shost)
-+ return -ENOMEM;
-+
-+ sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
-+ shost->sg_tablesize = sg_elems;
-+ vscsi = shost_priv(shost);
-+ vscsi->vdev = vdev;
-+ vdev->priv = shost;
-+
-+ err = virtscsi_init(vdev, vscsi, num_targets);
-+ if (err)
-+ goto virtscsi_init_failed;
-+
-+ cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
-+ shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
-+ shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
-+
-+ /* LUNs > 256 are reported with format 1, so they go in the range
-+ * 16640-32767.
-+ */
-+ shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
-+ shost->max_id = num_targets;
-+ shost->max_channel = 0;
-+ shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
-+ err = scsi_add_host(shost, &vdev->dev);
-+ if (err)
-+ goto scsi_add_host_failed;
-+ /*
-+ * scsi_scan_host() happens in virtscsi_scan() via virtio_driver->scan()
-+ * after VIRTIO_CONFIG_S_DRIVER_OK has been set..
-+ */
-+ return 0;
-+
-+scsi_add_host_failed:
-+ vdev->config->del_vqs(vdev);
-+virtscsi_init_failed:
-+ scsi_host_put(shost);
-+ return err;
-+}
-+
-+static void __devexit virtscsi_remove(struct virtio_device *vdev)
-+{
-+ struct Scsi_Host *shost = virtio_scsi_host(vdev);
-+ struct virtio_scsi *vscsi = shost_priv(shost);
-+
-+ if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
-+ virtscsi_cancel_event_work(vscsi);
-+
-+ scsi_remove_host(shost);
-+
-+ virtscsi_remove_vqs(vdev);
-+ scsi_host_put(shost);
-+}
-+
-+#if 0
-+static int virtscsi_freeze(struct virtio_device *vdev)
-+{
-+ virtscsi_remove_vqs(vdev);
-+ return 0;
-+}
-+
-+static int virtscsi_restore(struct virtio_device *vdev)
-+{
-+ struct Scsi_Host *sh = virtio_scsi_host(vdev);
-+ struct virtio_scsi *vscsi = shost_priv(sh);
-+
-+ return virtscsi_init(vdev, vscsi, sh->max_id);
-+}
-+#endif
-+
-+static struct virtio_device_id id_table[] = {
-+ { VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
-+ { 0 },
-+};
-+
-+static unsigned int features[] = {
-+ VIRTIO_SCSI_F_HOTPLUG,
-+ VIRTIO_SCSI_F_CHANGE,
-+};
-+
-+static struct virtio_driver virtio_scsi_driver = {
-+ .feature_table = features,
-+ .feature_table_size = ARRAY_SIZE(features),
-+ .driver.name = KBUILD_MODNAME,
-+ .driver.owner = THIS_MODULE,
-+ .id_table = id_table,
-+ .probe = virtscsi_probe,
-+ .scan = virtscsi_scan,
-+#if 0
-+ .freeze = virtscsi_freeze,
-+ .restore = virtscsi_restore,
-+#endif
-+ .remove = __devexit_p(virtscsi_remove),
-+};
-+
-+static int __init init(void)
-+{
-+ int ret = -ENOMEM;
-+
-+ virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
-+ if (!virtscsi_cmd_cache) {
-+ printk(KERN_ERR "kmem_cache_create() for "
-+ "virtscsi_cmd_cache failed\n");
-+ goto error;
-+ }
-+
-+
-+ virtscsi_cmd_pool =
-+ mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
-+ virtscsi_cmd_cache);
-+ if (!virtscsi_cmd_pool) {
-+ printk(KERN_ERR "mempool_create() for"
-+ "virtscsi_cmd_pool failed\n");
-+ goto error;
-+ }
-+ ret = register_virtio_driver(&virtio_scsi_driver);
-+ if (ret < 0)
-+ goto error;
-+
-+ return 0;
-+
-+error:
-+ if (virtscsi_cmd_pool) {
-+ mempool_destroy(virtscsi_cmd_pool);
-+ virtscsi_cmd_pool = NULL;
-+ }
-+ if (virtscsi_cmd_cache) {
-+ kmem_cache_destroy(virtscsi_cmd_cache);
-+ virtscsi_cmd_cache = NULL;
-+ }
-+ return ret;
-+}
-+
-+static void __exit fini(void)
-+{
-+ unregister_virtio_driver(&virtio_scsi_driver);
-+ mempool_destroy(virtscsi_cmd_pool);
-+ kmem_cache_destroy(virtscsi_cmd_cache);
-+}
-+module_init(init);
-+module_exit(fini);
-+
-+MODULE_DEVICE_TABLE(virtio, id_table);
-+MODULE_DESCRIPTION("Virtio SCSI HBA driver");
-+MODULE_LICENSE("GPL");
-diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
-index f64250e..1ee3049 100644
---- a/drivers/spi/spi-dw-pci.c
-+++ b/drivers/spi/spi-dw-pci.c
-@@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
- #define spi_resume NULL
- #endif
-
--static const struct pci_device_id pci_ids[] __devinitdata = {
-+static const struct pci_device_id pci_ids[] __devinitconst = {
- /* Intel MID platform SPI controller 0 */
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
- {},
-diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
-index b2ccdea..84cde75 100644
---- a/drivers/spi/spi.c
-+++ b/drivers/spi/spi.c
-@@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
- EXPORT_SYMBOL_GPL(spi_bus_unlock);
-
- /* portable code must never pass more than 32 bytes */
--#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
-+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
-
- static u8 *buf;
-
-diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
-index 436fe97..4082570 100644
---- a/drivers/staging/gma500/power.c
-+++ b/drivers/staging/gma500/power.c
-@@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
- ret = gma_resume_pci(dev->pdev);
- if (ret == 0) {
- /* FIXME: we want to defer this for Medfield/Oaktrail */
-- gma_resume_display(dev);
-+ gma_resume_display(dev->pdev);
- psb_irq_preinstall(dev);
- psb_irq_postinstall(dev);
- pm_runtime_get(&dev->pdev->dev);
-diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
-index bafccb3..e3ac78d 100644
---- a/drivers/staging/hv/rndis_filter.c
-+++ b/drivers/staging/hv/rndis_filter.c
-@@ -42,7 +42,7 @@ struct rndis_device {
-
- enum rndis_device_state state;
- bool link_state;
-- atomic_t new_req_id;
-+ atomic_unchecked_t new_req_id;
-
- spinlock_t request_lock;
- struct list_head req_list;
-@@ -116,7 +116,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
- * template
- */
- set = &rndis_msg->msg.set_req;
-- set->req_id = atomic_inc_return(&dev->new_req_id);
-+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
-
- /* Add to the request list */
- spin_lock_irqsave(&dev->request_lock, flags);
-@@ -646,7 +646,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
-
- /* Setup the rndis set */
- halt = &request->request_msg.msg.halt_req;
-- halt->req_id = atomic_inc_return(&dev->new_req_id);
-+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
-
- /* Ignore return since this msg is optional. */
- rndis_filter_send_request(dev, request);
-diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h
-index 7237a9a..2f83d9d 100644
---- a/drivers/staging/iio/accel/lis3l02dq.h
-+++ b/drivers/staging/iio/accel/lis3l02dq.h
-@@ -158,6 +158,7 @@ struct lis3l02dq_state {
- struct spi_device *us;
- struct iio_trigger *trig;
- struct mutex buf_lock;
-+ int gpio;
- bool trigger_on;
-
- u8 tx[LIS3L02DQ_MAX_RX] ____cacheline_aligned;
-diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
-index 559545a..92d93d1 100644
---- a/drivers/staging/iio/accel/lis3l02dq_core.c
-+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
-@@ -15,6 +15,7 @@
- #include <linux/interrupt.h>
- #include <linux/irq.h>
- #include <linux/gpio.h>
-+#include <linux/of_gpio.h>
- #include <linux/mutex.h>
- #include <linux/device.h>
- #include <linux/kernel.h>
-@@ -678,6 +679,7 @@ static int __devinit lis3l02dq_probe(struct spi_device *spi)
- spi_set_drvdata(spi, indio_dev);
-
- st->us = spi;
-+ st->gpio = of_get_gpio(spi->dev.of_node, 0);
- mutex_init(&st->buf_lock);
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
-@@ -699,7 +701,7 @@ static int __devinit lis3l02dq_probe(struct spi_device *spi)
- goto error_unreg_buffer_funcs;
- }
-
-- if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
-+ if (spi->irq) {
- ret = request_threaded_irq(st->us->irq,
- &lis3l02dq_th,
- &lis3l02dq_event_handler,
-@@ -729,7 +731,7 @@ error_remove_trigger:
- if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
- lis3l02dq_remove_trigger(indio_dev);
- error_free_interrupt:
-- if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
-+ if (spi->irq)
- free_irq(st->us->irq, indio_dev);
- error_uninitialize_buffer:
- iio_buffer_unregister(indio_dev);
-@@ -784,7 +786,7 @@ static int lis3l02dq_remove(struct spi_device *spi)
- if (ret)
- goto err_ret;
-
-- if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
-+ if (spi->irq)
- free_irq(st->us->irq, indio_dev);
-
- lis3l02dq_remove_trigger(indio_dev);
-diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
-index 89527af..70d34fc 100644
---- a/drivers/staging/iio/accel/lis3l02dq_ring.c
-+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
-@@ -292,7 +292,7 @@ static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
- /* If gpio still high (or high again) */
- /* In theory possible we will need to do this several times */
- for (i = 0; i < 5; i++)
-- if (gpio_get_value(irq_to_gpio(st->us->irq)))
-+ if (gpio_get_value(st->gpio))
- lis3l02dq_read_all(indio_dev, NULL);
- else
- break;
-diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h
-index 9e8f010..af9efb56 100644
---- a/drivers/staging/iio/buffer_generic.h
-+++ b/drivers/staging/iio/buffer_generic.h
-@@ -64,7 +64,7 @@ struct iio_buffer_access_funcs {
-
- int (*is_enabled)(struct iio_buffer *buffer);
- int (*enable)(struct iio_buffer *buffer);
--};
-+} __no_const;
-
- /**
- * struct iio_buffer_setup_ops - buffer setup related callbacks
-diff --git a/drivers/staging/iio/dac/ad5360.c b/drivers/staging/iio/dac/ad5360.c
-index 72d0f3f..ba3ff3c 100644
---- a/drivers/staging/iio/dac/ad5360.c
-+++ b/drivers/staging/iio/dac/ad5360.c
-@@ -439,8 +439,8 @@ static int __devinit ad5360_alloc_channels(struct iio_dev *indio_dev)
- struct iio_chan_spec *channels;
- unsigned int i;
-
-- channels = kcalloc(sizeof(struct iio_chan_spec),
-- st->chip_info->num_channels, GFP_KERNEL);
-+ channels = kcalloc(st->chip_info->num_channels,
-+ sizeof(struct iio_chan_spec), GFP_KERNEL);
-
- if (!channels)
- return -ENOMEM;
-diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
-index f0abf29..7f8ccd26 100644
---- a/drivers/staging/iio/industrialio-core.c
-+++ b/drivers/staging/iio/industrialio-core.c
-@@ -398,7 +398,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
- }
-
- static
--int __iio_device_attr_init(struct device_attribute *dev_attr,
-+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
- const char *postfix,
- struct iio_chan_spec const *chan,
- ssize_t (*readfunc)(struct device *dev,
-diff --git a/drivers/staging/iio/ring_sw.c b/drivers/staging/iio/ring_sw.c
-index 66a34ad..65f6aea 100644
---- a/drivers/staging/iio/ring_sw.c
-+++ b/drivers/staging/iio/ring_sw.c
-@@ -173,7 +173,7 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
-
- u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
- u8 *data;
-- int ret, max_copied, bytes_to_rip, dead_offset;
-+ long ret, max_copied, bytes_to_rip, dead_offset;
-
- /* A userspace program has probably made an error if it tries to
- * read something that is not a whole number of bpds.
-diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
-index 851b762..c09c498 100644
---- a/drivers/staging/line6/driver.c
-+++ b/drivers/staging/line6/driver.c
-@@ -551,7 +551,7 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
- {
- struct usb_device *usbdev = line6->usbdev;
- int ret;
-- unsigned char len;
-+ unsigned char *plen;
-
- /* query the serial number: */
- ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
-@@ -564,27 +564,34 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
- return ret;
- }
-
-+ plen = kmalloc(1, GFP_KERNEL);
-+ if (plen == NULL)
-+ return -ENOMEM;
-+
- /* Wait for data length. We'll get a couple of 0xff until length arrives. */
- do {
- ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE |
- USB_DIR_IN,
-- 0x0012, 0x0000, &len, 1,
-+ 0x0012, 0x0000, plen, 1,
- LINE6_TIMEOUT * HZ);
- if (ret < 0) {
- dev_err(line6->ifcdev,
- "receive length failed (error %d)\n", ret);
-+ kfree(plen);
- return ret;
- }
-- } while (len == 0xff);
-+ } while (*plen == 0xff);
-
-- if (len != datalen) {
-+ if (*plen != datalen) {
- /* should be equal or something went wrong */
- dev_err(line6->ifcdev,
- "length mismatch (expected %d, got %d)\n",
-- (int)datalen, (int)len);
-+ (int)datalen, (int)*plen);
-+ kfree(plen);
- return -EINVAL;
- }
-+ kfree(plen);
-
- /* receive the result: */
- ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
-@@ -608,7 +615,7 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
- {
- struct usb_device *usbdev = line6->usbdev;
- int ret;
-- unsigned char status;
-+ unsigned char *status;
-
- ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
-@@ -621,26 +628,34 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
- return ret;
- }
-
-+ status = kmalloc(1, GFP_KERNEL);
-+ if (status == NULL)
-+ return -ENOMEM;
-+
- do {
- ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
- 0x67,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE |
- USB_DIR_IN,
- 0x0012, 0x0000,
-- &status, 1, LINE6_TIMEOUT * HZ);
-+ status, 1, LINE6_TIMEOUT * HZ);
-
- if (ret < 0) {
- dev_err(line6->ifcdev,
- "receiving status failed (error %d)\n", ret);
-+ kfree(status);
- return ret;
- }
-- } while (status == 0xff);
-+ } while (*status == 0xff);
-
-- if (status != 0) {
-+ if (*status != 0) {
- dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
-+ kfree(status);
- return -EINVAL;
- }
-
-+ kfree(status);
-+
- return 0;
- }
-
-diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
-index 879e699..0d53b97 100644
---- a/drivers/staging/line6/toneport.c
-+++ b/drivers/staging/line6/toneport.c
-@@ -11,6 +11,7 @@
- */
-
- #include <linux/wait.h>
-+#include <linux/slab.h>
- #include <sound/control.h>
-
- #include "audio.h"
-@@ -310,13 +311,19 @@ static void toneport_destruct(struct usb_interface *interface)
- */
- static void toneport_setup(struct usb_line6_toneport *toneport)
- {
-- int ticks;
-+ int *ticks;
- struct usb_line6 *line6 = &toneport->line6;
- struct usb_device *usbdev = line6->usbdev;
-
-+ ticks = kmalloc(sizeof(int), GFP_KERNEL);
-+ if (ticks == NULL)
-+ return;
-+
- /* sync time on device with host: */
-- ticks = (int)get_seconds();
-- line6_write_data(line6, 0x80c6, &ticks, 4);
-+ *ticks = (int)get_seconds();
-+ line6_write_data(line6, 0x80c6, ticks, sizeof(int));
-+
-+ kfree(ticks);
-
- /* enable device: */
- toneport_send_cmd(usbdev, 0x0301, 0x0000);
-diff --git a/drivers/staging/media/solo6x10/g723.c b/drivers/staging/media/solo6x10/g723.c
-index 2cd0de2..0169c04 100644
---- a/drivers/staging/media/solo6x10/g723.c
-+++ b/drivers/staging/media/solo6x10/g723.c
-@@ -336,7 +336,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
-
- int solo_g723_init(struct solo_dev *solo_dev)
- {
-- static struct snd_device_ops ops = { NULL };
-+ static struct snd_device_ops ops = { };
- struct snd_card *card;
- struct snd_kcontrol_new kctl;
- char name[32];
-diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
-index 8b307b4..f999246 100644
---- a/drivers/staging/octeon/ethernet-rx.c
-+++ b/drivers/staging/octeon/ethernet-rx.c
-@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
- /* Increment RX stats for virtual ports */
- if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
- #ifdef CONFIG_64BIT
-- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
-- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
-+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
-+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
- #else
-- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
-- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
-+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
-+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
- #endif
- }
- netif_receive_skb(skb);
-@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
- dev->name);
- */
- #ifdef CONFIG_64BIT
-- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
-+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
- #else
-- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
-+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
- #endif
- dev_kfree_skb_irq(skb);
- }
-diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
-index 076f866..2308070 100644
---- a/drivers/staging/octeon/ethernet.c
-+++ b/drivers/staging/octeon/ethernet.c
-@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
- * since the RX tasklet also increments it.
- */
- #ifdef CONFIG_64BIT
-- atomic64_add(rx_status.dropped_packets,
-- (atomic64_t *)&priv->stats.rx_dropped);
-+ atomic64_add_unchecked(rx_status.dropped_packets,
-+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
- #else
-- atomic_add(rx_status.dropped_packets,
-- (atomic_t *)&priv->stats.rx_dropped);
-+ atomic_add_unchecked(rx_status.dropped_packets,
-+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
- #endif
- }
-
-diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
-index 7a19555..466456d 100644
---- a/drivers/staging/pohmelfs/inode.c
-+++ b/drivers/staging/pohmelfs/inode.c
-@@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
- mutex_init(&psb->mcache_lock);
- psb->mcache_root = RB_ROOT;
- psb->mcache_timeout = msecs_to_jiffies(5000);
-- atomic_long_set(&psb->mcache_gen, 0);
-+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
-
- psb->trans_max_pages = 100;
-
-@@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
- INIT_LIST_HEAD(&psb->crypto_ready_list);
- INIT_LIST_HEAD(&psb->crypto_active_list);
-
-- atomic_set(&psb->trans_gen, 1);
-+ atomic_set_unchecked(&psb->trans_gen, 1);
- atomic_long_set(&psb->total_inodes, 0);
-
- mutex_init(&psb->state_lock);
-diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
-index e22665c..a2a9390 100644
---- a/drivers/staging/pohmelfs/mcache.c
-+++ b/drivers/staging/pohmelfs/mcache.c
-@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
- m->data = data;
- m->start = start;
- m->size = size;
-- m->gen = atomic_long_inc_return(&psb->mcache_gen);
-+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
-
- mutex_lock(&psb->mcache_lock);
- err = pohmelfs_mcache_insert(psb, m);
-diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
-index 985b6b7..7699e05 100644
---- a/drivers/staging/pohmelfs/netfs.h
-+++ b/drivers/staging/pohmelfs/netfs.h
-@@ -571,14 +571,14 @@ struct pohmelfs_config;
- struct pohmelfs_sb {
- struct rb_root mcache_root;
- struct mutex mcache_lock;
-- atomic_long_t mcache_gen;
-+ atomic_long_unchecked_t mcache_gen;
- unsigned long mcache_timeout;
-
- unsigned int idx;
-
- unsigned int trans_retries;
-
-- atomic_t trans_gen;
-+ atomic_unchecked_t trans_gen;
-
- unsigned int crypto_attached_size;
- unsigned int crypto_align_size;
-diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
-index 06c1a74..866eebc 100644
---- a/drivers/staging/pohmelfs/trans.c
-+++ b/drivers/staging/pohmelfs/trans.c
-@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
- int err;
- struct netfs_cmd *cmd = t->iovec.iov_base;
-
-- t->gen = atomic_inc_return(&psb->trans_gen);
-+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
-
- cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
- t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
-diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
-index 86308a0..feaa925 100644
---- a/drivers/staging/rtl8712/rtl871x_io.h
-+++ b/drivers/staging/rtl8712/rtl871x_io.h
-@@ -108,7 +108,7 @@ struct _io_ops {
- u8 *pmem);
- u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
- u8 *pmem);
--};
-+} __no_const;
-
- struct io_req {
- struct list_head list;
-diff --git a/drivers/staging/rts5139/rts51x_transport.c b/drivers/staging/rts5139/rts51x_transport.c
-index e11467a..c9dac26 100644
---- a/drivers/staging/rts5139/rts51x_transport.c
-+++ b/drivers/staging/rts5139/rts51x_transport.c
-@@ -337,11 +337,18 @@ int rts51x_ctrl_transfer(struct rts51x_chip *chip, unsigned int pipe,
- void *data, u16 size, int timeout)
- {
- struct rts51x_usb *rts51x = chip->usb;
-+ void *buf = kmalloc(size, GFP_KERNEL);
- int result;
-+ int ret;
-+
-+ if (buf == NULL)
-+ TRACE_RET(chip, STATUS_ERROR);
-
- RTS51X_DEBUGP("%s: rq=%02x rqtype=%02x value=%04x index=%02x len=%u\n",
- __func__, request, requesttype, value, index, size);
-
-+ memcpy(buf, data, size);
-+
- /* fill in the devrequest structure */
- rts51x->cr->bRequestType = requesttype;
- rts51x->cr->bRequest = request;
-@@ -351,12 +358,17 @@ int rts51x_ctrl_transfer(struct rts51x_chip *chip, unsigned int pipe,
-
- /* fill and submit the URB */
- usb_fill_control_urb(rts51x->current_urb, rts51x->pusb_dev, pipe,
-- (unsigned char *)rts51x->cr, data, size,
-+ (unsigned char *)rts51x->cr, buf, size,
- urb_done_completion, NULL);
- result = rts51x_msg_common(chip, rts51x->current_urb, timeout);
-
-- return interpret_urb_result(chip, pipe, size, result,
-+ ret = interpret_urb_result(chip, pipe, size, result,
- rts51x->current_urb->actual_length);
-+ memcpy(data, buf, size);
-+
-+ kfree(buf);
-+
-+ return ret;
- }
-
- int rts51x_clear_halt(struct rts51x_chip *chip, unsigned int pipe)
-@@ -794,17 +806,30 @@ int rts51x_bulk_transfer_buf(struct rts51x_chip *chip, unsigned int pipe,
- unsigned int *act_len, int timeout)
- {
- int result;
-+ int ret;
-+ void *newbuf = kmalloc(length, GFP_KERNEL);
-+
-+ if (newbuf == NULL)
-+ TRACE_RET(chip, STATUS_ERROR);
-+
-+ memcpy(newbuf, buf, length);
-
- /* fill and submit the URB */
- usb_fill_bulk_urb(chip->usb->current_urb, chip->usb->pusb_dev, pipe,
-- buf, length, urb_done_completion, NULL);
-+ newbuf, length, urb_done_completion, NULL);
- result = rts51x_msg_common(chip, chip->usb->current_urb, timeout);
-
- /* store the actual length of the data transferred */
- if (act_len)
- *act_len = chip->usb->current_urb->actual_length;
-- return interpret_urb_result(chip, pipe, length, result,
-+ ret = interpret_urb_result(chip, pipe, length, result,
- chip->usb->current_urb->actual_length);
-+
-+ memcpy(buf, newbuf, length);
-+
-+ kfree(newbuf);
-+
-+ return ret;
- }
-
- int rts51x_transfer_data(struct rts51x_chip *chip, unsigned int pipe,
-@@ -888,11 +913,19 @@ int rts51x_get_epc_status(struct rts51x_chip *chip, u16 * status)
- unsigned int pipe = RCV_INTR_PIPE(chip);
- struct usb_host_endpoint *ep;
- struct completion urb_done;
-+ u16 *buf_status;
- int result;
-+ int ret;
-
- if (!status)
- TRACE_RET(chip, STATUS_ERROR);
-
-+ buf_status = kmalloc(sizeof(*status), GFP_KERNEL);
-+ if (buf_status == NULL)
-+ TRACE_RET(chip, STATUS_ERROR);
-+
-+ *buf_status = *status;
-+
- /* set up data structures for the wakeup system */
- init_completion(&urb_done);
-
-@@ -902,12 +935,17 @@ int rts51x_get_epc_status(struct rts51x_chip *chip, u16 * status)
- /* We set interval to 1 here, so the polling interval is controlled
- * by our polling thread */
- usb_fill_int_urb(chip->usb->intr_urb, chip->usb->pusb_dev, pipe,
-- status, 2, urb_done_completion, &urb_done, 1);
-+ buf_status, 2, urb_done_completion, &urb_done, 1);
-
- result = rts51x_msg_common(chip, chip->usb->intr_urb, 50);
-
-- return interpret_urb_result(chip, pipe, 2, result,
-+ ret = interpret_urb_result(chip, pipe, 2, result,
- chip->usb->intr_urb->actual_length);
-+ *status = *buf_status;
-+
-+ kfree(buf_status);
-+
-+ return ret;
- }
-
- u8 media_not_present[] = {
-diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
-index c7b5e8b..783d6cbe 100644
---- a/drivers/staging/sbe-2t3e3/netdev.c
-+++ b/drivers/staging/sbe-2t3e3/netdev.c
-@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
- t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
-
- if (rlen)
-- if (copy_to_user(data, &resp, rlen))
-+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
- return -EFAULT;
-
- return 0;
-diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
-index 88b3298..3783eee 100644
---- a/drivers/staging/usbip/vhci.h
-+++ b/drivers/staging/usbip/vhci.h
-@@ -88,7 +88,7 @@ struct vhci_hcd {
- unsigned resuming:1;
- unsigned long re_timeout;
-
-- atomic_t seqnum;
-+ atomic_unchecked_t seqnum;
-
- /*
- * NOTE:
-diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
-index 2ee97e2..0420b86 100644
---- a/drivers/staging/usbip/vhci_hcd.c
-+++ b/drivers/staging/usbip/vhci_hcd.c
-@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
- return;
- }
-
-- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
-+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
- if (priv->seqnum == 0xffff)
- dev_info(&urb->dev->dev, "seqnum max\n");
-
-@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
- return -ENOMEM;
- }
-
-- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
-+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
- if (unlink->seqnum == 0xffff)
- pr_info("seqnum max\n");
-
-@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
- vdev->rhport = rhport;
- }
-
-- atomic_set(&vhci->seqnum, 0);
-+ atomic_set_unchecked(&vhci->seqnum, 0);
- spin_lock_init(&vhci->lock);
-
- hcd->power_budget = 0; /* no limit */
-diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
-index 1a7afaa..e7dafbb 100644
---- a/drivers/staging/usbip/vhci_rx.c
-+++ b/drivers/staging/usbip/vhci_rx.c
-@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
- if (!urb) {
- pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
- pr_info("max seqnum %d\n",
-- atomic_read(&the_controller->seqnum));
-+ atomic_read_unchecked(&the_controller->seqnum));
- usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
- return;
- }
-diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
-index 7735027..30eed13 100644
---- a/drivers/staging/vt6655/hostap.c
-+++ b/drivers/staging/vt6655/hostap.c
-@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
- *
- */
-
-+static net_device_ops_no_const apdev_netdev_ops;
-+
- static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
- {
- PSDevice apdev_priv;
- struct net_device *dev = pDevice->dev;
- int ret;
-- const struct net_device_ops apdev_netdev_ops = {
-- .ndo_start_xmit = pDevice->tx_80211,
-- };
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
-
-@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
- *apdev_priv = *pDevice;
- memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
-
-+ /* only half broken now */
-+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
- pDevice->apdev->netdev_ops = &apdev_netdev_ops;
-
- pDevice->apdev->type = ARPHRD_IEEE80211;
-diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
-index df8ea25..47dd9c6 100644
---- a/drivers/staging/vt6656/hostap.c
-+++ b/drivers/staging/vt6656/hostap.c
-@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
- *
- */
-
-+static net_device_ops_no_const apdev_netdev_ops;
-+
- static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
- {
- PSDevice apdev_priv;
- struct net_device *dev = pDevice->dev;
- int ret;
-- const struct net_device_ops apdev_netdev_ops = {
-- .ndo_start_xmit = pDevice->tx_80211,
-- };
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
-
-@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
- *apdev_priv = *pDevice;
- memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
-
-+ /* only half broken now */
-+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
- pDevice->apdev->netdev_ops = &apdev_netdev_ops;
-
- pDevice->apdev->type = ARPHRD_IEEE80211;
-diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
-index 1ca66ea..76f1343 100644
---- a/drivers/staging/zcache/tmem.c
-+++ b/drivers/staging/zcache/tmem.c
-@@ -39,7 +39,7 @@
- * A tmem host implementation must use this function to register callbacks
- * for memory allocation.
- */
--static struct tmem_hostops tmem_hostops;
-+static tmem_hostops_no_const tmem_hostops;
-
- static void tmem_objnode_tree_init(void);
-
-@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
- * A tmem host implementation must use this function to register
- * callbacks for a page-accessible memory (PAM) implementation
- */
--static struct tmem_pamops tmem_pamops;
-+static tmem_pamops_no_const tmem_pamops;
-
- void tmem_register_pamops(struct tmem_pamops *m)
- {
-diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
-index ed147c4..94fc3c6 100644
---- a/drivers/staging/zcache/tmem.h
-+++ b/drivers/staging/zcache/tmem.h
-@@ -180,6 +180,7 @@ struct tmem_pamops {
- void (*new_obj)(struct tmem_obj *);
- int (*replace_in_obj)(void *, struct tmem_obj *);
- };
-+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
- extern void tmem_register_pamops(struct tmem_pamops *m);
-
- /* memory allocation methods provided by the host implementation */
-@@ -189,6 +190,7 @@ struct tmem_hostops {
- struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
- void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
- };
-+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
- extern void tmem_register_hostops(struct tmem_hostops *m);
-
- /* core tmem accessor functions */
-diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
-index ae4e7da..46264ce 100644
---- a/drivers/target/iscsi/iscsi_target.c
-+++ b/drivers/target/iscsi/iscsi_target.c
-@@ -1357,7 +1357,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
- * outstanding_r2ts reaches zero, go ahead and send the delayed
- * TASK_ABORTED status.
- */
-- if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
-+ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
- if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
- if (--cmd->outstanding_r2ts < 1) {
- iscsit_stop_dataout_timer(cmd);
-diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
-index 6845228..df77141 100644
---- a/drivers/target/target_core_tmr.c
-+++ b/drivers/target/target_core_tmr.c
-@@ -250,7 +250,7 @@ static void core_tmr_drain_task_list(
- cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
- cmd->t_task_list_num,
- atomic_read(&cmd->t_task_cdbs_left),
-- atomic_read(&cmd->t_task_cdbs_sent),
-+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
- atomic_read(&cmd->t_transport_active),
- atomic_read(&cmd->t_transport_stop),
- atomic_read(&cmd->t_transport_sent));
-@@ -281,7 +281,7 @@ static void core_tmr_drain_task_list(
- pr_debug("LUN_RESET: got t_transport_active = 1 for"
- " task: %p, t_fe_count: %d dev: %p\n", task,
- fe_count, dev);
-- atomic_set(&cmd->t_transport_aborted, 1);
-+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
-@@ -289,7 +289,7 @@ static void core_tmr_drain_task_list(
- }
- pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
- " t_fe_count: %d dev: %p\n", task, fe_count, dev);
-- atomic_set(&cmd->t_transport_aborted, 1);
-+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
-diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
-index be1218f..3481d56 100644
---- a/drivers/target/target_core_transport.c
-+++ b/drivers/target/target_core_transport.c
-@@ -1343,7 +1343,7 @@ struct se_device *transport_add_device_to_core_hba(
-
- dev->queue_depth = dev_limits->queue_depth;
- atomic_set(&dev->depth_left, dev->queue_depth);
-- atomic_set(&dev->dev_ordered_id, 0);
-+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
-
- se_dev_set_default_attribs(dev, dev_limits);
-
-@@ -1531,7 +1531,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
- * Used to determine when ORDERED commands should go from
- * Dormant to Active status.
- */
-- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
-+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
- smp_mb__after_atomic_inc();
- pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
- cmd->se_ordered_id, cmd->sam_task_attr,
-@@ -1801,7 +1801,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
- " t_transport_active: %d t_transport_stop: %d"
- " t_transport_sent: %d\n", cmd->t_task_list_num,
- atomic_read(&cmd->t_task_cdbs_left),
-- atomic_read(&cmd->t_task_cdbs_sent),
-+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
- atomic_read(&cmd->t_task_cdbs_ex_left),
- atomic_read(&cmd->t_transport_active),
- atomic_read(&cmd->t_transport_stop),
-@@ -2091,9 +2091,9 @@ check_depth:
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- task->task_flags |= (TF_ACTIVE | TF_SENT);
-- atomic_inc(&cmd->t_task_cdbs_sent);
-+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
-
-- if (atomic_read(&cmd->t_task_cdbs_sent) ==
-+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
- cmd->t_task_list_num)
- atomic_set(&cmd->t_transport_sent, 1);
-
-@@ -4303,7 +4303,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
- atomic_set(&cmd->transport_lun_stop, 0);
- }
- if (!atomic_read(&cmd->t_transport_active) ||
-- atomic_read(&cmd->t_transport_aborted)) {
-+ atomic_read_unchecked(&cmd->t_transport_aborted)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return false;
- }
-@@ -4561,7 +4561,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
- {
- int ret = 0;
-
-- if (atomic_read(&cmd->t_transport_aborted) != 0) {
-+ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
- if (!send_status ||
- (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
- return 1;
-@@ -4598,7 +4598,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
- */
- if (cmd->data_direction == DMA_TO_DEVICE) {
- if (cmd->se_tfo->write_pending_status(cmd) != 0) {
-- atomic_inc(&cmd->t_transport_aborted);
-+ atomic_inc_unchecked(&cmd->t_transport_aborted);
- smp_mb__after_atomic_inc();
- }
- }
-diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
-index b9040be..e3f5aab 100644
---- a/drivers/tty/hvc/hvcs.c
-+++ b/drivers/tty/hvc/hvcs.c
-@@ -83,6 +83,7 @@
- #include <asm/hvcserver.h>
- #include <asm/uaccess.h>
- #include <asm/vio.h>
-+#include <asm/local.h>
-
- /*
- * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
-@@ -270,7 +271,7 @@ struct hvcs_struct {
- unsigned int index;
-
- struct tty_struct *tty;
-- int open_count;
-+ local_t open_count;
-
- /*
- * Used to tell the driver kernel_thread what operations need to take
-@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
-
- spin_lock_irqsave(&hvcsd->lock, flags);
-
-- if (hvcsd->open_count > 0) {
-+ if (local_read(&hvcsd->open_count) > 0) {
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- printk(KERN_INFO "HVCS: vterm state unchanged. "
- "The hvcs device node is still in use.\n");
-@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
- if ((retval = hvcs_partner_connect(hvcsd)))
- goto error_release;
-
-- hvcsd->open_count = 1;
-+ local_set(&hvcsd->open_count, 1);
- hvcsd->tty = tty;
- tty->driver_data = hvcsd;
-
-@@ -1179,7 +1180,7 @@ fast_open:
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- kref_get(&hvcsd->kref);
-- hvcsd->open_count++;
-+ local_inc(&hvcsd->open_count);
- hvcsd->todo_mask |= HVCS_SCHED_READ;
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-
-@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
- hvcsd = tty->driver_data;
-
- spin_lock_irqsave(&hvcsd->lock, flags);
-- if (--hvcsd->open_count == 0) {
-+ if (local_dec_and_test(&hvcsd->open_count)) {
-
- vio_disable_interrupts(hvcsd->vdev);
-
-@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
- free_irq(irq, hvcsd);
- kref_put(&hvcsd->kref, destroy_hvcs_struct);
- return;
-- } else if (hvcsd->open_count < 0) {
-+ } else if (local_read(&hvcsd->open_count) < 0) {
- printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
- " is missmanaged.\n",
-- hvcsd->vdev->unit_address, hvcsd->open_count);
-+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
- }
-
- spin_unlock_irqrestore(&hvcsd->lock, flags);
-@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
-
- spin_lock_irqsave(&hvcsd->lock, flags);
- /* Preserve this so that we know how many kref refs to put */
-- temp_open_count = hvcsd->open_count;
-+ temp_open_count = local_read(&hvcsd->open_count);
-
- /*
- * Don't kref put inside the spinlock because the destruction
-@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
- hvcsd->tty->driver_data = NULL;
- hvcsd->tty = NULL;
-
-- hvcsd->open_count = 0;
-+ local_set(&hvcsd->open_count, 0);
-
- /* This will drop any buffered data on the floor which is OK in a hangup
- * scenario. */
-@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
- * the middle of a write operation? This is a crummy place to do this
- * but we want to keep it all in the spinlock.
- */
-- if (hvcsd->open_count <= 0) {
-+ if (local_read(&hvcsd->open_count) <= 0) {
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- return -ENODEV;
- }
-@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
- {
- struct hvcs_struct *hvcsd = tty->driver_data;
-
-- if (!hvcsd || hvcsd->open_count <= 0)
-+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
- return 0;
-
- return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
-diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
-index cdfa3e0..37fa165 100644
---- a/drivers/tty/hvc/hvsi.c
-+++ b/drivers/tty/hvc/hvsi.c
-@@ -86,7 +86,7 @@ struct hvsi_struct {
- int n_outbuf;
- uint32_t vtermno;
- uint32_t virq;
-- atomic_t seqno; /* HVSI packet sequence number */
-+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
- uint16_t mctrl;
- uint8_t state; /* HVSI protocol state */
- uint8_t flags;
-@@ -297,7 +297,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
-
- packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
- packet.hdr.len = sizeof(struct hvsi_query_response);
-- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
-+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
- packet.verb = VSV_SEND_VERSION_NUMBER;
- packet.u.version = HVSI_VERSION;
- packet.query_seqno = query_seqno+1;
-@@ -581,7 +581,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
-
- packet.hdr.type = VS_QUERY_PACKET_HEADER;
- packet.hdr.len = sizeof(struct hvsi_query);
-- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
-+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
- packet.verb = verb;
-
- pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
-@@ -623,7 +623,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
- int wrote;
-
- packet.hdr.type = VS_CONTROL_PACKET_HEADER,
-- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
-+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
- packet.hdr.len = sizeof(struct hvsi_control);
- packet.verb = VSV_SET_MODEM_CTL;
- packet.mask = HVSI_TSDTR;
-@@ -706,7 +706,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
- BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
-
- packet.hdr.type = VS_DATA_PACKET_HEADER;
-- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
-+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
- packet.hdr.len = count + sizeof(struct hvsi_header);
- memcpy(&packet.data, buf, count);
-
-@@ -723,7 +723,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
- struct hvsi_control packet __ALIGNED__;
-
- packet.hdr.type = VS_CONTROL_PACKET_HEADER;
-- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
-+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
- packet.hdr.len = 6;
- packet.verb = VSV_CLOSE_PROTOCOL;
-
-@@ -755,7 +755,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
- spin_lock_irqsave(&hp->lock, flags);
- hp->tty = tty;
- hp->count++;
-- atomic_set(&hp->seqno, 0);
-+ atomic_set_unchecked(&hp->seqno, 0);
- h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
- spin_unlock_irqrestore(&hp->lock, flags);
-
-diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
-index 3749688..82c91dc 100644
---- a/drivers/tty/hvc/hvsi_lib.c
-+++ b/drivers/tty/hvc/hvsi_lib.c
-@@ -9,7 +9,7 @@
-
- static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
- {
-- packet->seqno = atomic_inc_return(&pv->seqno);
-+ packet->seqno = atomic_inc_return_unchecked(&pv->seqno);
-
- /* Assumes that always succeeds, works in practice */
- return pv->put_chars(pv->termno, (char *)packet, packet->len);
-@@ -21,7 +21,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
-
- /* Reset state */
- pv->established = 0;
-- atomic_set(&pv->seqno, 0);
-+ atomic_set_unchecked(&pv->seqno, 0);
-
- pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
-
-@@ -265,7 +265,7 @@ int hvsilib_read_mctrl(struct hvsi_priv *pv)
- pv->mctrl_update = 0;
- q.hdr.type = VS_QUERY_PACKET_HEADER;
- q.hdr.len = sizeof(struct hvsi_query);
-- q.hdr.seqno = atomic_inc_return(&pv->seqno);
-+ q.hdr.seqno = atomic_inc_return_unchecked(&pv->seqno);
- q.verb = VSV_SEND_MODEM_CTL_STATUS;
- rc = hvsi_send_packet(pv, &q.hdr);
- if (rc <= 0) {
-diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
-index ef92869..f4ebd88 100644
---- a/drivers/tty/ipwireless/tty.c
-+++ b/drivers/tty/ipwireless/tty.c
-@@ -29,6 +29,7 @@
- #include <linux/tty_driver.h>
- #include <linux/tty_flip.h>
- #include <linux/uaccess.h>
-+#include <asm/local.h>
-
- #include "tty.h"
- #include "network.h"
-@@ -51,7 +52,7 @@ struct ipw_tty {
- int tty_type;
- struct ipw_network *network;
- struct tty_struct *linux_tty;
-- int open_count;
-+ local_t open_count;
- unsigned int control_lines;
- struct mutex ipw_tty_mutex;
- int tx_bytes_queued;
-@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
- mutex_unlock(&tty->ipw_tty_mutex);
- return -ENODEV;
- }
-- if (tty->open_count == 0)
-+ if (local_read(&tty->open_count) == 0)
- tty->tx_bytes_queued = 0;
-
-- tty->open_count++;
-+ local_inc(&tty->open_count);
-
- tty->linux_tty = linux_tty;
- linux_tty->driver_data = tty;
-@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
-
- static void do_ipw_close(struct ipw_tty *tty)
- {
-- tty->open_count--;
--
-- if (tty->open_count == 0) {
-+ if (local_dec_return(&tty->open_count) == 0) {
- struct tty_struct *linux_tty = tty->linux_tty;
-
- if (linux_tty != NULL) {
-@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
- return;
-
- mutex_lock(&tty->ipw_tty_mutex);
-- if (tty->open_count == 0) {
-+ if (local_read(&tty->open_count) == 0) {
- mutex_unlock(&tty->ipw_tty_mutex);
- return;
- }
-@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
- return;
- }
-
-- if (!tty->open_count) {
-+ if (!local_read(&tty->open_count)) {
- mutex_unlock(&tty->ipw_tty_mutex);
- return;
- }
-@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
- return -ENODEV;
-
- mutex_lock(&tty->ipw_tty_mutex);
-- if (!tty->open_count) {
-+ if (!local_read(&tty->open_count)) {
- mutex_unlock(&tty->ipw_tty_mutex);
- return -EINVAL;
- }
-@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
- if (!tty)
- return -ENODEV;
-
-- if (!tty->open_count)
-+ if (!local_read(&tty->open_count))
- return -EINVAL;
-
- room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
-@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
- if (!tty)
- return 0;
-
-- if (!tty->open_count)
-+ if (!local_read(&tty->open_count))
- return 0;
-
- return tty->tx_bytes_queued;
-@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
- if (!tty)
- return -ENODEV;
-
-- if (!tty->open_count)
-+ if (!local_read(&tty->open_count))
- return -EINVAL;
-
- return get_control_lines(tty);
-@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
- if (!tty)
- return -ENODEV;
-
-- if (!tty->open_count)
-+ if (!local_read(&tty->open_count))
- return -EINVAL;
-
- return set_control_lines(tty, set, clear);
-@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
- if (!tty)
- return -ENODEV;
-
-- if (!tty->open_count)
-+ if (!local_read(&tty->open_count))
- return -EINVAL;
-
- /* FIXME: Exactly how is the tty object locked here .. */
-@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
- against a parallel ioctl etc */
- mutex_lock(&ttyj->ipw_tty_mutex);
- }
-- while (ttyj->open_count)
-+ while (local_read(&ttyj->open_count))
- do_ipw_close(ttyj);
- ipwireless_disassociate_network_ttys(network,
- ttyj->channel_idx);
-diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
-index 643a0a0..4da1c03 100644
---- a/drivers/tty/n_gsm.c
-+++ b/drivers/tty/n_gsm.c
-@@ -1649,7 +1649,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
- kref_init(&dlci->ref);
- mutex_init(&dlci->mutex);
- dlci->fifo = &dlci->_fifo;
-- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
-+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
- kfree(dlci);
- return NULL;
- }
-diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
-index bac83d8..0b87bf6 100644
---- a/drivers/tty/n_tty.c
-+++ b/drivers/tty/n_tty.c
-@@ -1639,6 +1639,7 @@ static int copy_from_read_buf(struct tty_struct *tty,
- int retval;
- size_t n;
- unsigned long flags;
-+ bool is_eof;
-
- retval = 0;
- spin_lock_irqsave(&tty->read_lock, flags);
-@@ -1648,15 +1649,15 @@ static int copy_from_read_buf(struct tty_struct *tty,
- if (n) {
- retval = copy_to_user(*b, &tty->read_buf[tty->read_tail], n);
- n -= retval;
-+ is_eof = n == 1 &&
-+ tty->read_buf[tty->read_tail] == EOF_CHAR(tty);
- tty_audit_add_data(tty, &tty->read_buf[tty->read_tail], n);
- spin_lock_irqsave(&tty->read_lock, flags);
- tty->read_tail = (tty->read_tail + n) & (N_TTY_BUF_SIZE-1);
- tty->read_cnt -= n;
- /* Turn single EOF into zero-length read */
-- if (L_EXTPROC(tty) && tty->icanon && n == 1) {
-- if (!tty->read_cnt && (*b)[n-1] == EOF_CHAR(tty))
-- n--;
-- }
-+ if (L_EXTPROC(tty) && tty->icanon && is_eof && !tty->read_cnt)
-+ n = 0;
- spin_unlock_irqrestore(&tty->read_lock, flags);
- *b += n;
- *nr -= n;
-@@ -2134,6 +2135,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
- {
- *ops = tty_ldisc_N_TTY;
- ops->owner = NULL;
-- ops->refcount = ops->flags = 0;
-+ atomic_set(&ops->refcount, 0);
-+ ops->flags = 0;
- }
- EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
-diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
-index e753be2..9a8de18 100644
---- a/drivers/tty/pty.c
-+++ b/drivers/tty/pty.c
-@@ -778,8 +778,10 @@ static void __init unix98_pty_init(void)
- register_sysctl_table(pty_root_table);
-
- /* Now create the /dev/ptmx special device */
-+ pax_open_kernel();
- tty_default_fops(&ptmx_fops);
-- ptmx_fops.open = ptmx_open;
-+ *(void **)&ptmx_fops.open = ptmx_open;
-+ pax_close_kernel();
-
- cdev_init(&ptmx_cdev, &ptmx_fops);
- if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
-diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
-index 6b36c15..335a4a2 100644
---- a/drivers/tty/serial/ioc4_serial.c
-+++ b/drivers/tty/serial/ioc4_serial.c
-@@ -438,7 +438,7 @@ struct ioc4_soft {
- } is_intr_info[MAX_IOC4_INTR_ENTS];
-
- /* Number of entries active in the above array */
-- atomic_t is_num_intrs;
-+ atomic_unchecked_t is_num_intrs;
- } is_intr_type[IOC4_NUM_INTR_TYPES];
-
- /* is_ir_lock must be held while
-@@ -975,7 +975,7 @@ intr_connect(struct ioc4_soft *soft, int type,
- BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
- || (type == IOC4_OTHER_INTR_TYPE)));
-
-- i = atomic_inc(&soft-> is_intr_type[type].is_num_intrs) - 1;
-+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
- BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
-
- /* Save off the lower level interrupt handler */
-@@ -1002,7 +1002,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
-
- soft = arg;
- for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
-- num_intrs = (int)atomic_read(
-+ num_intrs = (int)atomic_read_unchecked(
- &soft->is_intr_type[intr_type].is_num_intrs);
-
- this_mir = this_ir = pending_intrs(soft, intr_type);
-diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
-index 2b42a01..32a2ed3 100644
---- a/drivers/tty/serial/kgdboc.c
-+++ b/drivers/tty/serial/kgdboc.c
-@@ -24,8 +24,9 @@
- #define MAX_CONFIG_LEN 40
-
- static struct kgdb_io kgdboc_io_ops;
-+static struct kgdb_io kgdboc_io_ops_console;
-
--/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
-+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
- static int configured = -1;
-
- static char config[MAX_CONFIG_LEN];
-@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
- kgdboc_unregister_kbd();
- if (configured == 1)
- kgdb_unregister_io_module(&kgdboc_io_ops);
-+ else if (configured == 2)
-+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
- }
-
- static int configure_kgdboc(void)
-@@ -157,13 +160,13 @@ static int configure_kgdboc(void)
- int err;
- char *cptr = config;
- struct console *cons;
-+ int is_console = 0;
-
- err = kgdboc_option_setup(config);
- if (err || !strlen(config) || isspace(config[0]))
- goto noconfig;
-
- err = -ENODEV;
-- kgdboc_io_ops.is_console = 0;
- kgdb_tty_driver = NULL;
-
- kgdboc_use_kms = 0;
-@@ -184,7 +187,7 @@ static int configure_kgdboc(void)
- int idx;
- if (cons->device && cons->device(cons, &idx) == p &&
- idx == tty_line) {
-- kgdboc_io_ops.is_console = 1;
-+ is_console = 1;
- break;
- }
- cons = cons->next;
-@@ -194,12 +197,16 @@ static int configure_kgdboc(void)
- kgdb_tty_line = tty_line;
-
- do_register:
-- err = kgdb_register_io_module(&kgdboc_io_ops);
-+ if (is_console) {
-+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
-+ configured = 2;
-+ } else {
-+ err = kgdb_register_io_module(&kgdboc_io_ops);
-+ configured = 1;
-+ }
- if (err)
- goto noconfig;
-
-- configured = 1;
--
- return 0;
-
- noconfig:
-@@ -213,7 +220,7 @@ noconfig:
- static int __init init_kgdboc(void)
- {
- /* Already configured? */
-- if (configured == 1)
-+ if (configured >= 1)
- return 0;
-
- return configure_kgdboc();
-@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
- if (config[len - 1] == '\n')
- config[len - 1] = '\0';
-
-- if (configured == 1)
-+ if (configured >= 1)
- cleanup_kgdboc();
-
- /* Go and configure with the new params. */
-@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
- .post_exception = kgdboc_post_exp_handler,
- };
-
-+static struct kgdb_io kgdboc_io_ops_console = {
-+ .name = "kgdboc",
-+ .read_char = kgdboc_get_char,
-+ .write_char = kgdboc_put_char,
-+ .pre_exception = kgdboc_pre_exp_handler,
-+ .post_exception = kgdboc_post_exp_handler,
-+ .is_console = 1
-+};
-+
- #ifdef CONFIG_KGDB_SERIAL_CONSOLE
- /* This is only available if kgdboc is a built in for early debugging */
- static int __init kgdboc_early_init(char *opt)
-diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
-index 8131e2c..b48928a 100644
---- a/drivers/tty/serial/msm_serial.c
-+++ b/drivers/tty/serial/msm_serial.c
-@@ -857,7 +857,7 @@ static struct uart_driver msm_uart_driver = {
- .cons = MSM_CONSOLE,
- };
-
--static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
-+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
-
- static int __init msm_serial_probe(struct platform_device *pdev)
- {
-@@ -867,7 +867,7 @@ static int __init msm_serial_probe(struct platform_device *pdev)
- int irq;
-
- if (pdev->id == -1)
-- pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
-+ pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
-
- if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
- return -ENXIO;
-diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
-index 626e75b..4a87ecc 100644
---- a/drivers/tty/serial/samsung.c
-+++ b/drivers/tty/serial/samsung.c
-@@ -440,11 +440,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
- }
- }
-
-+static int s3c64xx_serial_startup(struct uart_port *port);
- static int s3c24xx_serial_startup(struct uart_port *port)
- {
- struct s3c24xx_uart_port *ourport = to_ourport(port);
- int ret;
-
-+ /* Startup sequence is different for s3c64xx and higher SoC's */
-+ if (s3c24xx_serial_has_interrupt_mask(port))
-+ return s3c64xx_serial_startup(port);
-+
- dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
- port->mapbase, port->membase);
-
-@@ -1153,10 +1158,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
- port->dev = &platdev->dev;
- ourport->info = info;
-
-- /* Startup sequence is different for s3c64xx and higher SoC's */
-- if (s3c24xx_serial_has_interrupt_mask(port))
-- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
--
- /* copy the info in from provided structure */
- ourport->port.fifosize = info->fifosize;
-
-diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
-index 43db715..82134aa 100644
---- a/drivers/tty/sysrq.c
-+++ b/drivers/tty/sysrq.c
-@@ -862,7 +862,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
- static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
- {
-- if (count) {
-+ if (count && capable(CAP_SYS_ADMIN)) {
- char c;
-
- if (get_user(c, buf))
-diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
-index fa59fbe..751b0f8 100644
---- a/drivers/tty/tty_io.c
-+++ b/drivers/tty/tty_io.c
-@@ -1089,7 +1089,7 @@ static inline ssize_t do_tty_write(
- cond_resched();
- }
- if (written) {
-- struct inode *inode = file->f_path.dentry->d_inode;
-+ struct inode *inode = file->f_path.dentry->d_inode;
- tty_update_time(&inode->i_mtime);
- ret = written;
- }
-@@ -3255,7 +3255,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
-
- void tty_default_fops(struct file_operations *fops)
- {
-- *fops = tty_fops;
-+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
- }
-
- /*
-diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
-index 8e0924f..4204eb4 100644
---- a/drivers/tty/tty_ldisc.c
-+++ b/drivers/tty/tty_ldisc.c
-@@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
- if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
- struct tty_ldisc_ops *ldo = ld->ops;
-
-- ldo->refcount--;
-+ atomic_dec(&ldo->refcount);
- module_put(ldo->owner);
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
-
-@@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
- spin_lock_irqsave(&tty_ldisc_lock, flags);
- tty_ldiscs[disc] = new_ldisc;
- new_ldisc->num = disc;
-- new_ldisc->refcount = 0;
-+ atomic_set(&new_ldisc->refcount, 0);
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
-
- return ret;
-@@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
- return -EINVAL;
-
- spin_lock_irqsave(&tty_ldisc_lock, flags);
-- if (tty_ldiscs[disc]->refcount)
-+ if (atomic_read(&tty_ldiscs[disc]->refcount))
- ret = -EBUSY;
- else
- tty_ldiscs[disc] = NULL;
-@@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
- if (ldops) {
- ret = ERR_PTR(-EAGAIN);
- if (try_module_get(ldops->owner)) {
-- ldops->refcount++;
-+ atomic_inc(&ldops->refcount);
- ret = ldops;
- }
- }
-@@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
- unsigned long flags;
-
- spin_lock_irqsave(&tty_ldisc_lock, flags);
-- ldops->refcount--;
-+ atomic_dec(&ldops->refcount);
- module_put(ldops->owner);
- spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- }
-diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
-index a605549..6bd3c96 100644
---- a/drivers/tty/vt/keyboard.c
-+++ b/drivers/tty/vt/keyboard.c
-@@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
- kbd->kbdmode == VC_OFF) &&
- value != KVAL(K_SAK))
- return; /* SAK is allowed even in raw mode */
-+
-+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
-+ {
-+ void *func = fn_handler[value];
-+ if (func == fn_show_state || func == fn_show_ptregs ||
-+ func == fn_show_mem)
-+ return;
-+ }
-+#endif
-+
- fn_handler[value](vc);
- }
-
-diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
-index 65447c5..0526f0a 100644
---- a/drivers/tty/vt/vt_ioctl.c
-+++ b/drivers/tty/vt/vt_ioctl.c
-@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
- if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
- return -EFAULT;
-
-- if (!capable(CAP_SYS_TTY_CONFIG))
-- perm = 0;
--
- switch (cmd) {
- case KDGKBENT:
- key_map = key_maps[s];
-@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
- val = (i ? K_HOLE : K_NOSUCHMAP);
- return put_user(val, &user_kbe->kb_value);
- case KDSKBENT:
-+ if (!capable(CAP_SYS_TTY_CONFIG))
-+ perm = 0;
-+
- if (!perm)
- return -EPERM;
- if (!i && v == K_NOSUCHMAP) {
-@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
- int i, j, k;
- int ret;
-
-- if (!capable(CAP_SYS_TTY_CONFIG))
-- perm = 0;
--
- kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
- if (!kbs) {
- ret = -ENOMEM;
-@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
- kfree(kbs);
- return ((p && *p) ? -EOVERFLOW : 0);
- case KDSKBSENT:
-+ if (!capable(CAP_SYS_TTY_CONFIG))
-+ perm = 0;
-+
- if (!perm) {
- ret = -EPERM;
- goto reterr;
-diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
-index af57648..2a8a122 100644
---- a/drivers/uio/uio.c
-+++ b/drivers/uio/uio.c
-@@ -25,6 +25,7 @@
- #include <linux/kobject.h>
- #include <linux/cdev.h>
- #include <linux/uio_driver.h>
-+#include <asm/local.h>
-
- #define UIO_MAX_DEVICES (1U << MINORBITS)
-
-@@ -32,10 +33,10 @@ struct uio_device {
- struct module *owner;
- struct device *dev;
- int minor;
-- atomic_t event;
-+ atomic_unchecked_t event;
- struct fasync_struct *async_queue;
- wait_queue_head_t wait;
-- int vma_count;
-+ local_t vma_count;
- struct uio_info *info;
- struct kobject *map_dir;
- struct kobject *portio_dir;
-@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
- struct device_attribute *attr, char *buf)
- {
- struct uio_device *idev = dev_get_drvdata(dev);
-- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
-+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
- }
-
- static struct device_attribute uio_class_attributes[] = {
-@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
- {
- struct uio_device *idev = info->uio_dev;
-
-- atomic_inc(&idev->event);
-+ atomic_inc_unchecked(&idev->event);
- wake_up_interruptible(&idev->wait);
- kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
- }
-@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
- }
-
- listener->dev = idev;
-- listener->event_count = atomic_read(&idev->event);
-+ listener->event_count = atomic_read_unchecked(&idev->event);
- filep->private_data = listener;
-
- if (idev->info->open) {
-@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
- return -EIO;
-
- poll_wait(filep, &idev->wait, wait);
-- if (listener->event_count != atomic_read(&idev->event))
-+ if (listener->event_count != atomic_read_unchecked(&idev->event))
- return POLLIN | POLLRDNORM;
- return 0;
- }
-@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
- do {
- set_current_state(TASK_INTERRUPTIBLE);
-
-- event_count = atomic_read(&idev->event);
-+ event_count = atomic_read_unchecked(&idev->event);
- if (event_count != listener->event_count) {
- if (copy_to_user(buf, &event_count, count))
- retval = -EFAULT;
-@@ -594,9 +595,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
- static int uio_find_mem_index(struct vm_area_struct *vma)
- {
- struct uio_device *idev = vma->vm_private_data;
-+ unsigned long size;
-
- if (vma->vm_pgoff < MAX_UIO_MAPS) {
-- if (idev->info->mem[vma->vm_pgoff].size == 0)
-+ size = idev->info->mem[vma->vm_pgoff].size;
-+ if (size == 0)
-+ return -1;
-+ if (vma->vm_end - vma->vm_start > size)
- return -1;
- return (int)vma->vm_pgoff;
- }
-@@ -606,13 +611,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
- static void uio_vma_open(struct vm_area_struct *vma)
- {
- struct uio_device *idev = vma->vm_private_data;
-- idev->vma_count++;
-+ local_inc(&idev->vma_count);
- }
-
- static void uio_vma_close(struct vm_area_struct *vma)
- {
- struct uio_device *idev = vma->vm_private_data;
-- idev->vma_count--;
-+ local_dec(&idev->vma_count);
- }
-
- static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-@@ -655,6 +660,8 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
- return -EINVAL;
- mem = idev->info->mem + mi;
-
-+ if (mem->addr & ~PAGE_MASK)
-+ return -ENODEV;
- if (vma->vm_end - vma->vm_start > mem->size)
- return -EINVAL;
-
-@@ -833,7 +840,7 @@ int __uio_register_device(struct module *owner,
- idev->owner = owner;
- idev->info = info;
- init_waitqueue_head(&idev->wait);
-- atomic_set(&idev->event, 0);
-+ atomic_set_unchecked(&idev->event, 0);
-
- ret = uio_get_minor(idev);
- if (ret)
-diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
-index 9497171..bfeecaf 100644
---- a/drivers/usb/atm/cxacru.c
-+++ b/drivers/usb/atm/cxacru.c
-@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
- ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
- if (ret < 2)
- return -EINVAL;
-- if (index < 0 || index > 0x7f)
-+ if (index > 0x7f)
- return -EINVAL;
- pos += tmp;
-
-diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
-index d3448ca..d2864ca 100644
---- a/drivers/usb/atm/usbatm.c
-+++ b/drivers/usb/atm/usbatm.c
-@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
- if (printk_ratelimit())
- atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
- __func__, vpi, vci);
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- return;
- }
-
-@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
- if (length > ATM_MAX_AAL5_PDU) {
- atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
- __func__, length, vcc);
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- goto out;
- }
-
-@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
- if (sarb->len < pdu_length) {
- atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
- __func__, pdu_length, sarb->len, vcc);
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- goto out;
- }
-
- if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
- atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
- __func__, vcc);
-- atomic_inc(&vcc->stats->rx_err);
-+ atomic_inc_unchecked(&vcc->stats->rx_err);
- goto out;
- }
-
-@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
- if (printk_ratelimit())
- atm_err(instance, "%s: no memory for skb (length: %u)!\n",
- __func__, length);
-- atomic_inc(&vcc->stats->rx_drop);
-+ atomic_inc_unchecked(&vcc->stats->rx_drop);
- goto out;
- }
-
-@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
-
- vcc->push(vcc, skb);
-
-- atomic_inc(&vcc->stats->rx);
-+ atomic_inc_unchecked(&vcc->stats->rx);
- out:
- skb_trim(sarb, 0);
- }
-@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
- struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
-
- usbatm_pop(vcc, skb);
-- atomic_inc(&vcc->stats->tx);
-+ atomic_inc_unchecked(&vcc->stats->tx);
-
- skb = skb_dequeue(&instance->sndqueue);
- }
-@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
- if (!left--)
- return sprintf(page,
- "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
-- atomic_read(&atm_dev->stats.aal5.tx),
-- atomic_read(&atm_dev->stats.aal5.tx_err),
-- atomic_read(&atm_dev->stats.aal5.rx),
-- atomic_read(&atm_dev->stats.aal5.rx_err),
-- atomic_read(&atm_dev->stats.aal5.rx_drop));
-+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
-+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
-+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
-+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
-+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
-
- if (!left--) {
- if (instance->disconnected)
-diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
-index 3440812..2a4ef1f 100644
---- a/drivers/usb/core/devices.c
-+++ b/drivers/usb/core/devices.c
-@@ -126,7 +126,7 @@ static const char format_endpt[] =
- * time it gets called.
- */
- static struct device_connect_event {
-- atomic_t count;
-+ atomic_unchecked_t count;
- wait_queue_head_t wait;
- } device_event = {
- .count = ATOMIC_INIT(1),
-@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
-
- void usbfs_conn_disc_event(void)
- {
-- atomic_add(2, &device_event.count);
-+ atomic_add_unchecked(2, &device_event.count);
- wake_up(&device_event.wait);
- }
-
-@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
-
- poll_wait(file, &device_event.wait, wait);
-
-- event_count = atomic_read(&device_event.count);
-+ event_count = atomic_read_unchecked(&device_event.count);
- if (file->f_version != event_count) {
- file->f_version = event_count;
- return POLLIN | POLLRDNORM;
-diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
-index 64340f3..88c4041 100644
---- a/drivers/usb/core/devio.c
-+++ b/drivers/usb/core/devio.c
-@@ -147,7 +147,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
- struct dev_state *ps = file->private_data;
- struct usb_device *dev = ps->dev;
- ssize_t ret = 0;
-- unsigned len;
-+ size_t len;
- loff_t pos;
- int i;
-
-@@ -189,22 +189,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
- for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
- struct usb_config_descriptor *config =
- (struct usb_config_descriptor *)dev->rawdescriptors[i];
-- unsigned int length = le16_to_cpu(config->wTotalLength);
-+ size_t length = le16_to_cpu(config->wTotalLength);
-
- if (*ppos < pos + length) {
-
- /* The descriptor may claim to be longer than it
- * really is. Here is the actual allocated length. */
-- unsigned alloclen =
-+ size_t alloclen =
- le16_to_cpu(dev->config[i].desc.wTotalLength);
-
-- len = length - (*ppos - pos);
-+ len = length + pos - *ppos;
- if (len > nbytes)
- len = nbytes;
-
- /* Simply don't write (skip over) unallocated parts */
- if (alloclen > (*ppos - pos)) {
-- alloclen -= (*ppos - pos);
-+ alloclen = alloclen + pos - *ppos;
- if (copy_to_user(buf,
- dev->rawdescriptors[i] + (*ppos - pos),
- min(len, alloclen))) {
-diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
-index eb2c3bd..5236c12 100644
---- a/drivers/usb/core/hcd.c
-+++ b/drivers/usb/core/hcd.c
-@@ -1475,7 +1475,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
- */
- usb_get_urb(urb);
- atomic_inc(&urb->use_count);
-- atomic_inc(&urb->dev->urbnum);
-+ atomic_inc_unchecked(&urb->dev->urbnum);
- usbmon_urb_submit(&hcd->self, urb);
-
- /* NOTE requirements on root-hub callers (usbfs and the hub
-@@ -1502,7 +1502,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
- urb->hcpriv = NULL;
- INIT_LIST_HEAD(&urb->urb_list);
- atomic_dec(&urb->use_count);
-- atomic_dec(&urb->dev->urbnum);
-+ atomic_dec_unchecked(&urb->dev->urbnum);
- if (atomic_read(&urb->reject))
- wake_up(&usb_kill_urb_queue);
- usb_put_urb(urb);
-diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index 18286ce..c6d2114 100644
---- a/drivers/usb/core/hub.c
-+++ b/drivers/usb/core/hub.c
-@@ -25,6 +25,7 @@
- #include <linux/mutex.h>
- #include <linux/freezer.h>
- #include <linux/random.h>
-+#include <linux/grsecurity.h>
-
- #include <asm/uaccess.h>
- #include <asm/byteorder.h>
-@@ -3485,6 +3486,9 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
- return;
- }
-
-+ if (gr_handle_new_usb())
-+ goto done;
-+
- for (i = 0; i < SET_CONFIG_TRIES; i++) {
-
- /* reallocate for each attempt, since references
-diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
-index ab11ca3c..2df783d 100644
---- a/drivers/usb/core/message.c
-+++ b/drivers/usb/core/message.c
-@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
- * method can wait for it to complete. Since you don't have a handle on the
- * URB used, you can't cancel the request.
- */
--int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
-+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
- __u8 requesttype, __u16 value, __u16 index, void *data,
- __u16 size, int timeout)
- {
-@@ -182,7 +182,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
- * complete. Since you don't have a handle on the URB used, you can't cancel
- * the request.
- */
--int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
-+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
- void *data, int len, int *actual_length, int timeout)
- {
- return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
-@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
- * interrupt endpoints. We will take the liberty of creating an interrupt URB
- * (with the default interval) if the target is an interrupt endpoint.
- */
--int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
-+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
- void *data, int len, int *actual_length, int timeout)
- {
- struct urb *urb;
-diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
-index 662c0cf..6880fbb 100644
---- a/drivers/usb/core/sysfs.c
-+++ b/drivers/usb/core/sysfs.c
-@@ -226,7 +226,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
- struct usb_device *udev;
-
- udev = to_usb_device(dev);
-- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
-+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
- }
- static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
-
-diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
-index 9044ef6..dcda063 100644
---- a/drivers/usb/core/usb.c
-+++ b/drivers/usb/core/usb.c
-@@ -396,7 +396,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
- dev->dev.dma_mask = bus->controller->dma_mask;
- set_dev_node(&dev->dev, dev_to_node(bus->controller));
- dev->state = USB_STATE_ATTACHED;
-- atomic_set(&dev->urbnum, 0);
-+ atomic_set_unchecked(&dev->urbnum, 0);
-
- INIT_LIST_HEAD(&dev->ep0.urb_list);
- dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
-diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
-index 347bb05..63e1b73 100644
---- a/drivers/usb/early/ehci-dbgp.c
-+++ b/drivers/usb/early/ehci-dbgp.c
-@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
-
- #ifdef CONFIG_KGDB
- static struct kgdb_io kgdbdbgp_io_ops;
--#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
-+static struct kgdb_io kgdbdbgp_io_ops_console;
-+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
- #else
- #define dbgp_kgdb_mode (0)
- #endif
-@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
- .write_char = kgdbdbgp_write_char,
- };
-
-+static struct kgdb_io kgdbdbgp_io_ops_console = {
-+ .name = "kgdbdbgp",
-+ .read_char = kgdbdbgp_read_char,
-+ .write_char = kgdbdbgp_write_char,
-+ .is_console = 1
-+};
-+
- static int kgdbdbgp_wait_time;
-
- static int __init kgdbdbgp_parse_config(char *str)
-@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
- ptr++;
- kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
- }
-- kgdb_register_io_module(&kgdbdbgp_io_ops);
-- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
-+ if (early_dbgp_console.index != -1)
-+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
-+ else
-+ kgdb_register_io_module(&kgdbdbgp_io_ops);
-
- return 0;
- }
-diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
-index c635c4c..dc91e75 100644
---- a/drivers/usb/gadget/f_fs.c
-+++ b/drivers/usb/gadget/f_fs.c
-@@ -1212,6 +1212,7 @@ static struct file_system_type ffs_fs_type = {
- .mount = ffs_fs_mount,
- .kill_sb = ffs_fs_kill_sb,
- };
-+MODULE_ALIAS_FS("functionfs");
-
-
- /* Driver's main init/cleanup functions *************************************/
-diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
-index db2d607..3a25028 100644
---- a/drivers/usb/gadget/file_storage.c
-+++ b/drivers/usb/gadget/file_storage.c
-@@ -3329,18 +3329,20 @@ static int __init fsg_bind(struct usb_gadget *gadget)
- if ((rc = check_parameters(fsg)) != 0)
- goto out;
-
-+ pax_open_kernel();
- if (mod_data.removable) { // Enable the store_xxx attributes
-- dev_attr_file.attr.mode = 0644;
-- dev_attr_file.store = fsg_store_file;
-+ *(mode_t *)&dev_attr_file.attr.mode = 0644;
-+ *(void **)&dev_attr_file.store = fsg_store_file;
- if (!mod_data.cdrom) {
-- dev_attr_ro.attr.mode = 0644;
-- dev_attr_ro.store = fsg_store_ro;
-+ *(mode_t *)&dev_attr_ro.attr.mode = 0644;
-+ *(void **)&dev_attr_ro.store = fsg_store_ro;
- }
- }
-
- /* Only for removable media? */
-- dev_attr_nofua.attr.mode = 0644;
-- dev_attr_nofua.store = fsg_store_nofua;
-+ *(mode_t *)&dev_attr_nofua.attr.mode = 0644;
-+ *(void **)&dev_attr_nofua.store = fsg_store_nofua;
-+ pax_close_kernel();
-
- /* Find out how many LUNs there should be */
- i = mod_data.nluns;
-diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
-index ce7253b..754242d 100644
---- a/drivers/usb/gadget/inode.c
-+++ b/drivers/usb/gadget/inode.c
-@@ -2132,6 +2132,7 @@ static struct file_system_type gadgetfs_type = {
- .mount = gadgetfs_mount,
- .kill_sb = gadgetfs_kill_sb,
- };
-+MODULE_ALIAS_FS("gadgetfs");
-
- /*----------------------------------------------------------------------*/
-
-diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
-index 9bfac65..0f874d1 100644
---- a/drivers/usb/host/hwa-hc.c
-+++ b/drivers/usb/host/hwa-hc.c
-@@ -291,7 +291,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
- struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
- struct wahc *wa = &hwahc->wa;
- struct device *dev = &wa->usb_iface->dev;
-- u8 mas_le[UWB_NUM_MAS/8];
-+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
-+
-+ if (mas_le == NULL)
-+ return -ENOMEM;
-
- /* Set the stream index */
- result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
-@@ -310,10 +313,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
- WUSB_REQ_SET_WUSB_MAS,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
-- mas_le, 32, 1000 /* FIXME: arbitrary */);
-+ mas_le, UWB_NUM_MAS/8, 1000 /* FIXME: arbitrary */);
- if (result < 0)
- dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
- out:
-+ kfree(mas_le);
-+
- return result;
- }
-
-diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
-index 9f7003e..b1db1b6 100644
---- a/drivers/usb/misc/appledisplay.c
-+++ b/drivers/usb/misc/appledisplay.c
-@@ -83,7 +83,7 @@ struct appledisplay {
- spinlock_t lock;
- };
-
--static atomic_t count_displays = ATOMIC_INIT(0);
-+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
- static struct workqueue_struct *wq;
-
- static void appledisplay_complete(struct urb *urb)
-@@ -281,7 +281,7 @@ static int appledisplay_probe(struct usb_interface *iface,
-
- /* Register backlight device */
- snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
-- atomic_inc_return(&count_displays) - 1);
-+ atomic_inc_return_unchecked(&count_displays) - 1);
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- props.max_brightness = 0xff;
-diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
-index 87302dd..2d0130a 100644
---- a/drivers/usb/serial/console.c
-+++ b/drivers/usb/serial/console.c
-@@ -205,7 +205,7 @@ static int usb_console_setup(struct console *co, char *options)
- static void usb_console_write(struct console *co,
- const char *buf, unsigned count)
- {
-- static struct usbcons_info *info = &usbcons_info;
-+ struct usbcons_info *info = &usbcons_info;
- struct usb_serial_port *port = info->port;
- struct usb_serial *serial;
- int retval = -ENODEV;
-diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
-index e39b188..1fffda8 100644
---- a/drivers/usb/storage/realtek_cr.c
-+++ b/drivers/usb/storage/realtek_cr.c
-@@ -430,7 +430,7 @@ static int rts51x_read_status(struct us_data *us,
-
- buf = kmalloc(len, GFP_NOIO);
- if (buf == NULL)
-- return USB_STOR_TRANSPORT_ERROR;
-+ return -ENOMEM;
-
- US_DEBUGP("%s, lun = %d\n", __func__, lun);
-
-diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
-index 75f70f0..d467e1a 100644
---- a/drivers/usb/storage/usb.h
-+++ b/drivers/usb/storage/usb.h
-@@ -63,7 +63,7 @@ struct us_unusual_dev {
- __u8 useProtocol;
- __u8 useTransport;
- int (*initFunction)(struct us_data *);
--};
-+} __do_const;
-
-
- /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
-diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
-index d6bea3e..60b250e 100644
---- a/drivers/usb/wusbcore/wa-hc.h
-+++ b/drivers/usb/wusbcore/wa-hc.h
-@@ -192,7 +192,7 @@ struct wahc {
- struct list_head xfer_delayed_list;
- spinlock_t xfer_list_lock;
- struct work_struct xfer_work;
-- atomic_t xfer_id_count;
-+ atomic_unchecked_t xfer_id_count;
- };
-
-
-@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
- INIT_LIST_HEAD(&wa->xfer_delayed_list);
- spin_lock_init(&wa->xfer_list_lock);
- INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
-- atomic_set(&wa->xfer_id_count, 1);
-+ atomic_set_unchecked(&wa->xfer_id_count, 1);
- }
-
- /**
-diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
-index 5f6df6e..0a16602 100644
---- a/drivers/usb/wusbcore/wa-xfer.c
-+++ b/drivers/usb/wusbcore/wa-xfer.c
-@@ -297,7 +297,7 @@ out:
- */
- static void wa_xfer_id_init(struct wa_xfer *xfer)
- {
-- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
-+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
- }
-
- /*
-diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
-index be32b1b..b5f6c08 100644
---- a/drivers/vhost/vhost.c
-+++ b/drivers/vhost/vhost.c
-@@ -631,7 +631,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
- return 0;
- }
-
--static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
-+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
- {
- struct file *eventfp, *filep = NULL,
- *pollstart = NULL, *pollstop = NULL;
-diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
-index c22e8d3..12c48b0 100644
---- a/drivers/video/arcfb.c
-+++ b/drivers/video/arcfb.c
-@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
- return -ENOSPC;
-
- err = 0;
-- if ((count + p) > fbmemlength) {
-+ if (count > (fbmemlength - p)) {
- count = fbmemlength - p;
- err = -ENOSPC;
- }
-diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
-index b0b2ac3..89a4399 100644
---- a/drivers/video/aty/aty128fb.c
-+++ b/drivers/video/aty/aty128fb.c
-@@ -148,7 +148,7 @@ enum {
- };
-
- /* Must match above enum */
--static const char *r128_family[] __devinitdata = {
-+static const char *r128_family[] __devinitconst = {
- "AGP",
- "PCI",
- "PRO AGP",
-diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
-index 44bdce4..a79c55f 100644
---- a/drivers/video/aty/atyfb_base.c
-+++ b/drivers/video/aty/atyfb_base.c
-@@ -1325,10 +1325,14 @@ static int atyfb_set_par(struct fb_info *info)
- par->accel_flags = var->accel_flags; /* hack */
-
- if (var->accel_flags) {
-- info->fbops->fb_sync = atyfb_sync;
-+ pax_open_kernel();
-+ *(void **)&info->fbops->fb_sync = atyfb_sync;
-+ pax_close_kernel();
- info->flags &= ~FBINFO_HWACCEL_DISABLED;
- } else {
-- info->fbops->fb_sync = NULL;
-+ pax_open_kernel();
-+ *(void **)&info->fbops->fb_sync = NULL;
-+ pax_close_kernel();
- info->flags |= FBINFO_HWACCEL_DISABLED;
- }
-
-diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
-index 4b87318..7407754 100644
---- a/drivers/video/aty/mach64_cursor.c
-+++ b/drivers/video/aty/mach64_cursor.c
-@@ -8,6 +8,7 @@
- #include "../fb_draw.h"
-
- #include <asm/io.h>
-+#include <asm/pgtable.h>
-
- #ifdef __sparc__
- #include <asm/fbio.h>
-@@ -218,7 +219,9 @@ int __devinit aty_init_cursor(struct fb_info *info)
- info->sprite.buf_align = 16; /* and 64 lines tall. */
- info->sprite.flags = FB_PIXMAP_IO;
-
-- info->fbops->fb_cursor = atyfb_cursor;
-+ pax_open_kernel();
-+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
-+ pax_close_kernel();
-
- return 0;
- }
-diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
-index 7363c1b..b69ad66 100644
---- a/drivers/video/backlight/backlight.c
-+++ b/drivers/video/backlight/backlight.c
-@@ -303,7 +303,7 @@ struct backlight_device *backlight_device_register(const char *name,
- new_bd->dev.class = backlight_class;
- new_bd->dev.parent = parent;
- new_bd->dev.release = bl_device_release;
-- dev_set_name(&new_bd->dev, name);
-+ dev_set_name(&new_bd->dev, "%s", name);
- dev_set_drvdata(&new_bd->dev, devdata);
-
- /* Set default properties */
-diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
-index 72dd555..5f9bfbe 100644
---- a/drivers/video/backlight/kb3886_bl.c
-+++ b/drivers/video/backlight/kb3886_bl.c
-@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
- static unsigned long kb3886bl_flags;
- #define KB3886BL_SUSPENDED 0x01
-
--static struct dmi_system_id __initdata kb3886bl_device_table[] = {
-+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
- {
- .ident = "Sahara Touch-iT",
- .matches = {
-diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
-index 71a11ca..86afe4b 100644
---- a/drivers/video/backlight/lcd.c
-+++ b/drivers/video/backlight/lcd.c
-@@ -209,7 +209,7 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
- new_ld->dev.class = lcd_class;
- new_ld->dev.parent = parent;
- new_ld->dev.release = lcd_device_release;
-- dev_set_name(&new_ld->dev, name);
-+ dev_set_name(&new_ld->dev, "%s", name);
- dev_set_drvdata(&new_ld->dev, devdata);
-
- rc = device_register(&new_ld->dev);
-diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
-index e132157..516db70 100644
---- a/drivers/video/backlight/s6e63m0.c
-+++ b/drivers/video/backlight/s6e63m0.c
-@@ -690,7 +690,7 @@ static ssize_t s6e63m0_sysfs_store_gamma_mode(struct device *dev,
- struct backlight_device *bd = NULL;
- int brightness, rc;
-
-- rc = strict_strtoul(buf, 0, (unsigned long *)&lcd->gamma_mode);
-+ rc = kstrtouint(buf, 0, &lcd->gamma_mode);
- if (rc < 0)
- return rc;
-
-diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
-index 6b4fb5c..385e560 100644
---- a/drivers/video/console/fbcon.c
-+++ b/drivers/video/console/fbcon.c
-@@ -450,7 +450,7 @@ static int __init fb_console_setup(char *this_opt)
-
- while ((options = strsep(&this_opt, ",")) != NULL) {
- if (!strncmp(options, "font:", 5))
-- strcpy(fontname, options + 5);
-+ strlcpy(fontname, options + 5, sizeof(fontname));
-
- if (!strncmp(options, "scrollback:", 11)) {
- options += 11;
-diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
-index 8a3d51f..f3e2e64 100644
---- a/drivers/video/fb_defio.c
-+++ b/drivers/video/fb_defio.c
-@@ -201,7 +201,9 @@ void fb_deferred_io_init(struct fb_info *info)
-
- BUG_ON(!fbdefio);
- mutex_init(&fbdefio->lock);
-- info->fbops->fb_mmap = fb_deferred_io_mmap;
-+ pax_open_kernel();
-+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
-+ pax_close_kernel();
- INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
- INIT_LIST_HEAD(&fbdefio->pagelist);
- if (fbdefio->delay == 0) /* set a default of 1 s */
-@@ -232,7 +234,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
- page->mapping = NULL;
- }
-
-- info->fbops->fb_mmap = NULL;
-+ *(void **)&info->fbops->fb_mmap = NULL;
- mutex_destroy(&fbdefio->lock);
- }
- EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
-diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
-index 5c3960d..15cf8fc 100644
---- a/drivers/video/fbcmap.c
-+++ b/drivers/video/fbcmap.c
-@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
- rc = -ENODEV;
- goto out;
- }
-- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
-- !info->fbops->fb_setcmap)) {
-+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
- rc = -EINVAL;
- goto out1;
- }
-diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
-index 0a22808..b3bdf50 100644
---- a/drivers/video/fbmem.c
-+++ b/drivers/video/fbmem.c
-@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
- image->dx += image->width + 8;
- }
- } else if (rotate == FB_ROTATE_UD) {
-- for (x = 0; x < num && image->dx >= 0; x++) {
-+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
- info->fbops->fb_imageblit(info, image);
- image->dx -= image->width + 8;
- }
-@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
- image->dy += image->height + 8;
- }
- } else if (rotate == FB_ROTATE_CCW) {
-- for (x = 0; x < num && image->dy >= 0; x++) {
-+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
- info->fbops->fb_imageblit(info, image);
- image->dy -= image->height + 8;
- }
-@@ -1143,7 +1143,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
- return -EFAULT;
- if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
- return -EINVAL;
-- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
-+ if (con2fb.framebuffer >= FB_MAX)
- return -EINVAL;
- if (!registered_fb[con2fb.framebuffer])
- request_module("fb%d", con2fb.framebuffer);
-@@ -1260,7 +1260,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
- __u32 data;
- int err;
-
-- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
-+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
-
- data = (__u32) (unsigned long) fix->smem_start;
- err |= put_user(data, &fix32->smem_start);
-diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
-index 5a5d092..265c5ed 100644
---- a/drivers/video/geode/gx1fb_core.c
-+++ b/drivers/video/geode/gx1fb_core.c
-@@ -29,7 +29,7 @@ static int crt_option = 1;
- static char panel_option[32] = "";
-
- /* Modes relevant to the GX1 (taken from modedb.c) */
--static const struct fb_videomode __devinitdata gx1_modedb[] = {
-+static const struct fb_videomode __devinitconst gx1_modedb[] = {
- /* 640x480-60 VESA */
- { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
- 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
-diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
-index 0fad23f..0e9afa4 100644
---- a/drivers/video/gxt4500.c
-+++ b/drivers/video/gxt4500.c
-@@ -156,7 +156,7 @@ struct gxt4500_par {
- static char *mode_option;
-
- /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
--static const struct fb_videomode defaultmode __devinitdata = {
-+static const struct fb_videomode defaultmode __devinitconst = {
- .refresh = 60,
- .xres = 1280,
- .yres = 1024,
-@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
- return 0;
- }
-
--static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
-+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
- .id = "IBM GXT4500P",
- .type = FB_TYPE_PACKED_PIXELS,
- .visual = FB_VISUAL_PSEUDOCOLOR,
-diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
-index 7672d2e..b56437f 100644
---- a/drivers/video/i810/i810_accel.c
-+++ b/drivers/video/i810/i810_accel.c
-@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
- }
- }
- printk("ringbuffer lockup!!!\n");
-+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
- i810_report_error(mmio);
- par->dev_flags |= LOCKUP;
- info->pixmap.scan_align = 1;
-diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
-index 318f6fb..9a389c1 100644
---- a/drivers/video/i810/i810_main.c
-+++ b/drivers/video/i810/i810_main.c
-@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
- static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
-
- /* PCI */
--static const char *i810_pci_list[] __devinitdata = {
-+static const char *i810_pci_list[] __devinitconst = {
- "Intel(R) 810 Framebuffer Device" ,
- "Intel(R) 810-DC100 Framebuffer Device" ,
- "Intel(R) 810E Framebuffer Device" ,
-diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
-index de36693..3c63fc2 100644
---- a/drivers/video/jz4740_fb.c
-+++ b/drivers/video/jz4740_fb.c
-@@ -136,7 +136,7 @@ struct jzfb {
- uint32_t pseudo_palette[16];
- };
-
--static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
-+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
- .id = "JZ4740 FB",
- .type = FB_TYPE_PACKED_PIXELS,
- .visual = FB_VISUAL_TRUECOLOR,
-diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
-index 3c14e43..eafa544 100644
---- a/drivers/video/logo/logo_linux_clut224.ppm
-+++ b/drivers/video/logo/logo_linux_clut224.ppm
-@@ -1,1604 +1,1123 @@
- P3
--# Standard 224-color Linux logo
- 80 80
- 255
-- [... remaining rows of the standard 80x80, 224-color Linux logo PPM pixel data (removed by this hunk) omitted ...]
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 6 6 6 18 18 18 34 34 34 58 58 58
-- 82 82 82 89 81 66 89 81 66 89 81 66
-- 94 86 66 94 86 76 74 74 74 50 50 50
-- 26 26 26 14 14 14 6 6 6 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 6 6 6 6 6 6 14 14 14 18 18 18
-- 30 30 30 38 38 38 46 46 46 54 54 54
-- 50 50 50 42 42 42 30 30 30 18 18 18
-- 10 10 10 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 6 6 6 14 14 14 26 26 26
-- 38 38 38 50 50 50 58 58 58 58 58 58
-- 54 54 54 42 42 42 30 30 30 18 18 18
-- 10 10 10 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 6 6 6
-- 6 6 6 10 10 10 14 14 14 18 18 18
-- 18 18 18 14 14 14 10 10 10 6 6 6
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 6 6 6
-- 14 14 14 18 18 18 22 22 22 22 22 22
-- 18 18 18 14 14 14 10 10 10 6 6 6
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
-- 0 0 0 0 0 0 0 0 0 0 0 0
[image pixel data omitted: the full block of replacement RGB-triplet rows that the patch adds in place of the old image data]
-diff --git a/drivers/video/matrox/matroxfb_DAC1064.c b/drivers/video/matrox/matroxfb_DAC1064.c
-index 1717623..25907782 100644
---- a/drivers/video/matrox/matroxfb_DAC1064.c
-+++ b/drivers/video/matrox/matroxfb_DAC1064.c
-@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
-
- #ifdef CONFIG_FB_MATROX_MYSTIQUE
- struct matrox_switch matrox_mystique = {
-- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
-+ .preinit = MGA1064_preinit,
-+ .reset = MGA1064_reset,
-+ .init = MGA1064_init,
-+ .restore = MGA1064_restore,
- };
- EXPORT_SYMBOL(matrox_mystique);
- #endif
-
- #ifdef CONFIG_FB_MATROX_G
- struct matrox_switch matrox_G100 = {
-- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
-+ .preinit = MGAG100_preinit,
-+ .reset = MGAG100_reset,
-+ .init = MGAG100_init,
-+ .restore = MGAG100_restore,
- };
- EXPORT_SYMBOL(matrox_G100);
- #endif
-diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c
-index 9a44cec..07e3b43 100644
---- a/drivers/video/matrox/matroxfb_Ti3026.c
-+++ b/drivers/video/matrox/matroxfb_Ti3026.c
-@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
- }
-
- struct matrox_switch matrox_millennium = {
-- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
-+ .preinit = Ti3026_preinit,
-+ .reset = Ti3026_reset,
-+ .init = Ti3026_init,
-+ .restore = Ti3026_restore
- };
- EXPORT_SYMBOL(matrox_millennium);
- #endif
-diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
-index fe92eed..106e085 100644
---- a/drivers/video/mb862xx/mb862xxfb_accel.c
-+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
-@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
- struct mb862xxfb_par *par = info->par;
-
- if (info->var.bits_per_pixel == 32) {
-- info->fbops->fb_fillrect = cfb_fillrect;
-- info->fbops->fb_copyarea = cfb_copyarea;
-- info->fbops->fb_imageblit = cfb_imageblit;
-+ pax_open_kernel();
-+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
-+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
-+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
-+ pax_close_kernel();
- } else {
- outreg(disp, GC_L0EM, 3);
-- info->fbops->fb_fillrect = mb86290fb_fillrect;
-- info->fbops->fb_copyarea = mb86290fb_copyarea;
-- info->fbops->fb_imageblit = mb86290fb_imageblit;
-+ pax_open_kernel();
-+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
-+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
-+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
-+ pax_close_kernel();
- }
- outreg(draw, GDC_REG_DRAW_BASE, 0);
- outreg(draw, GDC_REG_MODE_MISC, 0x8000);
-diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
-index 081dc47..6e20d0b 100644
---- a/drivers/video/nvidia/nvidia.c
-+++ b/drivers/video/nvidia/nvidia.c
-@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
- info->fix.line_length = (info->var.xres_virtual *
- info->var.bits_per_pixel) >> 3;
- if (info->var.accel_flags) {
-- info->fbops->fb_imageblit = nvidiafb_imageblit;
-- info->fbops->fb_fillrect = nvidiafb_fillrect;
-- info->fbops->fb_copyarea = nvidiafb_copyarea;
-- info->fbops->fb_sync = nvidiafb_sync;
-+ pax_open_kernel();
-+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
-+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
-+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
-+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
-+ pax_close_kernel();
- info->pixmap.scan_align = 4;
- info->flags &= ~FBINFO_HWACCEL_DISABLED;
- info->flags |= FBINFO_READS_FAST;
- NVResetGraphics(info);
- } else {
-- info->fbops->fb_imageblit = cfb_imageblit;
-- info->fbops->fb_fillrect = cfb_fillrect;
-- info->fbops->fb_copyarea = cfb_copyarea;
-- info->fbops->fb_sync = NULL;
-+ pax_open_kernel();
-+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
-+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
-+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
-+ *(void **)&info->fbops->fb_sync = NULL;
-+ pax_close_kernel();
- info->pixmap.scan_align = 1;
- info->flags |= FBINFO_HWACCEL_DISABLED;
- info->flags &= ~FBINFO_READS_FAST;
-@@ -1173,8 +1177,11 @@ static int __devinit nvidia_set_fbinfo(struct fb_info *info)
- info->pixmap.size = 8 * 1024;
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
-
-- if (!hwcur)
-- info->fbops->fb_cursor = NULL;
-+ if (!hwcur) {
-+ pax_open_kernel();
-+ *(void **)&info->fbops->fb_cursor = NULL;
-+ pax_close_kernel();
-+ }
-
- info->var.accel_flags = (!noaccel);
-
-diff --git a/drivers/video/output.c b/drivers/video/output.c
-index 0d6f2cd..6285b97 100644
---- a/drivers/video/output.c
-+++ b/drivers/video/output.c
-@@ -97,7 +97,7 @@ struct output_device *video_output_register(const char *name,
- new_dev->props = op;
- new_dev->dev.class = &video_output_class;
- new_dev->dev.parent = dev;
-- dev_set_name(&new_dev->dev, name);
-+ dev_set_name(&new_dev->dev, "%s", name);
- dev_set_drvdata(&new_dev->dev, devdata);
- ret_code = device_register(&new_dev->dev);
- if (ret_code) {
-diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
-index 28b1c6c..b9939d9 100644
---- a/drivers/video/s1d13xxxfb.c
-+++ b/drivers/video/s1d13xxxfb.c
-@@ -883,8 +883,10 @@ s1d13xxxfb_probe(struct platform_device *pdev)
-
- switch(prod_id) {
- case S1D13506_PROD_ID: /* activate acceleration */
-- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
-- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
-+ pax_open_kernel();
-+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
-+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
-+ pax_close_kernel();
- info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
- FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
- break;
-diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
-index dd9533a..aff3199e 100644
---- a/drivers/video/smscufx.c
-+++ b/drivers/video/smscufx.c
-@@ -1172,7 +1172,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
- fb_deferred_io_cleanup(info);
- kfree(info->fbdefio);
- info->fbdefio = NULL;
-- info->fbops->fb_mmap = ufx_ops_mmap;
-+ pax_open_kernel();
-+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
-+ pax_close_kernel();
- }
-
- pr_debug("released /dev/fb%d user=%d count=%d",
-diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
-index cb5988f..439ecb0 100644
---- a/drivers/video/udlfb.c
-+++ b/drivers/video/udlfb.c
-@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
- dlfb_urb_completion(urb);
-
- error:
-- atomic_add(bytes_sent, &dev->bytes_sent);
-- atomic_add(bytes_identical, &dev->bytes_identical);
-- atomic_add(width*height*2, &dev->bytes_rendered);
-+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
-+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
-+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
- end_cycles = get_cycles();
-- atomic_add(((unsigned int) ((end_cycles - start_cycles)
-+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
- >> 10)), /* Kcycles */
- &dev->cpu_kcycles_used);
-
-@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
- dlfb_urb_completion(urb);
-
- error:
-- atomic_add(bytes_sent, &dev->bytes_sent);
-- atomic_add(bytes_identical, &dev->bytes_identical);
-- atomic_add(bytes_rendered, &dev->bytes_rendered);
-+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
-+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
-+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
- end_cycles = get_cycles();
-- atomic_add(((unsigned int) ((end_cycles - start_cycles)
-+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
- >> 10)), /* Kcycles */
- &dev->cpu_kcycles_used);
- }
-@@ -986,7 +986,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
- fb_deferred_io_cleanup(info);
- kfree(info->fbdefio);
- info->fbdefio = NULL;
-- info->fbops->fb_mmap = dlfb_ops_mmap;
-+ pax_open_kernel();
-+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
-+ pax_close_kernel();
- }
-
- pr_warn("released /dev/fb%d user=%d count=%d\n",
-@@ -1368,7 +1370,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
- struct fb_info *fb_info = dev_get_drvdata(fbdev);
- struct dlfb_data *dev = fb_info->par;
- return snprintf(buf, PAGE_SIZE, "%u\n",
-- atomic_read(&dev->bytes_rendered));
-+ atomic_read_unchecked(&dev->bytes_rendered));
- }
-
- static ssize_t metrics_bytes_identical_show(struct device *fbdev,
-@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
- struct fb_info *fb_info = dev_get_drvdata(fbdev);
- struct dlfb_data *dev = fb_info->par;
- return snprintf(buf, PAGE_SIZE, "%u\n",
-- atomic_read(&dev->bytes_identical));
-+ atomic_read_unchecked(&dev->bytes_identical));
- }
-
- static ssize_t metrics_bytes_sent_show(struct device *fbdev,
-@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
- struct fb_info *fb_info = dev_get_drvdata(fbdev);
- struct dlfb_data *dev = fb_info->par;
- return snprintf(buf, PAGE_SIZE, "%u\n",
-- atomic_read(&dev->bytes_sent));
-+ atomic_read_unchecked(&dev->bytes_sent));
- }
-
- static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
-@@ -1392,7 +1394,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
- struct fb_info *fb_info = dev_get_drvdata(fbdev);
- struct dlfb_data *dev = fb_info->par;
- return snprintf(buf, PAGE_SIZE, "%u\n",
-- atomic_read(&dev->cpu_kcycles_used));
-+ atomic_read_unchecked(&dev->cpu_kcycles_used));
- }
-
- static ssize_t edid_show(
-@@ -1449,10 +1451,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
- struct fb_info *fb_info = dev_get_drvdata(fbdev);
- struct dlfb_data *dev = fb_info->par;
-
-- atomic_set(&dev->bytes_rendered, 0);
-- atomic_set(&dev->bytes_identical, 0);
-- atomic_set(&dev->bytes_sent, 0);
-- atomic_set(&dev->cpu_kcycles_used, 0);
-+ atomic_set_unchecked(&dev->bytes_rendered, 0);
-+ atomic_set_unchecked(&dev->bytes_identical, 0);
-+ atomic_set_unchecked(&dev->bytes_sent, 0);
-+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
-
- return count;
- }
-diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
-index 8813588..65454ec 100644
---- a/drivers/video/uvesafb.c
-+++ b/drivers/video/uvesafb.c
-@@ -19,6 +19,7 @@
- #include <linux/io.h>
- #include <linux/mutex.h>
- #include <linux/slab.h>
-+#include <linux/moduleloader.h>
- #include <video/edid.h>
- #include <video/uvesafb.h>
- #ifdef CONFIG_X86
-@@ -73,7 +74,7 @@ static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *ns
- struct uvesafb_task *utask;
- struct uvesafb_ktask *task;
-
-- if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
-+ if (!capable(CAP_SYS_ADMIN))
- return;
-
- if (msg->seq >= UVESAFB_TASKS_MAX)
-@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
- NULL,
- };
-
-- return call_usermodehelper(v86d_path, argv, envp, 1);
-+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
- }
-
- /*
-@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
- if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
- par->pmi_setpal = par->ypan = 0;
- } else {
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+#ifdef CONFIG_MODULES
-+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
-+#endif
-+ if (!par->pmi_code) {
-+ par->pmi_setpal = par->ypan = 0;
-+ return 0;
-+ }
-+#endif
-+
- par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
- + task->t.regs.edi);
-+
-+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
-+ pax_open_kernel();
-+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
-+ pax_close_kernel();
-+
-+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
-+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
-+#else
- par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
- par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
-+#endif
-+
- printk(KERN_INFO "uvesafb: protected mode interface info at "
- "%04x:%04x\n",
- (u16)task->t.regs.es, (u16)task->t.regs.edi);
-@@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
- par->ypan = ypan;
-
- if (par->pmi_setpal || par->ypan) {
-+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
- if (__supported_pte_mask & _PAGE_NX) {
- par->pmi_setpal = par->ypan = 0;
-			printk(KERN_WARNING "uvesafb: NX protection is active; "
-				"better not to use the PMI.\n");
-- } else {
-+ } else
-+#endif
- uvesafb_vbe_getpmi(task, par);
-- }
- }
- #else
- /* The protected mode interface is not available on non-x86. */
-@@ -1449,8 +1473,11 @@ static void __devinit uvesafb_init_info(struct fb_info *info,
- info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
-
- /* Disable blanking if the user requested so. */
-- if (!blank)
-- info->fbops->fb_blank = NULL;
-+ if (!blank) {
-+ pax_open_kernel();
-+ *(void **)&info->fbops->fb_blank = NULL;
-+ pax_close_kernel();
-+ }
-
- /*
- * Find out how much IO memory is required for the mode with
-@@ -1526,8 +1553,11 @@ static void __devinit uvesafb_init_info(struct fb_info *info,
- info->flags = FBINFO_FLAG_DEFAULT |
- (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
-
-- if (!par->ypan)
-- info->fbops->fb_pan_display = NULL;
-+ if (!par->ypan) {
-+ pax_open_kernel();
-+ *(void **)&info->fbops->fb_pan_display = NULL;
-+ pax_close_kernel();
-+ }
- }
-
- static void __devinit uvesafb_init_mtrr(struct fb_info *info)
-@@ -1828,6 +1858,11 @@ out:
- if (par->vbe_modes)
- kfree(par->vbe_modes);
-
-+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
-+ if (par->pmi_code)
-+ module_free_exec(NULL, par->pmi_code);
-+#endif
-+
- framebuffer_release(info);
- return err;
- }
-@@ -1854,6 +1889,12 @@ static int uvesafb_remove(struct platform_device *dev)
- kfree(par->vbe_state_orig);
- if (par->vbe_state_saved)
- kfree(par->vbe_state_saved);
-+
-+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
-+ if (par->pmi_code)
-+ module_free_exec(NULL, par->pmi_code);
-+#endif
-+
- }
-
- framebuffer_release(info);
-diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
-index 501b340..d80aa17 100644
---- a/drivers/video/vesafb.c
-+++ b/drivers/video/vesafb.c
-@@ -9,6 +9,7 @@
- */
-
- #include <linux/module.h>
-+#include <linux/moduleloader.h>
- #include <linux/kernel.h>
- #include <linux/errno.h>
- #include <linux/string.h>
-@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
- static int vram_total __initdata; /* Set total amount of memory */
- static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
- static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
--static void (*pmi_start)(void) __read_mostly;
--static void (*pmi_pal) (void) __read_mostly;
-+static void (*pmi_start)(void) __read_only;
-+static void (*pmi_pal) (void) __read_only;
- static int depth __read_mostly;
- static int vga_compat __read_mostly;
- /* --------------------------------------------------------------------- */
-@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
- unsigned int size_vmode;
- unsigned int size_remap;
- unsigned int size_total;
-+ void *pmi_code = NULL;
-
- if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
- return -ENODEV;
-@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
- size_remap = size_total;
- vesafb_fix.smem_len = size_remap;
-
--#ifndef __i386__
-- screen_info.vesapm_seg = 0;
--#endif
--
- if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
- printk(KERN_WARNING
- "vesafb: cannot reserve video memory at 0x%lx\n",
-@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
- printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
- vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
-
-+#ifdef __i386__
-+
-+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
-+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
-+ if (!pmi_code)
-+#elif !defined(CONFIG_PAX_KERNEXEC)
-+ if (0)
-+#endif
-+
-+#endif
-+ screen_info.vesapm_seg = 0;
-+
- if (screen_info.vesapm_seg) {
-- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
-- screen_info.vesapm_seg,screen_info.vesapm_off);
-+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
-+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
- }
-
- if (screen_info.vesapm_seg < 0xc000)
-@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
-
- if (ypan || pmi_setpal) {
- unsigned short *pmi_base;
-+
- pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
-- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
-- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
-+
-+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
-+ pax_open_kernel();
-+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
-+#else
-+ pmi_code = pmi_base;
-+#endif
-+
-+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
-+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
-+
-+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
-+ pmi_start = ktva_ktla(pmi_start);
-+ pmi_pal = ktva_ktla(pmi_pal);
-+ pax_close_kernel();
-+#endif
-+
- printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
- if (pmi_base[3]) {
- printk(KERN_INFO "vesafb: pmi: ports = ");
-@@ -472,8 +498,11 @@ static int __init vesafb_probe(struct platform_device *dev)
- info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
- (ypan ? FBINFO_HWACCEL_YPAN : 0);
-
-- if (!ypan)
-- info->fbops->fb_pan_display = NULL;
-+ if (!ypan) {
-+ pax_open_kernel();
-+ *(void **)&info->fbops->fb_pan_display = NULL;
-+ pax_close_kernel();
-+ }
-
- if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
- err = -ENOMEM;
-@@ -488,6 +517,11 @@ static int __init vesafb_probe(struct platform_device *dev)
- info->node, info->fix.id);
- return 0;
- err:
-+
-+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
-+ module_free_exec(NULL, pmi_code);
-+#endif
-+
- if (info->screen_base)
- iounmap(info->screen_base);
- framebuffer_release(info);
-diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
-index 88714ae..16c2e11 100644
---- a/drivers/video/via/via_clock.h
-+++ b/drivers/video/via/via_clock.h
-@@ -56,7 +56,7 @@ struct via_clock {
-
- void (*set_engine_pll_state)(u8 state);
- void (*set_engine_pll)(struct via_pll_config config);
--};
-+} __no_const;
-
-
- static inline u32 get_pll_internal_frequency(u32 ref_freq,
-diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
-index cc02a9b..2686ac9 100644
---- a/drivers/virtio/virtio.c
-+++ b/drivers/virtio/virtio.c
-@@ -139,8 +139,11 @@ static int virtio_dev_probe(struct device *_d)
- err = drv->probe(dev);
- if (err)
- add_status(dev, VIRTIO_CONFIG_S_FAILED);
-- else
-+ else {
- add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
-+ if (drv->scan)
-+ drv->scan(dev);
-+ }
-
- return err;
- }
-diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
-index 4a88ac3..d2e1657 100644
---- a/drivers/virtio/virtio_ring.c
-+++ b/drivers/virtio/virtio_ring.c
-@@ -245,10 +245,23 @@ add_head:
- }
- EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
-
--void virtqueue_kick(struct virtqueue *_vq)
-+/**
-+ * virtqueue_kick_prepare - first half of split virtqueue_kick call.
-+ * @vq: the struct virtqueue
-+ *
-+ * Instead of virtqueue_kick(), you can do:
-+ * if (virtqueue_kick_prepare(vq))
-+ * virtqueue_notify(vq);
-+ *
-+ * This is sometimes useful because the virtqueue_kick_prepare() needs
-+ * to be serialized, but the actual virtqueue_notify() call does not.
-+ */
-+bool virtqueue_kick_prepare(struct virtqueue *_vq)
- {
- struct vring_virtqueue *vq = to_vvq(_vq);
- u16 new, old;
-+ bool needs_kick;
-+
- START_USE(vq);
- /* Descriptors and available array need to be set before we expose the
- * new available array entries. */
-@@ -261,13 +274,46 @@ void virtqueue_kick(struct virtqueue *_vq)
- /* Need to update avail index before checking if we should notify */
- virtio_mb();
-
-- if (vq->event ?
-- vring_need_event(vring_avail_event(&vq->vring), new, old) :
-- !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
-- /* Prod other side to tell it about changes. */
-- vq->notify(&vq->vq);
--
-+ if (vq->event) {
-+ needs_kick = vring_need_event(vring_avail_event(&vq->vring),
-+ new, old);
-+ } else {
-+ needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
-+ }
- END_USE(vq);
-+ return needs_kick;
-+}
-+EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
-+
-+/**
-+ * virtqueue_notify - second half of split virtqueue_kick call.
-+ * @vq: the struct virtqueue
-+ *
-+ * This does not need to be serialized.
-+ */
-+void virtqueue_notify(struct virtqueue *_vq)
-+{
-+ struct vring_virtqueue *vq = to_vvq(_vq);
-+
-+ /* Prod other side to tell it about changes. */
-+ vq->notify(_vq);
-+}
-+EXPORT_SYMBOL_GPL(virtqueue_notify);
-+
-+/**
-+ * virtqueue_kick - update after add_buf
-+ * @vq: the struct virtqueue
-+ *
-+ * After one or more virtqueue_add_buf calls, invoke this to kick
-+ * the other side.
-+ *
-+ * Caller must ensure we don't call this with other virtqueue
-+ * operations at the same time (except where noted).
-+ */
-+void virtqueue_kick(struct virtqueue *vq)
-+{
-+ if (virtqueue_kick_prepare(vq))
-+ virtqueue_notify(vq);
- }
- EXPORT_SYMBOL_GPL(virtqueue_kick);
-
-diff --git a/drivers/xen/events.c b/drivers/xen/events.c
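The kernel-doc added for virtqueue_kick_prepare()/virtqueue_notify() above describes the split-kick pattern; a minimal sketch of how a driver might use it follows. The example_submit() wrapper, its spinlock, and the add-buffers step are hypothetical illustrations, and the snippet assumes <linux/virtio.h> and <linux/spinlock.h>; only the two virtqueue calls come from the hunk above. Doing the prepare half under the queue lock and the notify half outside it keeps the expensive notification (often an MMIO write or a hypercall) out of the critical section.

    /*
     * Usage sketch only, not part of the patch: split kick for a virtqueue.
     * The lock and the add-buffers step are placeholders for driver code.
     */
    static void example_submit(struct virtqueue *vq, spinlock_t *lock)
    {
    	bool kick;

    	spin_lock(lock);
    	/* ... virtqueue_add_buf() calls go here, under the queue lock ... */
    	kick = virtqueue_kick_prepare(vq);	/* serialized half */
    	spin_unlock(lock);

    	if (kick)
    		virtqueue_notify(vq);		/* notify half, needs no lock */
    }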
-index f6227cc..3e22fab 100644
---- a/drivers/xen/events.c
-+++ b/drivers/xen/events.c
-@@ -1632,7 +1632,7 @@ void xen_irq_resume(void)
- restore_pirqs();
- }
-
--static struct irq_chip xen_dynamic_chip __read_mostly = {
-+static struct irq_chip xen_dynamic_chip = {
- .name = "xen-dyn",
-
- .irq_disable = disable_dynirq,
-@@ -1646,7 +1646,7 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
- .irq_retrigger = retrigger_dynirq,
- };
-
--static struct irq_chip xen_pirq_chip __read_mostly = {
-+static struct irq_chip xen_pirq_chip = {
- .name = "xen-pirq",
-
- .irq_startup = startup_pirq,
-@@ -1666,7 +1666,7 @@ static struct irq_chip xen_pirq_chip __read_mostly = {
- .irq_retrigger = retrigger_dynirq,
- };
-
--static struct irq_chip xen_percpu_chip __read_mostly = {
-+static struct irq_chip xen_percpu_chip = {
- .name = "xen-percpu",
-
- .irq_disable = disable_dynirq,
-diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
-index 1aa3897..3a6b3f1 100644
---- a/drivers/xen/xenfs/super.c
-+++ b/drivers/xen/xenfs/super.c
-@@ -116,6 +116,7 @@ static struct file_system_type xenfs_type = {
- .mount = xenfs_mount,
- .kill_sb = kill_litter_super,
- };
-+MODULE_ALIAS_FS("xenfs");
-
- static int __init xenfs_init(void)
- {
-diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
-index fef20db..d28b1ab 100644
---- a/drivers/xen/xenfs/xenstored.c
-+++ b/drivers/xen/xenfs/xenstored.c
-@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
- static int xsd_kva_open(struct inode *inode, struct file *file)
- {
- file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ NULL);
-+#else
- xen_store_interface);
-+#endif
-+
- if (!file->private_data)
- return -ENOMEM;
- return 0;
-diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
-index 2524e4c..2962cc6a 100644
---- a/fs/9p/vfs_addr.c
-+++ b/fs/9p/vfs_addr.c
-@@ -185,7 +185,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
-
- retval = v9fs_file_write_internal(inode,
- v9inode->writeback_fid,
-- (__force const char __user *)buffer,
-+ (const char __force_user *)buffer,
- len, &offset, 0);
- if (retval > 0)
- retval = 0;
-diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
-index 879ed88..dbaf762 100644
---- a/fs/9p/vfs_inode.c
-+++ b/fs/9p/vfs_inode.c
-@@ -527,8 +527,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
- unlock_new_inode(inode);
- return inode;
- error:
-- unlock_new_inode(inode);
-- iput(inode);
-+ iget_failed(inode);
- return ERR_PTR(retval);
-
- }
-@@ -1286,7 +1285,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
- void
- v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
- {
-- char *s = nd_get_link(nd);
-+ const char *s = nd_get_link(nd);
-
- P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
- IS_ERR(s) ? "<error>" : s);
-diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
-index 30d4fa8..dbbc83f 100644
---- a/fs/9p/vfs_inode_dotl.c
-+++ b/fs/9p/vfs_inode_dotl.c
-@@ -169,8 +169,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
- unlock_new_inode(inode);
- return inode;
- error:
-- unlock_new_inode(inode);
-- iput(inode);
-+ iget_failed(inode);
- return ERR_PTR(retval);
-
- }
-diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
-index c70251d..fe305fd 100644
---- a/fs/9p/vfs_super.c
-+++ b/fs/9p/vfs_super.c
-@@ -366,3 +366,4 @@ struct file_system_type v9fs_fs_type = {
- .owner = THIS_MODULE,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT,
- };
-+MODULE_ALIAS_FS("9p");
-diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
-index 79e2ca7..5828ad1 100644
---- a/fs/Kconfig.binfmt
-+++ b/fs/Kconfig.binfmt
-@@ -86,7 +86,7 @@ config HAVE_AOUT
-
- config BINFMT_AOUT
- tristate "Kernel support for a.out and ECOFF binaries"
-- depends on HAVE_AOUT
-+ depends on HAVE_AOUT && BROKEN
- ---help---
- A.out (Assembler.OUTput) is a set of formats for libraries and
- executables used in the earliest versions of UNIX. Linux used
-diff --git a/fs/adfs/super.c b/fs/adfs/super.c
-index c8bf36a..d7b2b33 100644
---- a/fs/adfs/super.c
-+++ b/fs/adfs/super.c
-@@ -516,6 +516,7 @@ static struct file_system_type adfs_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("adfs");
-
- static int __init init_adfs_fs(void)
- {
-diff --git a/fs/affs/super.c b/fs/affs/super.c
-index b31507d..5b42a3b 100644
---- a/fs/affs/super.c
-+++ b/fs/affs/super.c
-@@ -597,6 +597,7 @@ static struct file_system_type affs_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("affs");
-
- static int __init init_affs_fs(void)
- {
-diff --git a/fs/afs/inode.c b/fs/afs/inode.c
-index d890ae3..5733a4b 100644
---- a/fs/afs/inode.c
-+++ b/fs/afs/inode.c
-@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
- struct afs_vnode *vnode;
- struct super_block *sb;
- struct inode *inode;
-- static atomic_t afs_autocell_ino;
-+ static atomic_unchecked_t afs_autocell_ino;
-
- _enter("{%x:%u},%*.*s,",
- AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
-@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
- data.fid.unique = 0;
- data.fid.vnode = 0;
-
-- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
-+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
- afs_iget5_autocell_test, afs_iget5_set,
- &data);
- if (!inode) {
-diff --git a/fs/afs/super.c b/fs/afs/super.c
-index 356dcf0..c0046cd 100644
---- a/fs/afs/super.c
-+++ b/fs/afs/super.c
-@@ -43,6 +43,7 @@ struct file_system_type afs_fs_type = {
- .kill_sb = afs_kill_super,
- .fs_flags = 0,
- };
-+MODULE_ALIAS_FS("afs");
-
- static const struct super_operations afs_super_ops = {
- .statfs = afs_statfs,
-diff --git a/fs/aio.c b/fs/aio.c
-index 9acfd07..ad962e7 100644
---- a/fs/aio.c
-+++ b/fs/aio.c
-@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
- size += sizeof(struct io_event) * nr_events;
- nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
-
-- if (nr_pages < 0)
-+ if (nr_pages <= 0)
- return -EINVAL;
-
- nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
-@@ -1468,18 +1468,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
- static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
- {
- ssize_t ret;
-+ struct iovec iovstack;
-
- #ifdef CONFIG_COMPAT
- if (compat)
- ret = compat_rw_copy_check_uvector(type,
- (struct compat_iovec __user *)kiocb->ki_buf,
-- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
-+ kiocb->ki_nbytes, 1, &iovstack,
- &kiocb->ki_iovec, 1);
- else
- #endif
- ret = rw_copy_check_uvector(type,
- (struct iovec __user *)kiocb->ki_buf,
-- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
-+ kiocb->ki_nbytes, 1, &iovstack,
- &kiocb->ki_iovec, 1);
- if (ret < 0)
- goto out;
-@@ -1488,6 +1489,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
- if (ret < 0)
- goto out;
-
-+ if (kiocb->ki_iovec == &iovstack) {
-+ kiocb->ki_inline_vec = iovstack;
-+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
-+ }
- kiocb->ki_nr_segs = kiocb->ki_nbytes;
- kiocb->ki_cur_seg = 0;
- /* ki_nbytes/left now reflect bytes instead of segs */
-diff --git a/fs/attr.c b/fs/attr.c
-index b8f55c4..4c2b80c 100644
---- a/fs/attr.c
-+++ b/fs/attr.c
-@@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
- unsigned long limit;
-
- limit = rlimit(RLIMIT_FSIZE);
-+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
- if (limit != RLIM_INFINITY && offset > limit)
- goto out_sig;
- if (offset > inode->i_sb->s_maxbytes)
-diff --git a/fs/autofs4/init.c b/fs/autofs4/init.c
-index c038727..4ba2927 100644
---- a/fs/autofs4/init.c
-+++ b/fs/autofs4/init.c
-@@ -26,6 +26,7 @@ static struct file_system_type autofs_fs_type = {
- .mount = autofs_mount,
- .kill_sb = autofs4_kill_sb,
- };
-+MODULE_ALIAS_FS("autofs");
-
- static int __init init_autofs4_fs(void)
- {
-diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
-index e1fbdee..69291a4 100644
---- a/fs/autofs4/waitq.c
-+++ b/fs/autofs4/waitq.c
-@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
- {
- unsigned long sigpipe, flags;
- mm_segment_t fs;
-- const char *data = (const char *)addr;
-+ const char __user *data = (const char __force_user *)addr;
- ssize_t wr = 0;
-
- /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
-@@ -338,6 +338,10 @@ static int validate_request(struct autofs_wait_queue **wait,
- return 1;
- }
-
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
-+#endif
-+
- int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
- enum autofs_notify notify)
- {
-@@ -371,7 +375,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
-
- /* If this is a direct mount request create a dummy name */
- if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ /* this name does get written to userland via autofs4_write() */
-+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
-+#else
- qstr.len = sprintf(name, "%p", dentry);
-+#endif
- else {
- qstr.len = autofs4_getpath(sbi, dentry, &name);
- if (!qstr.len) {
-diff --git a/fs/befs/endian.h b/fs/befs/endian.h
-index 2722387..c8dd2a7 100644
---- a/fs/befs/endian.h
-+++ b/fs/befs/endian.h
-@@ -11,7 +11,7 @@
-
- #include <asm/byteorder.h>
-
--static inline u64
-+static inline u64 __intentional_overflow(-1)
- fs64_to_cpu(const struct super_block *sb, fs64 n)
- {
- if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
-@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
- return (__force fs64)cpu_to_be64(n);
- }
-
--static inline u32
-+static inline u32 __intentional_overflow(-1)
- fs32_to_cpu(const struct super_block *sb, fs32 n)
- {
- if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
-diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
-index 8342ca6..a9dca40 100644
---- a/fs/befs/linuxvfs.c
-+++ b/fs/befs/linuxvfs.c
-@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
- {
- befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
- if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
-- char *link = nd_get_link(nd);
-+ const char *link = nd_get_link(nd);
- if (!IS_ERR(link))
- kfree(link);
- }
-@@ -937,6 +937,7 @@ static struct file_system_type befs_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("befs");
-
- static int __init
- init_befs_fs(void)
-diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
-index 697af5b..ab6db36 100644
---- a/fs/bfs/inode.c
-+++ b/fs/bfs/inode.c
-@@ -470,6 +470,7 @@ static struct file_system_type bfs_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("bfs");
-
- static int __init init_bfs_fs(void)
- {
-diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
-index a6395bd..f1e376a 100644
---- a/fs/binfmt_aout.c
-+++ b/fs/binfmt_aout.c
-@@ -16,6 +16,7 @@
- #include <linux/string.h>
- #include <linux/fs.h>
- #include <linux/file.h>
-+#include <linux/security.h>
- #include <linux/stat.h>
- #include <linux/fcntl.h>
- #include <linux/ptrace.h>
-@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
- #endif
- # define START_STACK(u) ((void __user *)u.start_stack)
-
-+ memset(&dump, 0, sizeof(dump));
-+
- fs = get_fs();
- set_fs(KERNEL_DS);
- has_dumped = 1;
-@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
-
- /* If the size of the dump file exceeds the rlimit, then see what would happen
- if we wrote the stack, but not the data area. */
-+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
- if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
- dump.u_dsize = 0;
-
- /* Make sure we have enough room to write the stack and data areas. */
-+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
- if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
- dump.u_ssize = 0;
-
-@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
- rlim = rlimit(RLIMIT_DATA);
- if (rlim >= RLIM_INFINITY)
- rlim = ~0;
-+
-+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
- if (ex.a_data + ex.a_bss > rlim)
- return -ENOMEM;
-
-@@ -259,9 +266,37 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
- current->mm->free_area_cache = current->mm->mmap_base;
- current->mm->cached_hole_size = 0;
-
-+ retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
-+ if (retval < 0) {
-+ /* Someone check-me: is this error path enough? */
-+ send_sig(SIGKILL, current, 0);
-+ return retval;
-+ }
-+
- install_exec_creds(bprm);
- current->flags &= ~PF_FORKNOEXEC;
-
-+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
-+ current->mm->pax_flags = 0UL;
-+#endif
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
-+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
-+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
-+#endif
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
-+ current->mm->pax_flags |= MF_PAX_MPROTECT;
-+#endif
-+
-+ }
-+#endif
-+
- if (N_MAGIC(ex) == OMAGIC) {
- unsigned long text_addr, map_size;
- loff_t pos;
-@@ -334,7 +369,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
-- PROT_READ | PROT_WRITE | PROT_EXEC,
-+ PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
- fd_offset + ex.a_text);
- up_write(&current->mm->mmap_sem);
-@@ -352,13 +387,6 @@ beyond_if:
- return retval;
- }
-
-- retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
-- if (retval < 0) {
-- /* Someone check-me: is this error path enough? */
-- send_sig(SIGKILL, current, 0);
-- return retval;
-- }
--
- current->mm->start_stack =
- (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
- #ifdef __alpha__
-diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index 2aed667..52b96fd 100644
---- a/fs/binfmt_elf.c
-+++ b/fs/binfmt_elf.c
-@@ -32,6 +32,7 @@
- #include <linux/elf.h>
- #include <linux/utsname.h>
- #include <linux/coredump.h>
-+#include <linux/xattr.h>
- #include <asm/uaccess.h>
- #include <asm/param.h>
- #include <asm/page.h>
-@@ -39,7 +40,7 @@
- static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
- static int load_elf_library(struct file *);
- static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
-- int, int, unsigned long);
-+ int, int, unsigned long) __intentional_overflow(-1);
-
- /*
- * If we don't support core dumping, then supply a NULL so we
-@@ -51,6 +52,14 @@ static int elf_core_dump(struct coredump_params *cprm);
- #define elf_core_dump NULL
- #endif
-
-+#ifdef CONFIG_PAX_MPROTECT
-+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
-+#endif
-+
-+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
-+static void elf_handle_mmap(struct file *file);
-+#endif
-+
- #if ELF_EXEC_PAGESIZE > PAGE_SIZE
- #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
- #else
-@@ -70,6 +79,15 @@ static struct linux_binfmt elf_format = {
- .load_binary = load_elf_binary,
- .load_shlib = load_elf_library,
- .core_dump = elf_core_dump,
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+ .handle_mprotect= elf_handle_mprotect,
-+#endif
-+
-+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
-+ .handle_mmap = elf_handle_mmap,
-+#endif
-+
- .min_coredump = ELF_EXEC_PAGESIZE,
- };
-
-@@ -77,6 +95,8 @@ static struct linux_binfmt elf_format = {
-
- static int set_brk(unsigned long start, unsigned long end)
- {
-+ unsigned long e = end;
-+
- start = ELF_PAGEALIGN(start);
- end = ELF_PAGEALIGN(end);
- if (end > start) {
-@@ -87,7 +107,7 @@ static int set_brk(unsigned long start, unsigned long end)
- if (BAD_ADDR(addr))
- return addr;
- }
-- current->mm->start_brk = current->mm->brk = end;
-+ current->mm->start_brk = current->mm->brk = e;
- return 0;
- }
-
-@@ -148,12 +168,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
- elf_addr_t __user *u_rand_bytes;
- const char *k_platform = ELF_PLATFORM;
- const char *k_base_platform = ELF_BASE_PLATFORM;
-- unsigned char k_rand_bytes[16];
-+ u32 k_rand_bytes[4];
- int items;
- elf_addr_t *elf_info;
- int ei_index = 0;
- const struct cred *cred = current_cred();
- struct vm_area_struct *vma;
-+ unsigned long saved_auxv[AT_VECTOR_SIZE];
-
- /*
- * In some cases (e.g. Hyper-Threading), we want to avoid L1
-@@ -195,8 +216,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
- * Generate 16 random bytes for userspace PRNG seeding.
- */
- get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
-- u_rand_bytes = (elf_addr_t __user *)
-- STACK_ALLOC(p, sizeof(k_rand_bytes));
-+ srandom32(k_rand_bytes[0] ^ random32());
-+ srandom32(k_rand_bytes[1] ^ random32());
-+ srandom32(k_rand_bytes[2] ^ random32());
-+ srandom32(k_rand_bytes[3] ^ random32());
-+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
-+ u_rand_bytes = (elf_addr_t __user *) p;
- if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
- return -EFAULT;
-
-@@ -308,9 +333,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
- return -EFAULT;
- current->mm->env_end = p;
-
-+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
-+
- /* Put the elf_info on the stack in the right place. */
- sp = (elf_addr_t __user *)envp + 1;
-- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
-+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
- return -EFAULT;
- return 0;
- }
-@@ -376,15 +403,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
- an ELF header */
-
- static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
-- struct file *interpreter, unsigned long *interp_map_addr,
-- unsigned long no_base)
-+ struct file *interpreter, unsigned long no_base)
- {
- struct elf_phdr *elf_phdata;
- struct elf_phdr *eppnt;
-- unsigned long load_addr = 0;
-+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
- int load_addr_set = 0;
- unsigned long last_bss = 0, elf_bss = 0;
-- unsigned long error = ~0UL;
-+ unsigned long error = -EINVAL;
- unsigned long total_size;
- int retval, i, size;
-
-@@ -430,6 +456,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
- goto out_close;
- }
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
-+ pax_task_size = SEGMEXEC_TASK_SIZE;
-+#endif
-+
- eppnt = elf_phdata;
- for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
- if (eppnt->p_type == PT_LOAD) {
-@@ -453,8 +484,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
- map_addr = elf_map(interpreter, load_addr + vaddr,
- eppnt, elf_prot, elf_type, total_size);
- total_size = 0;
-- if (!*interp_map_addr)
-- *interp_map_addr = map_addr;
- error = map_addr;
- if (BAD_ADDR(map_addr))
- goto out_close;
-@@ -473,8 +502,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
- k = load_addr + eppnt->p_vaddr;
- if (BAD_ADDR(k) ||
- eppnt->p_filesz > eppnt->p_memsz ||
-- eppnt->p_memsz > TASK_SIZE ||
-- TASK_SIZE - eppnt->p_memsz < k) {
-+ eppnt->p_memsz > pax_task_size ||
-+ pax_task_size - eppnt->p_memsz < k) {
- error = -ENOMEM;
- goto out_close;
- }
-@@ -513,11 +542,13 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
- elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
-
- /* Map the last of the bss segment */
-- down_write(&current->mm->mmap_sem);
-- error = do_brk(elf_bss, last_bss - elf_bss);
-- up_write(&current->mm->mmap_sem);
-- if (BAD_ADDR(error))
-- goto out_close;
-+ if (last_bss > elf_bss) {
-+ down_write(&current->mm->mmap_sem);
-+ error = do_brk(elf_bss, last_bss - elf_bss);
-+ up_write(&current->mm->mmap_sem);
-+ if (BAD_ADDR(error))
-+ goto out_close;
-+ }
- }
-
- error = load_addr;
-@@ -528,6 +559,336 @@ out:
- return error;
- }
-
-+#ifdef CONFIG_PAX_PT_PAX_FLAGS
-+#ifdef CONFIG_PAX_SOFTMODE
-+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
-+{
-+ unsigned long pax_flags = 0UL;
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (elf_phdata->p_flags & PF_PAGEEXEC)
-+ pax_flags |= MF_PAX_PAGEEXEC;
-+#endif
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (elf_phdata->p_flags & PF_SEGMEXEC)
-+ pax_flags |= MF_PAX_SEGMEXEC;
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
-+ pax_flags |= MF_PAX_EMUTRAMP;
-+#endif
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+ if (elf_phdata->p_flags & PF_MPROTECT)
-+ pax_flags |= MF_PAX_MPROTECT;
-+#endif
-+
-+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
-+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
-+ pax_flags |= MF_PAX_RANDMMAP;
-+#endif
-+
-+ return pax_flags;
-+}
-+#endif
-+
-+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
-+{
-+ unsigned long pax_flags = 0UL;
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
-+ pax_flags |= MF_PAX_PAGEEXEC;
-+#endif
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
-+ pax_flags |= MF_PAX_SEGMEXEC;
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
-+ pax_flags |= MF_PAX_EMUTRAMP;
-+#endif
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
-+ pax_flags |= MF_PAX_MPROTECT;
-+#endif
-+
-+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
-+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
-+ pax_flags |= MF_PAX_RANDMMAP;
-+#endif
-+
-+ return pax_flags;
-+}
-+#endif
-+
-+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
-+#ifdef CONFIG_PAX_SOFTMODE
-+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
-+{
-+ unsigned long pax_flags = 0UL;
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
-+ pax_flags |= MF_PAX_PAGEEXEC;
-+#endif
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
-+ pax_flags |= MF_PAX_SEGMEXEC;
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
-+ pax_flags |= MF_PAX_EMUTRAMP;
-+#endif
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+ if (pax_flags_softmode & MF_PAX_MPROTECT)
-+ pax_flags |= MF_PAX_MPROTECT;
-+#endif
-+
-+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
-+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
-+ pax_flags |= MF_PAX_RANDMMAP;
-+#endif
-+
-+ return pax_flags;
-+}
-+#endif
-+
-+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
-+{
-+ unsigned long pax_flags = 0UL;
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
-+ pax_flags |= MF_PAX_PAGEEXEC;
-+#endif
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
-+ pax_flags |= MF_PAX_SEGMEXEC;
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
-+ pax_flags |= MF_PAX_EMUTRAMP;
-+#endif
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
-+ pax_flags |= MF_PAX_MPROTECT;
-+#endif
-+
-+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
-+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
-+ pax_flags |= MF_PAX_RANDMMAP;
-+#endif
-+
-+ return pax_flags;
-+}
-+#endif
-+
-+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
-+static unsigned long pax_parse_defaults(void)
-+{
-+ unsigned long pax_flags = 0UL;
-+
-+#ifdef CONFIG_PAX_SOFTMODE
-+ if (pax_softmode)
-+ return pax_flags;
-+#endif
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ pax_flags |= MF_PAX_PAGEEXEC;
-+#endif
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ pax_flags |= MF_PAX_SEGMEXEC;
-+#endif
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+ pax_flags |= MF_PAX_MPROTECT;
-+#endif
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (randomize_va_space)
-+ pax_flags |= MF_PAX_RANDMMAP;
-+#endif
-+
-+ return pax_flags;
-+}
-+
-+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
-+{
-+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
-+
-+#ifdef CONFIG_PAX_EI_PAX
-+
-+#ifdef CONFIG_PAX_SOFTMODE
-+ if (pax_softmode)
-+ return pax_flags;
-+#endif
-+
-+ pax_flags = 0UL;
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
-+ pax_flags |= MF_PAX_PAGEEXEC;
-+#endif
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
-+ pax_flags |= MF_PAX_SEGMEXEC;
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
-+ pax_flags |= MF_PAX_EMUTRAMP;
-+#endif
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
-+ pax_flags |= MF_PAX_MPROTECT;
-+#endif
-+
-+#ifdef CONFIG_PAX_ASLR
-+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
-+ pax_flags |= MF_PAX_RANDMMAP;
-+#endif
-+
-+#endif
-+
-+ return pax_flags;
-+
-+}
-+
-+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
-+{
-+
-+#ifdef CONFIG_PAX_PT_PAX_FLAGS
-+ unsigned long i;
-+
-+ for (i = 0UL; i < elf_ex->e_phnum; i++)
-+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
-+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
-+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
-+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
-+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
-+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
-+ return PAX_PARSE_FLAGS_FALLBACK;
-+
-+#ifdef CONFIG_PAX_SOFTMODE
-+ if (pax_softmode)
-+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
-+ else
-+#endif
-+
-+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
-+ break;
-+ }
-+#endif
-+
-+ return PAX_PARSE_FLAGS_FALLBACK;
-+}
-+
-+static unsigned long pax_parse_xattr_pax(struct file * const file)
-+{
-+
-+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
-+ ssize_t xattr_size, i;
-+ unsigned char xattr_value[sizeof("pemrs") - 1];
-+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
-+
-+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
-+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
-+ return PAX_PARSE_FLAGS_FALLBACK;
-+
-+ for (i = 0; i < xattr_size; i++)
-+ switch (xattr_value[i]) {
-+ default:
-+ return PAX_PARSE_FLAGS_FALLBACK;
-+
-+#define parse_flag(option1, option2, flag) \
-+ case option1: \
-+ if (pax_flags_hardmode & MF_PAX_##flag) \
-+ return PAX_PARSE_FLAGS_FALLBACK;\
-+ pax_flags_hardmode |= MF_PAX_##flag; \
-+ break; \
-+ case option2: \
-+ if (pax_flags_softmode & MF_PAX_##flag) \
-+ return PAX_PARSE_FLAGS_FALLBACK;\
-+ pax_flags_softmode |= MF_PAX_##flag; \
-+ break;
-+
-+ parse_flag('p', 'P', PAGEEXEC);
-+ parse_flag('e', 'E', EMUTRAMP);
-+ parse_flag('m', 'M', MPROTECT);
-+ parse_flag('r', 'R', RANDMMAP);
-+ parse_flag('s', 'S', SEGMEXEC);
-+
-+#undef parse_flag
-+ }
-+
-+ if (pax_flags_hardmode & pax_flags_softmode)
-+ return PAX_PARSE_FLAGS_FALLBACK;
-+
-+#ifdef CONFIG_PAX_SOFTMODE
-+ if (pax_softmode)
-+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
-+ else
-+#endif
-+
-+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
-+#else
-+ return PAX_PARSE_FLAGS_FALLBACK;
-+#endif
-+
-+}
-+
-+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
-+{
-+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
-+
-+ pax_flags = pax_parse_defaults();
-+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
-+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
-+ xattr_pax_flags = pax_parse_xattr_pax(file);
-+
-+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
-+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
-+ pt_pax_flags != xattr_pax_flags)
-+ return -EINVAL;
-+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
-+ pax_flags = xattr_pax_flags;
-+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
-+ pax_flags = pt_pax_flags;
-+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
-+ pax_flags = ei_pax_flags;
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
-+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+ if ((__supported_pte_mask & _PAGE_NX))
-+ pax_flags &= ~MF_PAX_SEGMEXEC;
-+ else
-+ pax_flags &= ~MF_PAX_PAGEEXEC;
-+ }
-+#endif
-+
-+ if (0 > pax_check_flags(&pax_flags))
-+ return -EINVAL;
-+
-+ current->mm->pax_flags = pax_flags;
-+ return 0;
-+}
-+#endif
-+
- /*
- * These are the functions used to load ELF style executables and shared
- * libraries. There is no binary dependent code anywhere else.
-@@ -544,6 +905,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
- {
- unsigned long random_variable = 0;
-
-+#ifdef CONFIG_PAX_RANDUSTACK
-+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
-+ return stack_top - current->mm->delta_stack;
-+#endif
-+
- if ((current->flags & PF_RANDOMIZE) &&
- !(current->personality & ADDR_NO_RANDOMIZE)) {
- random_variable = (unsigned long) get_random_int();
-@@ -563,7 +929,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
- unsigned long load_addr = 0, load_bias = 0;
- int load_addr_set = 0;
- char * elf_interpreter = NULL;
-- unsigned long error;
-+ unsigned long error = 0;
- struct elf_phdr *elf_ppnt, *elf_phdata;
- unsigned long elf_bss, elf_brk;
- int retval, i;
-@@ -573,11 +939,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
- unsigned long start_code, end_code, start_data, end_data;
- unsigned long reloc_func_desc __maybe_unused = 0;
- int executable_stack = EXSTACK_DEFAULT;
-- unsigned long def_flags = 0;
- struct {
- struct elfhdr elf_ex;
- struct elfhdr interp_elf_ex;
- } *loc;
-+ unsigned long pax_task_size;
-
- loc = kmalloc(sizeof(*loc), GFP_KERNEL);
- if (!loc) {
-@@ -714,11 +1080,82 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
-
- /* OK, This is the point of no return */
- current->flags &= ~PF_FORKNOEXEC;
-- current->mm->def_flags = def_flags;
-+ current->mm->def_flags = 0;
-
- /* Do this immediately, since STACK_TOP as used in setup_arg_pages
- may depend on the personality. */
- SET_PERSONALITY(loc->elf_ex);
-+
-+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
-+ current->mm->pax_flags = 0UL;
-+#endif
-+
-+#ifdef CONFIG_PAX_DLRESOLVE
-+ current->mm->call_dl_resolve = 0UL;
-+#endif
-+
-+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
-+ current->mm->call_syscall = 0UL;
-+#endif
-+
-+#ifdef CONFIG_PAX_ASLR
-+ current->mm->delta_mmap = 0UL;
-+ current->mm->delta_stack = 0UL;
-+#endif
-+
-+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
-+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
-+ send_sig(SIGKILL, current, 0);
-+ goto out_free_dentry;
-+ }
-+#endif
-+
-+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
-+ pax_set_initial_flags(bprm);
-+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
-+ if (pax_set_initial_flags_func)
-+ (pax_set_initial_flags_func)(bprm);
-+#endif
-+
-+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
-+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
-+ current->mm->context.user_cs_limit = PAGE_SIZE;
-+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
-+ }
-+#endif
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
-+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
-+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
-+ pax_task_size = SEGMEXEC_TASK_SIZE;
-+ current->mm->def_flags |= VM_NOHUGEPAGE;
-+ } else
-+#endif
-+
-+ pax_task_size = TASK_SIZE;
-+
-+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
-+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
-+ put_cpu();
-+ }
-+#endif
-+
-+#ifdef CONFIG_PAX_ASLR
-+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
-+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
-+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
-+ }
-+#endif
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+ executable_stack = EXSTACK_DISABLE_X;
-+ current->personality &= ~READ_IMPLIES_EXEC;
-+ } else
-+#endif
-+
- if (elf_read_implies_exec(loc->elf_ex, executable_stack))
- current->personality |= READ_IMPLIES_EXEC;
-
-@@ -809,6 +1246,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
- #else
- load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
- #endif
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ /* PaX: randomize base address at the default exe base if requested */
-+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
-+#ifdef CONFIG_SPARC64
-+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
-+#else
-+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
-+#endif
-+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
-+ elf_flags |= MAP_FIXED;
-+ }
-+#endif
-+
- }
-
- error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-@@ -841,9 +1292,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
- * allowed task size. Note that p_filesz must always be
- * <= p_memsz so it is only necessary to check p_memsz.
- */
-- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
-- elf_ppnt->p_memsz > TASK_SIZE ||
-- TASK_SIZE - elf_ppnt->p_memsz < k) {
-+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
-+ elf_ppnt->p_memsz > pax_task_size ||
-+ pax_task_size - elf_ppnt->p_memsz < k) {
- /* set_brk can never work. Avoid overflows. */
- send_sig(SIGKILL, current, 0);
- retval = -EINVAL;
-@@ -882,17 +1333,44 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
- goto out_free_dentry;
- }
- if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
-- send_sig(SIGSEGV, current, 0);
-- retval = -EFAULT; /* Nobody gets to see this, but.. */
-- goto out_free_dentry;
-+ /*
-+ * This bss-zeroing can fail if the ELF
-+ * file specifies odd protections. So
-+ * we don't check the return value
-+ */
- }
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
-+ unsigned long start, size, flags, vm_flags;
-+
-+ start = ELF_PAGEALIGN(elf_brk);
-+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
-+ flags = MAP_FIXED | MAP_PRIVATE;
-+ vm_flags = VM_DONTEXPAND | VM_RESERVED;
-+
-+ down_write(&current->mm->mmap_sem);
-+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
-+ retval = -ENOMEM;
-+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
-+// if (current->personality & ADDR_NO_RANDOMIZE)
-+// vm_flags |= VM_READ | VM_MAYREAD;
-+ start = mmap_region(NULL, start, PAGE_ALIGN(size), flags, vm_flags, 0);
-+ retval = IS_ERR_VALUE(start) ? start : 0;
-+ }
-+ up_write(&current->mm->mmap_sem);
-+ if (retval == 0)
-+ retval = set_brk(start + size, start + size + PAGE_SIZE);
-+ if (retval < 0) {
-+ send_sig(SIGKILL, current, 0);
-+ goto out_free_dentry;
-+ }
-+ }
-+#endif
-+
- if (elf_interpreter) {
-- unsigned long uninitialized_var(interp_map_addr);
--
- elf_entry = load_elf_interp(&loc->interp_elf_ex,
- interpreter,
-- &interp_map_addr,
- load_bias);
- if (!IS_ERR((void *)elf_entry)) {
- /*
-@@ -1099,7 +1577,7 @@ out:
- * Decide what to dump of a segment, part, all or none.
- */
- static unsigned long vma_dump_size(struct vm_area_struct *vma,
-- unsigned long mm_flags)
-+ unsigned long mm_flags, long signr)
- {
- #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
-
-@@ -1133,7 +1611,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
- if (vma->vm_file == NULL)
- return 0;
-
-- if (FILTER(MAPPED_PRIVATE))
-+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
- goto whole;
-
- /*
-@@ -1355,9 +1833,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
- {
- elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
- int i = 0;
-- do
-+ do {
- i += 2;
-- while (auxv[i - 2] != AT_NULL);
-+ } while (auxv[i - 2] != AT_NULL);
- fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
- }
-
-@@ -1852,14 +2330,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
- }
-
- static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
-- unsigned long mm_flags)
-+ struct coredump_params *cprm)
- {
- struct vm_area_struct *vma;
- size_t size = 0;
-
- for (vma = first_vma(current, gate_vma); vma != NULL;
- vma = next_vma(vma, gate_vma))
-- size += vma_dump_size(vma, mm_flags);
-+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
- return size;
- }
-
-@@ -1953,7 +2431,7 @@ static int elf_core_dump(struct coredump_params *cprm)
-
- dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
-
-- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
-+ offset += elf_core_vma_data_size(gate_vma, cprm);
- offset += elf_core_extra_data_size();
- e_shoff = offset;
-
-@@ -1967,10 +2445,12 @@ static int elf_core_dump(struct coredump_params *cprm)
- offset = dataoff;
-
- size += sizeof(*elf);
-+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
- if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
- goto end_coredump;
-
- size += sizeof(*phdr4note);
-+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
- if (size > cprm->limit
- || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
- goto end_coredump;
-@@ -1984,7 +2464,7 @@ static int elf_core_dump(struct coredump_params *cprm)
- phdr.p_offset = offset;
- phdr.p_vaddr = vma->vm_start;
- phdr.p_paddr = 0;
-- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
-+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
- phdr.p_memsz = vma->vm_end - vma->vm_start;
- offset += phdr.p_filesz;
- phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
-@@ -1995,6 +2475,7 @@ static int elf_core_dump(struct coredump_params *cprm)
- phdr.p_align = ELF_EXEC_PAGESIZE;
-
- size += sizeof(phdr);
-+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
- if (size > cprm->limit
- || !dump_write(cprm->file, &phdr, sizeof(phdr)))
- goto end_coredump;
-@@ -2019,7 +2500,7 @@ static int elf_core_dump(struct coredump_params *cprm)
- unsigned long addr;
- unsigned long end;
-
-- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
-+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
-
- for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
- struct page *page;
-@@ -2028,6 +2509,7 @@ static int elf_core_dump(struct coredump_params *cprm)
- page = get_dump_page(addr);
- if (page) {
- void *kaddr = kmap(page);
-+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
- stop = ((size += PAGE_SIZE) > cprm->limit) ||
- !dump_write(cprm->file, kaddr,
- PAGE_SIZE);
-@@ -2045,6 +2527,7 @@ static int elf_core_dump(struct coredump_params *cprm)
-
- if (e_phnum == PN_XNUM) {
- size += sizeof(*shdr4extnum);
-+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
- if (size > cprm->limit
- || !dump_write(cprm->file, shdr4extnum,
- sizeof(*shdr4extnum)))
-@@ -2065,6 +2548,167 @@ out:
-
- #endif /* CONFIG_ELF_CORE */
-
-+#ifdef CONFIG_PAX_MPROTECT
-+/* PaX: non-PIC ELF libraries need relocations on their executable segments
-+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
-+ * we'll remove VM_MAYWRITE for good on RELRO segments.
-+ *
-+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
-+ * basis because we want to allow the common case and not the special ones.
-+ */
-+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
-+{
-+ struct elfhdr elf_h;
-+ struct elf_phdr elf_p;
-+ unsigned long i;
-+ unsigned long oldflags;
-+ bool is_textrel_rw, is_textrel_rx, is_relro;
-+
-+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
-+ return;
-+
-+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
-+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
-+
-+#ifdef CONFIG_PAX_ELFRELOCS
-+ /* possible TEXTREL */
-+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
-+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
-+#else
-+ is_textrel_rw = false;
-+ is_textrel_rx = false;
-+#endif
-+
-+ /* possible RELRO */
-+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
-+
-+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
-+ return;
-+
-+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
-+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
-+
-+#ifdef CONFIG_PAX_ETEXECRELOCS
-+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
-+#else
-+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
-+#endif
-+
-+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
-+ !elf_check_arch(&elf_h) ||
-+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
-+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
-+ return;
-+
-+ for (i = 0UL; i < elf_h.e_phnum; i++) {
-+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
-+ return;
-+ switch (elf_p.p_type) {
-+ case PT_DYNAMIC:
-+ if (!is_textrel_rw && !is_textrel_rx)
-+ continue;
-+ i = 0UL;
-+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
-+ elf_dyn dyn;
-+
-+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
-+ break;
-+ if (dyn.d_tag == DT_NULL)
-+ break;
-+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
-+ gr_log_textrel(vma);
-+ if (is_textrel_rw)
-+ vma->vm_flags |= VM_MAYWRITE;
-+ else
-+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
-+ vma->vm_flags &= ~VM_MAYWRITE;
-+ break;
-+ }
-+ i++;
-+ }
-+ is_textrel_rw = false;
-+ is_textrel_rx = false;
-+ continue;
-+
-+ case PT_GNU_RELRO:
-+ if (!is_relro)
-+ continue;
-+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
-+ vma->vm_flags &= ~VM_MAYWRITE;
-+ is_relro = false;
-+ continue;
-+
-+#ifdef CONFIG_PAX_PT_PAX_FLAGS
-+ case PT_PAX_FLAGS: {
-+ const char *msg_mprotect = "", *msg_emutramp = "";
-+ char *buffer_lib, *buffer_exe;
-+
-+ if (elf_p.p_flags & PF_NOMPROTECT)
-+ msg_mprotect = "MPROTECT disabled";
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
-+ msg_emutramp = "EMUTRAMP enabled";
-+#endif
-+
-+ if (!msg_mprotect[0] && !msg_emutramp[0])
-+ continue;
-+
-+ if (!printk_ratelimit())
-+ continue;
-+
-+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
-+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
-+ if (buffer_lib && buffer_exe) {
-+ char *path_lib, *path_exe;
-+
-+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
-+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
-+
-+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
-+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
-+
-+ }
-+ free_page((unsigned long)buffer_exe);
-+ free_page((unsigned long)buffer_lib);
-+ continue;
-+ }
-+#endif
-+
-+ }
-+ }
-+}
-+#endif
-+
-+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
-+
-+extern int grsec_enable_log_rwxmaps;
-+
-+static void elf_handle_mmap(struct file *file)
-+{
-+ struct elfhdr elf_h;
-+ struct elf_phdr elf_p;
-+ unsigned long i;
-+
-+ if (!grsec_enable_log_rwxmaps)
-+ return;
-+
-+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
-+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
-+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
-+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
-+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
-+ return;
-+
-+ for (i = 0UL; i < elf_h.e_phnum; i++) {
-+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
-+ return;
-+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
-+ gr_log_ptgnustack(file);
-+ }
-+}
-+#endif
-+
- static int __init init_elf_binfmt(void)
- {
- return register_binfmt(&elf_format);
-diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
-index 1bffbe0..c8c283e 100644
---- a/fs/binfmt_flat.c
-+++ b/fs/binfmt_flat.c
-@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
- realdatastart = (unsigned long) -ENOMEM;
- printk("Unable to allocate RAM for process data, errno %d\n",
- (int)-realdatastart);
-+ down_write(&current->mm->mmap_sem);
- do_munmap(current->mm, textpos, text_len);
-+ up_write(&current->mm->mmap_sem);
- ret = realdatastart;
- goto err;
- }
-@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
- }
- if (IS_ERR_VALUE(result)) {
- printk("Unable to read data+bss, errno %d\n", (int)-result);
-+ down_write(&current->mm->mmap_sem);
- do_munmap(current->mm, textpos, text_len);
- do_munmap(current->mm, realdatastart, len);
-+ up_write(&current->mm->mmap_sem);
- ret = result;
- goto err;
- }
-@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
- }
- if (IS_ERR_VALUE(result)) {
- printk("Unable to read code+data+bss, errno %d\n",(int)-result);
-+ down_write(&current->mm->mmap_sem);
- do_munmap(current->mm, textpos, text_len + data_len + extra +
- MAX_SHARED_LIBS * sizeof(unsigned long));
-+ up_write(&current->mm->mmap_sem);
- ret = result;
- goto err;
- }
-diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
-index 7423cb9..9379ddd 100644
---- a/fs/binfmt_misc.c
-+++ b/fs/binfmt_misc.c
-@@ -719,6 +719,7 @@ static struct file_system_type bm_fs_type = {
- .mount = bm_mount,
- .kill_sb = kill_litter_super,
- };
-+MODULE_ALIAS_FS("binfmt_misc");
-
- static int __init init_misc_binfmt(void)
- {
-diff --git a/fs/bio.c b/fs/bio.c
-index b84d851..0dd5077 100644
---- a/fs/bio.c
-+++ b/fs/bio.c
-@@ -848,7 +848,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
- /*
- * Overflow, abort
- */
-- if (end < start)
-+ if (end < start || end - start > INT_MAX - nr_pages)
- return ERR_PTR(-EINVAL);
-
- nr_pages += end - start;
-@@ -982,7 +982,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
- /*
- * Overflow, abort
- */
-- if (end < start)
-+ if (end < start || end - start > INT_MAX - nr_pages)
- return ERR_PTR(-EINVAL);
-
- nr_pages += end - start;
-@@ -1244,7 +1244,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
- const int read = bio_data_dir(bio) == READ;
- struct bio_map_data *bmd = bio->bi_private;
- int i;
-- char *p = bmd->sgvecs[0].iov_base;
-+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
-
- bio_for_each_segment_all(bvec, bio, i) {
- char *addr = page_address(bvec->bv_page);
-diff --git a/fs/block_dev.c b/fs/block_dev.c
-index c103267..260cbd9 100644
---- a/fs/block_dev.c
-+++ b/fs/block_dev.c
-@@ -690,7 +690,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
- else if (bdev->bd_contains == bdev)
- return true; /* is a whole device which isn't held */
-
-- else if (whole->bd_holder == bd_may_claim)
-+ else if (whole->bd_holder == (void *)bd_may_claim)
- return true; /* is a partition of a device that is being partitioned */
- else if (whole->bd_holder != NULL)
- return false; /* is a partition of a held device */
-diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
-index dede441..f2a2507 100644
---- a/fs/btrfs/ctree.c
-+++ b/fs/btrfs/ctree.c
-@@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
- free_extent_buffer(buf);
- add_root_to_dirty_list(root);
- } else {
-- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
-- parent_start = parent->start;
-- else
-+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
-+ if (parent)
-+ parent_start = parent->start;
-+ else
-+ parent_start = 0;
-+ } else
- parent_start = 0;
-
- WARN_ON(trans->transid != btrfs_header_generation(parent));
-diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
-index a694317..dc698a1 100644
---- a/fs/btrfs/extent-tree.c
-+++ b/fs/btrfs/extent-tree.c
-@@ -5644,7 +5644,7 @@ again:
-
- if (ret == -ENOSPC && num_bytes > min_alloc_size) {
- num_bytes = num_bytes >> 1;
-- num_bytes = num_bytes & ~(root->sectorsize - 1);
-+ num_bytes = num_bytes & ~((u64)root->sectorsize - 1);
- num_bytes = max(num_bytes, min_alloc_size);
- do_chunk_alloc(trans, root->fs_info->extent_root,
- num_bytes, data, CHUNK_ALLOC_FORCE);
-diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
-index 7cbe2f8..20cc43f 100644
---- a/fs/btrfs/ioctl.c
-+++ b/fs/btrfs/ioctl.c
-@@ -2770,7 +2770,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
- up_read(&info->groups_sem);
- }
-
-- user_dest = (struct btrfs_ioctl_space_info *)
-+ user_dest = (struct btrfs_ioctl_space_info __user *)
- (arg + sizeof(struct btrfs_ioctl_space_args));
-
- if (copy_to_user(user_dest, dest_orig, alloc_size))
-diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
-index cfb5543..1ae7347 100644
---- a/fs/btrfs/relocation.c
-+++ b/fs/btrfs/relocation.c
-@@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
- }
- spin_unlock(&rc->reloc_root_tree.lock);
-
-- BUG_ON((struct btrfs_root *)node->data != root);
-+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
-
- if (!del) {
- spin_lock(&rc->reloc_root_tree.lock);
-diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
-index ddf2c90..37afd35 100644
---- a/fs/btrfs/scrub.c
-+++ b/fs/btrfs/scrub.c
-@@ -348,7 +348,9 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
- ret < 0 ? -1 : ref_level,
- ret < 0 ? -1 : ref_root);
- } while (ret != 1);
-+ btrfs_release_path(path);
- } else {
-+ btrfs_release_path(path);
- swarn.path = path;
- iterate_extent_inodes(fs_info, path, found_key.objectid,
- extent_offset,
-diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
-index 200f63b..490b833 100644
---- a/fs/btrfs/super.c
-+++ b/fs/btrfs/super.c
-@@ -1227,6 +1227,7 @@ static struct file_system_type btrfs_fs_type = {
- .kill_sb = kill_anon_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("btrfs");
-
- /*
- * used by btrfsctl to scan devices when no FS is mounted
-diff --git a/fs/buffer.c b/fs/buffer.c
-index c457f84..3e206d5 100644
---- a/fs/buffer.c
-+++ b/fs/buffer.c
-@@ -3326,7 +3326,7 @@ void __init buffer_init(void)
- bh_cachep = kmem_cache_create("buffer_head",
- sizeof(struct buffer_head), 0,
- (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
-- SLAB_MEM_SPREAD),
-+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
- NULL);
-
- /*
-diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
-index 622f469..e8d2d55 100644
---- a/fs/cachefiles/bind.c
-+++ b/fs/cachefiles/bind.c
-@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
- args);
-
- /* start by checking things over */
-- ASSERT(cache->fstop_percent >= 0 &&
-- cache->fstop_percent < cache->fcull_percent &&
-+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
- cache->fcull_percent < cache->frun_percent &&
- cache->frun_percent < 100);
-
-- ASSERT(cache->bstop_percent >= 0 &&
-- cache->bstop_percent < cache->bcull_percent &&
-+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
- cache->bcull_percent < cache->brun_percent &&
- cache->brun_percent < 100);
-
-diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
-index 0a1467b..6a53245 100644
---- a/fs/cachefiles/daemon.c
-+++ b/fs/cachefiles/daemon.c
-@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
- if (n > buflen)
- return -EMSGSIZE;
-
-- if (copy_to_user(_buffer, buffer, n) != 0)
-+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
- return -EFAULT;
-
- return n;
-@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
- if (test_bit(CACHEFILES_DEAD, &cache->flags))
- return -EIO;
-
-- if (datalen < 0 || datalen > PAGE_SIZE - 1)
-+ if (datalen > PAGE_SIZE - 1)
- return -EOPNOTSUPP;
-
- /* drag the command string into the kernel so we can parse it */
-@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
- if (args[0] != '%' || args[1] != '\0')
- return -EINVAL;
-
-- if (fstop < 0 || fstop >= cache->fcull_percent)
-+ if (fstop >= cache->fcull_percent)
- return cachefiles_daemon_range_error(cache, args);
-
- cache->fstop_percent = fstop;
-@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
- if (args[0] != '%' || args[1] != '\0')
- return -EINVAL;
-
-- if (bstop < 0 || bstop >= cache->bcull_percent)
-+ if (bstop >= cache->bcull_percent)
- return cachefiles_daemon_range_error(cache, args);
-
- cache->bstop_percent = bstop;
-diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
-index bd6bc1b..b627b53 100644
---- a/fs/cachefiles/internal.h
-+++ b/fs/cachefiles/internal.h
-@@ -57,7 +57,7 @@ struct cachefiles_cache {
- wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
- struct rb_root active_nodes; /* active nodes (can't be culled) */
- rwlock_t active_lock; /* lock for active_nodes */
-- atomic_t gravecounter; /* graveyard uniquifier */
-+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
- unsigned frun_percent; /* when to stop culling (% files) */
- unsigned fcull_percent; /* when to start culling (% files) */
- unsigned fstop_percent; /* when to stop allocating (% files) */
-@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
- * proc.c
- */
- #ifdef CONFIG_CACHEFILES_HISTOGRAM
--extern atomic_t cachefiles_lookup_histogram[HZ];
--extern atomic_t cachefiles_mkdir_histogram[HZ];
--extern atomic_t cachefiles_create_histogram[HZ];
-+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
-+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
-+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
-
- extern int __init cachefiles_proc_init(void);
- extern void cachefiles_proc_cleanup(void);
- static inline
--void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
-+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
- {
- unsigned long jif = jiffies - start_jif;
- if (jif >= HZ)
- jif = HZ - 1;
-- atomic_inc(&histogram[jif]);
-+ atomic_inc_unchecked(&histogram[jif]);
- }
-
- #else
-diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
-index a0358c2..d6137f2 100644
---- a/fs/cachefiles/namei.c
-+++ b/fs/cachefiles/namei.c
-@@ -318,7 +318,7 @@ try_again:
- /* first step is to make up a grave dentry in the graveyard */
- sprintf(nbuffer, "%08x%08x",
- (uint32_t) get_seconds(),
-- (uint32_t) atomic_inc_return(&cache->gravecounter));
-+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
-
- /* do the multiway lock magic */
- trap = lock_rename(cache->graveyard, dir);
-diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
-index eccd339..4c1d995 100644
---- a/fs/cachefiles/proc.c
-+++ b/fs/cachefiles/proc.c
-@@ -14,9 +14,9 @@
- #include <linux/seq_file.h>
- #include "internal.h"
-
--atomic_t cachefiles_lookup_histogram[HZ];
--atomic_t cachefiles_mkdir_histogram[HZ];
--atomic_t cachefiles_create_histogram[HZ];
-+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
-+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
-+atomic_unchecked_t cachefiles_create_histogram[HZ];
-
- /*
- * display the latency histogram
-@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
- return 0;
- default:
- index = (unsigned long) v - 3;
-- x = atomic_read(&cachefiles_lookup_histogram[index]);
-- y = atomic_read(&cachefiles_mkdir_histogram[index]);
-- z = atomic_read(&cachefiles_create_histogram[index]);
-+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
-+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
-+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
- if (x == 0 && y == 0 && z == 0)
- return 0;
-
-diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
-index b4d2438..0935840 100644
---- a/fs/cachefiles/rdwr.c
-+++ b/fs/cachefiles/rdwr.c
-@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- ret = file->f_op->write(
-- file, (const void __user *) data, len, &pos);
-+ file, (const void __force_user *) data, len, &pos);
- set_fs(old_fs);
- kunmap(page);
- if (ret != len)
-diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
-index 7903e62..096162e 100644
---- a/fs/ceph/dir.c
-+++ b/fs/ceph/dir.c
-@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
- struct ceph_mds_client *mdsc = fsc->mdsc;
- unsigned frag = fpos_frag(filp->f_pos);
-- int off = fpos_off(filp->f_pos);
-+ unsigned int off = fpos_off(filp->f_pos);
- int err;
- u32 ftype;
- struct ceph_mds_reply_info_parsed *rinfo;
-@@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
- if (nd &&
- (nd->flags & LOOKUP_OPEN) &&
- !(nd->intent.open.flags & O_CREAT)) {
-- int mode = nd->intent.open.create_mode & ~current->fs->umask;
-+ int mode = nd->intent.open.create_mode & ~current_umask();
- return ceph_lookup_open(dir, dentry, nd, mode, 1);
- }
-
-diff --git a/fs/ceph/super.c b/fs/ceph/super.c
-index 3c981db..eb87cfb 100644
---- a/fs/ceph/super.c
-+++ b/fs/ceph/super.c
-@@ -785,7 +785,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
- /*
- * construct our own bdi so we can control readahead, etc.
- */
--static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
-+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
-
- static int ceph_register_bdi(struct super_block *sb,
- struct ceph_fs_client *fsc)
-@@ -802,7 +802,7 @@ static int ceph_register_bdi(struct super_block *sb,
- default_backing_dev_info.ra_pages;
-
- err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%d",
-- atomic_long_inc_return(&bdi_seq));
-+ atomic_long_inc_return_unchecked(&bdi_seq));
- if (!err)
- sb->s_bdi = &fsc->backing_dev_info;
- return err;
-@@ -901,6 +901,7 @@ static struct file_system_type ceph_fs_type = {
- .kill_sb = ceph_kill_sb,
- .fs_flags = FS_RENAME_DOES_D_MOVE,
- };
-+MODULE_ALIAS_FS("ceph");
-
- #define _STRINGIFY(x) #x
- #define STRINGIFY(x) _STRINGIFY(x)
-diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
-index 84e8c07..6170d31 100644
---- a/fs/cifs/cifs_debug.c
-+++ b/fs/cifs/cifs_debug.c
-@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
-
- if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
- #ifdef CONFIG_CIFS_STATS2
-- atomic_set(&totBufAllocCount, 0);
-- atomic_set(&totSmBufAllocCount, 0);
-+ atomic_set_unchecked(&totBufAllocCount, 0);
-+ atomic_set_unchecked(&totSmBufAllocCount, 0);
- #endif /* CONFIG_CIFS_STATS2 */
- spin_lock(&cifs_tcp_ses_lock);
- list_for_each(tmp1, &cifs_tcp_ses_list) {
-@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
- tcon = list_entry(tmp3,
- struct cifs_tcon,
- tcon_list);
-- atomic_set(&tcon->num_smbs_sent, 0);
-- atomic_set(&tcon->num_writes, 0);
-- atomic_set(&tcon->num_reads, 0);
-- atomic_set(&tcon->num_oplock_brks, 0);
-- atomic_set(&tcon->num_opens, 0);
-- atomic_set(&tcon->num_posixopens, 0);
-- atomic_set(&tcon->num_posixmkdirs, 0);
-- atomic_set(&tcon->num_closes, 0);
-- atomic_set(&tcon->num_deletes, 0);
-- atomic_set(&tcon->num_mkdirs, 0);
-- atomic_set(&tcon->num_rmdirs, 0);
-- atomic_set(&tcon->num_renames, 0);
-- atomic_set(&tcon->num_t2renames, 0);
-- atomic_set(&tcon->num_ffirst, 0);
-- atomic_set(&tcon->num_fnext, 0);
-- atomic_set(&tcon->num_fclose, 0);
-- atomic_set(&tcon->num_hardlinks, 0);
-- atomic_set(&tcon->num_symlinks, 0);
-- atomic_set(&tcon->num_locks, 0);
-+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
-+ atomic_set_unchecked(&tcon->num_writes, 0);
-+ atomic_set_unchecked(&tcon->num_reads, 0);
-+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
-+ atomic_set_unchecked(&tcon->num_opens, 0);
-+ atomic_set_unchecked(&tcon->num_posixopens, 0);
-+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
-+ atomic_set_unchecked(&tcon->num_closes, 0);
-+ atomic_set_unchecked(&tcon->num_deletes, 0);
-+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
-+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
-+ atomic_set_unchecked(&tcon->num_renames, 0);
-+ atomic_set_unchecked(&tcon->num_t2renames, 0);
-+ atomic_set_unchecked(&tcon->num_ffirst, 0);
-+ atomic_set_unchecked(&tcon->num_fnext, 0);
-+ atomic_set_unchecked(&tcon->num_fclose, 0);
-+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
-+ atomic_set_unchecked(&tcon->num_symlinks, 0);
-+ atomic_set_unchecked(&tcon->num_locks, 0);
- }
- }
- }
-@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
- smBufAllocCount.counter, cifs_min_small);
- #ifdef CONFIG_CIFS_STATS2
- seq_printf(m, "Total Large %d Small %d Allocations\n",
-- atomic_read(&totBufAllocCount),
-- atomic_read(&totSmBufAllocCount));
-+ atomic_read_unchecked(&totBufAllocCount),
-+ atomic_read_unchecked(&totSmBufAllocCount));
- #endif /* CONFIG_CIFS_STATS2 */
-
- seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
-@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
- if (tcon->need_reconnect)
- seq_puts(m, "\tDISCONNECTED ");
- seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
-- atomic_read(&tcon->num_smbs_sent),
-- atomic_read(&tcon->num_oplock_brks));
-+ atomic_read_unchecked(&tcon->num_smbs_sent),
-+ atomic_read_unchecked(&tcon->num_oplock_brks));
- seq_printf(m, "\nReads: %d Bytes: %lld",
-- atomic_read(&tcon->num_reads),
-+ atomic_read_unchecked(&tcon->num_reads),
- (long long)(tcon->bytes_read));
- seq_printf(m, "\nWrites: %d Bytes: %lld",
-- atomic_read(&tcon->num_writes),
-+ atomic_read_unchecked(&tcon->num_writes),
- (long long)(tcon->bytes_written));
- seq_printf(m, "\nFlushes: %d",
-- atomic_read(&tcon->num_flushes));
-+ atomic_read_unchecked(&tcon->num_flushes));
- seq_printf(m, "\nLocks: %d HardLinks: %d "
- "Symlinks: %d",
-- atomic_read(&tcon->num_locks),
-- atomic_read(&tcon->num_hardlinks),
-- atomic_read(&tcon->num_symlinks));
-+ atomic_read_unchecked(&tcon->num_locks),
-+ atomic_read_unchecked(&tcon->num_hardlinks),
-+ atomic_read_unchecked(&tcon->num_symlinks));
- seq_printf(m, "\nOpens: %d Closes: %d "
- "Deletes: %d",
-- atomic_read(&tcon->num_opens),
-- atomic_read(&tcon->num_closes),
-- atomic_read(&tcon->num_deletes));
-+ atomic_read_unchecked(&tcon->num_opens),
-+ atomic_read_unchecked(&tcon->num_closes),
-+ atomic_read_unchecked(&tcon->num_deletes));
- seq_printf(m, "\nPosix Opens: %d "
- "Posix Mkdirs: %d",
-- atomic_read(&tcon->num_posixopens),
-- atomic_read(&tcon->num_posixmkdirs));
-+ atomic_read_unchecked(&tcon->num_posixopens),
-+ atomic_read_unchecked(&tcon->num_posixmkdirs));
- seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
-- atomic_read(&tcon->num_mkdirs),
-- atomic_read(&tcon->num_rmdirs));
-+ atomic_read_unchecked(&tcon->num_mkdirs),
-+ atomic_read_unchecked(&tcon->num_rmdirs));
- seq_printf(m, "\nRenames: %d T2 Renames %d",
-- atomic_read(&tcon->num_renames),
-- atomic_read(&tcon->num_t2renames));
-+ atomic_read_unchecked(&tcon->num_renames),
-+ atomic_read_unchecked(&tcon->num_t2renames));
- seq_printf(m, "\nFindFirst: %d FNext %d "
- "FClose %d",
-- atomic_read(&tcon->num_ffirst),
-- atomic_read(&tcon->num_fnext),
-- atomic_read(&tcon->num_fclose));
-+ atomic_read_unchecked(&tcon->num_ffirst),
-+ atomic_read_unchecked(&tcon->num_fnext),
-+ atomic_read_unchecked(&tcon->num_fclose));
- }
- }
- }
-diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
-index 25bb97f..a0095de 100644
---- a/fs/cifs/cifsfs.c
-+++ b/fs/cifs/cifsfs.c
-@@ -799,6 +799,7 @@ struct file_system_type cifs_fs_type = {
- .kill_sb = cifs_kill_sb,
- /* .fs_flags */
- };
-+MODULE_ALIAS_FS("cifs");
- const struct inode_operations cifs_dir_inode_ops = {
- .create = cifs_create,
- .lookup = cifs_lookup,
-@@ -1018,7 +1019,7 @@ cifs_init_request_bufs(void)
- cifs_req_cachep = kmem_cache_create("cifs_request",
- CIFSMaxBufSize +
- MAX_CIFS_HDR_SIZE, 0,
-- SLAB_HWCACHE_ALIGN, NULL);
-+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
- if (cifs_req_cachep == NULL)
- return -ENOMEM;
-
-@@ -1045,7 +1046,7 @@ cifs_init_request_bufs(void)
- efficient to alloc 1 per page off the slab compared to 17K (5page)
- alloc of large cifs buffers even when page debugging is on */
- cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
-- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
-+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
- NULL);
- if (cifs_sm_req_cachep == NULL) {
- mempool_destroy(cifs_req_poolp);
-@@ -1130,8 +1131,8 @@ init_cifs(void)
- atomic_set(&bufAllocCount, 0);
- atomic_set(&smBufAllocCount, 0);
- #ifdef CONFIG_CIFS_STATS2
-- atomic_set(&totBufAllocCount, 0);
-- atomic_set(&totSmBufAllocCount, 0);
-+ atomic_set_unchecked(&totBufAllocCount, 0);
-+ atomic_set_unchecked(&totSmBufAllocCount, 0);
- #endif /* CONFIG_CIFS_STATS2 */
-
- atomic_set(&midCount, 0);
-diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
-index 7b68088..17a275b 100644
---- a/fs/cifs/cifsglob.h
-+++ b/fs/cifs/cifsglob.h
-@@ -390,28 +390,28 @@ struct cifs_tcon {
- __u16 Flags; /* optional support bits */
- enum statusEnum tidStatus;
- #ifdef CONFIG_CIFS_STATS
-- atomic_t num_smbs_sent;
-- atomic_t num_writes;
-- atomic_t num_reads;
-- atomic_t num_flushes;
-- atomic_t num_oplock_brks;
-- atomic_t num_opens;
-- atomic_t num_closes;
-- atomic_t num_deletes;
-- atomic_t num_mkdirs;
-- atomic_t num_posixopens;
-- atomic_t num_posixmkdirs;
-- atomic_t num_rmdirs;
-- atomic_t num_renames;
-- atomic_t num_t2renames;
-- atomic_t num_ffirst;
-- atomic_t num_fnext;
-- atomic_t num_fclose;
-- atomic_t num_hardlinks;
-- atomic_t num_symlinks;
-- atomic_t num_locks;
-- atomic_t num_acl_get;
-- atomic_t num_acl_set;
-+ atomic_unchecked_t num_smbs_sent;
-+ atomic_unchecked_t num_writes;
-+ atomic_unchecked_t num_reads;
-+ atomic_unchecked_t num_flushes;
-+ atomic_unchecked_t num_oplock_brks;
-+ atomic_unchecked_t num_opens;
-+ atomic_unchecked_t num_closes;
-+ atomic_unchecked_t num_deletes;
-+ atomic_unchecked_t num_mkdirs;
-+ atomic_unchecked_t num_posixopens;
-+ atomic_unchecked_t num_posixmkdirs;
-+ atomic_unchecked_t num_rmdirs;
-+ atomic_unchecked_t num_renames;
-+ atomic_unchecked_t num_t2renames;
-+ atomic_unchecked_t num_ffirst;
-+ atomic_unchecked_t num_fnext;
-+ atomic_unchecked_t num_fclose;
-+ atomic_unchecked_t num_hardlinks;
-+ atomic_unchecked_t num_symlinks;
-+ atomic_unchecked_t num_locks;
-+ atomic_unchecked_t num_acl_get;
-+ atomic_unchecked_t num_acl_set;
- #ifdef CONFIG_CIFS_STATS2
- unsigned long long time_writes;
- unsigned long long time_reads;
-@@ -626,7 +626,7 @@ convert_delimiter(char *path, char delim)
- }
-
- #ifdef CONFIG_CIFS_STATS
--#define cifs_stats_inc atomic_inc
-+#define cifs_stats_inc atomic_inc_unchecked
-
- static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
- unsigned int bytes)
-@@ -983,8 +983,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
- /* Various Debug counters */
- GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
- #ifdef CONFIG_CIFS_STATS2
--GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
--GLOBAL_EXTERN atomic_t totSmBufAllocCount;
-+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
-+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
- #endif
- GLOBAL_EXTERN atomic_t smBufAllocCount;
- GLOBAL_EXTERN atomic_t midCount;
-diff --git a/fs/cifs/file.c b/fs/cifs/file.c
-index f9d2863..12e111d 100644
---- a/fs/cifs/file.c
-+++ b/fs/cifs/file.c
-@@ -1691,10 +1691,14 @@ static int cifs_writepages(struct address_space *mapping,
- index = mapping->writeback_index; /* Start from prev offset */
- end = -1;
- } else {
-- index = wbc->range_start >> PAGE_CACHE_SHIFT;
-- end = wbc->range_end >> PAGE_CACHE_SHIFT;
-- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
-+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
- range_whole = true;
-+ index = 0;
-+ end = ULONG_MAX;
-+ } else {
-+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
-+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
-+ }
- scanned = true;
- }
- retry:
-diff --git a/fs/cifs/link.c b/fs/cifs/link.c
-index 6b0e064..94e6c3c 100644
---- a/fs/cifs/link.c
-+++ b/fs/cifs/link.c
-@@ -600,7 +600,7 @@ symlink_exit:
-
- void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
- {
-- char *p = nd_get_link(nd);
-+ const char *p = nd_get_link(nd);
- if (!IS_ERR(p))
- kfree(p);
- }
-diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
-index 703ef5c..2a44ed5 100644
---- a/fs/cifs/misc.c
-+++ b/fs/cifs/misc.c
-@@ -156,7 +156,7 @@ cifs_buf_get(void)
- memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
- atomic_inc(&bufAllocCount);
- #ifdef CONFIG_CIFS_STATS2
-- atomic_inc(&totBufAllocCount);
-+ atomic_inc_unchecked(&totBufAllocCount);
- #endif /* CONFIG_CIFS_STATS2 */
- }
-
-@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
- /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
- atomic_inc(&smBufAllocCount);
- #ifdef CONFIG_CIFS_STATS2
-- atomic_inc(&totSmBufAllocCount);
-+ atomic_inc_unchecked(&totSmBufAllocCount);
- #endif /* CONFIG_CIFS_STATS2 */
-
- }
-diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
-index 52a820a..1d8ab03 100644
---- a/fs/cifs/readdir.c
-+++ b/fs/cifs/readdir.c
-@@ -86,14 +86,17 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
-
- dentry = d_lookup(parent, name);
- if (dentry) {
-+ int err;
- inode = dentry->d_inode;
- /* update inode in place if i_ino didn't change */
- if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
- cifs_fattr_to_inode(inode, fattr);
- return dentry;
- }
-- d_drop(dentry);
-+ err = d_invalidate(dentry);
- dput(dentry);
-+ if (err)
-+ return NULL;
- }
-
- /*
-diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
-index 80d8508..6de30aa 100644
---- a/fs/cifs/smbencrypt.c
-+++ b/fs/cifs/smbencrypt.c
-@@ -220,7 +220,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
- }
-
- rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
-- memset(wpwd, 0, 129 * sizeof(__le16));
-+ memzero_explicit(wpwd, sizeof(wpwd));
-
- return rc;
- }
-diff --git a/fs/coda/cache.c b/fs/coda/cache.c
-index 4b2e5cb..67b96bb 100644
---- a/fs/coda/cache.c
-+++ b/fs/coda/cache.c
-@@ -24,7 +24,7 @@
- #include "coda_linux.h"
- #include "coda_cache.h"
-
--static atomic_t permission_epoch = ATOMIC_INIT(0);
-+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
-
- /* replace or extend an acl cache hit */
- void coda_cache_enter(struct inode *inode, int mask)
-@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
- struct coda_inode_info *cii = ITOC(inode);
-
- spin_lock(&cii->c_lock);
-- cii->c_cached_epoch = atomic_read(&permission_epoch);
-+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
- if (cii->c_uid != current_fsuid()) {
- cii->c_uid = current_fsuid();
- cii->c_cached_perm = mask;
-@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
- {
- struct coda_inode_info *cii = ITOC(inode);
- spin_lock(&cii->c_lock);
-- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
-+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
- spin_unlock(&cii->c_lock);
- }
-
- /* remove all acl caches */
- void coda_cache_clear_all(struct super_block *sb)
- {
-- atomic_inc(&permission_epoch);
-+ atomic_inc_unchecked(&permission_epoch);
- }
-
-
-@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
- spin_lock(&cii->c_lock);
- hit = (mask & cii->c_cached_perm) == mask &&
- cii->c_uid == current_fsuid() &&
-- cii->c_cached_epoch == atomic_read(&permission_epoch);
-+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
- spin_unlock(&cii->c_lock);
-
- return hit;
-diff --git a/fs/coda/inode.c b/fs/coda/inode.c
-index 871b277..7dcf232 100644
---- a/fs/coda/inode.c
-+++ b/fs/coda/inode.c
-@@ -326,4 +326,5 @@ struct file_system_type coda_fs_type = {
- .kill_sb = kill_anon_super,
- .fs_flags = FS_BINARY_MOUNTDATA,
- };
-+MODULE_ALIAS_FS("coda");
-
-diff --git a/fs/compat.c b/fs/compat.c
-index 4bf082d..d33d8b7 100644
---- a/fs/compat.c
-+++ b/fs/compat.c
-@@ -132,8 +132,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
- static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
- {
- compat_ino_t ino = stat->ino;
-- typeof(ubuf->st_uid) uid = 0;
-- typeof(ubuf->st_gid) gid = 0;
-+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
-+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
- int err;
-
- SET_UID(uid, stat->uid);
-@@ -504,7 +504,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
-
- set_fs(KERNEL_DS);
- /* The __user pointer cast is valid because of the set_fs() */
-- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
-+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
- set_fs(oldfs);
- /* truncating is ok because it's a user address */
- if (!ret)
-@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
- goto out;
-
- ret = -EINVAL;
-- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
-+ if (nr_segs > UIO_MAXIOV)
- goto out;
- if (nr_segs > fast_segs) {
- ret = -ENOMEM;
-@@ -849,6 +849,7 @@ struct compat_old_linux_dirent {
-
- struct compat_readdir_callback {
- struct compat_old_linux_dirent __user *dirent;
-+ struct file * file;
- int result;
- };
-
-@@ -866,6 +867,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
- buf->result = -EOVERFLOW;
- return -EOVERFLOW;
- }
-+
-+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
-+ return 0;
-+
- buf->result++;
- dirent = buf->dirent;
- if (!access_ok(VERIFY_WRITE, dirent,
-@@ -898,6 +903,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
-
- buf.result = 0;
- buf.dirent = dirent;
-+ buf.file = file;
-
- error = vfs_readdir(file, compat_fillonedir, &buf);
- if (buf.result)
-@@ -918,6 +924,7 @@ struct compat_linux_dirent {
- struct compat_getdents_callback {
- struct compat_linux_dirent __user *current_dir;
- struct compat_linux_dirent __user *previous;
-+ struct file * file;
- int count;
- int error;
- };
-@@ -939,6 +946,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
- buf->error = -EOVERFLOW;
- return -EOVERFLOW;
- }
-+
-+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
-+ return 0;
-+
- dirent = buf->previous;
- if (dirent) {
- if (__put_user(offset, &dirent->d_off))
-@@ -986,6 +997,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
- buf.previous = NULL;
- buf.count = count;
- buf.error = 0;
-+ buf.file = file;
-
- error = vfs_readdir(file, compat_filldir, &buf);
- if (error >= 0)
-@@ -1007,6 +1019,7 @@ out:
- struct compat_getdents_callback64 {
- struct linux_dirent64 __user *current_dir;
- struct linux_dirent64 __user *previous;
-+ struct file * file;
- int count;
- int error;
- };
-@@ -1023,6 +1036,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
- buf->error = -EINVAL; /* only used if we fail.. */
- if (reclen > buf->count)
- return -EINVAL;
-+
-+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
-+ return 0;
-+
- dirent = buf->previous;
-
- if (dirent) {
-@@ -1074,13 +1091,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
- buf.previous = NULL;
- buf.count = count;
- buf.error = 0;
-+ buf.file = file;
-
- error = vfs_readdir(file, compat_filldir64, &buf);
- if (error >= 0)
- error = buf.error;
- lastdirent = buf.previous;
- if (lastdirent) {
-- typeof(lastdirent->d_off) d_off = file->f_pos;
-+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
- if (__put_user_unaligned(d_off, &lastdirent->d_off))
- error = -EFAULT;
- else
-diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
-index 112e45a..b59845b 100644
---- a/fs/compat_binfmt_elf.c
-+++ b/fs/compat_binfmt_elf.c
-@@ -30,11 +30,13 @@
- #undef elf_phdr
- #undef elf_shdr
- #undef elf_note
-+#undef elf_dyn
- #undef elf_addr_t
- #define elfhdr elf32_hdr
- #define elf_phdr elf32_phdr
- #define elf_shdr elf32_shdr
- #define elf_note elf32_note
-+#define elf_dyn Elf32_Dyn
- #define elf_addr_t Elf32_Addr
-
- /*
-diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
-index f854cf9..7ff0465 100644
---- a/fs/compat_ioctl.c
-+++ b/fs/compat_ioctl.c
-@@ -623,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
- return -EFAULT;
- if (__get_user(udata, &ss32->iomem_base))
- return -EFAULT;
-- ss.iomem_base = compat_ptr(udata);
-+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
- if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
- __get_user(ss.port_high, &ss32->port_high))
- return -EFAULT;
-@@ -704,8 +704,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
- for (i = 0; i < nmsgs; i++) {
- if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
- return -EFAULT;
-- if (get_user(datap, &umsgs[i].buf) ||
-- put_user(compat_ptr(datap), &tmsgs[i].buf))
-+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
-+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
- return -EFAULT;
- }
- return sys_ioctl(fd, cmd, (unsigned long)tdata);
-@@ -798,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
- copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
- copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
- copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
-- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
-+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
- return -EFAULT;
-
- return ioctl_preallocate(file, p);
-@@ -1646,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
- static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
- {
- unsigned int a, b;
-- a = *(unsigned int *)p;
-- b = *(unsigned int *)q;
-+ a = *(const unsigned int *)p;
-+ b = *(const unsigned int *)q;
- if (a > b)
- return 1;
- if (a < b)
-diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
-index 5ef72c8..0c72810 100644
---- a/fs/configfs/dir.c
-+++ b/fs/configfs/dir.c
-@@ -1059,10 +1059,11 @@ static int configfs_dump(struct configfs_dirent *sd, int level)
- static int configfs_depend_prep(struct dentry *origin,
- struct config_item *target)
- {
-- struct configfs_dirent *child_sd, *sd = origin->d_fsdata;
-+ struct configfs_dirent *child_sd, *sd;
- int ret = 0;
-
-- BUG_ON(!origin || !sd);
-+ BUG_ON(!origin || !origin->d_fsdata);
-+ sd = origin->d_fsdata;
-
- if (sd->s_element == target) /* Boo-yah */
- goto out;
-@@ -1587,7 +1588,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
- }
- for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
- struct configfs_dirent *next;
-- const char * name;
-+ const unsigned char * name;
-+ char d_name[sizeof(next->s_dentry->d_iname)];
- int len;
- struct inode *inode = NULL;
-
-@@ -1597,7 +1599,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
- continue;
-
- name = configfs_get_name(next);
-- len = strlen(name);
-+ if (next->s_dentry && name == next->s_dentry->d_iname) {
-+ len = next->s_dentry->d_name.len;
-+ memcpy(d_name, name, len);
-+ name = d_name;
-+ } else
-+ len = strlen(name);
-
- /*
- * We'll have a dentry and an inode for
-diff --git a/fs/configfs/item.c b/fs/configfs/item.c
-index 50cee7f..8238ebd 100644
---- a/fs/configfs/item.c
-+++ b/fs/configfs/item.c
-@@ -116,7 +116,7 @@ void config_item_init_type_name(struct config_item *item,
- const char *name,
- struct config_item_type *type)
- {
-- config_item_set_name(item, name);
-+ config_item_set_name(item, "%s", name);
- item->ci_type = type;
- config_item_init(item);
- }
-@@ -125,7 +125,7 @@ EXPORT_SYMBOL(config_item_init_type_name);
- void config_group_init_type_name(struct config_group *group, const char *name,
- struct config_item_type *type)
- {
-- config_item_set_name(&group->cg_item, name);
-+ config_item_set_name(&group->cg_item, "%s", name);
- group->cg_item.ci_type = type;
- config_group_init(group);
- }
-diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
-index 276e15c..aeac324 100644
---- a/fs/configfs/mount.c
-+++ b/fs/configfs/mount.c
-@@ -117,6 +117,7 @@ static struct file_system_type configfs_fs_type = {
- .mount = configfs_do_mount,
- .kill_sb = kill_litter_super,
- };
-+MODULE_ALIAS_FS("configfs");
-
- int configfs_pin_fs(void)
- {
-diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
-index 739fb59..5385976 100644
---- a/fs/cramfs/inode.c
-+++ b/fs/cramfs/inode.c
-@@ -576,6 +576,7 @@ static struct file_system_type cramfs_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("cramfs");
-
- static int __init init_cramfs_fs(void)
- {
-diff --git a/fs/dcache.c b/fs/dcache.c
-index 8bc98af..68601d9 100644
---- a/fs/dcache.c
-+++ b/fs/dcache.c
-@@ -103,11 +103,11 @@ static unsigned int d_hash_shift __read_mostly;
-
- static struct hlist_bl_head *dentry_hashtable __read_mostly;
-
--static inline struct hlist_bl_head *d_hash(struct dentry *parent,
-- unsigned long hash)
-+static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
-+ unsigned int hash)
- {
-- hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
-- hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
-+ hash += (unsigned long) parent / L1_CACHE_BYTES;
-+ hash = hash + (hash >> D_HASHBITS);
- return dentry_hashtable + (hash & D_HASHMASK);
- }
-
-@@ -478,15 +478,18 @@ repeat:
- return;
- }
-
-- if (dentry->d_flags & DCACHE_OP_DELETE) {
-+ /* Unreachable? Get rid of it */
-+ if (unlikely(d_unhashed(dentry)))
-+ goto kill_it;
-+
-+ if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
-+ goto kill_it;
-+
-+ if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
- if (dentry->d_op->d_delete(dentry))
- goto kill_it;
- }
-
-- /* Unreachable? Get rid of it */
-- if (d_unhashed(dentry))
-- goto kill_it;
--
- /*
- * If this dentry needs lookup, don't set the referenced flag so that it
- * is more likely to be cleaned up by the dcache shrinker in case of
-@@ -1016,13 +1019,13 @@ ascend:
- /* might go back up the wrong parent if we have had a rename */
- if (!locked && read_seqretry(&rename_lock, seq))
- goto rename_retry;
-- next = child->d_child.next;
-- while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
-+ /* go into the first sibling still alive */
-+ do {
-+ next = child->d_child.next;
- if (next == &this_parent->d_subdirs)
- goto ascend;
- child = list_entry(next, struct dentry, d_child);
-- next = next->next;
-- }
-+ } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
- rcu_read_unlock();
- goto resume;
- }
-@@ -1235,6 +1238,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
- dentry->d_sb = sb;
- dentry->d_op = NULL;
- dentry->d_fsdata = NULL;
-+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
-+ atomic_set(&dentry->chroot_refcnt, 0);
-+#endif
- INIT_HLIST_BL_NODE(&dentry->d_hash);
- INIT_LIST_HEAD(&dentry->d_lru);
- INIT_LIST_HEAD(&dentry->d_subdirs);
-@@ -3082,7 +3088,8 @@ void __init vfs_caches_init(unsigned long mempages)
- mempages -= reserve;
-
- names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
-- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
-+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
-+ SLAB_NO_SANITIZE, NULL);
-
- dcache_init();
- inode_init();
-diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
-index a15f1e2..3077628 100644
---- a/fs/debugfs/inode.c
-+++ b/fs/debugfs/inode.c
-@@ -164,6 +164,7 @@ static struct file_system_type debug_fs_type = {
- .mount = debug_mount,
- .kill_sb = kill_litter_super,
- };
-+MODULE_ALIAS_FS("debugfs");
-
- static int debugfs_create_by_name(const char *name, mode_t mode,
- struct dentry *parent,
-@@ -277,11 +278,20 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
- * If debugfs is not enabled in the kernel, the value -%ENODEV will be
- * returned.
- */
-+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
-+extern int grsec_enable_sysfs_restrict;
-+#endif
-+
- struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
- {
-- return debugfs_create_file(name,
-- S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
-- parent, NULL, NULL);
-+ umode_t mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
-+
-+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
-+ if (grsec_enable_sysfs_restrict)
-+ mode = S_IFDIR | S_IRWXU;
-+#endif
-+
-+ return debugfs_create_file(name, mode, parent, NULL, NULL);
- }
- EXPORT_SYMBOL_GPL(debugfs_create_dir);
-
-diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
-index 782569b..175dea4 100644
---- a/fs/ecryptfs/inode.c
-+++ b/fs/ecryptfs/inode.c
-@@ -705,7 +705,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
- old_fs = get_fs();
- set_fs(get_ds());
- rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
-- (char __user *)lower_buf,
-+ (char __force_user *)lower_buf,
- lower_bufsiz);
- set_fs(old_fs);
- if (rc < 0)
-@@ -751,7 +751,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
- }
- old_fs = get_fs();
- set_fs(get_ds());
-- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
-+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
- set_fs(old_fs);
- if (rc < 0) {
- kfree(buf);
-@@ -766,7 +766,7 @@ out:
- static void
- ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
- {
-- char *buf = nd_get_link(nd);
-+ const char *buf = nd_get_link(nd);
- if (!IS_ERR(buf)) {
- /* Free the char* */
- kfree(buf);
-diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
-index 5ce56e7..d80e1db 100644
---- a/fs/ecryptfs/keystore.c
-+++ b/fs/ecryptfs/keystore.c
-@@ -1152,7 +1152,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
- struct ecryptfs_message *msg = NULL;
- char *auth_tok_sig;
- char *payload = NULL;
-- size_t payload_len;
-+ size_t payload_len = 0;
- int rc;
-
- rc = ecryptfs_get_auth_tok_sig(&auth_tok_sig, auth_tok);
-@@ -1204,8 +1204,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
- crypt_stat->key_size);
- }
- out:
-- if (msg)
-- kfree(msg);
-+ kfree(msg);
- kfree(payload);
- return rc;
- }
-diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
-index 62b8ddc..7df8b1c 100644
---- a/fs/ecryptfs/main.c
-+++ b/fs/ecryptfs/main.c
-@@ -639,6 +639,7 @@ static struct file_system_type ecryptfs_fs_type = {
- .kill_sb = ecryptfs_kill_block_super,
- .fs_flags = 0
- };
-+MODULE_ALIAS_FS("ecryptfs");
-
- /**
- * inode_info_init_once
-diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
-index de42310..867dddd 100644
---- a/fs/ecryptfs/miscdev.c
-+++ b/fs/ecryptfs/miscdev.c
-@@ -338,7 +338,7 @@ check_list:
- goto out_unlock_msg_ctx;
- i = 5;
- if (msg_ctx->msg) {
-- if (copy_to_user(&buf[i], packet_length, packet_length_size))
-+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
- goto out_unlock_msg_ctx;
- i += packet_length_size;
- if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
-diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
-index 608c1c3..7d040a8 100644
---- a/fs/ecryptfs/read_write.c
-+++ b/fs/ecryptfs/read_write.c
-@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
- return -EIO;
- fs_save = get_fs();
- set_fs(get_ds());
-- rc = vfs_write(lower_file, data, size, &offset);
-+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
- set_fs(fs_save);
- mark_inode_dirty_sync(ecryptfs_inode);
- return rc;
-@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
- return -EIO;
- fs_save = get_fs();
- set_fs(get_ds());
-- rc = vfs_read(lower_file, data, size, &offset);
-+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
- set_fs(fs_save);
- return rc;
- }
-diff --git a/fs/efs/super.c b/fs/efs/super.c
-index 0f31acb..395ebc9 100644
---- a/fs/efs/super.c
-+++ b/fs/efs/super.c
-@@ -33,6 +33,7 @@ static struct file_system_type efs_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("efs");
-
- static struct pt_types sgi_pt_types[] = {
- {0x00, "SGI vh"},
-diff --git a/fs/eventpoll.c b/fs/eventpoll.c
-index 451b9b8..12e5a03 100644
---- a/fs/eventpoll.c
-+++ b/fs/eventpoll.c
-@@ -1560,8 +1560,8 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
- error = PTR_ERR(file);
- goto out_free_fd;
- }
-- fd_install(fd, file);
- ep->file = file;
-+ fd_install(fd, file);
- return fd;
-
- out_free_fd:
-diff --git a/fs/exec.c b/fs/exec.c
-index 7adb43f..be703f8 100644
---- a/fs/exec.c
-+++ b/fs/exec.c
-@@ -55,12 +55,35 @@
- #include <linux/pipe_fs_i.h>
- #include <linux/oom.h>
- #include <linux/compat.h>
-+#include <linux/random.h>
-+#include <linux/seq_file.h>
-+#include <linux/mman.h>
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+#include <linux/kallsyms.h>
-+#include <linux/kdebug.h>
-+#endif
-+
-+#include <trace/events/fs.h>
-
- #include <asm/uaccess.h>
-+#include <asm/sections.h>
- #include <asm/mmu_context.h>
- #include <asm/tlb.h>
- #include "internal.h"
-
-+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
-+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
-+{
-+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
-+}
-+#endif
-+
-+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
-+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
-+EXPORT_SYMBOL(pax_set_initial_flags_func);
-+#endif
-+
- int core_uses_pid;
- char core_pattern[CORENAME_MAX_SIZE] = "core";
- unsigned int core_pipe_limit;
-@@ -70,20 +93,23 @@ struct core_name {
- char *corename;
- int used, size;
- };
--static atomic_t call_count = ATOMIC_INIT(1);
-+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
-
- /* The maximal length of core_pattern is also specified in sysctl.c */
-
- static LIST_HEAD(formats);
- static DEFINE_RWLOCK(binfmt_lock);
-
-+extern int gr_process_kernel_exec_ban(void);
-+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
-+
- int __register_binfmt(struct linux_binfmt * fmt, int insert)
- {
- if (!fmt)
- return -EINVAL;
- write_lock(&binfmt_lock);
-- insert ? list_add(&fmt->lh, &formats) :
-- list_add_tail(&fmt->lh, &formats);
-+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
-+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
- write_unlock(&binfmt_lock);
- return 0;
- }
-@@ -93,7 +119,7 @@ EXPORT_SYMBOL(__register_binfmt);
- void unregister_binfmt(struct linux_binfmt * fmt)
- {
- write_lock(&binfmt_lock);
-- list_del(&fmt->lh);
-+ pax_list_del((struct list_head *)&fmt->lh);
- write_unlock(&binfmt_lock);
- }
-
-@@ -188,18 +214,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
- int write)
- {
- struct page *page;
-- int ret;
-
--#ifdef CONFIG_STACK_GROWSUP
-- if (write) {
-- ret = expand_downwards(bprm->vma, pos);
-- if (ret < 0)
-- return NULL;
-- }
--#endif
-- ret = get_user_pages(current, bprm->mm, pos,
-- 1, write, 1, &page, NULL);
-- if (ret <= 0)
-+ if (0 > expand_downwards(bprm->vma, pos))
-+ return NULL;
-+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
- return NULL;
-
- if (write) {
-@@ -215,6 +233,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
- if (size <= ARG_MAX)
- return page;
-
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ // only allow 512KB for argv+env on suid/sgid binaries
-+ // to prevent easy ASLR exhaustion
-+ if (((bprm->cred->euid != current_euid()) ||
-+ (bprm->cred->egid != current_egid())) &&
-+ (size > (512 * 1024))) {
-+ put_page(page);
-+ return NULL;
-+ }
-+#endif
-+
- /*
- * Limit to 1/4-th the stack size for the argv+env strings.
- * This ensures that:
-@@ -274,6 +303,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
- vma->vm_end = STACK_TOP_MAX;
- vma->vm_start = vma->vm_end - PAGE_SIZE;
- vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
-+#endif
-+
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- INIT_LIST_HEAD(&vma->anon_vma_chain);
-
-@@ -288,6 +322,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
- mm->stack_vm = mm->total_vm = 1;
- up_write(&mm->mmap_sem);
- bprm->p = vma->vm_end - sizeof(void *);
-+
-+#ifdef CONFIG_PAX_RANDUSTACK
-+ if (randomize_va_space)
-+ bprm->p ^= random32() & ~PAGE_MASK;
-+#endif
-+
- return 0;
- err:
- up_write(&mm->mmap_sem);
-@@ -403,12 +443,12 @@ struct user_arg_ptr {
- union {
- const char __user *const __user *native;
- #ifdef CONFIG_COMPAT
-- compat_uptr_t __user *compat;
-+ const compat_uptr_t __user *compat;
- #endif
- } ptr;
- };
-
--static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
-+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
- {
- const char __user *native;
-
-@@ -417,14 +457,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
- compat_uptr_t compat;
-
- if (get_user(compat, argv.ptr.compat + nr))
-- return ERR_PTR(-EFAULT);
-+ return (const char __force_user *)ERR_PTR(-EFAULT);
-
- return compat_ptr(compat);
- }
- #endif
-
- if (get_user(native, argv.ptr.native + nr))
-- return ERR_PTR(-EFAULT);
-+ return (const char __force_user *)ERR_PTR(-EFAULT);
-
- return native;
- }
-@@ -443,11 +483,12 @@ static int count(struct user_arg_ptr argv, int max)
- if (!p)
- break;
-
-- if (IS_ERR(p))
-+ if (IS_ERR((const char __force_kernel *)p))
- return -EFAULT;
-
-- if (i++ >= max)
-+ if (i >= max)
- return -E2BIG;
-+ ++i;
-
- if (fatal_signal_pending(current))
- return -ERESTARTNOHAND;
-@@ -477,7 +518,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
-
- ret = -EFAULT;
- str = get_user_arg_ptr(argv, argc);
-- if (IS_ERR(str))
-+ if (IS_ERR((const char __force_kernel *)str))
- goto out;
-
- len = strnlen_user(str, MAX_ARG_STRLEN);
-@@ -559,7 +600,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
- int r;
- mm_segment_t oldfs = get_fs();
- struct user_arg_ptr argv = {
-- .ptr.native = (const char __user *const __user *)__argv,
-+ .ptr.native = (const char __user * const __force_user *)__argv,
- };
-
- set_fs(KERNEL_DS);
-@@ -594,7 +635,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
- unsigned long new_end = old_end - shift;
- struct mmu_gather tlb;
-
-- BUG_ON(new_start > new_end);
-+ if (new_start >= new_end || new_start < mmap_min_addr)
-+ return -ENOMEM;
-
- /*
- * ensure there are no vmas between where we want to go
-@@ -603,6 +645,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
- if (vma != find_vma(mm, new_start))
- return -EFAULT;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ BUG_ON(pax_find_mirror_vma(vma));
-+#endif
-+
- /*
- * cover the whole range: [new_start, old_end)
- */
-@@ -683,10 +729,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
- stack_top = arch_align_stack(stack_top);
- stack_top = PAGE_ALIGN(stack_top);
-
-- if (unlikely(stack_top < mmap_min_addr) ||
-- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
-- return -ENOMEM;
--
- stack_shift = vma->vm_end - stack_top;
-
- bprm->p -= stack_shift;
-@@ -698,8 +740,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
- bprm->exec -= stack_shift;
-
- down_write(&mm->mmap_sem);
-+
-+ /* Move stack pages down in memory. */
-+ if (stack_shift) {
-+ ret = shift_arg_pages(vma, stack_shift);
-+ if (ret)
-+ goto out_unlock;
-+ }
-+
- vm_flags = VM_STACK_FLAGS;
-
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+ vm_flags &= ~VM_EXEC;
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+ if (mm->pax_flags & MF_PAX_MPROTECT)
-+ vm_flags &= ~VM_MAYEXEC;
-+#endif
-+
-+ }
-+#endif
-+
- /*
- * Adjust stack execute permissions; explicitly enable for
- * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
-@@ -718,13 +780,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
- goto out_unlock;
- BUG_ON(prev != vma);
-
-- /* Move stack pages down in memory. */
-- if (stack_shift) {
-- ret = shift_arg_pages(vma, stack_shift);
-- if (ret)
-- goto out_unlock;
-- }
--
- /* mprotect_fixup is overkill to remove the temporary stack flags */
- vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
-
-@@ -748,6 +803,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
- #endif
- current->mm->start_stack = bprm->p;
- ret = expand_stack(vma, stack_base);
-+
-+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
-+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
-+ unsigned long size, flags, vm_flags;
-+
-+ size = STACK_TOP - vma->vm_end;
-+ flags = MAP_FIXED | MAP_PRIVATE;
-+ vm_flags = VM_DONTEXPAND | VM_RESERVED;
-+
-+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, flags, vm_flags, 0);
-+
-+#ifdef CONFIG_X86
-+ if (!ret) {
-+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
-+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), flags, vm_flags, 0);
-+ }
-+#endif
-+
-+ }
-+#endif
-+
- if (ret)
- ret = -EFAULT;
-
-@@ -782,6 +858,8 @@ struct file *open_exec(const char *name)
-
- fsnotify_open(file);
-
-+ trace_open_exec(name);
-+
- err = deny_write_access(file);
- if (err)
- goto exit;
-@@ -805,7 +883,7 @@ int kernel_read(struct file *file, loff_t offset,
- old_fs = get_fs();
- set_fs(get_ds());
- /* The cast to a user pointer is valid due to the set_fs() */
-- result = vfs_read(file, (void __user *)addr, count, &pos);
-+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
- set_fs(old_fs);
- return result;
- }
-@@ -841,6 +919,7 @@ static int exec_mmap(struct mm_struct *mm)
- tsk->mm = mm;
- tsk->active_mm = mm;
- activate_mm(active_mm, mm);
-+ populate_stack();
- task_unlock(tsk);
- arch_pick_mmap_layout(mm);
- if (old_mm) {
-@@ -903,9 +982,13 @@ static int de_thread(struct task_struct *tsk)
- if (!thread_group_leader(tsk)) {
- struct task_struct *leader = tsk->group_leader;
-
-- sig->notify_count = -1; /* for exit_notify() */
- for (;;) {
- write_lock_irq(&tasklist_lock);
-+ /*
-+ * Do this under tasklist_lock to ensure that
-+ * exit_notify() can't miss ->group_exit_task
-+ */
-+ sig->notify_count = -1;
- if (likely(leader->exit_state))
- break;
- __set_current_state(TASK_UNINTERRUPTIBLE);
-@@ -1070,6 +1153,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
- perf_event_comm(tsk);
- }
-
-+static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
-+{
-+ int i, ch;
-+
-+ /* Copies the binary name from after last slash */
-+ for (i = 0; (ch = *(fn++)) != '\0';) {
-+ if (ch == '/')
-+ i = 0; /* overwrite what we wrote */
-+ else
-+ if (i < len - 1)
-+ tcomm[i++] = ch;
-+ }
-+ tcomm[i] = '\0';
-+}
-+
- int flush_old_exec(struct linux_binprm * bprm)
- {
- int retval;
-@@ -1084,6 +1182,7 @@ int flush_old_exec(struct linux_binprm * bprm)
-
- set_mm_exe_file(bprm->mm, bprm->file);
-
-+ filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
- /*
- * Release all of the old mmap stuff
- */
-@@ -1116,10 +1215,6 @@ EXPORT_SYMBOL(would_dump);
-
- void setup_new_exec(struct linux_binprm * bprm)
- {
-- int i, ch;
-- const char *name;
-- char tcomm[sizeof(current->comm)];
--
- arch_pick_mmap_layout(current->mm);
-
- /* This is the point of no return */
-@@ -1130,18 +1225,7 @@ void setup_new_exec(struct linux_binprm * bprm)
- else
- set_dumpable(current->mm, suid_dumpable);
-
-- name = bprm->filename;
--
-- /* Copies the binary name from after last slash */
-- for (i=0; (ch = *(name++)) != '\0';) {
-- if (ch == '/')
-- i = 0; /* overwrite what we wrote */
-- else
-- if (i < (sizeof(tcomm) - 1))
-- tcomm[i++] = ch;
-- }
-- tcomm[i] = '\0';
-- set_task_comm(current, tcomm);
-+ set_task_comm(current, bprm->tcomm);
-
- /* Set the new mm task size. We have to do that late because it may
- * depend on TIF_32BIT which is only updated in flush_thread() on
-@@ -1229,7 +1313,7 @@ void install_exec_creds(struct linux_binprm *bprm)
- * wait until new credentials are committed
- * by commit_creds() above
- */
-- if (get_dumpable(current->mm) != SUID_DUMP_USER)
-+ if (get_dumpable(current->mm) != SUID_DUMPABLE_ENABLED)
- perf_event_exit_task(current);
- /*
- * cred_guard_mutex must be held at least to this point to prevent
-@@ -1259,6 +1343,13 @@ int check_unsafe_exec(struct linux_binprm *bprm)
- bprm->unsafe |= LSM_UNSAFE_PTRACE;
- }
-
-+ /*
-+ * This isn't strictly necessary, but it makes it harder for LSMs to
-+ * mess up.
-+ */
-+ if (current->no_new_privs)
-+ bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
-+
- n_fs = 1;
- spin_lock(&p->fs->lock);
- rcu_read_lock();
-@@ -1268,7 +1359,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
- }
- rcu_read_unlock();
-
-- if (p->fs->users > n_fs) {
-+ if (atomic_read(&p->fs->users) > n_fs) {
- bprm->unsafe |= LSM_UNSAFE_SHARE;
- } else {
- res = -EAGAIN;
-@@ -1296,6 +1387,9 @@ static void bprm_fill_uid(struct linux_binprm *bprm)
- if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
- return;
-
-+ if (current->no_new_privs)
-+ return;
-+
- inode = bprm->file->f_path.dentry->d_inode;
- mode = ACCESS_ONCE(inode->i_mode);
- if (!(mode & (S_ISUID|S_ISGID)))
-@@ -1321,8 +1415,8 @@ static void bprm_fill_uid(struct linux_binprm *bprm)
- }
- }
-
--/*
-- * Fill the binprm structure from the inode.
-+/*
-+ * Fill the binprm structure from the inode.
- * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
- *
- * This may be called multiple times for binary chains (scripts for example).
-@@ -1478,6 +1572,31 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
-
- EXPORT_SYMBOL(search_binary_handler);
-
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+static DEFINE_PER_CPU(u64, exec_counter);
-+static int __init init_exec_counters(void)
-+{
-+ unsigned int cpu;
-+
-+ for_each_possible_cpu(cpu) {
-+ per_cpu(exec_counter, cpu) = (u64)cpu;
-+ }
-+
-+ return 0;
-+}
-+early_initcall(init_exec_counters);
-+static inline void increment_exec_counter(void)
-+{
-+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
-+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
-+}
-+#else
-+static inline void increment_exec_counter(void) {}
-+#endif
-+
-+extern void gr_handle_exec_args(struct linux_binprm *bprm,
-+ struct user_arg_ptr argv);
-+
- /*
- * sys_execve() executes a new program.
- */
-@@ -1486,6 +1605,11 @@ static int do_execve_common(const char *filename,
- struct user_arg_ptr envp,
- struct pt_regs *regs)
- {
-+#ifdef CONFIG_GRKERNSEC
-+ struct file *old_exec_file;
-+ struct acl_subject_label *old_acl;
-+ struct rlimit old_rlim[RLIM_NLIMITS];
-+#endif
- struct linux_binprm *bprm;
- struct file *file;
- struct files_struct *displaced;
-@@ -1493,6 +1617,8 @@ static int do_execve_common(const char *filename,
- int retval;
- const struct cred *cred = current_cred();
-
-+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&cred->user->processes), 1);
-+
- /*
- * We move the actual failure in case of RLIMIT_NPROC excess from
- * set*uid() to execve() because too many poorly written programs
-@@ -1533,12 +1659,22 @@ static int do_execve_common(const char *filename,
- if (IS_ERR(file))
- goto out_unmark;
-
-+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
-+ retval = -EPERM;
-+ goto out_file;
-+ }
-+
- sched_exec();
-
- bprm->file = file;
- bprm->filename = filename;
- bprm->interp = filename;
-
-+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
-+ retval = -EACCES;
-+ goto out_file;
-+ }
-+
- retval = bprm_mm_init(bprm);
- if (retval)
- goto out_file;
-@@ -1555,24 +1691,70 @@ static int do_execve_common(const char *filename,
- if (retval < 0)
- goto out;
-
-+#ifdef CONFIG_GRKERNSEC
-+ old_acl = current->acl;
-+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
-+ old_exec_file = current->exec_file;
-+ get_file(file);
-+ current->exec_file = file;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ /* limit suid stack to 8MB
-+ we saved the old limits above and will restore them if this exec fails
-+ */
-+ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
-+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
-+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
-+#endif
-+
-+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
-+ retval = -EPERM;
-+ goto out_fail;
-+ }
-+
-+ if (!gr_tpe_allow(file)) {
-+ retval = -EACCES;
-+ goto out_fail;
-+ }
-+
-+ if (gr_check_crash_exec(file)) {
-+ retval = -EACCES;
-+ goto out_fail;
-+ }
-+
-+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
-+ bprm->unsafe);
-+ if (retval < 0)
-+ goto out_fail;
-+
- retval = copy_strings_kernel(1, &bprm->filename, bprm);
- if (retval < 0)
-- goto out;
-+ goto out_fail;
-
- bprm->exec = bprm->p;
- retval = copy_strings(bprm->envc, envp, bprm);
- if (retval < 0)
-- goto out;
-+ goto out_fail;
-
- retval = copy_strings(bprm->argc, argv, bprm);
- if (retval < 0)
-- goto out;
-+ goto out_fail;
-+
-+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
-+
-+ gr_handle_exec_args(bprm, argv);
-
- retval = search_binary_handler(bprm,regs);
- if (retval < 0)
-- goto out;
-+ goto out_fail;
-+#ifdef CONFIG_GRKERNSEC
-+ if (old_exec_file)
-+ fput(old_exec_file);
-+#endif
-
- /* execve succeeded */
-+
-+ increment_exec_counter();
- current->fs->in_exec = 0;
- current->in_execve = 0;
- acct_update_integrals(current);
-@@ -1581,6 +1763,14 @@ static int do_execve_common(const char *filename,
- put_files_struct(displaced);
- return retval;
-
-+out_fail:
-+#ifdef CONFIG_GRKERNSEC
-+ current->acl = old_acl;
-+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
-+ fput(current->exec_file);
-+ current->exec_file = old_exec_file;
-+#endif
-+
- out:
- if (bprm->mm) {
- acct_arg_size(bprm, 0);
-@@ -1654,7 +1844,7 @@ static int expand_corename(struct core_name *cn)
- {
- char *old_corename = cn->corename;
-
-- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
-+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
- cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
-
- if (!cn->corename) {
-@@ -1751,7 +1941,7 @@ static int format_corename(struct core_name *cn, long signr)
- int pid_in_pattern = 0;
- int err = 0;
-
-- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
-+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
- cn->corename = kmalloc(cn->size, GFP_KERNEL);
- cn->used = 0;
-
-@@ -1848,6 +2038,309 @@ out:
- return ispipe;
- }
-
-+int pax_check_flags(unsigned long *flags)
-+{
-+ int retval = 0;
-+
-+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
-+ if (*flags & MF_PAX_SEGMEXEC)
-+ {
-+ *flags &= ~MF_PAX_SEGMEXEC;
-+ retval = -EINVAL;
-+ }
-+#endif
-+
-+ if ((*flags & MF_PAX_PAGEEXEC)
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ && (*flags & MF_PAX_SEGMEXEC)
-+#endif
-+
-+ )
-+ {
-+ *flags &= ~MF_PAX_PAGEEXEC;
-+ retval = -EINVAL;
-+ }
-+
-+ if ((*flags & MF_PAX_MPROTECT)
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
-+#endif
-+
-+ )
-+ {
-+ *flags &= ~MF_PAX_MPROTECT;
-+ retval = -EINVAL;
-+ }
-+
-+ if ((*flags & MF_PAX_EMUTRAMP)
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
-+#endif
-+
-+ )
-+ {
-+ *flags &= ~MF_PAX_EMUTRAMP;
-+ retval = -EINVAL;
-+ }
-+
-+ return retval;
-+}
-+
-+EXPORT_SYMBOL(pax_check_flags);
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+char *pax_get_path(const struct path *path, char *buf, int buflen)
-+{
-+ char *pathname = d_path(path, buf, buflen);
-+
-+ if (IS_ERR(pathname))
-+ goto toolong;
-+
-+ pathname = mangle_path(buf, pathname, "\t\n\\");
-+ if (!pathname)
-+ goto toolong;
-+
-+ *pathname = 0;
-+ return buf;
-+
-+toolong:
-+ return "<path too long>";
-+}
-+EXPORT_SYMBOL(pax_get_path);
-+
-+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ struct task_struct *tsk = current;
-+ struct mm_struct *mm = current->mm;
-+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
-+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
-+ char *path_exec = NULL;
-+ char *path_fault = NULL;
-+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
-+
-+ if (buffer_exec && buffer_fault) {
-+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
-+
-+ down_read(&mm->mmap_sem);
-+ vma = mm->mmap;
-+ while (vma && (!vma_exec || !vma_fault)) {
-+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
-+ vma_exec = vma;
-+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
-+ vma_fault = vma;
-+ vma = vma->vm_next;
-+ }
-+ if (vma_exec)
-+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
-+ if (vma_fault) {
-+ start = vma_fault->vm_start;
-+ end = vma_fault->vm_end;
-+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
-+ if (vma_fault->vm_file)
-+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
-+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
-+ path_fault = "<heap>";
-+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
-+ path_fault = "<stack>";
-+ else
-+ path_fault = "<anonymous mapping>";
-+ }
-+ up_read(&mm->mmap_sem);
-+ }
-+ if (tsk->signal->curr_ip)
-+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
-+ else
-+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
-+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
-+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
-+ task_uid(tsk), task_euid(tsk), pc, sp);
-+ free_page((unsigned long)buffer_exec);
-+ free_page((unsigned long)buffer_fault);
-+ pax_report_insns(regs, pc, sp);
-+ do_coredump(SIGKILL, SIGKILL, regs);
-+}
-+#endif
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+void pax_report_refcount_overflow(struct pt_regs *regs)
-+{
-+ if (current->signal->curr_ip)
-+ printk(KERN_EMERG "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
-+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
-+ else
-+ printk(KERN_EMERG "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
-+ current->comm, task_pid_nr(current), current_uid(), current_euid());
-+ print_symbol(KERN_EMERG "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
-+ preempt_disable();
-+ show_regs(regs);
-+ preempt_enable();
-+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
-+}
-+#endif
-+
-+#ifdef CONFIG_PAX_USERCOPY
-+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
-+static noinline int check_stack_object(const void *obj, unsigned long len)
-+{
-+ const void * const stack = task_stack_page(current);
-+ const void * const stackend = stack + THREAD_SIZE;
-+
-+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
-+ const void *frame = NULL;
-+ const void *oldframe;
-+#endif
-+
-+ if (obj + len < obj)
-+ return -1;
-+
-+ if (obj + len <= stack || stackend <= obj)
-+ return 0;
-+
-+ if (obj < stack || stackend < obj + len)
-+ return -1;
-+
-+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
-+ oldframe = __builtin_frame_address(1);
-+ if (oldframe)
-+ frame = __builtin_frame_address(2);
-+ /*
-+ low ----------------------------------------------> high
-+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
-+ ^----------------^
-+ allow copies only within here
-+ */
-+ while (stack <= frame && frame < stackend) {
-+ /* if obj + len extends past the last frame, this
-+ check won't pass and the next frame will be 0,
-+ causing us to bail out and correctly report
-+ the copy as invalid
-+ */
-+ if (obj + len <= frame)
-+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
-+ oldframe = frame;
-+ frame = *(const void * const *)frame;
-+ }
-+ return -1;
-+#else
-+ return 1;
-+#endif
-+}
-+
-+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
-+{
-+ if (current->signal->curr_ip)
-+ printk(KERN_EMERG "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
-+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
-+ else
-+ printk(KERN_EMERG "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
-+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
-+ dump_stack();
-+ gr_handle_kernel_exploit();
-+ do_group_exit(SIGKILL);
-+}
-+#endif
-+
-+#ifdef CONFIG_PAX_USERCOPY
-+
-+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
-+{
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
-+#ifdef CONFIG_MODULES
-+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
-+#else
-+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
-+#endif
-+
-+#else
-+ unsigned long textlow = (unsigned long)_stext;
-+ unsigned long texthigh = (unsigned long)_etext;
-+
-+#ifdef CONFIG_X86_64
-+ /* check against linear mapping as well */
-+ if (high > (unsigned long)__va(__pa(textlow)) &&
-+ low < (unsigned long)__va(__pa(texthigh)))
-+ return true;
-+#endif
-+
-+#endif
-+
-+ if (high <= textlow || low >= texthigh)
-+ return false;
-+ else
-+ return true;
-+}
-+#endif
-+
-+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
-+{
-+#ifdef CONFIG_PAX_USERCOPY
-+ const char *type;
-+#endif
-+
-+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_X86_64)
-+ unsigned long stackstart = (unsigned long)task_stack_page(current);
-+ unsigned long currentsp = (unsigned long)&stackstart;
-+ if (unlikely(currentsp < stackstart + 512 ||
-+ currentsp >= stackstart + THREAD_SIZE))
-+ BUG();
-+#endif
-+
-+#ifndef CONFIG_PAX_USERCOPY_DEBUG
-+ if (const_size)
-+ return;
-+#endif
-+
-+#ifdef CONFIG_PAX_USERCOPY
-+ if (!n)
-+ return;
-+
-+ type = check_heap_object(ptr, n);
-+ if (!type) {
-+ int ret = check_stack_object(ptr, n);
-+ if (ret == 1 || ret == 2)
-+ return;
-+ if (ret == 0) {
-+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
-+ type = "<kernel text>";
-+ else
-+ return;
-+ } else
-+ type = "<process stack>";
-+ }
-+
-+ pax_report_usercopy(ptr, n, to_user, type);
-+#endif
-+
-+}
-+EXPORT_SYMBOL(__check_object_size);
-+
-+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+void pax_track_stack(void)
-+{
-+ unsigned long sp = (unsigned long)&sp;
-+ if (sp < current_thread_info()->lowest_stack &&
-+ sp >= (unsigned long)task_stack_page(current) + 2 * sizeof(unsigned long))
-+ current_thread_info()->lowest_stack = sp;
-+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
-+ BUG();
-+}
-+EXPORT_SYMBOL(pax_track_stack);
-+#endif
-+
-+#ifdef CONFIG_PAX_SIZE_OVERFLOW
-+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
-+{
-+ printk(KERN_EMERG "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
-+ dump_stack();
-+ do_group_exit(SIGKILL);
-+}
-+EXPORT_SYMBOL(report_size_overflow);
-+#endif
-+
- static int zap_process(struct task_struct *start, int exit_code)
- {
- struct task_struct *t;
-@@ -2021,17 +2514,17 @@ static void coredump_finish(struct mm_struct *mm)
- void set_dumpable(struct mm_struct *mm, int value)
- {
- switch (value) {
-- case 0:
-+ case SUID_DUMPABLE_DISABLED:
- clear_bit(MMF_DUMPABLE, &mm->flags);
- smp_wmb();
- clear_bit(MMF_DUMP_SECURELY, &mm->flags);
- break;
-- case 1:
-+ case SUID_DUMPABLE_ENABLED:
- set_bit(MMF_DUMPABLE, &mm->flags);
- smp_wmb();
- clear_bit(MMF_DUMP_SECURELY, &mm->flags);
- break;
-- case 2:
-+ case SUID_DUMPABLE_SAFE:
- set_bit(MMF_DUMP_SECURELY, &mm->flags);
- smp_wmb();
- set_bit(MMF_DUMPABLE, &mm->flags);
-@@ -2044,7 +2537,7 @@ static int __get_dumpable(unsigned long mm_flags)
- int ret;
-
- ret = mm_flags & MMF_DUMPABLE_MASK;
-- return (ret >= 2) ? 2 : ret;
-+ return (ret > SUID_DUMPABLE_ENABLED) ? SUID_DUMPABLE_SAFE : ret;
- }
-
- /*
-@@ -2065,17 +2558,17 @@ static void wait_for_dump_helpers(struct file *file)
- pipe = file->f_path.dentry->d_inode->i_pipe;
-
- pipe_lock(pipe);
-- pipe->readers++;
-- pipe->writers--;
-+ atomic_inc(&pipe->readers);
-+ atomic_dec(&pipe->writers);
-
-- while ((pipe->readers > 1) && (!signal_pending(current))) {
-+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
- wake_up_interruptible_sync(&pipe->wait);
- kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
- pipe_wait(pipe);
- }
-
-- pipe->readers--;
-- pipe->writers++;
-+ atomic_dec(&pipe->readers);
-+ atomic_inc(&pipe->writers);
- pipe_unlock(pipe);
-
- }
-@@ -2136,7 +2629,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
- int retval = 0;
- int flag = 0;
- int ispipe;
-- static atomic_t core_dump_count = ATOMIC_INIT(0);
-+ bool need_nonrelative = false;
-+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
- struct coredump_params cprm = {
- .signr = signr,
- .regs = regs,
-@@ -2151,6 +2645,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
-
- audit_core_dumps(signr);
-
-+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
-+ gr_handle_brute_attach(cprm.mm_flags);
-+
- binfmt = mm->binfmt;
- if (!binfmt || !binfmt->core_dump)
- goto fail;
-@@ -2161,14 +2658,16 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
- if (!cred)
- goto fail;
- /*
-- * We cannot trust fsuid as being the "true" uid of the
-- * process nor do we know its entire history. We only know it
-- * was tainted so we dump it as root in mode 2.
-+ * We cannot trust fsuid as being the "true" uid of the process
-+ * nor do we know its entire history. We only know it was tainted
-+ * so we dump it as root in mode 2, and only into a controlled
-+ * environment (pipe handler or fully qualified path).
- */
-- if (__get_dumpable(cprm.mm_flags) == 2) {
-+ if (__get_dumpable(cprm.mm_flags) == SUID_DUMPABLE_SAFE) {
- /* Setuid core dump mode */
- flag = O_EXCL; /* Stop rewrite attacks */
- cred->fsuid = 0; /* Dump root private */
-+ need_nonrelative = true;
- }
-
- retval = coredump_wait(exit_code, &core_state);
-@@ -2218,7 +2717,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
- }
- cprm.limit = RLIM_INFINITY;
-
-- dump_count = atomic_inc_return(&core_dump_count);
-+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
- if (core_pipe_limit && (core_pipe_limit < dump_count)) {
- printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
- task_tgid_vnr(current), current->comm);
-@@ -2245,9 +2744,19 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
- } else {
- struct inode *inode;
-
-+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
-+
- if (cprm.limit < binfmt->min_coredump)
- goto fail_unlock;
-
-+ if (need_nonrelative && cn.corename[0] != '/') {
-+ printk(KERN_WARNING "Pid %d(%s) can only dump core "\
-+ "to fully qualified path!\n",
-+ task_tgid_vnr(current), current->comm);
-+ printk(KERN_WARNING "Skipping core dump\n");
-+ goto fail_unlock;
-+ }
-+
- cprm.file = filp_open(cn.corename,
- O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
- 0600);
-@@ -2288,7 +2797,7 @@ close_fail:
- filp_close(cprm.file, NULL);
- fail_dropcount:
- if (ispipe)
-- atomic_dec(&core_dump_count);
-+ atomic_dec_unchecked(&core_dump_count);
- fail_unlock:
- kfree(cn.corename);
- fail_corename:
-@@ -2307,7 +2816,7 @@ fail:
- */
- int dump_write(struct file *file, const void *addr, int nr)
- {
-- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
-+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
- }
- EXPORT_SYMBOL(dump_write);
-
-diff --git a/fs/exofs/super.c b/fs/exofs/super.c
-index 7ed5000..cbe7b49 100644
---- a/fs/exofs/super.c
-+++ b/fs/exofs/super.c
-@@ -1008,6 +1008,7 @@ static struct file_system_type exofs_type = {
- .mount = exofs_mount,
- .kill_sb = generic_shutdown_super,
- };
-+MODULE_ALIAS_FS("exofs");
-
- static int __init init_exofs(void)
- {
-diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
-index a8cbe1b..fed04cb 100644
---- a/fs/ext2/balloc.c
-+++ b/fs/ext2/balloc.c
-@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
-
- free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
- root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
-- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
-+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
- sbi->s_resuid != current_fsuid() &&
- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
- return 0;
-diff --git a/fs/ext2/super.c b/fs/ext2/super.c
-index 94b9e32..3476e8c 100644
---- a/fs/ext2/super.c
-+++ b/fs/ext2/super.c
-@@ -259,10 +259,8 @@ static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
- #ifdef CONFIG_EXT2_FS_XATTR
- if (test_opt(sb, XATTR_USER))
- seq_puts(seq, ",user_xattr");
-- if (!test_opt(sb, XATTR_USER) &&
-- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
-+ if (!test_opt(sb, XATTR_USER))
- seq_puts(seq, ",nouser_xattr");
-- }
- #endif
-
- #ifdef CONFIG_EXT2_FS_POSIX_ACL
-@@ -827,8 +825,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
- if (def_mount_opts & EXT2_DEFM_UID16)
- set_opt(sbi->s_mount_opt, NO_UID32);
- #ifdef CONFIG_EXT2_FS_XATTR
-- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
-- set_opt(sbi->s_mount_opt, XATTR_USER);
-+ /* always enable user xattrs */
-+ set_opt(sbi->s_mount_opt, XATTR_USER);
- #endif
- #ifdef CONFIG_EXT2_FS_POSIX_ACL
- if (def_mount_opts & EXT2_DEFM_ACL)
-@@ -1495,6 +1493,7 @@ static struct file_system_type ext2_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("ext2");
-
- static int __init init_ext2_fs(void)
- {
-diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
-index d27b71f..071b0e1 100644
---- a/fs/ext2/xattr.c
-+++ b/fs/ext2/xattr.c
-@@ -248,7 +248,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
- struct buffer_head *bh = NULL;
- struct ext2_xattr_entry *entry;
- char *end;
-- size_t rest = buffer_size;
-+ size_t rest = buffer_size, total_size = 0;
- int error;
-
- ea_idebug(inode, "buffer=%p, buffer_size=%ld",
-@@ -306,9 +306,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
- buffer += size;
- }
- rest -= size;
-+ total_size += size;
- }
- }
-- error = buffer_size - rest; /* total size */
-+ error = total_size;
-
- cleanup:
- brelse(bh);
-diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
-index a203892..4e64db5 100644
---- a/fs/ext3/balloc.c
-+++ b/fs/ext3/balloc.c
-@@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
-
- free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
- root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
-- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
-+ if (free_blocks < root_blocks + 1 &&
- !use_reservation && sbi->s_resuid != current_fsuid() &&
-- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
-+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
-+ !capable_nolog(CAP_SYS_RESOURCE)) {
- return 0;
- }
- return 1;
-diff --git a/fs/ext3/super.c b/fs/ext3/super.c
-index 562ede3..62fff74 100644
---- a/fs/ext3/super.c
-+++ b/fs/ext3/super.c
-@@ -655,10 +655,8 @@ static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
- #ifdef CONFIG_EXT3_FS_XATTR
- if (test_opt(sb, XATTR_USER))
- seq_puts(seq, ",user_xattr");
-- if (!test_opt(sb, XATTR_USER) &&
-- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
-+ if (!test_opt(sb, XATTR_USER))
- seq_puts(seq, ",nouser_xattr");
-- }
- #endif
- #ifdef CONFIG_EXT3_FS_POSIX_ACL
- if (test_opt(sb, POSIX_ACL))
-@@ -1699,8 +1697,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
- if (def_mount_opts & EXT3_DEFM_UID16)
- set_opt(sbi->s_mount_opt, NO_UID32);
- #ifdef CONFIG_EXT3_FS_XATTR
-- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
-- set_opt(sbi->s_mount_opt, XATTR_USER);
-+ /* always enable user xattrs */
-+ set_opt(sbi->s_mount_opt, XATTR_USER);
- #endif
- #ifdef CONFIG_EXT3_FS_POSIX_ACL
- if (def_mount_opts & EXT3_DEFM_ACL)
-@@ -3058,6 +3056,7 @@ static struct file_system_type ext3_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("ext3");
-
- static int __init init_ext3_fs(void)
- {
-diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
-index d565759..a1315f2 100644
---- a/fs/ext3/xattr.c
-+++ b/fs/ext3/xattr.c
-@@ -335,7 +335,7 @@ static int
- ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
- char *buffer, size_t buffer_size)
- {
-- size_t rest = buffer_size;
-+ size_t rest = buffer_size, total_size = 0;
-
- for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
- const struct xattr_handler *handler =
-@@ -352,9 +352,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
- buffer += size;
- }
- rest -= size;
-+ total_size += size;
- }
- }
-- return buffer_size - rest;
-+ return total_size;
- }
-
- static int
-diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
-index 2845a1f..f29de63 100644
---- a/fs/ext4/balloc.c
-+++ b/fs/ext4/balloc.c
-@@ -441,8 +441,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
- /* Hm, nope. Are (enough) root reserved clusters available? */
- if (sbi->s_resuid == current_fsuid() ||
- ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
-- capable(CAP_SYS_RESOURCE) ||
-- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
-+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
-+ capable_nolog(CAP_SYS_RESOURCE)) {
-
- if (free_clusters >= (nclusters + dirty_clusters))
- return 1;
-diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index 6858d9d..590047a 100644
---- a/fs/ext4/ext4.h
-+++ b/fs/ext4/ext4.h
-@@ -1218,19 +1218,19 @@ struct ext4_sb_info {
- unsigned long s_mb_last_start;
-
- /* stats for buddy allocator */
-- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
-- atomic_t s_bal_success; /* we found long enough chunks */
-- atomic_t s_bal_allocated; /* in blocks */
-- atomic_t s_bal_ex_scanned; /* total extents scanned */
-- atomic_t s_bal_goals; /* goal hits */
-- atomic_t s_bal_breaks; /* too long searches */
-- atomic_t s_bal_2orders; /* 2^order hits */
-+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
-+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
-+ atomic_unchecked_t s_bal_allocated; /* in blocks */
-+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
-+ atomic_unchecked_t s_bal_goals; /* goal hits */
-+ atomic_unchecked_t s_bal_breaks; /* too long searches */
-+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
- spinlock_t s_bal_lock;
- unsigned long s_mb_buddies_generated;
- unsigned long long s_mb_generation_time;
-- atomic_t s_mb_lost_chunks;
-- atomic_t s_mb_preallocated;
-- atomic_t s_mb_discarded;
-+ atomic_unchecked_t s_mb_lost_chunks;
-+ atomic_unchecked_t s_mb_preallocated;
-+ atomic_unchecked_t s_mb_discarded;
- atomic_t s_lock_busy;
-
- /* locality groups */
-diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
-index 5baa7ba..917bb08 100644
---- a/fs/ext4/mballoc.c
-+++ b/fs/ext4/mballoc.c
-@@ -1796,7 +1796,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
- BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
-
- if (EXT4_SB(sb)->s_mb_stats)
-- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
-+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
-
- break;
- }
-@@ -2094,7 +2094,7 @@ repeat:
- ac->ac_status = AC_STATUS_CONTINUE;
- ac->ac_flags |= EXT4_MB_HINT_FIRST;
- cr = 3;
-- atomic_inc(&sbi->s_mb_lost_chunks);
-+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
- goto repeat;
- }
- }
-@@ -2601,25 +2601,25 @@ int ext4_mb_release(struct super_block *sb)
- if (sbi->s_mb_stats) {
- ext4_msg(sb, KERN_INFO,
- "mballoc: %u blocks %u reqs (%u success)",
-- atomic_read(&sbi->s_bal_allocated),
-- atomic_read(&sbi->s_bal_reqs),
-- atomic_read(&sbi->s_bal_success));
-+ atomic_read_unchecked(&sbi->s_bal_allocated),
-+ atomic_read_unchecked(&sbi->s_bal_reqs),
-+ atomic_read_unchecked(&sbi->s_bal_success));
- ext4_msg(sb, KERN_INFO,
- "mballoc: %u extents scanned, %u goal hits, "
- "%u 2^N hits, %u breaks, %u lost",
-- atomic_read(&sbi->s_bal_ex_scanned),
-- atomic_read(&sbi->s_bal_goals),
-- atomic_read(&sbi->s_bal_2orders),
-- atomic_read(&sbi->s_bal_breaks),
-- atomic_read(&sbi->s_mb_lost_chunks));
-+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
-+ atomic_read_unchecked(&sbi->s_bal_goals),
-+ atomic_read_unchecked(&sbi->s_bal_2orders),
-+ atomic_read_unchecked(&sbi->s_bal_breaks),
-+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
- ext4_msg(sb, KERN_INFO,
- "mballoc: %lu generated and it took %Lu",
- sbi->s_mb_buddies_generated,
- sbi->s_mb_generation_time);
- ext4_msg(sb, KERN_INFO,
- "mballoc: %u preallocated, %u discarded",
-- atomic_read(&sbi->s_mb_preallocated),
-- atomic_read(&sbi->s_mb_discarded));
-+ atomic_read_unchecked(&sbi->s_mb_preallocated),
-+ atomic_read_unchecked(&sbi->s_mb_discarded));
- }
-
- free_percpu(sbi->s_locality_groups);
-@@ -3103,16 +3103,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
- struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
-
- if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
-- atomic_inc(&sbi->s_bal_reqs);
-- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
-+ atomic_inc_unchecked(&sbi->s_bal_reqs);
-+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
- if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
-- atomic_inc(&sbi->s_bal_success);
-- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
-+ atomic_inc_unchecked(&sbi->s_bal_success);
-+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
- if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
- ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
-- atomic_inc(&sbi->s_bal_goals);
-+ atomic_inc_unchecked(&sbi->s_bal_goals);
- if (ac->ac_found > sbi->s_mb_max_to_scan)
-- atomic_inc(&sbi->s_bal_breaks);
-+ atomic_inc_unchecked(&sbi->s_bal_breaks);
- }
-
- if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
-@@ -3539,7 +3539,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
- trace_ext4_mb_new_inode_pa(ac, pa);
-
- ext4_mb_use_inode_pa(ac, pa);
-- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
-+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
-
- ei = EXT4_I(ac->ac_inode);
- grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
-@@ -3599,7 +3599,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
- trace_ext4_mb_new_group_pa(ac, pa);
-
- ext4_mb_use_group_pa(ac, pa);
-- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
-+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
-
- grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
- lg = ac->ac_lg;
-@@ -3688,7 +3688,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
- * from the bitmap and continue.
- */
- }
-- atomic_add(free, &sbi->s_mb_discarded);
-+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
-
- return err;
- }
-@@ -3706,7 +3706,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
- ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
- BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
- mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
-- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
-+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
- trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
-
- return 0;
-diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
-index f3358ab..fbb1d90 100644
---- a/fs/ext4/mmp.c
-+++ b/fs/ext4/mmp.c
-@@ -73,7 +73,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
- void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
- const char *function, unsigned int line, const char *msg)
- {
-- __ext4_warning(sb, function, line, msg);
-+ __ext4_warning(sb, function, line, "%s", msg);
- __ext4_warning(sb, function, line,
- "MMP failure info: last update time: %llu, last update "
- "node: %s, last update device: %s\n",
-diff --git a/fs/ext4/super.c b/fs/ext4/super.c
-index 422be11..ef4b528 100644
---- a/fs/ext4/super.c
-+++ b/fs/ext4/super.c
-@@ -92,6 +92,8 @@ static struct file_system_type ext2_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("ext2");
-+MODULE_ALIAS("ext2");
- #define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
- #else
- #define IS_EXT2_SB(sb) (0)
-@@ -106,6 +108,8 @@ static struct file_system_type ext3_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("ext3");
-+MODULE_ALIAS("ext3");
- #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
- #else
- #define IS_EXT3_SB(sb) (0)
-@@ -1438,7 +1442,7 @@ static ext4_fsblk_t get_sb_block(void **data)
- }
-
- #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
--static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
-+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
- "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
-
- #ifdef CONFIG_QUOTA
-@@ -2460,7 +2464,7 @@ struct ext4_attr {
- ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
- const char *, size_t);
- int offset;
--};
-+} __do_const;
-
- static int parse_strtoul(const char *buf,
- unsigned long max, unsigned long *value)
-@@ -3167,7 +3171,6 @@ int ext4_calculate_overhead(struct super_block *sb)
- ext4_fsblk_t overhead = 0;
- char *buf = (char *) get_zeroed_page(GFP_KERNEL);
-
-- memset(buf, 0, PAGE_SIZE);
- if (!buf)
- return -ENOMEM;
-
-@@ -5044,7 +5047,6 @@ static inline int ext2_feature_set_ok(struct super_block *sb)
- return 0;
- return 1;
- }
--MODULE_ALIAS("ext2");
- #else
- static inline void register_as_ext2(void) { }
- static inline void unregister_as_ext2(void) { }
-@@ -5077,7 +5079,6 @@ static inline int ext3_feature_set_ok(struct super_block *sb)
- return 0;
- return 1;
- }
--MODULE_ALIAS("ext3");
- #else
- static inline void register_as_ext3(void) { }
- static inline void unregister_as_ext3(void) { }
-@@ -5091,6 +5092,7 @@ static struct file_system_type ext4_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("ext4");
-
- static int __init ext4_init_feat_adverts(void)
- {
-diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
-index c6ac876..8ea8de1 100644
---- a/fs/ext4/xattr.c
-+++ b/fs/ext4/xattr.c
-@@ -343,7 +343,7 @@ static int
- ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
- char *buffer, size_t buffer_size)
- {
-- size_t rest = buffer_size;
-+ size_t rest = buffer_size, total_size = 0;
-
- for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
- const struct xattr_handler *handler =
-@@ -360,9 +360,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
- buffer += size;
- }
- rest -= size;
-+ total_size += size;
- }
- }
-- return buffer_size - rest;
-+ return total_size;
- }
-
- static int
-diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
-index 216b419..350a088 100644
---- a/fs/fat/namei_msdos.c
-+++ b/fs/fat/namei_msdos.c
-@@ -674,6 +674,7 @@ static struct file_system_type msdos_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("msdos");
-
- static int __init init_msdos_fs(void)
- {
-diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
-index c25cf15..e5ea08a 100644
---- a/fs/fat/namei_vfat.c
-+++ b/fs/fat/namei_vfat.c
-@@ -1090,6 +1090,7 @@ static struct file_system_type vfat_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("vfat");
-
- static int __init init_vfat_fs(void)
- {
-diff --git a/fs/fcntl.c b/fs/fcntl.c
-index 22764c7..86372c9 100644
---- a/fs/fcntl.c
-+++ b/fs/fcntl.c
-@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
- if (err)
- return err;
-
-+ if (gr_handle_chroot_fowner(pid, type))
-+ return -ENOENT;
-+ if (gr_check_protected_task_fowner(pid, type))
-+ return -EACCES;
-+
- f_modown(filp, pid, type, force);
- return 0;
- }
-@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
-
- static int f_setown_ex(struct file *filp, unsigned long arg)
- {
-- struct f_owner_ex * __user owner_p = (void * __user)arg;
-+ struct f_owner_ex __user *owner_p = (void __user *)arg;
- struct f_owner_ex owner;
- struct pid *pid;
- int type;
-@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
-
- static int f_getown_ex(struct file *filp, unsigned long arg)
- {
-- struct f_owner_ex * __user owner_p = (void * __user)arg;
-+ struct f_owner_ex __user *owner_p = (void __user *)arg;
- struct f_owner_ex owner;
- int ret = 0;
-
-@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
- switch (cmd) {
- case F_DUPFD:
- case F_DUPFD_CLOEXEC:
-+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
- if (arg >= rlimit(RLIMIT_NOFILE))
- break;
- err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
-diff --git a/fs/fhandle.c b/fs/fhandle.c
-index 6b08864..030db71 100644
---- a/fs/fhandle.c
-+++ b/fs/fhandle.c
-@@ -8,6 +8,7 @@
- #include <linux/fs_struct.h>
- #include <linux/fsnotify.h>
- #include <linux/personality.h>
-+#include <linux/grsecurity.h>
- #include <asm/uaccess.h>
- #include "internal.h"
-
-@@ -176,7 +177,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
- * the directory. Ideally we would like CAP_DAC_SEARCH.
- * But we don't have that
- */
-- if (!capable(CAP_DAC_READ_SEARCH)) {
-+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
- retval = -EPERM;
- goto out_err;
- }
-@@ -196,8 +197,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
- goto out_err;
- }
- /* copy the full handle */
-- if (copy_from_user(handle, ufh,
-- sizeof(struct file_handle) +
-+ *handle = f_handle;
-+ if (copy_from_user(&handle->f_handle,
-+ &ufh->f_handle,
- f_handle.handle_bytes)) {
- retval = -EFAULT;
- goto out_handle;
-diff --git a/fs/fifo.c b/fs/fifo.c
-index cf6f434..3d7942c 100644
---- a/fs/fifo.c
-+++ b/fs/fifo.c
-@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
- */
- filp->f_op = &read_pipefifo_fops;
- pipe->r_counter++;
-- if (pipe->readers++ == 0)
-+ if (atomic_inc_return(&pipe->readers) == 1)
- wake_up_partner(inode);
-
-- if (!pipe->writers) {
-+ if (!atomic_read(&pipe->writers)) {
- if ((filp->f_flags & O_NONBLOCK)) {
- /* suppress POLLHUP until we have
- * seen a writer */
-@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
- * errno=ENXIO when there is no process reading the FIFO.
- */
- ret = -ENXIO;
-- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
-+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
- goto err;
-
- filp->f_op = &write_pipefifo_fops;
- pipe->w_counter++;
-- if (!pipe->writers++)
-+ if (atomic_inc_return(&pipe->writers) == 1)
- wake_up_partner(inode);
-
-- if (!pipe->readers) {
-+ if (!atomic_read(&pipe->readers)) {
- if (wait_for_partner(inode, &pipe->r_counter))
- goto err_wr;
- }
-@@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
- */
- filp->f_op = &rdwr_pipefifo_fops;
-
-- pipe->readers++;
-- pipe->writers++;
-+ atomic_inc(&pipe->readers);
-+ atomic_inc(&pipe->writers);
- pipe->r_counter++;
- pipe->w_counter++;
-- if (pipe->readers == 1 || pipe->writers == 1)
-+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
- wake_up_partner(inode);
- break;
-
-@@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
- return 0;
-
- err_rd:
-- if (!--pipe->readers)
-+ if (atomic_dec_and_test(&pipe->readers))
- wake_up_interruptible(&pipe->wait);
- ret = -ERESTARTSYS;
- goto err;
-
- err_wr:
-- if (!--pipe->writers)
-+ if (atomic_dec_and_test(&pipe->writers))
- wake_up_interruptible(&pipe->wait);
- ret = -ERESTARTSYS;
- goto err;
-
- err:
-- if (!pipe->readers && !pipe->writers)
-+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
- free_pipe_info(inode);
-
- err_nocleanup:
-diff --git a/fs/file.c b/fs/file.c
-index 30bfc99..b4b9a12 100644
---- a/fs/file.c
-+++ b/fs/file.c
-@@ -15,6 +15,7 @@
- #include <linux/slab.h>
- #include <linux/vmalloc.h>
- #include <linux/file.h>
-+#include <linux/security.h>
- #include <linux/fdtable.h>
- #include <linux/bitops.h>
- #include <linux/interrupt.h>
-@@ -199,7 +200,7 @@ out:
- * Return <0 error code on error; 1 on successful completion.
- * The files->file_lock should be held on entry, and will be held on exit.
- */
--static int expand_fdtable(struct files_struct *files, int nr)
-+static int expand_fdtable(struct files_struct *files, unsigned int nr)
- __releases(files->file_lock)
- __acquires(files->file_lock)
- {
-@@ -244,7 +245,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
- * expanded and execution may have blocked.
- * The files->file_lock should be held on entry, and will be held on exit.
- */
--int expand_files(struct files_struct *files, int nr)
-+int expand_files(struct files_struct *files, unsigned int nr)
- {
- struct fdtable *fdt;
-
-@@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
- * N.B. For clone tasks sharing a files structure, this test
- * will limit the total number of files that can be opened.
- */
-+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
- if (nr >= rlimit(RLIMIT_NOFILE))
- return -EMFILE;
-
-diff --git a/fs/filesystems.c b/fs/filesystems.c
-index 0845f84..bf3fd0571 100644
---- a/fs/filesystems.c
-+++ b/fs/filesystems.c
-@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
- int len = dot ? dot - name : strlen(name);
-
- fs = __get_fs_type(name, len);
-- if (!fs && (request_module("%.*s", len, name) == 0))
-+
-+#ifdef CONFIG_GRKERNSEC_MODHARDEN
-+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
-+#else
-+ if (!fs && (request_module("fs-%.*s", len, name) == 0))
-+#endif
- fs = __get_fs_type(name, len);
-
- if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
-diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
-index 9d1c995..7685971 100644
---- a/fs/freevxfs/vxfs_super.c
-+++ b/fs/freevxfs/vxfs_super.c
-@@ -52,7 +52,6 @@ MODULE_AUTHOR("Christoph Hellwig");
- MODULE_DESCRIPTION("Veritas Filesystem (VxFS) driver");
- MODULE_LICENSE("Dual BSD/GPL");
-
--MODULE_ALIAS("vxfs"); /* makes mount -t vxfs autoload the module */
-
-
- static void vxfs_put_super(struct super_block *);
-@@ -259,6 +258,8 @@ static struct file_system_type vxfs_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("vxfs"); /* makes mount -t vxfs autoload the module */
-+MODULE_ALIAS("vxfs");
-
- static int __init
- vxfs_init(void)
-diff --git a/fs/fs_struct.c b/fs/fs_struct.c
-index 78b519c..8445fa6 100644
---- a/fs/fs_struct.c
-+++ b/fs/fs_struct.c
-@@ -4,6 +4,7 @@
- #include <linux/path.h>
- #include <linux/slab.h>
- #include <linux/fs_struct.h>
-+#include <linux/grsecurity.h>
- #include "internal.h"
-
- static inline void path_get_longterm(struct path *path)
-@@ -26,15 +27,19 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
- {
- struct path old_root;
-
-+ gr_inc_chroot_refcnts(path->dentry, path->mnt);
- spin_lock(&fs->lock);
- write_seqcount_begin(&fs->seq);
- old_root = fs->root;
- fs->root = *path;
- path_get_longterm(path);
-+ gr_set_chroot_entries(current, path);
- write_seqcount_end(&fs->seq);
- spin_unlock(&fs->lock);
-- if (old_root.dentry)
-+ if (old_root.dentry) {
-+ gr_dec_chroot_refcnts(old_root.dentry, old_root.mnt);
- path_put_longterm(&old_root);
-+ }
- }
-
- /*
-@@ -74,6 +79,13 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
- && fs->root.mnt == old_root->mnt) {
- path_get_longterm(new_root);
- fs->root = *new_root;
-+ /* This function is only called
-+ from pivot_root(). Leave our
-+ gr_chroot_dentry and is_chrooted flags
-+ as-is, so that a pivoted root isn't treated
-+ as a chroot
-+ */
-+ //gr_set_chroot_entries(p, new_root);
- count++;
- }
- if (fs->pwd.dentry == old_root->dentry
-@@ -94,6 +106,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
-
- void free_fs_struct(struct fs_struct *fs)
- {
-+ gr_dec_chroot_refcnts(fs->root.dentry, fs->root.mnt);
- path_put_longterm(&fs->root);
- path_put_longterm(&fs->pwd);
- kmem_cache_free(fs_cachep, fs);
-@@ -109,7 +122,8 @@ void exit_fs(struct task_struct *tsk)
- spin_lock(&fs->lock);
- write_seqcount_begin(&fs->seq);
- tsk->fs = NULL;
-- kill = !--fs->users;
-+ gr_clear_chroot_entries(tsk);
-+ kill = !atomic_dec_return(&fs->users);
- write_seqcount_end(&fs->seq);
- spin_unlock(&fs->lock);
- task_unlock(tsk);
-@@ -123,7 +137,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
- struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
- /* We don't need to lock fs - think why ;-) */
- if (fs) {
-- fs->users = 1;
-+ atomic_set(&fs->users, 1);
- fs->in_exec = 0;
- spin_lock_init(&fs->lock);
- seqcount_init(&fs->seq);
-@@ -132,9 +146,13 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
- spin_lock(&old->lock);
- fs->root = old->root;
- path_get_longterm(&fs->root);
-+ /* instead of calling gr_set_chroot_entries here,
-+ we call it from every caller of this function
-+ */
- fs->pwd = old->pwd;
- path_get_longterm(&fs->pwd);
- spin_unlock(&old->lock);
-+ gr_inc_chroot_refcnts(fs->root.dentry, fs->root.mnt);
- }
- return fs;
- }
-@@ -150,8 +168,9 @@ int unshare_fs_struct(void)
-
- task_lock(current);
- spin_lock(&fs->lock);
-- kill = !--fs->users;
-+ kill = !atomic_dec_return(&fs->users);
- current->fs = new_fs;
-+ gr_set_chroot_entries(current, &new_fs->root);
- spin_unlock(&fs->lock);
- task_unlock(current);
-
-@@ -164,13 +183,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
-
- int current_umask(void)
- {
-- return current->fs->umask;
-+ return current->fs->umask | gr_acl_umask();
- }
- EXPORT_SYMBOL(current_umask);
-
- /* to be mentioned only in INIT_TASK */
- struct fs_struct init_fs = {
-- .users = 1,
-+ .users = ATOMIC_INIT(1),
- .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
- .seq = SEQCNT_ZERO,
- .umask = 0022,
-@@ -186,12 +205,13 @@ void daemonize_fs_struct(void)
- task_lock(current);
-
- spin_lock(&init_fs.lock);
-- init_fs.users++;
-+ atomic_inc(&init_fs.users);
- spin_unlock(&init_fs.lock);
-
- spin_lock(&fs->lock);
- current->fs = &init_fs;
-- kill = !--fs->users;
-+ gr_set_chroot_entries(current, &current->fs->root);
-+ kill = !atomic_dec_return(&fs->users);
- spin_unlock(&fs->lock);
-
- task_unlock(current);
-diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
-index 9905350..97ff49a 100644
---- a/fs/fscache/cookie.c
-+++ b/fs/fscache/cookie.c
-@@ -19,7 +19,7 @@
-
- struct kmem_cache *fscache_cookie_jar;
-
--static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
-+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
-
- static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
- static int fscache_alloc_object(struct fscache_cache *cache,
-@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
- parent ? (char *) parent->def->name : "<no-parent>",
- def->name, netfs_data);
-
-- fscache_stat(&fscache_n_acquires);
-+ fscache_stat_unchecked(&fscache_n_acquires);
-
- /* if there's no parent cookie, then we don't create one here either */
- if (!parent) {
-- fscache_stat(&fscache_n_acquires_null);
-+ fscache_stat_unchecked(&fscache_n_acquires_null);
- _leave(" [no parent]");
- return NULL;
- }
-@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
- /* allocate and initialise a cookie */
- cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
- if (!cookie) {
-- fscache_stat(&fscache_n_acquires_oom);
-+ fscache_stat_unchecked(&fscache_n_acquires_oom);
- _leave(" [ENOMEM]");
- return NULL;
- }
-@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
-
- switch (cookie->def->type) {
- case FSCACHE_COOKIE_TYPE_INDEX:
-- fscache_stat(&fscache_n_cookie_index);
-+ fscache_stat_unchecked(&fscache_n_cookie_index);
- break;
- case FSCACHE_COOKIE_TYPE_DATAFILE:
-- fscache_stat(&fscache_n_cookie_data);
-+ fscache_stat_unchecked(&fscache_n_cookie_data);
- break;
- default:
-- fscache_stat(&fscache_n_cookie_special);
-+ fscache_stat_unchecked(&fscache_n_cookie_special);
- break;
- }
-
-@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
- if (fscache_acquire_non_index_cookie(cookie) < 0) {
- atomic_dec(&parent->n_children);
- __fscache_cookie_put(cookie);
-- fscache_stat(&fscache_n_acquires_nobufs);
-+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
- _leave(" = NULL");
- return NULL;
- }
- }
-
-- fscache_stat(&fscache_n_acquires_ok);
-+ fscache_stat_unchecked(&fscache_n_acquires_ok);
- _leave(" = %p", cookie);
- return cookie;
- }
-@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
- cache = fscache_select_cache_for_object(cookie->parent);
- if (!cache) {
- up_read(&fscache_addremove_sem);
-- fscache_stat(&fscache_n_acquires_no_cache);
-+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
- _leave(" = -ENOMEDIUM [no cache]");
- return -ENOMEDIUM;
- }
-@@ -256,14 +256,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
- object = cache->ops->alloc_object(cache, cookie);
- fscache_stat_d(&fscache_n_cop_alloc_object);
- if (IS_ERR(object)) {
-- fscache_stat(&fscache_n_object_no_alloc);
-+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
- ret = PTR_ERR(object);
- goto error;
- }
-
-- fscache_stat(&fscache_n_object_alloc);
-+ fscache_stat_unchecked(&fscache_n_object_alloc);
-
-- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
-+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
-
- _debug("ALLOC OBJ%x: %s {%lx}",
- object->debug_id, cookie->def->name, object->events);
-@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
- struct fscache_object *object;
- struct hlist_node *_p;
-
-- fscache_stat(&fscache_n_updates);
-+ fscache_stat_unchecked(&fscache_n_updates);
-
- if (!cookie) {
-- fscache_stat(&fscache_n_updates_null);
-+ fscache_stat_unchecked(&fscache_n_updates_null);
- _leave(" [no cookie]");
- return;
- }
-@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
- struct fscache_object *object;
- unsigned long event;
-
-- fscache_stat(&fscache_n_relinquishes);
-+ fscache_stat_unchecked(&fscache_n_relinquishes);
- if (retire)
-- fscache_stat(&fscache_n_relinquishes_retire);
-+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
-
- if (!cookie) {
-- fscache_stat(&fscache_n_relinquishes_null);
-+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
- _leave(" [no cookie]");
- return;
- }
-@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
-
- /* wait for the cookie to finish being instantiated (or to fail) */
- if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
-- fscache_stat(&fscache_n_relinquishes_waitcrt);
-+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
- wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
- fscache_wait_bit, TASK_UNINTERRUPTIBLE);
- }
-diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
-index f6aad48..88dcf26 100644
---- a/fs/fscache/internal.h
-+++ b/fs/fscache/internal.h
-@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
- extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
- extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
-
--extern atomic_t fscache_n_op_pend;
--extern atomic_t fscache_n_op_run;
--extern atomic_t fscache_n_op_enqueue;
--extern atomic_t fscache_n_op_deferred_release;
--extern atomic_t fscache_n_op_release;
--extern atomic_t fscache_n_op_gc;
--extern atomic_t fscache_n_op_cancelled;
--extern atomic_t fscache_n_op_rejected;
-+extern atomic_unchecked_t fscache_n_op_pend;
-+extern atomic_unchecked_t fscache_n_op_run;
-+extern atomic_unchecked_t fscache_n_op_enqueue;
-+extern atomic_unchecked_t fscache_n_op_deferred_release;
-+extern atomic_unchecked_t fscache_n_op_release;
-+extern atomic_unchecked_t fscache_n_op_gc;
-+extern atomic_unchecked_t fscache_n_op_cancelled;
-+extern atomic_unchecked_t fscache_n_op_rejected;
-
--extern atomic_t fscache_n_attr_changed;
--extern atomic_t fscache_n_attr_changed_ok;
--extern atomic_t fscache_n_attr_changed_nobufs;
--extern atomic_t fscache_n_attr_changed_nomem;
--extern atomic_t fscache_n_attr_changed_calls;
-+extern atomic_unchecked_t fscache_n_attr_changed;
-+extern atomic_unchecked_t fscache_n_attr_changed_ok;
-+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
-+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
-+extern atomic_unchecked_t fscache_n_attr_changed_calls;
-
--extern atomic_t fscache_n_allocs;
--extern atomic_t fscache_n_allocs_ok;
--extern atomic_t fscache_n_allocs_wait;
--extern atomic_t fscache_n_allocs_nobufs;
--extern atomic_t fscache_n_allocs_intr;
--extern atomic_t fscache_n_allocs_object_dead;
--extern atomic_t fscache_n_alloc_ops;
--extern atomic_t fscache_n_alloc_op_waits;
-+extern atomic_unchecked_t fscache_n_allocs;
-+extern atomic_unchecked_t fscache_n_allocs_ok;
-+extern atomic_unchecked_t fscache_n_allocs_wait;
-+extern atomic_unchecked_t fscache_n_allocs_nobufs;
-+extern atomic_unchecked_t fscache_n_allocs_intr;
-+extern atomic_unchecked_t fscache_n_allocs_object_dead;
-+extern atomic_unchecked_t fscache_n_alloc_ops;
-+extern atomic_unchecked_t fscache_n_alloc_op_waits;
-
--extern atomic_t fscache_n_retrievals;
--extern atomic_t fscache_n_retrievals_ok;
--extern atomic_t fscache_n_retrievals_wait;
--extern atomic_t fscache_n_retrievals_nodata;
--extern atomic_t fscache_n_retrievals_nobufs;
--extern atomic_t fscache_n_retrievals_intr;
--extern atomic_t fscache_n_retrievals_nomem;
--extern atomic_t fscache_n_retrievals_object_dead;
--extern atomic_t fscache_n_retrieval_ops;
--extern atomic_t fscache_n_retrieval_op_waits;
-+extern atomic_unchecked_t fscache_n_retrievals;
-+extern atomic_unchecked_t fscache_n_retrievals_ok;
-+extern atomic_unchecked_t fscache_n_retrievals_wait;
-+extern atomic_unchecked_t fscache_n_retrievals_nodata;
-+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
-+extern atomic_unchecked_t fscache_n_retrievals_intr;
-+extern atomic_unchecked_t fscache_n_retrievals_nomem;
-+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
-+extern atomic_unchecked_t fscache_n_retrieval_ops;
-+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
-
--extern atomic_t fscache_n_stores;
--extern atomic_t fscache_n_stores_ok;
--extern atomic_t fscache_n_stores_again;
--extern atomic_t fscache_n_stores_nobufs;
--extern atomic_t fscache_n_stores_oom;
--extern atomic_t fscache_n_store_ops;
--extern atomic_t fscache_n_store_calls;
--extern atomic_t fscache_n_store_pages;
--extern atomic_t fscache_n_store_radix_deletes;
--extern atomic_t fscache_n_store_pages_over_limit;
-+extern atomic_unchecked_t fscache_n_stores;
-+extern atomic_unchecked_t fscache_n_stores_ok;
-+extern atomic_unchecked_t fscache_n_stores_again;
-+extern atomic_unchecked_t fscache_n_stores_nobufs;
-+extern atomic_unchecked_t fscache_n_stores_oom;
-+extern atomic_unchecked_t fscache_n_store_ops;
-+extern atomic_unchecked_t fscache_n_store_calls;
-+extern atomic_unchecked_t fscache_n_store_pages;
-+extern atomic_unchecked_t fscache_n_store_radix_deletes;
-+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
-
--extern atomic_t fscache_n_store_vmscan_not_storing;
--extern atomic_t fscache_n_store_vmscan_gone;
--extern atomic_t fscache_n_store_vmscan_busy;
--extern atomic_t fscache_n_store_vmscan_cancelled;
-+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
-+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
-+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
-+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
-
--extern atomic_t fscache_n_marks;
--extern atomic_t fscache_n_uncaches;
-+extern atomic_unchecked_t fscache_n_marks;
-+extern atomic_unchecked_t fscache_n_uncaches;
-
--extern atomic_t fscache_n_acquires;
--extern atomic_t fscache_n_acquires_null;
--extern atomic_t fscache_n_acquires_no_cache;
--extern atomic_t fscache_n_acquires_ok;
--extern atomic_t fscache_n_acquires_nobufs;
--extern atomic_t fscache_n_acquires_oom;
-+extern atomic_unchecked_t fscache_n_acquires;
-+extern atomic_unchecked_t fscache_n_acquires_null;
-+extern atomic_unchecked_t fscache_n_acquires_no_cache;
-+extern atomic_unchecked_t fscache_n_acquires_ok;
-+extern atomic_unchecked_t fscache_n_acquires_nobufs;
-+extern atomic_unchecked_t fscache_n_acquires_oom;
-
--extern atomic_t fscache_n_updates;
--extern atomic_t fscache_n_updates_null;
--extern atomic_t fscache_n_updates_run;
-+extern atomic_unchecked_t fscache_n_updates;
-+extern atomic_unchecked_t fscache_n_updates_null;
-+extern atomic_unchecked_t fscache_n_updates_run;
-
--extern atomic_t fscache_n_relinquishes;
--extern atomic_t fscache_n_relinquishes_null;
--extern atomic_t fscache_n_relinquishes_waitcrt;
--extern atomic_t fscache_n_relinquishes_retire;
-+extern atomic_unchecked_t fscache_n_relinquishes;
-+extern atomic_unchecked_t fscache_n_relinquishes_null;
-+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
-+extern atomic_unchecked_t fscache_n_relinquishes_retire;
-
--extern atomic_t fscache_n_cookie_index;
--extern atomic_t fscache_n_cookie_data;
--extern atomic_t fscache_n_cookie_special;
-+extern atomic_unchecked_t fscache_n_cookie_index;
-+extern atomic_unchecked_t fscache_n_cookie_data;
-+extern atomic_unchecked_t fscache_n_cookie_special;
-
--extern atomic_t fscache_n_object_alloc;
--extern atomic_t fscache_n_object_no_alloc;
--extern atomic_t fscache_n_object_lookups;
--extern atomic_t fscache_n_object_lookups_negative;
--extern atomic_t fscache_n_object_lookups_positive;
--extern atomic_t fscache_n_object_lookups_timed_out;
--extern atomic_t fscache_n_object_created;
--extern atomic_t fscache_n_object_avail;
--extern atomic_t fscache_n_object_dead;
-+extern atomic_unchecked_t fscache_n_object_alloc;
-+extern atomic_unchecked_t fscache_n_object_no_alloc;
-+extern atomic_unchecked_t fscache_n_object_lookups;
-+extern atomic_unchecked_t fscache_n_object_lookups_negative;
-+extern atomic_unchecked_t fscache_n_object_lookups_positive;
-+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
-+extern atomic_unchecked_t fscache_n_object_created;
-+extern atomic_unchecked_t fscache_n_object_avail;
-+extern atomic_unchecked_t fscache_n_object_dead;
-
--extern atomic_t fscache_n_checkaux_none;
--extern atomic_t fscache_n_checkaux_okay;
--extern atomic_t fscache_n_checkaux_update;
--extern atomic_t fscache_n_checkaux_obsolete;
-+extern atomic_unchecked_t fscache_n_checkaux_none;
-+extern atomic_unchecked_t fscache_n_checkaux_okay;
-+extern atomic_unchecked_t fscache_n_checkaux_update;
-+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
-
- extern atomic_t fscache_n_cop_alloc_object;
- extern atomic_t fscache_n_cop_lookup_object;
-@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
- atomic_inc(stat);
- }
-
-+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
-+{
-+ atomic_inc_unchecked(stat);
-+}
-+
- static inline void fscache_stat_d(atomic_t *stat)
- {
- atomic_dec(stat);
-@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
-
- #define __fscache_stat(stat) (NULL)
- #define fscache_stat(stat) do {} while (0)
-+#define fscache_stat_unchecked(stat) do {} while (0)
- #define fscache_stat_d(stat) do {} while (0)
- #endif
-
-diff --git a/fs/fscache/object.c b/fs/fscache/object.c
-index b6b897c..0ffff9c 100644
---- a/fs/fscache/object.c
-+++ b/fs/fscache/object.c
-@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
- /* update the object metadata on disk */
- case FSCACHE_OBJECT_UPDATING:
- clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
-- fscache_stat(&fscache_n_updates_run);
-+ fscache_stat_unchecked(&fscache_n_updates_run);
- fscache_stat(&fscache_n_cop_update_object);
- object->cache->ops->update_object(object);
- fscache_stat_d(&fscache_n_cop_update_object);
-@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
- spin_lock(&object->lock);
- object->state = FSCACHE_OBJECT_DEAD;
- spin_unlock(&object->lock);
-- fscache_stat(&fscache_n_object_dead);
-+ fscache_stat_unchecked(&fscache_n_object_dead);
- goto terminal_transit;
-
- /* handle the parent cache of this object being withdrawn from
-@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
- spin_lock(&object->lock);
- object->state = FSCACHE_OBJECT_DEAD;
- spin_unlock(&object->lock);
-- fscache_stat(&fscache_n_object_dead);
-+ fscache_stat_unchecked(&fscache_n_object_dead);
- goto terminal_transit;
-
- /* complain about the object being woken up once it is
-@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
- parent->cookie->def->name, cookie->def->name,
- object->cache->tag->name);
-
-- fscache_stat(&fscache_n_object_lookups);
-+ fscache_stat_unchecked(&fscache_n_object_lookups);
- fscache_stat(&fscache_n_cop_lookup_object);
- ret = object->cache->ops->lookup_object(object);
- fscache_stat_d(&fscache_n_cop_lookup_object);
-@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
- if (ret == -ETIMEDOUT) {
- /* probably stuck behind another object, so move this one to
- * the back of the queue */
-- fscache_stat(&fscache_n_object_lookups_timed_out);
-+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
- set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
- }
-
-@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
-
- spin_lock(&object->lock);
- if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
-- fscache_stat(&fscache_n_object_lookups_negative);
-+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
-
- /* transit here to allow write requests to begin stacking up
- * and read requests to begin returning ENODATA */
-@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
- * result, in which case there may be data available */
- spin_lock(&object->lock);
- if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
-- fscache_stat(&fscache_n_object_lookups_positive);
-+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
-
- clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
-
-@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
- set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
- } else {
- ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
-- fscache_stat(&fscache_n_object_created);
-+ fscache_stat_unchecked(&fscache_n_object_created);
-
- object->state = FSCACHE_OBJECT_AVAILABLE;
- spin_unlock(&object->lock);
-@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
- fscache_enqueue_dependents(object);
-
- fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
-- fscache_stat(&fscache_n_object_avail);
-+ fscache_stat_unchecked(&fscache_n_object_avail);
-
- _leave("");
- }
-@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
- enum fscache_checkaux result;
-
- if (!object->cookie->def->check_aux) {
-- fscache_stat(&fscache_n_checkaux_none);
-+ fscache_stat_unchecked(&fscache_n_checkaux_none);
- return FSCACHE_CHECKAUX_OKAY;
- }
-
-@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
- switch (result) {
- /* entry okay as is */
- case FSCACHE_CHECKAUX_OKAY:
-- fscache_stat(&fscache_n_checkaux_okay);
-+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
- break;
-
- /* entry requires update */
- case FSCACHE_CHECKAUX_NEEDS_UPDATE:
-- fscache_stat(&fscache_n_checkaux_update);
-+ fscache_stat_unchecked(&fscache_n_checkaux_update);
- break;
-
- /* entry requires deletion */
- case FSCACHE_CHECKAUX_OBSOLETE:
-- fscache_stat(&fscache_n_checkaux_obsolete);
-+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
- break;
-
- default:
-diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
-index 30afdfa..2256596 100644
---- a/fs/fscache/operation.c
-+++ b/fs/fscache/operation.c
-@@ -17,7 +17,7 @@
- #include <linux/slab.h>
- #include "internal.h"
-
--atomic_t fscache_op_debug_id;
-+atomic_unchecked_t fscache_op_debug_id;
- EXPORT_SYMBOL(fscache_op_debug_id);
-
- /**
-@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
- ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
- ASSERTCMP(atomic_read(&op->usage), >, 0);
-
-- fscache_stat(&fscache_n_op_enqueue);
-+ fscache_stat_unchecked(&fscache_n_op_enqueue);
- switch (op->flags & FSCACHE_OP_TYPE) {
- case FSCACHE_OP_ASYNC:
- _debug("queue async");
-@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
- wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
- if (op->processor)
- fscache_enqueue_operation(op);
-- fscache_stat(&fscache_n_op_run);
-+ fscache_stat_unchecked(&fscache_n_op_run);
- }
-
- /*
-@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
- if (object->n_ops > 1) {
- atomic_inc(&op->usage);
- list_add_tail(&op->pend_link, &object->pending_ops);
-- fscache_stat(&fscache_n_op_pend);
-+ fscache_stat_unchecked(&fscache_n_op_pend);
- } else if (!list_empty(&object->pending_ops)) {
- atomic_inc(&op->usage);
- list_add_tail(&op->pend_link, &object->pending_ops);
-- fscache_stat(&fscache_n_op_pend);
-+ fscache_stat_unchecked(&fscache_n_op_pend);
- fscache_start_operations(object);
- } else {
- ASSERTCMP(object->n_in_progress, ==, 0);
-@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
- object->n_exclusive++; /* reads and writes must wait */
- atomic_inc(&op->usage);
- list_add_tail(&op->pend_link, &object->pending_ops);
-- fscache_stat(&fscache_n_op_pend);
-+ fscache_stat_unchecked(&fscache_n_op_pend);
- ret = 0;
- } else {
- /* not allowed to submit ops in any other state */
-@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
- if (object->n_exclusive > 0) {
- atomic_inc(&op->usage);
- list_add_tail(&op->pend_link, &object->pending_ops);
-- fscache_stat(&fscache_n_op_pend);
-+ fscache_stat_unchecked(&fscache_n_op_pend);
- } else if (!list_empty(&object->pending_ops)) {
- atomic_inc(&op->usage);
- list_add_tail(&op->pend_link, &object->pending_ops);
-- fscache_stat(&fscache_n_op_pend);
-+ fscache_stat_unchecked(&fscache_n_op_pend);
- fscache_start_operations(object);
- } else {
- ASSERTCMP(object->n_exclusive, ==, 0);
-@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
- object->n_ops++;
- atomic_inc(&op->usage);
- list_add_tail(&op->pend_link, &object->pending_ops);
-- fscache_stat(&fscache_n_op_pend);
-+ fscache_stat_unchecked(&fscache_n_op_pend);
- ret = 0;
- } else if (object->state == FSCACHE_OBJECT_DYING ||
- object->state == FSCACHE_OBJECT_LC_DYING ||
- object->state == FSCACHE_OBJECT_WITHDRAWING) {
-- fscache_stat(&fscache_n_op_rejected);
-+ fscache_stat_unchecked(&fscache_n_op_rejected);
- ret = -ENOBUFS;
- } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
- fscache_report_unexpected_submission(object, op, ostate);
-@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
-
- ret = -EBUSY;
- if (!list_empty(&op->pend_link)) {
-- fscache_stat(&fscache_n_op_cancelled);
-+ fscache_stat_unchecked(&fscache_n_op_cancelled);
- list_del_init(&op->pend_link);
- object->n_ops--;
- if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
-@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
- if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
- BUG();
-
-- fscache_stat(&fscache_n_op_release);
-+ fscache_stat_unchecked(&fscache_n_op_release);
-
- if (op->release) {
- op->release(op);
-@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
- * lock, and defer it otherwise */
- if (!spin_trylock(&object->lock)) {
- _debug("defer put");
-- fscache_stat(&fscache_n_op_deferred_release);
-+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
-
- cache = object->cache;
- spin_lock(&cache->op_gc_list_lock);
-@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
-
- _debug("GC DEFERRED REL OBJ%x OP%x",
- object->debug_id, op->debug_id);
-- fscache_stat(&fscache_n_op_gc);
-+ fscache_stat_unchecked(&fscache_n_op_gc);
-
- ASSERTCMP(atomic_read(&op->usage), ==, 0);
-
-diff --git a/fs/fscache/page.c b/fs/fscache/page.c
-index 3f7a59b..cf196cc 100644
---- a/fs/fscache/page.c
-+++ b/fs/fscache/page.c
-@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
- val = radix_tree_lookup(&cookie->stores, page->index);
- if (!val) {
- rcu_read_unlock();
-- fscache_stat(&fscache_n_store_vmscan_not_storing);
-+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
- __fscache_uncache_page(cookie, page);
- return true;
- }
-@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
- spin_unlock(&cookie->stores_lock);
-
- if (xpage) {
-- fscache_stat(&fscache_n_store_vmscan_cancelled);
-- fscache_stat(&fscache_n_store_radix_deletes);
-+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
-+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
- ASSERTCMP(xpage, ==, page);
- } else {
-- fscache_stat(&fscache_n_store_vmscan_gone);
-+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
- }
-
- wake_up_bit(&cookie->flags, 0);
-@@ -107,7 +107,7 @@ page_busy:
- /* we might want to wait here, but that could deadlock the allocator as
- * the work threads writing to the cache may all end up sleeping
- * on memory allocation */
-- fscache_stat(&fscache_n_store_vmscan_busy);
-+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
- return false;
- }
- EXPORT_SYMBOL(__fscache_maybe_release_page);
-@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
- FSCACHE_COOKIE_STORING_TAG);
- if (!radix_tree_tag_get(&cookie->stores, page->index,
- FSCACHE_COOKIE_PENDING_TAG)) {
-- fscache_stat(&fscache_n_store_radix_deletes);
-+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
- xpage = radix_tree_delete(&cookie->stores, page->index);
- }
- spin_unlock(&cookie->stores_lock);
-@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
-
- _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
-
-- fscache_stat(&fscache_n_attr_changed_calls);
-+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
-
- if (fscache_object_is_active(object)) {
- fscache_stat(&fscache_n_cop_attr_changed);
-@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
-
- ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
-
-- fscache_stat(&fscache_n_attr_changed);
-+ fscache_stat_unchecked(&fscache_n_attr_changed);
-
- op = kzalloc(sizeof(*op), GFP_KERNEL);
- if (!op) {
-- fscache_stat(&fscache_n_attr_changed_nomem);
-+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
- _leave(" = -ENOMEM");
- return -ENOMEM;
- }
-@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
- if (fscache_submit_exclusive_op(object, op) < 0)
- goto nobufs;
- spin_unlock(&cookie->lock);
-- fscache_stat(&fscache_n_attr_changed_ok);
-+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
- fscache_put_operation(op);
- _leave(" = 0");
- return 0;
-@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
- nobufs:
- spin_unlock(&cookie->lock);
- kfree(op);
-- fscache_stat(&fscache_n_attr_changed_nobufs);
-+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
- _leave(" = %d", -ENOBUFS);
- return -ENOBUFS;
- }
-@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
- /* allocate a retrieval operation and attempt to submit it */
- op = kzalloc(sizeof(*op), GFP_NOIO);
- if (!op) {
-- fscache_stat(&fscache_n_retrievals_nomem);
-+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
- return NULL;
- }
-
-@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
- return 0;
- }
-
-- fscache_stat(&fscache_n_retrievals_wait);
-+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
-
- jif = jiffies;
- if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
- fscache_wait_bit_interruptible,
- TASK_INTERRUPTIBLE) != 0) {
-- fscache_stat(&fscache_n_retrievals_intr);
-+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
- _leave(" = -ERESTARTSYS");
- return -ERESTARTSYS;
- }
-@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
- */
- static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
- struct fscache_retrieval *op,
-- atomic_t *stat_op_waits,
-- atomic_t *stat_object_dead)
-+ atomic_unchecked_t *stat_op_waits,
-+ atomic_unchecked_t *stat_object_dead)
- {
- int ret;
-
-@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
- goto check_if_dead;
-
- _debug(">>> WT");
-- fscache_stat(stat_op_waits);
-+ fscache_stat_unchecked(stat_op_waits);
- if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
- fscache_wait_bit_interruptible,
- TASK_INTERRUPTIBLE) < 0) {
-@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
-
- check_if_dead:
- if (unlikely(fscache_object_is_dead(object))) {
-- fscache_stat(stat_object_dead);
-+ fscache_stat_unchecked(stat_object_dead);
- return -ENOBUFS;
- }
- return 0;
-@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
-
- _enter("%p,%p,,,", cookie, page);
-
-- fscache_stat(&fscache_n_retrievals);
-+ fscache_stat_unchecked(&fscache_n_retrievals);
-
- if (hlist_empty(&cookie->backing_objects))
- goto nobufs;
-@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
- goto nobufs_unlock;
- spin_unlock(&cookie->lock);
-
-- fscache_stat(&fscache_n_retrieval_ops);
-+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
-
- /* pin the netfs read context in case we need to do the actual netfs
- * read because we've encountered a cache read failure */
-@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
-
- error:
- if (ret == -ENOMEM)
-- fscache_stat(&fscache_n_retrievals_nomem);
-+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
- else if (ret == -ERESTARTSYS)
-- fscache_stat(&fscache_n_retrievals_intr);
-+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
- else if (ret == -ENODATA)
-- fscache_stat(&fscache_n_retrievals_nodata);
-+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
- else if (ret < 0)
-- fscache_stat(&fscache_n_retrievals_nobufs);
-+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
- else
-- fscache_stat(&fscache_n_retrievals_ok);
-+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
-
- fscache_put_retrieval(op);
- _leave(" = %d", ret);
-@@ -429,7 +429,7 @@ nobufs_unlock:
- spin_unlock(&cookie->lock);
- kfree(op);
- nobufs:
-- fscache_stat(&fscache_n_retrievals_nobufs);
-+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
- }
-@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
-
- _enter("%p,,%d,,,", cookie, *nr_pages);
-
-- fscache_stat(&fscache_n_retrievals);
-+ fscache_stat_unchecked(&fscache_n_retrievals);
-
- if (hlist_empty(&cookie->backing_objects))
- goto nobufs;
-@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
- goto nobufs_unlock;
- spin_unlock(&cookie->lock);
-
-- fscache_stat(&fscache_n_retrieval_ops);
-+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
-
- /* pin the netfs read context in case we need to do the actual netfs
- * read because we've encountered a cache read failure */
-@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
-
- error:
- if (ret == -ENOMEM)
-- fscache_stat(&fscache_n_retrievals_nomem);
-+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
- else if (ret == -ERESTARTSYS)
-- fscache_stat(&fscache_n_retrievals_intr);
-+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
- else if (ret == -ENODATA)
-- fscache_stat(&fscache_n_retrievals_nodata);
-+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
- else if (ret < 0)
-- fscache_stat(&fscache_n_retrievals_nobufs);
-+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
- else
-- fscache_stat(&fscache_n_retrievals_ok);
-+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
-
- fscache_put_retrieval(op);
- _leave(" = %d", ret);
-@@ -545,7 +545,7 @@ nobufs_unlock:
- spin_unlock(&cookie->lock);
- kfree(op);
- nobufs:
-- fscache_stat(&fscache_n_retrievals_nobufs);
-+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
- }
-@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
-
- _enter("%p,%p,,,", cookie, page);
-
-- fscache_stat(&fscache_n_allocs);
-+ fscache_stat_unchecked(&fscache_n_allocs);
-
- if (hlist_empty(&cookie->backing_objects))
- goto nobufs;
-@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
- goto nobufs_unlock;
- spin_unlock(&cookie->lock);
-
-- fscache_stat(&fscache_n_alloc_ops);
-+ fscache_stat_unchecked(&fscache_n_alloc_ops);
-
- ret = fscache_wait_for_retrieval_activation(
- object, op,
-@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
-
- error:
- if (ret == -ERESTARTSYS)
-- fscache_stat(&fscache_n_allocs_intr);
-+ fscache_stat_unchecked(&fscache_n_allocs_intr);
- else if (ret < 0)
-- fscache_stat(&fscache_n_allocs_nobufs);
-+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
- else
-- fscache_stat(&fscache_n_allocs_ok);
-+ fscache_stat_unchecked(&fscache_n_allocs_ok);
-
- fscache_put_retrieval(op);
- _leave(" = %d", ret);
-@@ -625,7 +625,7 @@ nobufs_unlock:
- spin_unlock(&cookie->lock);
- kfree(op);
- nobufs:
-- fscache_stat(&fscache_n_allocs_nobufs);
-+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
- }
-@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
-
- spin_lock(&cookie->stores_lock);
-
-- fscache_stat(&fscache_n_store_calls);
-+ fscache_stat_unchecked(&fscache_n_store_calls);
-
- /* find a page to store */
- page = NULL;
-@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
- page = results[0];
- _debug("gang %d [%lx]", n, page->index);
- if (page->index > op->store_limit) {
-- fscache_stat(&fscache_n_store_pages_over_limit);
-+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
- goto superseded;
- }
-
-@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
- spin_unlock(&cookie->stores_lock);
- spin_unlock(&object->lock);
-
-- fscache_stat(&fscache_n_store_pages);
-+ fscache_stat_unchecked(&fscache_n_store_pages);
- fscache_stat(&fscache_n_cop_write_page);
- ret = object->cache->ops->write_page(op, page);
- fscache_stat_d(&fscache_n_cop_write_page);
-@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
- ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
- ASSERT(PageFsCache(page));
-
-- fscache_stat(&fscache_n_stores);
-+ fscache_stat_unchecked(&fscache_n_stores);
-
- op = kzalloc(sizeof(*op), GFP_NOIO);
- if (!op)
-@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
- spin_unlock(&cookie->stores_lock);
- spin_unlock(&object->lock);
-
-- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
-+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
- op->store_limit = object->store_limit;
-
- if (fscache_submit_op(object, &op->op) < 0)
-@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
-
- spin_unlock(&cookie->lock);
- radix_tree_preload_end();
-- fscache_stat(&fscache_n_store_ops);
-- fscache_stat(&fscache_n_stores_ok);
-+ fscache_stat_unchecked(&fscache_n_store_ops);
-+ fscache_stat_unchecked(&fscache_n_stores_ok);
-
- /* the work queue now carries its own ref on the object */
- fscache_put_operation(&op->op);
-@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
- return 0;
-
- already_queued:
-- fscache_stat(&fscache_n_stores_again);
-+ fscache_stat_unchecked(&fscache_n_stores_again);
- already_pending:
- spin_unlock(&cookie->stores_lock);
- spin_unlock(&object->lock);
- spin_unlock(&cookie->lock);
- radix_tree_preload_end();
- kfree(op);
-- fscache_stat(&fscache_n_stores_ok);
-+ fscache_stat_unchecked(&fscache_n_stores_ok);
- _leave(" = 0");
- return 0;
-
-@@ -851,14 +851,14 @@ nobufs:
- spin_unlock(&cookie->lock);
- radix_tree_preload_end();
- kfree(op);
-- fscache_stat(&fscache_n_stores_nobufs);
-+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
-
- nomem_free:
- kfree(op);
- nomem:
-- fscache_stat(&fscache_n_stores_oom);
-+ fscache_stat_unchecked(&fscache_n_stores_oom);
- _leave(" = -ENOMEM");
- return -ENOMEM;
- }
-@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
- ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
- ASSERTCMP(page, !=, NULL);
-
-- fscache_stat(&fscache_n_uncaches);
-+ fscache_stat_unchecked(&fscache_n_uncaches);
-
- /* cache withdrawal may beat us to it */
- if (!PageFsCache(page))
-@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
- unsigned long loop;
-
- #ifdef CONFIG_FSCACHE_STATS
-- atomic_add(pagevec->nr, &fscache_n_marks);
-+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
- #endif
-
- for (loop = 0; loop < pagevec->nr; loop++) {
-diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
-index 73c0bd7..adb2f79 100644
---- a/fs/fscache/stats.c
-+++ b/fs/fscache/stats.c
-@@ -18,95 +18,95 @@
- /*
- * operation counters
- */
--atomic_t fscache_n_op_pend;
--atomic_t fscache_n_op_run;
--atomic_t fscache_n_op_enqueue;
--atomic_t fscache_n_op_requeue;
--atomic_t fscache_n_op_deferred_release;
--atomic_t fscache_n_op_release;
--atomic_t fscache_n_op_gc;
--atomic_t fscache_n_op_cancelled;
--atomic_t fscache_n_op_rejected;
-+atomic_unchecked_t fscache_n_op_pend;
-+atomic_unchecked_t fscache_n_op_run;
-+atomic_unchecked_t fscache_n_op_enqueue;
-+atomic_unchecked_t fscache_n_op_requeue;
-+atomic_unchecked_t fscache_n_op_deferred_release;
-+atomic_unchecked_t fscache_n_op_release;
-+atomic_unchecked_t fscache_n_op_gc;
-+atomic_unchecked_t fscache_n_op_cancelled;
-+atomic_unchecked_t fscache_n_op_rejected;
-
--atomic_t fscache_n_attr_changed;
--atomic_t fscache_n_attr_changed_ok;
--atomic_t fscache_n_attr_changed_nobufs;
--atomic_t fscache_n_attr_changed_nomem;
--atomic_t fscache_n_attr_changed_calls;
-+atomic_unchecked_t fscache_n_attr_changed;
-+atomic_unchecked_t fscache_n_attr_changed_ok;
-+atomic_unchecked_t fscache_n_attr_changed_nobufs;
-+atomic_unchecked_t fscache_n_attr_changed_nomem;
-+atomic_unchecked_t fscache_n_attr_changed_calls;
-
--atomic_t fscache_n_allocs;
--atomic_t fscache_n_allocs_ok;
--atomic_t fscache_n_allocs_wait;
--atomic_t fscache_n_allocs_nobufs;
--atomic_t fscache_n_allocs_intr;
--atomic_t fscache_n_allocs_object_dead;
--atomic_t fscache_n_alloc_ops;
--atomic_t fscache_n_alloc_op_waits;
-+atomic_unchecked_t fscache_n_allocs;
-+atomic_unchecked_t fscache_n_allocs_ok;
-+atomic_unchecked_t fscache_n_allocs_wait;
-+atomic_unchecked_t fscache_n_allocs_nobufs;
-+atomic_unchecked_t fscache_n_allocs_intr;
-+atomic_unchecked_t fscache_n_allocs_object_dead;
-+atomic_unchecked_t fscache_n_alloc_ops;
-+atomic_unchecked_t fscache_n_alloc_op_waits;
-
--atomic_t fscache_n_retrievals;
--atomic_t fscache_n_retrievals_ok;
--atomic_t fscache_n_retrievals_wait;
--atomic_t fscache_n_retrievals_nodata;
--atomic_t fscache_n_retrievals_nobufs;
--atomic_t fscache_n_retrievals_intr;
--atomic_t fscache_n_retrievals_nomem;
--atomic_t fscache_n_retrievals_object_dead;
--atomic_t fscache_n_retrieval_ops;
--atomic_t fscache_n_retrieval_op_waits;
-+atomic_unchecked_t fscache_n_retrievals;
-+atomic_unchecked_t fscache_n_retrievals_ok;
-+atomic_unchecked_t fscache_n_retrievals_wait;
-+atomic_unchecked_t fscache_n_retrievals_nodata;
-+atomic_unchecked_t fscache_n_retrievals_nobufs;
-+atomic_unchecked_t fscache_n_retrievals_intr;
-+atomic_unchecked_t fscache_n_retrievals_nomem;
-+atomic_unchecked_t fscache_n_retrievals_object_dead;
-+atomic_unchecked_t fscache_n_retrieval_ops;
-+atomic_unchecked_t fscache_n_retrieval_op_waits;
-
--atomic_t fscache_n_stores;
--atomic_t fscache_n_stores_ok;
--atomic_t fscache_n_stores_again;
--atomic_t fscache_n_stores_nobufs;
--atomic_t fscache_n_stores_oom;
--atomic_t fscache_n_store_ops;
--atomic_t fscache_n_store_calls;
--atomic_t fscache_n_store_pages;
--atomic_t fscache_n_store_radix_deletes;
--atomic_t fscache_n_store_pages_over_limit;
-+atomic_unchecked_t fscache_n_stores;
-+atomic_unchecked_t fscache_n_stores_ok;
-+atomic_unchecked_t fscache_n_stores_again;
-+atomic_unchecked_t fscache_n_stores_nobufs;
-+atomic_unchecked_t fscache_n_stores_oom;
-+atomic_unchecked_t fscache_n_store_ops;
-+atomic_unchecked_t fscache_n_store_calls;
-+atomic_unchecked_t fscache_n_store_pages;
-+atomic_unchecked_t fscache_n_store_radix_deletes;
-+atomic_unchecked_t fscache_n_store_pages_over_limit;
-
--atomic_t fscache_n_store_vmscan_not_storing;
--atomic_t fscache_n_store_vmscan_gone;
--atomic_t fscache_n_store_vmscan_busy;
--atomic_t fscache_n_store_vmscan_cancelled;
-+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
-+atomic_unchecked_t fscache_n_store_vmscan_gone;
-+atomic_unchecked_t fscache_n_store_vmscan_busy;
-+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
-
--atomic_t fscache_n_marks;
--atomic_t fscache_n_uncaches;
-+atomic_unchecked_t fscache_n_marks;
-+atomic_unchecked_t fscache_n_uncaches;
-
--atomic_t fscache_n_acquires;
--atomic_t fscache_n_acquires_null;
--atomic_t fscache_n_acquires_no_cache;
--atomic_t fscache_n_acquires_ok;
--atomic_t fscache_n_acquires_nobufs;
--atomic_t fscache_n_acquires_oom;
-+atomic_unchecked_t fscache_n_acquires;
-+atomic_unchecked_t fscache_n_acquires_null;
-+atomic_unchecked_t fscache_n_acquires_no_cache;
-+atomic_unchecked_t fscache_n_acquires_ok;
-+atomic_unchecked_t fscache_n_acquires_nobufs;
-+atomic_unchecked_t fscache_n_acquires_oom;
-
--atomic_t fscache_n_updates;
--atomic_t fscache_n_updates_null;
--atomic_t fscache_n_updates_run;
-+atomic_unchecked_t fscache_n_updates;
-+atomic_unchecked_t fscache_n_updates_null;
-+atomic_unchecked_t fscache_n_updates_run;
-
--atomic_t fscache_n_relinquishes;
--atomic_t fscache_n_relinquishes_null;
--atomic_t fscache_n_relinquishes_waitcrt;
--atomic_t fscache_n_relinquishes_retire;
-+atomic_unchecked_t fscache_n_relinquishes;
-+atomic_unchecked_t fscache_n_relinquishes_null;
-+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
-+atomic_unchecked_t fscache_n_relinquishes_retire;
-
--atomic_t fscache_n_cookie_index;
--atomic_t fscache_n_cookie_data;
--atomic_t fscache_n_cookie_special;
-+atomic_unchecked_t fscache_n_cookie_index;
-+atomic_unchecked_t fscache_n_cookie_data;
-+atomic_unchecked_t fscache_n_cookie_special;
-
--atomic_t fscache_n_object_alloc;
--atomic_t fscache_n_object_no_alloc;
--atomic_t fscache_n_object_lookups;
--atomic_t fscache_n_object_lookups_negative;
--atomic_t fscache_n_object_lookups_positive;
--atomic_t fscache_n_object_lookups_timed_out;
--atomic_t fscache_n_object_created;
--atomic_t fscache_n_object_avail;
--atomic_t fscache_n_object_dead;
-+atomic_unchecked_t fscache_n_object_alloc;
-+atomic_unchecked_t fscache_n_object_no_alloc;
-+atomic_unchecked_t fscache_n_object_lookups;
-+atomic_unchecked_t fscache_n_object_lookups_negative;
-+atomic_unchecked_t fscache_n_object_lookups_positive;
-+atomic_unchecked_t fscache_n_object_lookups_timed_out;
-+atomic_unchecked_t fscache_n_object_created;
-+atomic_unchecked_t fscache_n_object_avail;
-+atomic_unchecked_t fscache_n_object_dead;
-
--atomic_t fscache_n_checkaux_none;
--atomic_t fscache_n_checkaux_okay;
--atomic_t fscache_n_checkaux_update;
--atomic_t fscache_n_checkaux_obsolete;
-+atomic_unchecked_t fscache_n_checkaux_none;
-+atomic_unchecked_t fscache_n_checkaux_okay;
-+atomic_unchecked_t fscache_n_checkaux_update;
-+atomic_unchecked_t fscache_n_checkaux_obsolete;
-
- atomic_t fscache_n_cop_alloc_object;
- atomic_t fscache_n_cop_lookup_object;
-@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
- seq_puts(m, "FS-Cache statistics\n");
-
- seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
-- atomic_read(&fscache_n_cookie_index),
-- atomic_read(&fscache_n_cookie_data),
-- atomic_read(&fscache_n_cookie_special));
-+ atomic_read_unchecked(&fscache_n_cookie_index),
-+ atomic_read_unchecked(&fscache_n_cookie_data),
-+ atomic_read_unchecked(&fscache_n_cookie_special));
-
- seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
-- atomic_read(&fscache_n_object_alloc),
-- atomic_read(&fscache_n_object_no_alloc),
-- atomic_read(&fscache_n_object_avail),
-- atomic_read(&fscache_n_object_dead));
-+ atomic_read_unchecked(&fscache_n_object_alloc),
-+ atomic_read_unchecked(&fscache_n_object_no_alloc),
-+ atomic_read_unchecked(&fscache_n_object_avail),
-+ atomic_read_unchecked(&fscache_n_object_dead));
- seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
-- atomic_read(&fscache_n_checkaux_none),
-- atomic_read(&fscache_n_checkaux_okay),
-- atomic_read(&fscache_n_checkaux_update),
-- atomic_read(&fscache_n_checkaux_obsolete));
-+ atomic_read_unchecked(&fscache_n_checkaux_none),
-+ atomic_read_unchecked(&fscache_n_checkaux_okay),
-+ atomic_read_unchecked(&fscache_n_checkaux_update),
-+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
-
- seq_printf(m, "Pages : mrk=%u unc=%u\n",
-- atomic_read(&fscache_n_marks),
-- atomic_read(&fscache_n_uncaches));
-+ atomic_read_unchecked(&fscache_n_marks),
-+ atomic_read_unchecked(&fscache_n_uncaches));
-
- seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
- " oom=%u\n",
-- atomic_read(&fscache_n_acquires),
-- atomic_read(&fscache_n_acquires_null),
-- atomic_read(&fscache_n_acquires_no_cache),
-- atomic_read(&fscache_n_acquires_ok),
-- atomic_read(&fscache_n_acquires_nobufs),
-- atomic_read(&fscache_n_acquires_oom));
-+ atomic_read_unchecked(&fscache_n_acquires),
-+ atomic_read_unchecked(&fscache_n_acquires_null),
-+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
-+ atomic_read_unchecked(&fscache_n_acquires_ok),
-+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
-+ atomic_read_unchecked(&fscache_n_acquires_oom));
-
- seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
-- atomic_read(&fscache_n_object_lookups),
-- atomic_read(&fscache_n_object_lookups_negative),
-- atomic_read(&fscache_n_object_lookups_positive),
-- atomic_read(&fscache_n_object_created),
-- atomic_read(&fscache_n_object_lookups_timed_out));
-+ atomic_read_unchecked(&fscache_n_object_lookups),
-+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
-+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
-+ atomic_read_unchecked(&fscache_n_object_created),
-+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
-
- seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
-- atomic_read(&fscache_n_updates),
-- atomic_read(&fscache_n_updates_null),
-- atomic_read(&fscache_n_updates_run));
-+ atomic_read_unchecked(&fscache_n_updates),
-+ atomic_read_unchecked(&fscache_n_updates_null),
-+ atomic_read_unchecked(&fscache_n_updates_run));
-
- seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
-- atomic_read(&fscache_n_relinquishes),
-- atomic_read(&fscache_n_relinquishes_null),
-- atomic_read(&fscache_n_relinquishes_waitcrt),
-- atomic_read(&fscache_n_relinquishes_retire));
-+ atomic_read_unchecked(&fscache_n_relinquishes),
-+ atomic_read_unchecked(&fscache_n_relinquishes_null),
-+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
-+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
-
- seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
-- atomic_read(&fscache_n_attr_changed),
-- atomic_read(&fscache_n_attr_changed_ok),
-- atomic_read(&fscache_n_attr_changed_nobufs),
-- atomic_read(&fscache_n_attr_changed_nomem),
-- atomic_read(&fscache_n_attr_changed_calls));
-+ atomic_read_unchecked(&fscache_n_attr_changed),
-+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
-+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
-+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
-+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
-
- seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
-- atomic_read(&fscache_n_allocs),
-- atomic_read(&fscache_n_allocs_ok),
-- atomic_read(&fscache_n_allocs_wait),
-- atomic_read(&fscache_n_allocs_nobufs),
-- atomic_read(&fscache_n_allocs_intr));
-+ atomic_read_unchecked(&fscache_n_allocs),
-+ atomic_read_unchecked(&fscache_n_allocs_ok),
-+ atomic_read_unchecked(&fscache_n_allocs_wait),
-+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
-+ atomic_read_unchecked(&fscache_n_allocs_intr));
- seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
-- atomic_read(&fscache_n_alloc_ops),
-- atomic_read(&fscache_n_alloc_op_waits),
-- atomic_read(&fscache_n_allocs_object_dead));
-+ atomic_read_unchecked(&fscache_n_alloc_ops),
-+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
-+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
-
- seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
- " int=%u oom=%u\n",
-- atomic_read(&fscache_n_retrievals),
-- atomic_read(&fscache_n_retrievals_ok),
-- atomic_read(&fscache_n_retrievals_wait),
-- atomic_read(&fscache_n_retrievals_nodata),
-- atomic_read(&fscache_n_retrievals_nobufs),
-- atomic_read(&fscache_n_retrievals_intr),
-- atomic_read(&fscache_n_retrievals_nomem));
-+ atomic_read_unchecked(&fscache_n_retrievals),
-+ atomic_read_unchecked(&fscache_n_retrievals_ok),
-+ atomic_read_unchecked(&fscache_n_retrievals_wait),
-+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
-+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
-+ atomic_read_unchecked(&fscache_n_retrievals_intr),
-+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
- seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
-- atomic_read(&fscache_n_retrieval_ops),
-- atomic_read(&fscache_n_retrieval_op_waits),
-- atomic_read(&fscache_n_retrievals_object_dead));
-+ atomic_read_unchecked(&fscache_n_retrieval_ops),
-+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
-+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
-
- seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
-- atomic_read(&fscache_n_stores),
-- atomic_read(&fscache_n_stores_ok),
-- atomic_read(&fscache_n_stores_again),
-- atomic_read(&fscache_n_stores_nobufs),
-- atomic_read(&fscache_n_stores_oom));
-+ atomic_read_unchecked(&fscache_n_stores),
-+ atomic_read_unchecked(&fscache_n_stores_ok),
-+ atomic_read_unchecked(&fscache_n_stores_again),
-+ atomic_read_unchecked(&fscache_n_stores_nobufs),
-+ atomic_read_unchecked(&fscache_n_stores_oom));
- seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
-- atomic_read(&fscache_n_store_ops),
-- atomic_read(&fscache_n_store_calls),
-- atomic_read(&fscache_n_store_pages),
-- atomic_read(&fscache_n_store_radix_deletes),
-- atomic_read(&fscache_n_store_pages_over_limit));
-+ atomic_read_unchecked(&fscache_n_store_ops),
-+ atomic_read_unchecked(&fscache_n_store_calls),
-+ atomic_read_unchecked(&fscache_n_store_pages),
-+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
-+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
-
- seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
-- atomic_read(&fscache_n_store_vmscan_not_storing),
-- atomic_read(&fscache_n_store_vmscan_gone),
-- atomic_read(&fscache_n_store_vmscan_busy),
-- atomic_read(&fscache_n_store_vmscan_cancelled));
-+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
-+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
-+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
-+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
-
- seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
-- atomic_read(&fscache_n_op_pend),
-- atomic_read(&fscache_n_op_run),
-- atomic_read(&fscache_n_op_enqueue),
-- atomic_read(&fscache_n_op_cancelled),
-- atomic_read(&fscache_n_op_rejected));
-+ atomic_read_unchecked(&fscache_n_op_pend),
-+ atomic_read_unchecked(&fscache_n_op_run),
-+ atomic_read_unchecked(&fscache_n_op_enqueue),
-+ atomic_read_unchecked(&fscache_n_op_cancelled),
-+ atomic_read_unchecked(&fscache_n_op_rejected));
- seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
-- atomic_read(&fscache_n_op_deferred_release),
-- atomic_read(&fscache_n_op_release),
-- atomic_read(&fscache_n_op_gc));
-+ atomic_read_unchecked(&fscache_n_op_deferred_release),
-+ atomic_read_unchecked(&fscache_n_op_release),
-+ atomic_read_unchecked(&fscache_n_op_gc));
-
- seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
- atomic_read(&fscache_n_cop_alloc_object),
-diff --git a/fs/fuse/control.c b/fs/fuse/control.c
-index 42593c5..0c6d731 100644
---- a/fs/fuse/control.c
-+++ b/fs/fuse/control.c
-@@ -347,6 +347,7 @@ static struct file_system_type fuse_ctl_fs_type = {
- .mount = fuse_ctl_mount,
- .kill_sb = fuse_ctl_kill_sb,
- };
-+MODULE_ALIAS_FS("fusectl");
-
- int __init fuse_ctl_init(void)
- {
-diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
-index 3426521..3b75162 100644
---- a/fs/fuse/cuse.c
-+++ b/fs/fuse/cuse.c
-@@ -587,10 +587,12 @@ static int __init cuse_init(void)
- INIT_LIST_HEAD(&cuse_conntbl[i]);
-
- /* inherit and extend fuse_dev_operations */
-- cuse_channel_fops = fuse_dev_operations;
-- cuse_channel_fops.owner = THIS_MODULE;
-- cuse_channel_fops.open = cuse_channel_open;
-- cuse_channel_fops.release = cuse_channel_release;
-+ pax_open_kernel();
-+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
-+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
-+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
-+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
-+ pax_close_kernel();
-
- cuse_class = class_create(THIS_MODULE, "cuse");
- if (IS_ERR(cuse_class))
-diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
-index 81970d7..594b748 100644
---- a/fs/fuse/dev.c
-+++ b/fs/fuse/dev.c
-@@ -1226,7 +1226,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
- ret = 0;
- pipe_lock(pipe);
-
-- if (!pipe->readers) {
-+ if (!atomic_read(&pipe->readers)) {
- send_sig(SIGPIPE, current, 0);
- if (!ret)
- ret = -EPIPE;
-diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
-index e13558c..56ca611 100644
---- a/fs/fuse/dir.c
-+++ b/fs/fuse/dir.c
-@@ -1150,7 +1150,7 @@ static char *read_link(struct dentry *dentry)
- return link;
- }
-
--static void free_link(char *link)
-+static void free_link(const char *link)
- {
- if (!IS_ERR(link))
- free_page((unsigned long) link);
-diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
-index afc0f706..a5489ea 100644
---- a/fs/fuse/inode.c
-+++ b/fs/fuse/inode.c
-@@ -1106,6 +1106,7 @@ static struct file_system_type fuse_fs_type = {
- .mount = fuse_mount,
- .kill_sb = fuse_kill_sb_anon,
- };
-+MODULE_ALIAS_FS("fuse");
-
- #ifdef CONFIG_BLOCK
- static struct dentry *fuse_mount_blk(struct file_system_type *fs_type,
-@@ -1135,6 +1136,7 @@ static struct file_system_type fuseblk_fs_type = {
- .kill_sb = fuse_kill_sb_blk,
- .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
- };
-+MODULE_ALIAS_FS("fuseblk");
-
- static inline int register_fuseblk(void)
- {
-diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
-index cfd4959..a780959 100644
---- a/fs/gfs2/inode.c
-+++ b/fs/gfs2/inode.c
-@@ -1490,7 +1490,7 @@ out:
-
- static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
- {
-- char *s = nd_get_link(nd);
-+ const char *s = nd_get_link(nd);
- if (!IS_ERR(s))
- kfree(s);
- }
-diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
-index cb23c2b..2fa1ea5 100644
---- a/fs/gfs2/ops_fstype.c
-+++ b/fs/gfs2/ops_fstype.c
-@@ -19,6 +19,7 @@
- #include <linux/mount.h>
- #include <linux/gfs2_ondisk.h>
- #include <linux/quotaops.h>
-+#include <linux/module.h>
-
- #include "gfs2.h"
- #include "incore.h"
-@@ -1395,6 +1396,7 @@ struct file_system_type gfs2_fs_type = {
- .kill_sb = gfs2_kill_sb,
- .owner = THIS_MODULE,
- };
-+MODULE_ALIAS_FS("gfs2");
-
- struct file_system_type gfs2meta_fs_type = {
- .name = "gfs2meta",
-@@ -1402,4 +1404,4 @@ struct file_system_type gfs2meta_fs_type = {
- .mount = gfs2_mount_meta,
- .owner = THIS_MODULE,
- };
--
-+MODULE_ALIAS_FS("gfs2meta");
-diff --git a/fs/hfs/super.c b/fs/hfs/super.c
-index 1b55f70..bd6c289 100644
---- a/fs/hfs/super.c
-+++ b/fs/hfs/super.c
-@@ -460,6 +460,7 @@ static struct file_system_type hfs_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("hfs");
-
- static void hfs_init_once(void *p)
- {
-diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
-index d24a9b6..b398147 100644
---- a/fs/hfsplus/super.c
-+++ b/fs/hfsplus/super.c
-@@ -582,6 +582,7 @@ static struct file_system_type hfsplus_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("hfsplus");
-
- static void hfsplus_init_once(void *p)
- {
-diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
-index 2f72da5..7ee87b1 100644
---- a/fs/hostfs/hostfs_kern.c
-+++ b/fs/hostfs/hostfs_kern.c
-@@ -999,6 +999,7 @@ static struct file_system_type hostfs_type = {
- .kill_sb = hostfs_kill_sb,
- .fs_flags = 0,
- };
-+MODULE_ALIAS_FS("hostfs");
-
- static int __init init_hostfs(void)
- {
-diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
-index b03e766..0b5cb61 100644
---- a/fs/hpfs/super.c
-+++ b/fs/hpfs/super.c
-@@ -707,6 +707,7 @@ static struct file_system_type hpfs_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("hpfs");
-
- static int __init init_hpfs_fs(void)
- {
-diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
-index f590b11..414cf4b 100644
---- a/fs/hppfs/hppfs.c
-+++ b/fs/hppfs/hppfs.c
-@@ -758,6 +758,7 @@ static struct file_system_type hppfs_type = {
- .kill_sb = kill_anon_super,
- .fs_flags = 0,
- };
-+MODULE_ALIAS_FS("hppfs");
-
- static int __init init_hppfs(void)
- {
-diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
-index 0aa424a..0725236 100644
---- a/fs/hugetlbfs/inode.c
-+++ b/fs/hugetlbfs/inode.c
-@@ -134,6 +134,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- struct vm_area_struct *vma;
- unsigned long start_addr;
- struct hstate *h = hstate_file(file);
-+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
-
- if (len & ~huge_page_mask(h))
- return -EINVAL;
-@@ -146,18 +147,21 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- return addr;
- }
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (addr) {
- addr = ALIGN(addr, huge_page_size(h));
- vma = find_vma(mm, addr);
-- if (TASK_SIZE - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
- return addr;
- }
-
- start_addr = mm->free_area_cache;
-
- if (len <= mm->cached_hole_size)
-- start_addr = TASK_UNMAPPED_BASE;
-+ start_addr = mm->mmap_base;
-
- full_search:
- addr = ALIGN(start_addr, huge_page_size(h));
-@@ -169,15 +173,17 @@ full_search:
- * Start a new search - just in case we missed
- * some holes.
- */
-- if (start_addr != TASK_UNMAPPED_BASE) {
-- start_addr = TASK_UNMAPPED_BASE;
-+ if (start_addr != mm->mmap_base) {
-+ start_addr = mm->mmap_base;
- goto full_search;
- }
- return -ENOMEM;
- }
-
-- if (!vma || addr + len <= vma->vm_start)
-+ if (check_heap_stack_gap(vma, &addr, len, offset)) {
-+ mm->free_area_cache = addr + len;
- return addr;
-+ }
- addr = ALIGN(vma->vm_end, huge_page_size(h));
- }
- }
-@@ -896,8 +902,9 @@ static struct file_system_type hugetlbfs_fs_type = {
- .mount = hugetlbfs_mount,
- .kill_sb = kill_litter_super,
- };
-+MODULE_ALIAS_FS("hugetlbfs");
-
--static struct vfsmount *hugetlbfs_vfsmount;
-+struct vfsmount *hugetlbfs_vfsmount;
-
- static int can_do_hugetlb_shm(void)
- {
-diff --git a/fs/inode.c b/fs/inode.c
-index e2d3633..da449b7 100644
---- a/fs/inode.c
-+++ b/fs/inode.c
-@@ -785,16 +785,20 @@ unsigned int get_next_ino(void)
- unsigned int *p = &get_cpu_var(last_ino);
- unsigned int res = *p;
-
-+start:
-+
- #ifdef CONFIG_SMP
- if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
-- static atomic_t shared_last_ino;
-- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
-+ static atomic_unchecked_t shared_last_ino;
-+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
-
- res = next - LAST_INO_BATCH;
- }
- #endif
-
-- *p = ++res;
-+ if (unlikely(!++res))
-+ goto start; /* never zero */
-+ *p = res;
- put_cpu_var(last_ino);
- return res;
- }
-@@ -855,8 +859,7 @@ void lockdep_annotate_inode_mutex_key(struct inode *inode)
- struct file_system_type *type = inode->i_sb->s_type;
-
- /* Set new key only if filesystem hasn't already changed it */
-- if (!lockdep_match_class(&inode->i_mutex,
-- &type->i_mutex_key)) {
-+ if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
- /*
- * ensure nobody is actually holding i_mutex
- */
-@@ -883,6 +886,7 @@ void unlock_new_inode(struct inode *inode)
- spin_lock(&inode->i_lock);
- WARN_ON(!(inode->i_state & I_NEW));
- inode->i_state &= ~I_NEW;
-+ smp_mb();
- wake_up_bit(&inode->i_state, __I_NEW);
- spin_unlock(&inode->i_lock);
- }
-diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
-index 2f9197f..e2f03bf 100644
---- a/fs/isofs/inode.c
-+++ b/fs/isofs/inode.c
-@@ -1540,6 +1540,8 @@ static struct file_system_type iso9660_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("iso9660");
-+MODULE_ALIAS("iso9660");
-
- static int __init init_iso9660_fs(void)
- {
-@@ -1577,5 +1579,3 @@ static void __exit exit_iso9660_fs(void)
- module_init(init_iso9660_fs)
- module_exit(exit_iso9660_fs)
- MODULE_LICENSE("GPL");
--/* Actual filesystem name is iso9660, as requested in filesystems.c */
--MODULE_ALIAS("iso9660");
-diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
-index e513f19..2ab1351 100644
---- a/fs/jffs2/erase.c
-+++ b/fs/jffs2/erase.c
-@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
- struct jffs2_unknown_node marker = {
- .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
- .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
-- .totlen = cpu_to_je32(c->cleanmarker_size)
-+ .totlen = cpu_to_je32(c->cleanmarker_size),
-+ .hdr_crc = cpu_to_je32(0)
- };
-
- jffs2_prealloc_raw_node_refs(c, jeb, 1);
-diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
-index e7e9744..0de4fd9 100644
---- a/fs/jffs2/super.c
-+++ b/fs/jffs2/super.c
-@@ -357,6 +357,7 @@ static struct file_system_type jffs2_fs_type = {
- .mount = jffs2_mount,
- .kill_sb = jffs2_kill_sb,
- };
-+MODULE_ALIAS_FS("jffs2");
-
- static int __init init_jffs2_fs(void)
- {
-diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
-index 464cd76..3a3ed7e 100644
---- a/fs/jffs2/wbuf.c
-+++ b/fs/jffs2/wbuf.c
-@@ -1011,7 +1011,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
- {
- .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
- .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
-- .totlen = constant_cpu_to_je32(8)
-+ .totlen = constant_cpu_to_je32(8),
-+ .hdr_crc = constant_cpu_to_je32(0)
- };
-
- /*
-diff --git a/fs/jfs/super.c b/fs/jfs/super.c
-index a44eff076..a4bf76a 100644
---- a/fs/jfs/super.c
-+++ b/fs/jfs/super.c
-@@ -780,6 +780,7 @@ static struct file_system_type jfs_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("jfs");
-
- static void init_once(void *foo)
- {
-@@ -802,7 +803,7 @@ static int __init init_jfs_fs(void)
-
- jfs_inode_cachep =
- kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
-- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
-+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
- init_once);
- if (jfs_inode_cachep == NULL)
- return -ENOMEM;
-diff --git a/fs/libfs.c b/fs/libfs.c
-index ce85edf..56ab3c0 100644
---- a/fs/libfs.c
-+++ b/fs/libfs.c
-@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
-
- for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
- struct dentry *next;
-+ char d_name[sizeof(next->d_iname)];
-+ const unsigned char *name;
-+
- next = list_entry(p, struct dentry, d_child);
- spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
- if (!simple_positive(next)) {
-@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
-
- spin_unlock(&next->d_lock);
- spin_unlock(&dentry->d_lock);
-- if (filldir(dirent, next->d_name.name,
-+ name = next->d_name.name;
-+ if (name == next->d_iname) {
-+ memcpy(d_name, name, next->d_name.len);
-+ name = d_name;
-+ }
-+ if (filldir(dirent, name,
- next->d_name.len, filp->f_pos,
- next->d_inode->i_ino,
- dt_type(next->d_inode)) < 0)
-diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
-index 8392cb8..80d6193 100644
---- a/fs/lockd/clntproc.c
-+++ b/fs/lockd/clntproc.c
-@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
- /*
- * Cookie counter for NLM requests
- */
--static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
-+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
-
- void nlmclnt_next_cookie(struct nlm_cookie *c)
- {
-- u32 cookie = atomic_inc_return(&nlm_cookie);
-+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
-
- memcpy(c->data, &cookie, 4);
- c->len=4;
-diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
-index 2444780..2544030 100644
---- a/fs/lockd/svc.c
-+++ b/fs/lockd/svc.c
-@@ -295,7 +295,7 @@ int lockd_up(void)
- svc_sock_update_bufs(serv);
- serv->sv_maxconn = nlm_max_connections;
-
-- nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
-+ nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, "%s", serv->sv_name);
- if (IS_ERR(nlmsvc_task)) {
- error = PTR_ERR(nlmsvc_task);
- svc_exit_thread(nlmsvc_rqst);
-diff --git a/fs/locks.c b/fs/locks.c
-index d4f1d89..0114708 100644
---- a/fs/locks.c
-+++ b/fs/locks.c
-@@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
- return;
-
- if (filp->f_op && filp->f_op->flock) {
-- struct file_lock fl = {
-+ struct file_lock flock = {
- .fl_pid = current->tgid,
- .fl_file = filp,
- .fl_flags = FL_FLOCK,
- .fl_type = F_UNLCK,
- .fl_end = OFFSET_MAX,
- };
-- filp->f_op->flock(filp, F_SETLKW, &fl);
-- if (fl.fl_ops && fl.fl_ops->fl_release_private)
-- fl.fl_ops->fl_release_private(&fl);
-+ filp->f_op->flock(filp, F_SETLKW, &flock);
-+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
-+ flock.fl_ops->fl_release_private(&flock);
- }
-
- lock_flocks();
-diff --git a/fs/logfs/super.c b/fs/logfs/super.c
-index e795c234..136932a 100644
---- a/fs/logfs/super.c
-+++ b/fs/logfs/super.c
-@@ -609,6 +609,7 @@ static struct file_system_type logfs_fs_type = {
- .fs_flags = FS_REQUIRES_DEV,
-
- };
-+MODULE_ALIAS_FS("logfs");
-
- static int __init logfs_init(void)
- {
-diff --git a/fs/minix/inode.c b/fs/minix/inode.c
-index 4d46a6a..dee1cdf 100644
---- a/fs/minix/inode.c
-+++ b/fs/minix/inode.c
-@@ -653,6 +653,7 @@ static struct file_system_type minix_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("minix");
-
- static int __init init_minix_fs(void)
- {
-diff --git a/fs/namei.c b/fs/namei.c
-index c8b13a9..2ec69cd 100644
---- a/fs/namei.c
-+++ b/fs/namei.c
-@@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask)
- if (ret != -EACCES)
- return ret;
-
-+#ifdef CONFIG_GRKERNSEC
-+ /* we'll block if we have to log due to a denied capability use */
-+ if (mask & MAY_NOT_BLOCK)
-+ return -ECHILD;
-+#endif
-+
- if (S_ISDIR(inode->i_mode)) {
- /* DACs are overridable for directories */
-- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
-- return 0;
- if (!(mask & MAY_WRITE))
-- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
-+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
-+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
- return 0;
-+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
-+ return 0;
- return -EACCES;
- }
- /*
-+ * Searching includes executable on directories, else just read.
-+ */
-+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
-+ if (mask == MAY_READ)
-+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
-+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
-+ return 0;
-+
-+ /*
- * Read/write DACs are always overridable.
- * Executable DACs are overridable when there is
- * at least one exec bit set.
-@@ -297,14 +313,6 @@ int generic_permission(struct inode *inode, int mask)
- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
- return 0;
-
-- /*
-- * Searching includes executable on directories, else just read.
-- */
-- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
-- if (mask == MAY_READ)
-- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
-- return 0;
--
- return -EACCES;
- }
-
-@@ -652,11 +660,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
- return error;
- }
-
-+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
-+ dentry->d_inode, dentry, nd->path.mnt)) {
-+ error = -EACCES;
-+ *p = ERR_PTR(error); /* no ->put_link(), please */
-+ path_put(&nd->path);
-+ return error;
-+ }
-+
- nd->last_type = LAST_BIND;
- *p = dentry->d_inode->i_op->follow_link(dentry, nd);
- error = PTR_ERR(*p);
- if (!IS_ERR(*p)) {
-- char *s = nd_get_link(nd);
-+ const char *s = nd_get_link(nd);
- error = 0;
- if (s)
- error = __vfs_follow_link(nd, s);
-@@ -1345,6 +1361,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
- if (!res)
- res = walk_component(nd, path, &nd->last,
- nd->last_type, LOOKUP_FOLLOW);
-+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
-+ res = -EACCES;
- put_link(nd, &link, cookie);
- } while (res > 0);
-
-@@ -1625,6 +1643,8 @@ static int path_lookupat(int dfd, const char *name,
- err = follow_link(&link, nd, &cookie);
- if (!err)
- err = lookup_last(nd, &path);
-+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
-+ err = -EACCES;
- put_link(nd, &link, cookie);
- }
- }
-@@ -1632,6 +1652,13 @@ static int path_lookupat(int dfd, const char *name,
- if (!err)
- err = complete_walk(nd);
-
-+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
-+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
-+ path_put(&nd->path);
-+ err = -ENOENT;
-+ }
-+ }
-+
- if (!err && nd->flags & LOOKUP_DIRECTORY) {
- if (!nd->inode->i_op->lookup) {
- path_put(&nd->path);
-@@ -1663,6 +1690,12 @@ static int do_path_lookup(int dfd, const char *name,
- if (nd->path.dentry && nd->inode)
- audit_inode(name, nd->path.dentry);
- }
-+ if (*name != '/' && nd->path.dentry && nd->inode) {
-+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
-+ path_put(&nd->path);
-+ return -ENOENT;
-+ }
-+ }
- }
- return retval;
- }
-@@ -1792,7 +1825,13 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
- if (!len)
- return ERR_PTR(-EACCES);
-
-+ if (unlikely(name[0] == '.')) {
-+ if (len < 2 || (len == 2 && name[1] == '.'))
-+ return ERR_PTR(-EACCES);
-+ }
-+
- hash = init_name_hash();
-+
- while (len--) {
- c = *(const unsigned char *)name++;
- if (c == '/' || c == '\0')
-@@ -2056,6 +2095,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
- if (flag & O_NOATIME && !inode_owner_or_capable(inode))
- return -EPERM;
-
-+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
-+ return -EPERM;
-+ if (gr_handle_rawio(inode))
-+ return -EPERM;
-+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
-+ return -EACCES;
-+
- return 0;
- }
-
-@@ -2091,7 +2137,7 @@ static inline int open_to_namei_flags(int flag)
- /*
- * Handle the last step of open()
- */
--static struct file *do_last(struct nameidata *nd, struct path *path,
-+static struct file *do_last(struct nameidata *nd, struct path *path, struct path *link,
- const struct open_flags *op, const char *pathname)
- {
- struct dentry *dir = nd->path.dentry;
-@@ -2117,16 +2163,32 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
- error = complete_walk(nd);
- if (error)
- return ERR_PTR(error);
-+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
-+ error = -ENOENT;
-+ goto exit;
-+ }
- audit_inode(pathname, nd->path.dentry);
- if (open_flag & O_CREAT) {
- error = -EISDIR;
- goto exit;
- }
-+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
-+ error = -EACCES;
-+ goto exit;
-+ }
- goto ok;
- case LAST_BIND:
- error = complete_walk(nd);
- if (error)
- return ERR_PTR(error);
-+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
-+ error = -ENOENT;
-+ goto exit;
-+ }
-+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
-+ error = -EACCES;
-+ goto exit;
-+ }
- audit_inode(pathname, dir);
- goto ok;
- }
-@@ -2142,18 +2204,31 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
- !symlink_ok);
- if (error < 0)
- return ERR_PTR(error);
-- if (error) /* symlink */
-+ if (error) /* symlink */ {
-+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
-+ error = -EACCES;
-+ goto exit;
-+ }
- return NULL;
-+ }
- /* sayonara */
- error = complete_walk(nd);
- if (error)
- return ERR_PTR(error);
-+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
-+ error = -ENOENT;
-+ goto exit;
-+ }
-
- error = -ENOTDIR;
- if (nd->flags & LOOKUP_DIRECTORY) {
- if (!nd->inode->i_op->lookup)
- goto exit;
- }
-+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
-+ error = -EACCES;
-+ goto exit;
-+ }
- audit_inode(pathname, nd->path.dentry);
- goto ok;
- }
-@@ -2188,6 +2263,17 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
- /* Negative dentry, just create the file */
- if (!dentry->d_inode) {
- int mode = op->mode;
-+
-+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
-+ error = -EACCES;
-+ goto exit_mutex_unlock;
-+ }
-+
-+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
-+ error = -EACCES;
-+ goto exit_mutex_unlock;
-+ }
-+
- if (!IS_POSIXACL(dir->d_inode))
- mode &= ~current_umask();
- /*
-@@ -2211,6 +2297,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
- error = vfs_create(dir->d_inode, dentry, mode, nd);
- if (error)
- goto exit_mutex_unlock;
-+ else
-+ gr_handle_create(path->dentry, path->mnt);
- mutex_unlock(&dir->d_inode->i_mutex);
- dput(nd->path.dentry);
- nd->path.dentry = dentry;
-@@ -2220,6 +2308,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
- /*
- * It already exists.
- */
-+
-+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
-+ error = -ENOENT;
-+ goto exit_mutex_unlock;
-+ }
-+
-+ /* only check if O_CREAT is specified, all other checks need to go
-+ into may_open */
-+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
-+ error = -EACCES;
-+ goto exit_mutex_unlock;
-+ }
-+
- mutex_unlock(&dir->d_inode->i_mutex);
- audit_inode(pathname, path->dentry);
-
-@@ -2238,11 +2339,17 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
- if (!path->dentry->d_inode)
- goto exit_dput;
-
-- if (path->dentry->d_inode->i_op->follow_link)
-+ if (path->dentry->d_inode->i_op->follow_link) {
-+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
-+ error = -EACCES;
-+ goto exit;
-+ }
- return NULL;
-+ }
-
- path_to_nameidata(path, nd);
- nd->inode = path->dentry->d_inode;
-+
- /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
- error = complete_walk(nd);
- if (error)
-@@ -2250,6 +2357,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
- error = -EISDIR;
- if (S_ISDIR(nd->inode->i_mode))
- goto exit;
-+
-+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
-+ error = -EACCES;
-+ goto exit;
-+ }
-+
- ok:
- if (!S_ISREG(nd->inode->i_mode))
- will_truncate = 0;
-@@ -2322,7 +2435,7 @@ static struct file *path_openat(int dfd, const char *pathname,
- if (unlikely(error))
- goto out_filp;
-
-- filp = do_last(nd, &path, op, pathname);
-+ filp = do_last(nd, &path, NULL, op, pathname);
- while (unlikely(!filp)) { /* trailing symlink */
- struct path link = path;
- void *cookie;
-@@ -2337,8 +2450,9 @@ static struct file *path_openat(int dfd, const char *pathname,
- error = follow_link(&link, nd, &cookie);
- if (unlikely(error))
- filp = ERR_PTR(error);
-- else
-- filp = do_last(nd, &path, op, pathname);
-+ else {
-+ filp = do_last(nd, &path, &link, op, pathname);
-+ }
- put_link(nd, &link, cookie);
- }
- out:
-@@ -2432,6 +2546,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
- *path = nd.path;
- return dentry;
- eexist:
-+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
-+ dput(dentry);
-+ dentry = ERR_PTR(-ENOENT);
-+ goto fail;
-+ }
- dput(dentry);
- dentry = ERR_PTR(-EEXIST);
- fail:
-@@ -2454,6 +2573,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
- }
- EXPORT_SYMBOL(user_path_create);
-
-+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
-+{
-+ char *tmp = getname(pathname);
-+ struct dentry *res;
-+ if (IS_ERR(tmp))
-+ return ERR_CAST(tmp);
-+ res = kern_path_create(dfd, tmp, path, is_dir);
-+ if (IS_ERR(res))
-+ putname(tmp);
-+ else
-+ *to = tmp;
-+ return res;
-+}
-+
- int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
- {
- int error = may_create(dir, dentry);
-@@ -2521,6 +2654,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
- error = mnt_want_write(path.mnt);
- if (error)
- goto out_dput;
-+
-+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
-+ error = -EPERM;
-+ goto out_drop_write;
-+ }
-+
-+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
-+ error = -EACCES;
-+ goto out_drop_write;
-+ }
-+
- error = security_path_mknod(&path, dentry, mode, dev);
- if (error)
- goto out_drop_write;
-@@ -2538,6 +2682,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
- }
- out_drop_write:
- mnt_drop_write(path.mnt);
-+
-+ if (!error)
-+ gr_handle_create(dentry, path.mnt);
- out_dput:
- dput(dentry);
- mutex_unlock(&path.dentry->d_inode->i_mutex);
-@@ -2587,12 +2734,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
- error = mnt_want_write(path.mnt);
- if (error)
- goto out_dput;
-+
-+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
-+ error = -EACCES;
-+ goto out_drop_write;
-+ }
-+
- error = security_path_mkdir(&path, dentry, mode);
- if (error)
- goto out_drop_write;
- error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
- out_drop_write:
- mnt_drop_write(path.mnt);
-+
-+ if (!error)
-+ gr_handle_create(dentry, path.mnt);
- out_dput:
- dput(dentry);
- mutex_unlock(&path.dentry->d_inode->i_mutex);
-@@ -2672,6 +2828,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
- char * name;
- struct dentry *dentry;
- struct nameidata nd;
-+ u64 saved_ino = 0;
-+ dev_t saved_dev = 0;
-
- error = user_path_parent(dfd, pathname, &nd, &name);
- if (error)
-@@ -2700,6 +2858,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
- error = -ENOENT;
- goto exit3;
- }
-+
-+ saved_ino = gr_get_ino_from_dentry(dentry);
-+ saved_dev = gr_get_dev_from_dentry(dentry);
-+
-+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
-+ error = -EACCES;
-+ goto exit3;
-+ }
-+
- error = mnt_want_write(nd.path.mnt);
- if (error)
- goto exit3;
-@@ -2707,6 +2874,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
- if (error)
- goto exit4;
- error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
-+ if (!error && (saved_dev || saved_ino))
-+ gr_handle_delete(saved_ino, saved_dev);
- exit4:
- mnt_drop_write(nd.path.mnt);
- exit3:
-@@ -2769,6 +2938,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
- struct dentry *dentry;
- struct nameidata nd;
- struct inode *inode = NULL;
-+ u64 saved_ino = 0;
-+ dev_t saved_dev = 0;
-
- error = user_path_parent(dfd, pathname, &nd, &name);
- if (error)
-@@ -2791,6 +2962,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
- if (!inode)
- goto slashes;
- ihold(inode);
-+
-+ if (inode->i_nlink <= 1) {
-+ saved_ino = gr_get_ino_from_dentry(dentry);
-+ saved_dev = gr_get_dev_from_dentry(dentry);
-+ }
-+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
-+ error = -EACCES;
-+ goto exit2;
-+ }
-+
- error = mnt_want_write(nd.path.mnt);
- if (error)
- goto exit2;
-@@ -2798,6 +2979,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
- if (error)
- goto exit3;
- error = vfs_unlink(nd.path.dentry->d_inode, dentry);
-+ if (!error && (saved_ino || saved_dev))
-+ gr_handle_delete(saved_ino, saved_dev);
- exit3:
- mnt_drop_write(nd.path.mnt);
- exit2:
-@@ -2873,10 +3056,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
- error = mnt_want_write(path.mnt);
- if (error)
- goto out_dput;
-+
-+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
-+ error = -EACCES;
-+ goto out_drop_write;
-+ }
-+
- error = security_path_symlink(&path, dentry, from);
- if (error)
- goto out_drop_write;
- error = vfs_symlink(path.dentry->d_inode, dentry, from);
-+ if (!error)
-+ gr_handle_create(dentry, path.mnt);
- out_drop_write:
- mnt_drop_write(path.mnt);
- out_dput:
-@@ -2948,6 +3139,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
- {
- struct dentry *new_dentry;
- struct path old_path, new_path;
-+ char *to = NULL;
- int how = 0;
- int error;
-
-@@ -2971,7 +3163,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
- if (error)
- return error;
-
-- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
-+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
- error = PTR_ERR(new_dentry);
- if (IS_ERR(new_dentry))
- goto out;
-@@ -2982,13 +3174,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
- error = mnt_want_write(new_path.mnt);
- if (error)
- goto out_dput;
-+
-+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
-+ old_path.dentry->d_inode,
-+ old_path.dentry->d_inode->i_mode, to)) {
-+ error = -EACCES;
-+ goto out_drop_write;
-+ }
-+
-+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
-+ old_path.dentry, old_path.mnt, to)) {
-+ error = -EACCES;
-+ goto out_drop_write;
-+ }
-+
- error = security_path_link(old_path.dentry, &new_path, new_dentry);
- if (error)
- goto out_drop_write;
- error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
-+ if (!error)
-+ gr_handle_create(new_dentry, new_path.mnt);
- out_drop_write:
- mnt_drop_write(new_path.mnt);
- out_dput:
-+ putname(to);
- dput(new_dentry);
- mutex_unlock(&new_path.dentry->d_inode->i_mutex);
- path_put(&new_path);
-@@ -3216,6 +3425,20 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
- if (new_dentry == trap)
- goto exit5;
-
-+ if (gr_bad_chroot_rename(old_dentry, oldnd.path.mnt, new_dentry, newnd.path.mnt)) {
-+ /* use EXDEV error to cause 'mv' to switch to an alternative
-+ * method for usability
-+ */
-+ error = -EXDEV;
-+ goto exit5;
-+ }
-+
-+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
-+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
-+ to);
-+ if (error)
-+ goto exit5;
-+
- error = mnt_want_write(oldnd.path.mnt);
- if (error)
- goto exit5;
-@@ -3225,6 +3448,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
- goto exit6;
- error = vfs_rename(old_dir->d_inode, old_dentry,
- new_dir->d_inode, new_dentry);
-+ if (!error)
-+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
-+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
- exit6:
- mnt_drop_write(oldnd.path.mnt);
- exit5:
-@@ -3250,6 +3476,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
-
- int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
- {
-+ char tmpbuf[64];
-+ const char *newlink;
- int len;
-
- len = PTR_ERR(link);
-@@ -3259,7 +3487,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
- len = strlen(link);
- if (len > (unsigned) buflen)
- len = buflen;
-- if (copy_to_user(buffer, link, len))
-+
-+ if (len < sizeof(tmpbuf)) {
-+ memcpy(tmpbuf, link, len);
-+ newlink = tmpbuf;
-+ } else
-+ newlink = link;
-+
-+ if (copy_to_user(buffer, newlink, len))
- len = -EFAULT;
- out:
- return len;
-diff --git a/fs/namespace.c b/fs/namespace.c
-index a1e663d..83eea6e 100644
---- a/fs/namespace.c
-+++ b/fs/namespace.c
-@@ -1327,6 +1327,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
- if (!(sb->s_flags & MS_RDONLY))
- retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
- up_write(&sb->s_umount);
-+
-+ gr_log_remount(mnt->mnt_devname, retval);
-+
- return retval;
- }
-
-@@ -1346,6 +1349,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
- br_write_unlock(vfsmount_lock);
- up_write(&namespace_sem);
- release_mounts(&umount_list);
-+
-+ gr_log_unmount(mnt->mnt_devname, retval);
-+
- return retval;
- }
-
-@@ -1357,7 +1363,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
- * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
- */
-
--SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
-+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
- {
- struct path path;
- int retval;
-@@ -1396,7 +1402,7 @@ out:
- /*
- * The 2.0 compatible umount. No flags.
- */
--SYSCALL_DEFINE1(oldumount, char __user *, name)
-+SYSCALL_DEFINE1(oldumount, const char __user *, name)
- {
- return sys_umount(name, 0);
- }
-@@ -2345,6 +2351,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
- MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
- MS_STRICTATIME);
-
-+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
-+ retval = -EPERM;
-+ goto dput_out;
-+ }
-+
-+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
-+ retval = -EPERM;
-+ goto dput_out;
-+ }
-+
- if (flags & MS_REMOUNT)
- retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
- data_page);
-@@ -2359,6 +2375,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
- dev_name, data_page);
- dput_out:
- path_put(&path);
-+
-+ gr_log_mount(dev_name, dir_name, retval);
-+
- return retval;
- }
-
-@@ -2397,7 +2416,7 @@ void mnt_make_shortterm(struct vfsmount *mnt)
- * Allocate a new namespace structure and populate it with contents
- * copied from the namespace of the passed in task structure.
- */
--static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
-+static __latent_entropy struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
- struct fs_struct *fs)
- {
- struct mnt_namespace *new_ns;
-@@ -2526,8 +2545,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
- }
- EXPORT_SYMBOL(mount_subtree);
-
--SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
-- char __user *, type, unsigned long, flags, void __user *, data)
-+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
-+ const char __user *, type, unsigned long, flags, void __user *, data)
- {
- int ret;
- char *kernel_type;
-@@ -2614,6 +2633,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
- if (error)
- goto out2;
-
-+ if (gr_handle_chroot_pivot()) {
-+ error = -EPERM;
-+ goto out2;
-+ }
-+
- get_fs_root(current->fs, &root);
- error = lock_mount(&old);
- if (error)
-diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
-index cbd1a61..b43f68b 100644
---- a/fs/ncpfs/inode.c
-+++ b/fs/ncpfs/inode.c
-@@ -1041,6 +1041,7 @@ static struct file_system_type ncp_fs_type = {
- .kill_sb = kill_anon_super,
- .fs_flags = FS_BINARY_MOUNTDATA,
- };
-+MODULE_ALIAS_FS("ncpfs");
-
- static int __init init_ncp_fs(void)
- {
-diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
-index 3172b51..98740f4 100644
---- a/fs/nfs/callback.c
-+++ b/fs/nfs/callback.c
-@@ -252,7 +252,6 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
- struct svc_rqst *rqstp;
- int (*callback_svc)(void *vrqstp);
- struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
-- char svc_name[12];
- int ret = 0;
- int minorversion_setup;
-
-@@ -282,10 +281,9 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
-
- svc_sock_update_bufs(serv);
-
-- sprintf(svc_name, "nfsv4.%u-svc", minorversion);
- cb_info->serv = serv;
- cb_info->rqst = rqstp;
-- cb_info->task = kthread_run(callback_svc, cb_info->rqst, svc_name);
-+ cb_info->task = kthread_run(callback_svc, cb_info->rqst, "nfsv4.%u-svc", minorversion);
- if (IS_ERR(cb_info->task)) {
- ret = PTR_ERR(cb_info->task);
- svc_exit_thread(cb_info->rqst);
-diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
-index 6d22d35..548abd8 100644
---- a/fs/nfs/callback_xdr.c
-+++ b/fs/nfs/callback_xdr.c
-@@ -50,7 +50,7 @@ struct callback_op {
- callback_decode_arg_t decode_args;
- callback_encode_res_t encode_res;
- long res_maxsize;
--};
-+} __do_const;
-
- static struct callback_op callback_ops[];
-
-diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
-index 756f4df..8bd49ca 100644
---- a/fs/nfs/dir.c
-+++ b/fs/nfs/dir.c
-@@ -500,7 +500,8 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
- nfs_refresh_inode(dentry->d_inode, entry->fattr);
- goto out;
- } else {
-- d_drop(dentry);
-+ if (d_invalidate(dentry) != 0)
-+ goto out;
- dput(dentry);
- }
- }
-@@ -1164,6 +1165,8 @@ out_set_verifier:
- out_zap_parent:
- nfs_zap_caches(dir);
- out_bad:
-+ nfs_free_fattr(fattr);
-+ nfs_free_fhandle(fhandle);
- nfs_mark_for_revalidate(dir);
- if (inode && S_ISDIR(inode->i_mode)) {
- /* Purge readdir caches. */
-@@ -1176,8 +1179,6 @@ out_zap_parent:
- shrink_dcache_parent(dentry);
- }
- d_drop(dentry);
-- nfs_free_fattr(fattr);
-- nfs_free_fhandle(fhandle);
- dput(parent);
- dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is invalid\n",
- __func__, dentry->d_parent->d_name.name,
-diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
-index b78b5b6..c64d84f 100644
---- a/fs/nfs/inode.c
-+++ b/fs/nfs/inode.c
-@@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
- return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
- }
-
--static atomic_long_t nfs_attr_generation_counter;
-+static atomic_long_unchecked_t nfs_attr_generation_counter;
-
- static unsigned long nfs_read_attr_generation_counter(void)
- {
-- return atomic_long_read(&nfs_attr_generation_counter);
-+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
- }
-
- unsigned long nfs_inc_attr_generation_counter(void)
- {
-- return atomic_long_inc_return(&nfs_attr_generation_counter);
-+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
- }
-
- void nfs_fattr_init(struct nfs_fattr *fattr)
-diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
-index e83786f..c6f7fd6 100644
---- a/fs/nfs/nfs4proc.c
-+++ b/fs/nfs/nfs4proc.c
-@@ -1036,7 +1036,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
- struct nfs4_state *state = opendata->state;
- struct nfs_inode *nfsi = NFS_I(state->inode);
- struct nfs_delegation *delegation;
-- int open_mode = opendata->o_arg.open_flags & O_EXCL;
-+ int open_mode = opendata->o_arg.open_flags;
- fmode_t fmode = opendata->o_arg.fmode;
- nfs4_stateid stateid;
- int ret = -EAGAIN;
-diff --git a/fs/nfs/super.c b/fs/nfs/super.c
-index 1943898..396c460 100644
---- a/fs/nfs/super.c
-+++ b/fs/nfs/super.c
-@@ -282,6 +282,7 @@ static struct file_system_type nfs_fs_type = {
- .kill_sb = nfs_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
- };
-+MODULE_ALIAS_FS("nfs");
-
- struct file_system_type nfs_xdev_fs_type = {
- .owner = THIS_MODULE,
-@@ -338,6 +339,8 @@ static struct file_system_type nfs4_remote_fs_type = {
- .kill_sb = nfs4_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
- };
-+MODULE_ALIAS_FS("nfs4");
-+MODULE_ALIAS("nfs4");
-
- struct file_system_type nfs4_xdev_fs_type = {
- .owner = THIS_MODULE,
-@@ -3089,6 +3092,4 @@ static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
- return res;
- }
-
--MODULE_ALIAS("nfs4");
--
- #endif /* CONFIG_NFS_V4 */
-diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
-index 9a959de..db4b27b 100644
---- a/fs/nfsd/nfs4proc.c
-+++ b/fs/nfsd/nfs4proc.c
-@@ -1039,7 +1039,7 @@ struct nfsd4_operation {
- char *op_name;
- /* Try to get response size before operation */
- nfsd4op_rsize op_rsize_bop;
--};
-+} __do_const;
-
- static struct nfsd4_operation nfsd4_ops[];
-
-diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
-index 9d2c52b..c9d6c2aa 100644
---- a/fs/nfsd/nfs4xdr.c
-+++ b/fs/nfsd/nfs4xdr.c
-@@ -1456,7 +1456,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
-
- typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
-
--static nfsd4_dec nfsd4_dec_ops[] = {
-+static const nfsd4_dec nfsd4_dec_ops[] = {
- [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
- [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
- [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
-@@ -1496,7 +1496,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
- [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
- };
-
--static nfsd4_dec nfsd41_dec_ops[] = {
-+static const nfsd4_dec nfsd41_dec_ops[] = {
- [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
- [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
- [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
-@@ -1558,7 +1558,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
- };
-
- struct nfsd4_minorversion_ops {
-- nfsd4_dec *decoders;
-+ const nfsd4_dec *decoders;
- int nops;
- };
-
-diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
-index 2cbac34..21c9120 100644
---- a/fs/nfsd/nfscache.c
-+++ b/fs/nfsd/nfscache.c
-@@ -259,13 +259,16 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
- {
- struct svc_cacherep *rp;
- struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
-- int len;
-+ long len;
-
- if (!(rp = rqstp->rq_cacherep) || cache_disabled)
- return;
-
-- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
-- len >>= 2;
-+ if (statp) {
-+ len = (char*)statp - (char*)resv->iov_base;
-+ len = resv->iov_len - len;
-+ len >>= 2;
-+ }
-
- /* Don't cache excessive amounts of data and XDR failures */
- if (!statp || len > (256 >> 2)) {
-diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
-index c45a2ea..1a6bd66 100644
---- a/fs/nfsd/nfsctl.c
-+++ b/fs/nfsd/nfsctl.c
-@@ -1102,6 +1102,7 @@ static struct file_system_type nfsd_fs_type = {
- .mount = nfsd_mount,
- .kill_sb = kill_litter_super,
- };
-+MODULE_ALIAS_FS("nfsd");
-
- #ifdef CONFIG_PROC_FS
- static int create_proc_exports_entry(void)
-diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
-index e2e7914..f057f88 100644
---- a/fs/nfsd/vfs.c
-+++ b/fs/nfsd/vfs.c
-@@ -960,7 +960,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
- } else {
- oldfs = get_fs();
- set_fs(KERNEL_DS);
-- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
-+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
- set_fs(oldfs);
- }
-
-@@ -1064,7 +1064,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
-
- /* Write the data. */
- oldfs = get_fs(); set_fs(KERNEL_DS);
-- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
-+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
- set_fs(oldfs);
- if (host_err < 0)
- goto out_nfserr;
-@@ -1605,7 +1605,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
- */
-
- oldfs = get_fs(); set_fs(KERNEL_DS);
-- host_err = inode->i_op->readlink(dentry, buf, *lenp);
-+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
- set_fs(oldfs);
-
- if (host_err < 0)
-diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
-index 97bfbdd..e7f644a 100644
---- a/fs/nilfs2/super.c
-+++ b/fs/nilfs2/super.c
-@@ -1370,6 +1370,7 @@ struct file_system_type nilfs_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("nilfs2");
-
- static void nilfs_inode_init_once(void *obj)
- {
-diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
-index 1c98f53..41e6a04 100644
---- a/fs/nilfs2/the_nilfs.c
-+++ b/fs/nilfs2/the_nilfs.c
-@@ -410,6 +410,12 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
- nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block);
- nilfs->ns_r_segments_percentage =
- le32_to_cpu(sbp->s_r_segments_percentage);
-+ if (nilfs->ns_r_segments_percentage < 1 ||
-+ nilfs->ns_r_segments_percentage > 99) {
-+ printk(KERN_ERR "NILFS: invalid reserved segments percentage.\n");
-+ return -EINVAL;
-+ }
-+
- nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments));
- nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
- return 0;
-diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
-index 0eb059ec..e086089 100644
---- a/fs/nls/nls_base.c
-+++ b/fs/nls/nls_base.c
-@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
-
- int register_nls(struct nls_table * nls)
- {
-- struct nls_table ** tmp = &tables;
-+ struct nls_table *tmp = tables;
-
- if (nls->next)
- return -EBUSY;
-
- spin_lock(&nls_lock);
-- while (*tmp) {
-- if (nls == *tmp) {
-+ while (tmp) {
-+ if (nls == tmp) {
- spin_unlock(&nls_lock);
- return -EBUSY;
- }
-- tmp = &(*tmp)->next;
-+ tmp = tmp->next;
- }
-- nls->next = tables;
-+ pax_open_kernel();
-+ *(struct nls_table **)&nls->next = tables;
-+ pax_close_kernel();
- tables = nls;
- spin_unlock(&nls_lock);
- return 0;
-@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
-
- int unregister_nls(struct nls_table * nls)
- {
-- struct nls_table ** tmp = &tables;
-+ struct nls_table * const * tmp = &tables;
-
- spin_lock(&nls_lock);
- while (*tmp) {
- if (nls == *tmp) {
-- *tmp = nls->next;
-+ pax_open_kernel();
-+ *(struct nls_table **)tmp = nls->next;
-+ pax_close_kernel();
- spin_unlock(&nls_lock);
- return 0;
- }
-diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
-index 7424929..35f6be5 100644
---- a/fs/nls/nls_euc-jp.c
-+++ b/fs/nls/nls_euc-jp.c
-@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
- p_nls = load_nls("cp932");
-
- if (p_nls) {
-- table.charset2upper = p_nls->charset2upper;
-- table.charset2lower = p_nls->charset2lower;
-+ pax_open_kernel();
-+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
-+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
-+ pax_close_kernel();
- return register_nls(&table);
- }
-
-diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
-index e7bc1d7..06bd4bb 100644
---- a/fs/nls/nls_koi8-ru.c
-+++ b/fs/nls/nls_koi8-ru.c
-@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
- p_nls = load_nls("koi8-u");
-
- if (p_nls) {
-- table.charset2upper = p_nls->charset2upper;
-- table.charset2lower = p_nls->charset2lower;
-+ pax_open_kernel();
-+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
-+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
-+ pax_close_kernel();
- return register_nls(&table);
- }
-
-diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
-index d57995e..76a343a 100644
---- a/fs/notify/fanotify/fanotify_user.c
-+++ b/fs/notify/fanotify/fanotify_user.c
-@@ -277,7 +277,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
- goto out_close_fd;
-
- ret = -EFAULT;
-- if (copy_to_user(buf, &fanotify_event_metadata,
-+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
-+ copy_to_user(buf, &fanotify_event_metadata,
- fanotify_event_metadata.event_len))
- goto out_kill_access_response;
-
-diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
-index e3cbd74..2980aa8 100644
---- a/fs/notify/inotify/inotify_fsnotify.c
-+++ b/fs/notify/inotify/inotify_fsnotify.c
-@@ -198,8 +198,10 @@ static void inotify_free_group_priv(struct fsnotify_group *group)
- idr_for_each(&group->inotify_data.idr, idr_callback, group);
- idr_remove_all(&group->inotify_data.idr);
- idr_destroy(&group->inotify_data.idr);
-- atomic_dec(&group->inotify_data.user->inotify_devs);
-- free_uid(group->inotify_data.user);
-+ if (group->inotify_data.user) {
-+ atomic_dec(&group->inotify_data.user->inotify_devs);
-+ free_uid(group->inotify_data.user);
-+ }
- }
-
- void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
-diff --git a/fs/notify/notification.c b/fs/notify/notification.c
-index ee18815..7aa5d01 100644
---- a/fs/notify/notification.c
-+++ b/fs/notify/notification.c
-@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
- * get set to 0 so it will never get 'freed'
- */
- static struct fsnotify_event *q_overflow_event;
--static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
-+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
-
- /**
- * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
-@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
- */
- u32 fsnotify_get_cookie(void)
- {
-- return atomic_inc_return(&fsnotify_sync_cookie);
-+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
- }
- EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
-
-diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
-index 99e3610..02c1068 100644
---- a/fs/ntfs/dir.c
-+++ b/fs/ntfs/dir.c
-@@ -1329,7 +1329,7 @@ find_next_index_buffer:
- ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
- ~(s64)(ndir->itype.index.block_size - 1)));
- /* Bounds checks. */
-- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
-+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
- ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
- "inode 0x%lx or driver bug.", vdir->i_ino);
- goto err_out;
-diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
-index c587e2d..48a16cd 100644
---- a/fs/ntfs/file.c
-+++ b/fs/ntfs/file.c
-@@ -1281,7 +1281,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
- char *addr;
- size_t total = 0;
- unsigned len;
-- int left;
-+ unsigned left;
-
- do {
- len = PAGE_CACHE_SIZE - ofs;
-diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
-index b52706d..b9a9f9d 100644
---- a/fs/ntfs/super.c
-+++ b/fs/ntfs/super.c
-@@ -661,7 +661,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
- if (!silent)
- ntfs_error(sb, "Primary boot sector is invalid.");
- } else if (!silent)
-- ntfs_error(sb, read_err_str, "primary");
-+ ntfs_error(sb, read_err_str, "%s", "primary");
- if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
- if (bh_primary)
- brelse(bh_primary);
-@@ -677,7 +677,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
- goto hotfix_primary_boot_sector;
- brelse(bh_backup);
- } else if (!silent)
-- ntfs_error(sb, read_err_str, "backup");
-+ ntfs_error(sb, read_err_str, "%s", "backup");
- /* Try to read NT3.51- backup boot sector. */
- if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
- if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
-@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
- "sector.");
- brelse(bh_backup);
- } else if (!silent)
-- ntfs_error(sb, read_err_str, "backup");
-+ ntfs_error(sb, read_err_str, "%s", "backup");
- /* We failed. Cleanup and return. */
- if (bh_primary)
- brelse(bh_primary);
-@@ -3072,6 +3072,7 @@ static struct file_system_type ntfs_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("ntfs");
-
- /* Stable names for the slab caches. */
- static const char ntfs_index_ctx_cache_name[] = "ntfs_index_ctx_cache";
-diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
-index b420767..bbf1094 100644
---- a/fs/ocfs2/dlmfs/dlmfs.c
-+++ b/fs/ocfs2/dlmfs/dlmfs.c
-@@ -662,6 +662,7 @@ static struct file_system_type dlmfs_fs_type = {
- .mount = dlmfs_mount,
- .kill_sb = kill_litter_super,
- };
-+MODULE_ALIAS_FS("ocfs2_dlmfs");
-
- static int __init init_dlmfs_fs(void)
- {
-diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
-index 210c352..a174f83 100644
---- a/fs/ocfs2/localalloc.c
-+++ b/fs/ocfs2/localalloc.c
-@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
- goto bail;
- }
-
-- atomic_inc(&osb->alloc_stats.moves);
-+ atomic_inc_unchecked(&osb->alloc_stats.moves);
-
- bail:
- if (handle)
-diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
-index d355e6e..578d905 100644
---- a/fs/ocfs2/ocfs2.h
-+++ b/fs/ocfs2/ocfs2.h
-@@ -235,11 +235,11 @@ enum ocfs2_vol_state
-
- struct ocfs2_alloc_stats
- {
-- atomic_t moves;
-- atomic_t local_data;
-- atomic_t bitmap_data;
-- atomic_t bg_allocs;
-- atomic_t bg_extends;
-+ atomic_unchecked_t moves;
-+ atomic_unchecked_t local_data;
-+ atomic_unchecked_t bitmap_data;
-+ atomic_unchecked_t bg_allocs;
-+ atomic_unchecked_t bg_extends;
- };
-
- enum ocfs2_local_alloc_state
-diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
-index b7e74b5..19c6536 100644
---- a/fs/ocfs2/suballoc.c
-+++ b/fs/ocfs2/suballoc.c
-@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
- mlog_errno(status);
- goto bail;
- }
-- atomic_inc(&osb->alloc_stats.bg_extends);
-+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
-
- /* You should never ask for this much metadata */
- BUG_ON(bits_wanted >
-@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
- mlog_errno(status);
- goto bail;
- }
-- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
-+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
-
- *suballoc_loc = res.sr_bg_blkno;
- *suballoc_bit_start = res.sr_bit_offset;
-@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
- trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
- res->sr_bits);
-
-- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
-+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
-
- BUG_ON(res->sr_bits != 1);
-
-@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
- mlog_errno(status);
- goto bail;
- }
-- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
-+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
-
- BUG_ON(res.sr_bits != 1);
-
-@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
- cluster_start,
- num_clusters);
- if (!status)
-- atomic_inc(&osb->alloc_stats.local_data);
-+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
- } else {
- if (min_clusters > (osb->bitmap_cpg - 1)) {
- /* The only paths asking for contiguousness
-@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
- ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
- res.sr_bg_blkno,
- res.sr_bit_offset);
-- atomic_inc(&osb->alloc_stats.bitmap_data);
-+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
- *num_clusters = res.sr_bits;
- }
- }
-diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
-index 4994f8b..04a9180 100644
---- a/fs/ocfs2/super.c
-+++ b/fs/ocfs2/super.c
-@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
- "%10s => GlobalAllocs: %d LocalAllocs: %d "
- "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
- "Stats",
-- atomic_read(&osb->alloc_stats.bitmap_data),
-- atomic_read(&osb->alloc_stats.local_data),
-- atomic_read(&osb->alloc_stats.bg_allocs),
-- atomic_read(&osb->alloc_stats.moves),
-- atomic_read(&osb->alloc_stats.bg_extends));
-+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
-+ atomic_read_unchecked(&osb->alloc_stats.local_data),
-+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
-+ atomic_read_unchecked(&osb->alloc_stats.moves),
-+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
-
- out += snprintf(buf + out, len - out,
- "%10s => State: %u Descriptor: %llu Size: %u bits "
-@@ -1270,6 +1270,7 @@ static struct file_system_type ocfs2_fs_type = {
- .fs_flags = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE,
- .next = NULL
- };
-+MODULE_ALIAS_FS("ocfs2");
-
- static int ocfs2_check_set_options(struct super_block *sb,
- struct mount_options *options)
-@@ -2119,11 +2120,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
- spin_lock_init(&osb->osb_xattr_lock);
- ocfs2_init_steal_slots(osb);
-
-- atomic_set(&osb->alloc_stats.moves, 0);
-- atomic_set(&osb->alloc_stats.local_data, 0);
-- atomic_set(&osb->alloc_stats.bitmap_data, 0);
-- atomic_set(&osb->alloc_stats.bg_allocs, 0);
-- atomic_set(&osb->alloc_stats.bg_extends, 0);
-+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
-+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
-+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
-+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
-+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
-
- /* Copy the blockcheck stats from the superblock probe */
- osb->osb_ecc_stats = *stats;
-diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
-index 5d22872..523db20 100644
---- a/fs/ocfs2/symlink.c
-+++ b/fs/ocfs2/symlink.c
-@@ -142,7 +142,7 @@ bail:
-
- static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
- {
-- char *link = nd_get_link(nd);
-+ const char *link = nd_get_link(nd);
- if (!IS_ERR(link))
- kfree(link);
- }
-diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
-index e043c4c..f99d456 100644
---- a/fs/omfs/inode.c
-+++ b/fs/omfs/inode.c
-@@ -570,6 +570,7 @@ static struct file_system_type omfs_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("omfs");
-
- static int __init init_omfs_fs(void)
- {
-diff --git a/fs/open.c b/fs/open.c
-index b8485d3..e18561a 100644
---- a/fs/open.c
-+++ b/fs/open.c
-@@ -31,6 +31,8 @@
- #include <linux/ima.h>
- #include <linux/dnotify.h>
-
-+#define CREATE_TRACE_POINTS
-+#include <trace/events/fs.h>
- #include "internal.h"
-
- int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
-@@ -112,6 +114,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
- error = locks_verify_truncate(inode, NULL, length);
- if (!error)
- error = security_path_truncate(&path);
-+
-+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
-+ error = -EACCES;
-+
- if (!error)
- error = do_truncate(path.dentry, length, 0, NULL);
-
-@@ -358,6 +364,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
- if (__mnt_is_readonly(path.mnt))
- res = -EROFS;
-
-+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
-+ res = -EACCES;
-+
- out_path_release:
- path_put(&path);
- out:
-@@ -384,6 +393,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
- if (error)
- goto dput_and_out;
-
-+ gr_log_chdir(path.dentry, path.mnt);
-+
- set_fs_pwd(current->fs, &path);
-
- dput_and_out:
-@@ -410,6 +421,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
- goto out_putf;
-
- error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
-+
-+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
-+ error = -EPERM;
-+
-+ if (!error)
-+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
-+
- if (!error)
- set_fs_pwd(current->fs, &file->f_path);
- out_putf:
-@@ -438,7 +456,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
- if (error)
- goto dput_and_out;
-
-+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
-+ goto dput_and_out;
-+
- set_fs_root(current->fs, &path);
-+
-+ gr_handle_chroot_chdir(&path);
-+
- error = 0;
- dput_and_out:
- path_put(&path);
-@@ -456,6 +480,16 @@ static int chmod_common(struct path *path, umode_t mode)
- if (error)
- return error;
- mutex_lock(&inode->i_mutex);
-+
-+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
-+ error = -EACCES;
-+ goto out_unlock;
-+ }
-+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
-+ error = -EACCES;
-+ goto out_unlock;
-+ }
-+
- error = security_path_chmod(path->dentry, path->mnt, mode);
- if (error)
- goto out_unlock;
-@@ -506,6 +540,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
- int error;
- struct iattr newattrs;
-
-+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
-+ return -EACCES;
-+
- newattrs.ia_valid = ATTR_CTIME;
- if (user != (uid_t) -1) {
- newattrs.ia_valid |= ATTR_UID;
-@@ -988,6 +1025,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
- } else {
- fsnotify_open(f);
- fd_install(fd, f);
-+ trace_do_sys_open(tmp, flags, mode);
- }
- }
- putname(tmp);
-diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
-index e4e0ff7..458929b 100644
---- a/fs/openpromfs/inode.c
-+++ b/fs/openpromfs/inode.c
-@@ -434,6 +434,7 @@ static struct file_system_type openprom_fs_type = {
- .mount = openprom_mount,
- .kill_sb = kill_anon_super,
- };
-+MODULE_ALIAS_FS("openpromfs");
-
- static void op_inode_init_once(void *data)
- {
-diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
-index 6296b40..417c00f 100644
---- a/fs/partitions/efi.c
-+++ b/fs/partitions/efi.c
-@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
- if (!gpt)
- return NULL;
-
-+ if (!le32_to_cpu(gpt->num_partition_entries))
-+ return NULL;
-+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
-+ if (!pte)
-+ return NULL;
-+
- count = le32_to_cpu(gpt->num_partition_entries) *
- le32_to_cpu(gpt->sizeof_partition_entry);
-- if (!count)
-- return NULL;
-- pte = kzalloc(count, GFP_KERNEL);
-- if (!pte)
-- return NULL;
--
- if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
- (u8 *) pte,
- count) < count) {
-diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
-index bd8ae78..539d250 100644
---- a/fs/partitions/ldm.c
-+++ b/fs/partitions/ldm.c
-@@ -1324,7 +1324,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
- goto found;
- }
-
-- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
-+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
- if (!f) {
- ldm_crit ("Out of memory.");
- return false;
-diff --git a/fs/pipe.c b/fs/pipe.c
-index 8ca88fc..db6ce82 100644
---- a/fs/pipe.c
-+++ b/fs/pipe.c
-@@ -33,7 +33,7 @@ unsigned int pipe_max_size = 1048576;
- /*
- * Minimum pipe size, as required by POSIX
- */
--unsigned int pipe_min_size = PAGE_SIZE;
-+unsigned int pipe_min_size __read_only = PAGE_SIZE;
-
- /*
- * We use a start+len construction, which provides full use of the
-@@ -103,25 +103,27 @@ void pipe_wait(struct pipe_inode_info *pipe)
- }
-
- static int
--pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
-- int atomic)
-+pipe_iov_copy_from_user(void *addr, int *offset, struct iovec *iov,
-+ size_t *remaining, int atomic)
- {
- unsigned long copy;
-
-- while (len > 0) {
-+ while (*remaining > 0) {
- while (!iov->iov_len)
- iov++;
-- copy = min_t(unsigned long, len, iov->iov_len);
-+ copy = min_t(unsigned long, *remaining, iov->iov_len);
-
- if (atomic) {
-- if (__copy_from_user_inatomic(to, iov->iov_base, copy))
-+ if (__copy_from_user_inatomic(addr + *offset,
-+ iov->iov_base, copy))
- return -EFAULT;
- } else {
-- if (copy_from_user(to, iov->iov_base, copy))
-+ if (copy_from_user(addr + *offset,
-+ iov->iov_base, copy))
- return -EFAULT;
- }
-- to += copy;
-- len -= copy;
-+ *offset += copy;
-+ *remaining -= copy;
- iov->iov_base += copy;
- iov->iov_len -= copy;
- }
-@@ -129,25 +131,27 @@ pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
- }
-
- static int
--pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
-- int atomic)
-+pipe_iov_copy_to_user(struct iovec *iov, void *addr, int *offset,
-+ size_t *remaining, int atomic)
- {
- unsigned long copy;
-
-- while (len > 0) {
-+ while (*remaining > 0) {
- while (!iov->iov_len)
- iov++;
-- copy = min_t(unsigned long, len, iov->iov_len);
-+ copy = min_t(unsigned long, *remaining, iov->iov_len);
-
- if (atomic) {
-- if (__copy_to_user_inatomic(iov->iov_base, from, copy))
-+ if (__copy_to_user_inatomic(iov->iov_base,
-+ addr + *offset, copy))
- return -EFAULT;
- } else {
-- if (copy_to_user(iov->iov_base, from, copy))
-+ if (copy_to_user(iov->iov_base,
-+ addr + *offset, copy))
- return -EFAULT;
- }
-- from += copy;
-- len -= copy;
-+ *offset += copy;
-+ *remaining -= copy;
- iov->iov_base += copy;
- iov->iov_len -= copy;
- }
-@@ -383,7 +387,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
- struct pipe_buffer *buf = pipe->bufs + curbuf;
- const struct pipe_buf_operations *ops = buf->ops;
- void *addr;
-- size_t chars = buf->len;
-+ size_t chars = buf->len, remaining;
- int error, atomic;
-
- if (chars > total_len)
-@@ -397,9 +401,11 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
- }
-
- atomic = !iov_fault_in_pages_write(iov, chars);
-+ remaining = chars;
- redo:
- addr = ops->map(pipe, buf, atomic);
-- error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
-+ error = pipe_iov_copy_to_user(iov, addr, &buf->offset,
-+ &remaining, atomic);
- ops->unmap(pipe, buf, addr);
- if (unlikely(error)) {
- /*
-@@ -414,7 +420,6 @@ redo:
- break;
- }
- ret += chars;
-- buf->offset += chars;
- buf->len -= chars;
-
- /* Was it a packet buffer? Clean up and exit */
-@@ -437,9 +442,9 @@ redo:
- }
- if (bufs) /* More to do? */
- continue;
-- if (!pipe->writers)
-+ if (!atomic_read(&pipe->writers))
- break;
-- if (!pipe->waiting_writers) {
-+ if (!atomic_read(&pipe->waiting_writers)) {
- /* syscall merging: Usually we must not sleep
- * if O_NONBLOCK is set, or if we got some data.
- * But if a writer sleeps in kernel space, then
-@@ -503,7 +508,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
- mutex_lock(&inode->i_mutex);
- pipe = inode->i_pipe;
-
-- if (!pipe->readers) {
-+ if (!atomic_read(&pipe->readers)) {
- send_sig(SIGPIPE, current, 0);
- ret = -EPIPE;
- goto out;
-@@ -521,6 +526,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
- if (ops->can_merge && offset + chars <= PAGE_SIZE) {
- int error, atomic = 1;
- void *addr;
-+ size_t remaining = chars;
-
- error = ops->confirm(pipe, buf);
- if (error)
-@@ -529,8 +535,8 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
- iov_fault_in_pages_read(iov, chars);
- redo1:
- addr = ops->map(pipe, buf, atomic);
-- error = pipe_iov_copy_from_user(offset + addr, iov,
-- chars, atomic);
-+ error = pipe_iov_copy_from_user(addr, &offset, iov,
-+ &remaining, atomic);
- ops->unmap(pipe, buf, addr);
- ret = error;
- do_wakeup = 1;
-@@ -552,7 +558,7 @@ redo1:
- for (;;) {
- int bufs;
-
-- if (!pipe->readers) {
-+ if (!atomic_read(&pipe->readers)) {
- send_sig(SIGPIPE, current, 0);
- if (!ret)
- ret = -EPIPE;
-@@ -565,6 +571,8 @@ redo1:
- struct page *page = pipe->tmp_page;
- char *src;
- int error, atomic = 1;
-+ int offset = 0;
-+ size_t remaining;
-
- if (!page) {
- page = alloc_page(GFP_HIGHUSER);
-@@ -585,14 +593,15 @@ redo1:
- chars = total_len;
-
- iov_fault_in_pages_read(iov, chars);
-+ remaining = chars;
- redo2:
- if (atomic)
- src = kmap_atomic(page, KM_USER0);
- else
- src = kmap(page);
-
-- error = pipe_iov_copy_from_user(src, iov, chars,
-- atomic);
-+ error = pipe_iov_copy_from_user(src, &offset, iov,
-+ &remaining, atomic);
- if (atomic)
- kunmap_atomic(src, KM_USER0);
- else
-@@ -643,9 +652,9 @@ redo2:
- kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
- do_wakeup = 0;
- }
-- pipe->waiting_writers++;
-+ atomic_inc(&pipe->waiting_writers);
- pipe_wait(pipe);
-- pipe->waiting_writers--;
-+ atomic_dec(&pipe->waiting_writers);
- }
- out:
- mutex_unlock(&inode->i_mutex);
-@@ -712,7 +721,7 @@ pipe_poll(struct file *filp, poll_table *wait)
- mask = 0;
- if (filp->f_mode & FMODE_READ) {
- mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
-- if (!pipe->writers && filp->f_version != pipe->w_counter)
-+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
- mask |= POLLHUP;
- }
-
-@@ -722,7 +731,7 @@ pipe_poll(struct file *filp, poll_table *wait)
- * Most Unices do not set POLLERR for FIFOs but on Linux they
- * behave exactly like pipes for poll().
- */
-- if (!pipe->readers)
-+ if (!atomic_read(&pipe->readers))
- mask |= POLLERR;
- }
-
-@@ -736,10 +745,10 @@ pipe_release(struct inode *inode, int decr, int decw)
-
- mutex_lock(&inode->i_mutex);
- pipe = inode->i_pipe;
-- pipe->readers -= decr;
-- pipe->writers -= decw;
-+ atomic_sub(decr, &pipe->readers);
-+ atomic_sub(decw, &pipe->writers);
-
-- if (!pipe->readers && !pipe->writers) {
-+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
- free_pipe_info(inode);
- } else {
- wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
-@@ -829,7 +838,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
-
- if (inode->i_pipe) {
- ret = 0;
-- inode->i_pipe->readers++;
-+ atomic_inc(&inode->i_pipe->readers);
- }
-
- mutex_unlock(&inode->i_mutex);
-@@ -846,7 +855,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
-
- if (inode->i_pipe) {
- ret = 0;
-- inode->i_pipe->writers++;
-+ atomic_inc(&inode->i_pipe->writers);
- }
-
- mutex_unlock(&inode->i_mutex);
-@@ -867,9 +876,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
- if (inode->i_pipe) {
- ret = 0;
- if (filp->f_mode & FMODE_READ)
-- inode->i_pipe->readers++;
-+ atomic_inc(&inode->i_pipe->readers);
- if (filp->f_mode & FMODE_WRITE)
-- inode->i_pipe->writers++;
-+ atomic_inc(&inode->i_pipe->writers);
- }
-
- mutex_unlock(&inode->i_mutex);
-@@ -961,7 +970,7 @@ void free_pipe_info(struct inode *inode)
- inode->i_pipe = NULL;
- }
-
--static struct vfsmount *pipe_mnt __read_mostly;
-+struct vfsmount *pipe_mnt __read_mostly;
-
- /*
- * pipefs_dname() is called from d_path().
-@@ -991,7 +1000,8 @@ static struct inode * get_pipe_inode(void)
- goto fail_iput;
- inode->i_pipe = pipe;
-
-- pipe->readers = pipe->writers = 1;
-+ atomic_set(&pipe->readers, 1);
-+ atomic_set(&pipe->writers, 1);
- inode->i_fop = &rdwr_pipefifo_fops;
-
- /*
-@@ -1203,7 +1213,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
- * Currently we rely on the pipe array holding a power-of-2 number
- * of pages.
- */
--static inline unsigned int round_pipe_size(unsigned int size)
-+static inline unsigned long round_pipe_size(unsigned long size)
- {
- unsigned long nr_pages;
-
-@@ -1253,13 +1263,16 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
-
- switch (cmd) {
- case F_SETPIPE_SZ: {
-- unsigned int size, nr_pages;
-+ unsigned long size, nr_pages;
-+
-+ ret = -EINVAL;
-+ if (arg < pipe_min_size)
-+ goto out;
-
- size = round_pipe_size(arg);
- nr_pages = size >> PAGE_SHIFT;
-
-- ret = -EINVAL;
-- if (!nr_pages)
-+ if (size < pipe_min_size)
- goto out;
-
- if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
-diff --git a/fs/posix_acl.c b/fs/posix_acl.c
-index 6c70ab2..54c5656 100644
---- a/fs/posix_acl.c
-+++ b/fs/posix_acl.c
-@@ -19,6 +19,7 @@
- #include <linux/sched.h>
- #include <linux/posix_acl.h>
- #include <linux/module.h>
-+#include <linux/grsecurity.h>
-
- #include <linux/errno.h>
-
-@@ -186,7 +187,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
- }
- }
- if (mode_p)
-- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
-+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
- return not_equiv;
- }
-
-@@ -337,7 +338,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
- mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
- }
-
-- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
-+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
- return not_equiv;
- }
-
-@@ -395,6 +396,8 @@ posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
- struct posix_acl *clone = posix_acl_clone(*acl, gfp);
- int err = -ENOMEM;
- if (clone) {
-+ *mode_p &= ~gr_acl_umask();
-+
- err = posix_acl_create_masq(clone, mode_p);
- if (err < 0) {
- posix_acl_release(clone);
-diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
-index 15af622..0e9f4467 100644
---- a/fs/proc/Kconfig
-+++ b/fs/proc/Kconfig
-@@ -30,12 +30,12 @@ config PROC_FS
-
- config PROC_KCORE
- bool "/proc/kcore support" if !ARM
-- depends on PROC_FS && MMU
-+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
-
- config PROC_VMCORE
- bool "/proc/vmcore support"
-- depends on PROC_FS && CRASH_DUMP
-- default y
-+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
-+ default n
- help
- Exports the dump image of crashed kernel in ELF format.
-
-@@ -59,8 +59,8 @@ config PROC_SYSCTL
- limited in memory.
-
- config PROC_PAGE_MONITOR
-- default y
-- depends on PROC_FS && MMU
-+ default n
-+ depends on PROC_FS && MMU && !GRKERNSEC
- bool "Enable /proc page monitoring" if EXPERT
- help
- Various /proc files exist to monitor process memory utilization:
-diff --git a/fs/proc/array.c b/fs/proc/array.c
-index 439b5a1..5dec96d 100644
---- a/fs/proc/array.c
-+++ b/fs/proc/array.c
-@@ -60,6 +60,7 @@
- #include <linux/tty.h>
- #include <linux/string.h>
- #include <linux/mman.h>
-+#include <linux/grsecurity.h>
- #include <linux/proc_fs.h>
- #include <linux/ioport.h>
- #include <linux/uaccess.h>
-@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
- seq_putc(m, '\n');
- }
-
-+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
-+static inline void task_pax(struct seq_file *m, struct task_struct *p)
-+{
-+ if (p->mm)
-+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
-+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
-+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
-+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
-+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
-+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
-+ else
-+ seq_printf(m, "PaX:\t-----\n");
-+}
-+#endif
-+
- int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
- struct pid *pid, struct task_struct *task)
- {
-@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
- task_cpus_allowed(m, task);
- cpuset_task_status_allowed(m, task);
- task_context_switch_counts(m, task);
-+
-+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
-+ task_pax(m, task);
-+#endif
-+
-+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
-+ task_grsec_rbac(m, task);
-+#endif
-+
- return 0;
- }
-
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
-+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
-+ _mm->pax_flags & MF_PAX_SEGMEXEC))
-+#endif
-+
- static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
- struct pid *pid, struct task_struct *task, int whole)
- {
-@@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
- char tcomm[sizeof(task->comm)];
- unsigned long flags;
-
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ if (current->exec_id != m->exec_id) {
-+ gr_log_badprocpid("stat");
-+ return 0;
-+ }
-+#endif
-+
- state = *get_task_state(task);
- vsize = eip = esp = 0;
- permitted = ptrace_may_access(task, PTRACE_MODE_READ);
-@@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
- gtime = task->gtime;
- }
-
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ if (PAX_RAND_FLAGS(mm)) {
-+ eip = 0;
-+ esp = 0;
-+ wchan = 0;
-+ }
-+#endif
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ wchan = 0;
-+ eip =0;
-+ esp =0;
-+#endif
-+
- /* scale priority and nice values from timeslices to -20..20 */
- /* to make it look like a "normal" Unix priority/nice value */
- priority = task_prio(task);
-@@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
- vsize,
- mm ? get_mm_rss(mm) : 0,
- rsslim,
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
-+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
-+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
-+#else
- mm ? (permitted ? mm->start_code : 1) : 0,
- mm ? (permitted ? mm->end_code : 1) : 0,
- (permitted && mm) ? mm->start_stack : 0,
-+#endif
- esp,
- eip,
- /* The signal information here is obsolete.
-@@ -533,8 +590,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
- struct pid *pid, struct task_struct *task)
- {
- unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
-- struct mm_struct *mm = get_task_mm(task);
-+ struct mm_struct *mm;
-
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ if (current->exec_id != m->exec_id) {
-+ gr_log_badprocpid("statm");
-+ return 0;
-+ }
-+#endif
-+ mm = get_task_mm(task);
- if (mm) {
- size = task_statm(mm, &shared, &text, &data, &resident);
- mmput(mm);
-@@ -544,3 +608,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
-
- return 0;
- }
-+
-+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
-+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
-+{
-+ unsigned long flags;
-+ u32 curr_ip = 0;
-+
-+ if (lock_task_sighand(task, &flags)) {
-+ curr_ip = task->signal->curr_ip;
-+ unlock_task_sighand(task, &flags);
-+ }
-+
-+ return sprintf(buffer, "%pI4\n", &curr_ip);
-+}
-+#endif
-diff --git a/fs/proc/base.c b/fs/proc/base.c
-index 1ace83d..83b9247 100644
---- a/fs/proc/base.c
-+++ b/fs/proc/base.c
-@@ -107,6 +107,22 @@ struct pid_entry {
- union proc_op op;
- };
-
-+struct getdents_callback {
-+ struct linux_dirent __user * current_dir;
-+ struct linux_dirent __user * previous;
-+ struct file * file;
-+ int count;
-+ int error;
-+};
-+
-+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
-+ loff_t offset, u64 ino, unsigned int d_type)
-+{
-+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
-+ buf->error = -EINVAL;
-+ return 0;
-+}
-+
- #define NOD(NAME, MODE, IOP, FOP, OP) { \
- .name = (NAME), \
- .len = sizeof(NAME) - 1, \
-@@ -194,31 +210,6 @@ static int proc_root_link(struct inode *inode, struct path *path)
- return result;
- }
-
--static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
--{
-- struct mm_struct *mm;
-- int err;
--
-- err = mutex_lock_killable(&task->signal->cred_guard_mutex);
-- if (err)
-- return ERR_PTR(err);
--
-- mm = get_task_mm(task);
-- if (mm && mm != current->mm &&
-- !ptrace_may_access(task, mode)) {
-- mmput(mm);
-- mm = ERR_PTR(-EACCES);
-- }
-- mutex_unlock(&task->signal->cred_guard_mutex);
--
-- return mm;
--}
--
--struct mm_struct *mm_for_maps(struct task_struct *task)
--{
-- return mm_access(task, PTRACE_MODE_READ);
--}
--
- static int proc_pid_cmdline(struct task_struct *task, char * buffer)
- {
- int res = 0;
-@@ -229,6 +220,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
- if (!mm->arg_end)
- goto out_mm; /* Shh! No looking before we're done */
-
-+ if (gr_acl_handle_procpidmem(task))
-+ goto out_mm;
-+
- len = mm->arg_end - mm->arg_start;
-
- if (len > PAGE_SIZE)
-@@ -256,12 +250,28 @@ out:
- return res;
- }
-
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
-+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
-+ _mm->pax_flags & MF_PAX_SEGMEXEC))
-+#endif
-+
- static int proc_pid_auxv(struct task_struct *task, char *buffer)
- {
-- struct mm_struct *mm = mm_for_maps(task);
-+ struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
- int res = PTR_ERR(mm);
- if (mm && !IS_ERR(mm)) {
- unsigned int nwords = 0;
-+
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ /* allow if we're currently ptracing this task */
-+ if (PAX_RAND_FLAGS(mm) &&
-+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
-+ mmput(mm);
-+ return 0;
-+ }
-+#endif
-+
- do {
- nwords += 2;
- } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
-@@ -275,7 +285,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
- }
-
-
--#ifdef CONFIG_KALLSYMS
-+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
- /*
- * Provides a wchan file via kallsyms in a proper one-value-per-file format.
- * Returns the resolved symbol. If that fails, simply return the address.
-@@ -314,7 +324,7 @@ static void unlock_trace(struct task_struct *task)
- mutex_unlock(&task->signal->cred_guard_mutex);
- }
-
--#ifdef CONFIG_STACKTRACE
-+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
-
- #define MAX_STACK_TRACE_DEPTH 64
-
-@@ -505,7 +515,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
- return count;
- }
-
--#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
-+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
- static int proc_pid_syscall(struct task_struct *task, char *buffer)
- {
- long nr;
-@@ -534,7 +544,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
- /************************************************************************/
-
- /* permission checks */
--static int proc_fd_access_allowed(struct inode *inode)
-+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
- {
- struct task_struct *task;
- int allowed = 0;
-@@ -544,7 +554,10 @@ static int proc_fd_access_allowed(struct inode *inode)
- */
- task = get_proc_task(inode);
- if (task) {
-- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
-+ if (log)
-+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
-+ else
-+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
- put_task_struct(task);
- }
- return allowed;
-@@ -761,7 +774,7 @@ static const struct file_operations proc_single_file_operations = {
- .release = single_release,
- };
-
--static int mem_open(struct inode* inode, struct file* file)
-+static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
- {
- struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
- struct mm_struct *mm;
-@@ -769,7 +782,12 @@ static int mem_open(struct inode* inode, struct file* file)
- if (!task)
- return -ESRCH;
-
-- mm = mm_access(task, PTRACE_MODE_ATTACH);
-+ if (gr_acl_handle_procpidmem(task)) {
-+ put_task_struct(task);
-+ return -EPERM;
-+ }
-+
-+ mm = mm_access(task, mode);
- put_task_struct(task);
-
- if (IS_ERR(mm))
-@@ -782,11 +800,24 @@ static int mem_open(struct inode* inode, struct file* file)
- mmput(mm);
- }
-
-+ file->private_data = mm;
-+
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ file->f_version = current->exec_id;
-+#endif
-+
-+ return 0;
-+}
-+
-+static int mem_open(struct inode *inode, struct file *file)
-+{
-+ int ret;
-+ ret = __mem_open(inode, file, PTRACE_MODE_ATTACH);
-+
- /* OK to pass negative loff_t, we can catch out-of-range */
- file->f_mode |= FMODE_UNSIGNED_OFFSET;
-- file->private_data = mm;
-
-- return 0;
-+ return ret;
- }
-
- static ssize_t mem_rw(struct file *file, char __user *buf,
-@@ -797,6 +828,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
- ssize_t copied;
- char *page;
-
-+#ifdef CONFIG_GRKERNSEC
-+ if (write)
-+ return -EPERM;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ if (file->f_version != current->exec_id) {
-+ gr_log_badprocpid("mem");
-+ return 0;
-+ }
-+#endif
-+
- if (!mm)
- return 0;
-
-@@ -809,7 +851,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
- goto free;
-
- while (count > 0) {
-- int this_len = min_t(int, count, PAGE_SIZE);
-+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
-
- if (write && copy_from_user(page, buf, this_len)) {
- copied = -EFAULT;
-@@ -885,42 +927,49 @@ static const struct file_operations proc_mem_operations = {
- .release = mem_release,
- };
-
-+static int environ_open(struct inode *inode, struct file *file)
-+{
-+ return __mem_open(inode, file, PTRACE_MODE_READ);
-+}
-+
- static ssize_t environ_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
- {
-- struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
- char *page;
- unsigned long src = *ppos;
-- int ret = -ESRCH;
-- struct mm_struct *mm;
-+ ssize_t ret = -ESRCH;
-+ struct mm_struct *mm = file->private_data;
-
-- if (!task)
-- goto out_no_task;
-+ if (!mm)
-+ return 0;
-+
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ if (file->f_version != current->exec_id) {
-+ gr_log_badprocpid("environ");
-+ return 0;
-+ }
-+#endif
-
-- ret = -ENOMEM;
- page = (char *)__get_free_page(GFP_TEMPORARY);
- if (!page)
-- goto out;
--
--
-- mm = mm_for_maps(task);
-- ret = PTR_ERR(mm);
-- if (!mm || IS_ERR(mm))
-- goto out_free;
-+ return -ENOMEM;
-
- ret = 0;
-+ if (!atomic_inc_not_zero(&mm->mm_users))
-+ goto free;
- while (count > 0) {
-- int this_len, retval, max_len;
-+ size_t this_len, max_len;
-+ int retval;
-+
-+ if (src >= (mm->env_end - mm->env_start))
-+ break;
-
- this_len = mm->env_end - (mm->env_start + src);
-
-- if (this_len <= 0)
-- break;
-+ max_len = min_t(size_t, PAGE_SIZE, count);
-+ this_len = min(max_len, this_len);
-
-- max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
-- this_len = (this_len > max_len) ? max_len : this_len;
--
-- retval = access_process_vm(task, (mm->env_start + src),
-+ retval = access_remote_vm(mm, (mm->env_start + src),
- page, this_len, 0);
-
- if (retval <= 0) {
-@@ -939,19 +988,18 @@ static ssize_t environ_read(struct file *file, char __user *buf,
- count -= retval;
- }
- *ppos = src;
--
- mmput(mm);
--out_free:
-+
-+free:
- free_page((unsigned long) page);
--out:
-- put_task_struct(task);
--out_no_task:
- return ret;
- }
-
- static const struct file_operations proc_environ_operations = {
-+ .open = environ_open,
- .read = environ_read,
- .llseek = generic_file_llseek,
-+ .release = mem_release,
- };
-
- static ssize_t oom_adjust_read(struct file *file, char __user *buf,
-@@ -1519,7 +1567,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
- path_put(&nd->path);
-
- /* Are we allowed to snoop on the tasks file descriptors? */
-- if (!proc_fd_access_allowed(inode))
-+ if (!proc_fd_access_allowed(inode,0))
- goto out;
-
- error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
-@@ -1558,8 +1606,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
- struct path path;
-
- /* Are we allowed to snoop on the tasks file descriptors? */
-- if (!proc_fd_access_allowed(inode))
-- goto out;
-+ /* logging this is needed for learning on chromium to work properly,
-+ but we don't want to flood the logs from 'ps' which does a readlink
-+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
-+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
-+ */
-+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
-+ if (!proc_fd_access_allowed(inode,0))
-+ goto out;
-+ } else {
-+ if (!proc_fd_access_allowed(inode,1))
-+ goto out;
-+ }
-
- error = PROC_I(inode)->op.proc_get_link(inode, &path);
- if (error)
-@@ -1624,7 +1682,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
- rcu_read_lock();
- cred = __task_cred(task);
- inode->i_uid = cred->euid;
-+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
-+ inode->i_gid = grsec_proc_gid;
-+#else
- inode->i_gid = cred->egid;
-+#endif
- rcu_read_unlock();
- }
- security_task_to_inode(task, inode);
-@@ -1642,6 +1704,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
- struct inode *inode = dentry->d_inode;
- struct task_struct *task;
- const struct cred *cred;
-+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ const struct cred *tmpcred = current_cred();
-+#endif
-
- generic_fillattr(inode, stat);
-
-@@ -1649,13 +1714,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
- stat->uid = 0;
- stat->gid = 0;
- task = pid_task(proc_pid(inode), PIDTYPE_PID);
-+
-+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
-+ rcu_read_unlock();
-+ return -ENOENT;
-+ }
-+
- if (task) {
-+ cred = __task_cred(task);
-+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
-+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
-+ || in_group_p(grsec_proc_gid)
-+#endif
-+ ) {
-+#endif
- if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
-+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
-+#endif
- task_dumpable(task)) {
-- cred = __task_cred(task);
- stat->uid = cred->euid;
-+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
-+ stat->gid = grsec_proc_gid;
-+#else
- stat->gid = cred->egid;
-+#endif
- }
-+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ } else {
-+ rcu_read_unlock();
-+ return -ENOENT;
-+ }
-+#endif
- }
- rcu_read_unlock();
- return 0;
-@@ -1692,11 +1785,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
-
- if (task) {
- if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
-+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
-+#endif
- task_dumpable(task)) {
- rcu_read_lock();
- cred = __task_cred(task);
- inode->i_uid = cred->euid;
-+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
-+ inode->i_gid = grsec_proc_gid;
-+#else
- inode->i_gid = cred->egid;
-+#endif
- rcu_read_unlock();
- } else {
- inode->i_uid = 0;
-@@ -1814,7 +1916,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
- int fd = proc_fd(inode);
-
- if (task) {
-- files = get_files_struct(task);
-+ if (!gr_acl_handle_procpidmem(task))
-+ files = get_files_struct(task);
- put_task_struct(task);
- }
- if (files) {
-@@ -2082,11 +2185,21 @@ static const struct file_operations proc_fd_operations = {
- */
- static int proc_fd_permission(struct inode *inode, int mask)
- {
-+ struct task_struct *task;
- int rv = generic_permission(inode, mask);
-- if (rv == 0)
-- return 0;
-+
- if (task_pid(current) == proc_pid(inode))
- rv = 0;
-+
-+ task = get_proc_task(inode);
-+ if (task == NULL)
-+ return rv;
-+
-+ if (gr_acl_handle_procpidmem(task))
-+ rv = -EACCES;
-+
-+ put_task_struct(task);
-+
- return rv;
- }
-
-@@ -2196,6 +2309,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
- if (!task)
- goto out_no_task;
-
-+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
-+ goto out;
-+
- /*
- * Yes, it does not scale. And it should not. Don't add
- * new entries into /proc/<tgid>/ without very good reasons.
-@@ -2240,6 +2356,9 @@ static int proc_pident_readdir(struct file *filp,
- if (!task)
- goto out_no_task;
-
-+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
-+ goto out;
-+
- ret = 0;
- i = filp->f_pos;
- switch (i) {
-@@ -2510,7 +2629,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
- static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
- void *cookie)
- {
-- char *s = nd_get_link(nd);
-+ const char *s = nd_get_link(nd);
- if (!IS_ERR(s))
- __putname(s);
- }
-@@ -2708,7 +2827,7 @@ static const struct pid_entry tgid_base_stuff[] = {
- REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
- #endif
- REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
--#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
-+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
- INF("syscall", S_IRUGO, proc_pid_syscall),
- #endif
- INF("cmdline", S_IRUGO, proc_pid_cmdline),
-@@ -2733,10 +2852,10 @@ static const struct pid_entry tgid_base_stuff[] = {
- #ifdef CONFIG_SECURITY
- DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
- #endif
--#ifdef CONFIG_KALLSYMS
-+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
- INF("wchan", S_IRUGO, proc_pid_wchan),
- #endif
--#ifdef CONFIG_STACKTRACE
-+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
- ONE("stack", S_IRUGO, proc_pid_stack),
- #endif
- #ifdef CONFIG_SCHEDSTATS
-@@ -2770,6 +2889,9 @@ static const struct pid_entry tgid_base_stuff[] = {
- #ifdef CONFIG_HARDWALL
- INF("hardwall", S_IRUGO, proc_pid_hardwall),
- #endif
-+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
-+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
-+#endif
- };
-
- static int proc_tgid_base_readdir(struct file * filp,
-@@ -2895,7 +3017,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
- if (!inode)
- goto out;
-
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
-+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ inode->i_gid = grsec_proc_gid;
-+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
-+#else
- inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
-+#endif
- inode->i_op = &proc_tgid_base_inode_operations;
- inode->i_fop = &proc_tgid_base_operations;
- inode->i_flags|=S_IMMUTABLE;
-@@ -2937,7 +3066,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
- if (!task)
- goto out;
-
-+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
-+ goto out_put_task;
-+
- result = proc_pid_instantiate(dir, dentry, task, NULL);
-+out_put_task:
- put_task_struct(task);
- out:
- return result;
-@@ -3002,6 +3135,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
- {
- unsigned int nr;
- struct task_struct *reaper;
-+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ const struct cred *tmpcred = current_cred();
-+ const struct cred *itercred;
-+#endif
-+ filldir_t __filldir = filldir;
- struct tgid_iter iter;
- struct pid_namespace *ns;
-
-@@ -3025,8 +3163,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
- for (iter = next_tgid(ns, iter);
- iter.task;
- iter.tgid += 1, iter = next_tgid(ns, iter)) {
-+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ rcu_read_lock();
-+ itercred = __task_cred(iter.task);
-+#endif
-+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
-+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
-+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
-+ && !in_group_p(grsec_proc_gid)
-+#endif
-+ )
-+#endif
-+ )
-+ __filldir = &gr_fake_filldir;
-+ else
-+ __filldir = filldir;
-+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ rcu_read_unlock();
-+#endif
- filp->f_pos = iter.tgid + TGID_OFFSET;
-- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
-+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
- put_task_struct(iter.task);
- goto out;
- }
-@@ -3054,7 +3211,7 @@ static const struct pid_entry tid_base_stuff[] = {
- REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
- #endif
- REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
--#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
-+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
- INF("syscall", S_IRUGO, proc_pid_syscall),
- #endif
- INF("cmdline", S_IRUGO, proc_pid_cmdline),
-@@ -3078,10 +3235,10 @@ static const struct pid_entry tid_base_stuff[] = {
- #ifdef CONFIG_SECURITY
- DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
- #endif
--#ifdef CONFIG_KALLSYMS
-+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
- INF("wchan", S_IRUGO, proc_pid_wchan),
- #endif
--#ifdef CONFIG_STACKTRACE
-+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
- ONE("stack", S_IRUGO, proc_pid_stack),
- #endif
- #ifdef CONFIG_SCHEDSTATS
-diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
-index 82676e3..5f8518a 100644
---- a/fs/proc/cmdline.c
-+++ b/fs/proc/cmdline.c
-@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
-
- static int __init proc_cmdline_init(void)
- {
-+#ifdef CONFIG_GRKERNSEC_PROC_ADD
-+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
-+#else
- proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
-+#endif
- return 0;
- }
- module_init(proc_cmdline_init);
-diff --git a/fs/proc/devices.c b/fs/proc/devices.c
-index b143471..bb105e5 100644
---- a/fs/proc/devices.c
-+++ b/fs/proc/devices.c
-@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
-
- static int __init proc_devices_init(void)
- {
-+#ifdef CONFIG_GRKERNSEC_PROC_ADD
-+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
-+#else
- proc_create("devices", 0, NULL, &proc_devinfo_operations);
-+#endif
- return 0;
- }
- module_init(proc_devices_init);
-diff --git a/fs/proc/generic.c b/fs/proc/generic.c
-index 10090d9..91dc403 100644
---- a/fs/proc/generic.c
-+++ b/fs/proc/generic.c
-@@ -22,6 +22,7 @@
- #include <linux/bitops.h>
- #include <linux/spinlock.h>
- #include <linux/completion.h>
-+#include <linux/grsecurity.h>
- #include <asm/uaccess.h>
-
- #include "internal.h"
-@@ -451,6 +452,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
- return proc_lookup_de(PDE(dir), dir, dentry);
- }
-
-+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
-+ struct nameidata *nd)
-+{
-+ if (gr_proc_is_restricted())
-+ return ERR_PTR(-EACCES);
-+
-+ return proc_lookup_de(PDE(dir), dir, dentry);
-+}
-+
- /*
- * This returns non-zero if at EOF, so that the /proc
- * root directory can use this and check if it should
-@@ -532,6 +542,16 @@ int proc_readdir(struct file *filp, void *dirent, filldir_t filldir)
- return proc_readdir_de(PDE(inode), filp, dirent, filldir);
- }
-
-+int proc_readdir_restrict(struct file *filp, void *dirent, filldir_t filldir)
-+{
-+ struct inode *inode = filp->f_path.dentry->d_inode;
-+
-+ if (gr_proc_is_restricted())
-+ return -EACCES;
-+
-+ return proc_readdir_de(PDE(inode), filp, dirent, filldir);
-+}
-+
- /*
- * These are the generic /proc directory operations. They
- * use the in-memory "struct proc_dir_entry" tree to parse
-@@ -543,6 +563,12 @@ static const struct file_operations proc_dir_operations = {
- .readdir = proc_readdir,
- };
-
-+static const struct file_operations proc_dir_restricted_operations = {
-+ .llseek = generic_file_llseek,
-+ .read = generic_read_dir,
-+ .readdir = proc_readdir_restrict,
-+};
-+
- /*
- * proc directories can do almost nothing..
- */
-@@ -552,6 +578,12 @@ static const struct inode_operations proc_dir_inode_operations = {
- .setattr = proc_notify_change,
- };
-
-+static const struct inode_operations proc_dir_restricted_inode_operations = {
-+ .lookup = proc_lookup_restrict,
-+ .getattr = proc_getattr,
-+ .setattr = proc_notify_change,
-+};
-+
- static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
- {
- unsigned int i;
-@@ -564,8 +596,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
-
- if (S_ISDIR(dp->mode)) {
- if (dp->proc_iops == NULL) {
-- dp->proc_fops = &proc_dir_operations;
-- dp->proc_iops = &proc_dir_inode_operations;
-+ if (dp->restricted) {
-+ dp->proc_fops = &proc_dir_restricted_operations;
-+ dp->proc_iops = &proc_dir_restricted_inode_operations;
-+ } else {
-+ dp->proc_fops = &proc_dir_operations;
-+ dp->proc_iops = &proc_dir_inode_operations;
-+ }
- }
- dir->nlink++;
- } else if (S_ISLNK(dp->mode)) {
-@@ -675,6 +712,23 @@ struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
- }
- EXPORT_SYMBOL(proc_mkdir_mode);
-
-+struct proc_dir_entry *proc_mkdir_mode_restrict(const char *name, mode_t mode,
-+ struct proc_dir_entry *parent)
-+{
-+ struct proc_dir_entry *ent;
-+
-+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
-+ if (ent) {
-+ ent->restricted = 1;
-+ if (proc_register(parent, ent) < 0) {
-+ kfree(ent);
-+ ent = NULL;
-+ }
-+ }
-+ return ent;
-+}
-+EXPORT_SYMBOL(proc_mkdir_mode_restrict);
-+
- struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
- struct proc_dir_entry *parent)
- {
-@@ -683,6 +737,7 @@ struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
- ent = __proc_create(&parent, name, S_IFDIR | S_IRUGO | S_IXUGO, 2);
- if (ent) {
- ent->data = net;
-+ ent->restricted = 1;
- if (proc_register(parent, ent) < 0) {
- kfree(ent);
- ent = NULL;
-@@ -699,6 +754,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
- }
- EXPORT_SYMBOL(proc_mkdir);
-
-+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
-+ struct proc_dir_entry *parent)
-+{
-+ return proc_mkdir_mode_restrict(name, S_IRUGO | S_IXUGO, parent);
-+}
-+EXPORT_SYMBOL(proc_mkdir_restrict);
-+
- struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
- struct proc_dir_entry *parent)
- {
-diff --git a/fs/proc/inode.c b/fs/proc/inode.c
-index 00f08b3..2f14f30 100644
---- a/fs/proc/inode.c
-+++ b/fs/proc/inode.c
-@@ -18,12 +18,18 @@
- #include <linux/module.h>
- #include <linux/sysctl.h>
- #include <linux/slab.h>
-+#include <linux/grsecurity.h>
-
- #include <asm/system.h>
- #include <asm/uaccess.h>
-
- #include "internal.h"
-
-+#ifdef CONFIG_PROC_SYSCTL
-+extern const struct inode_operations proc_sys_inode_operations;
-+extern const struct inode_operations proc_sys_dir_operations;
-+#endif
-+
- static void proc_evict_inode(struct inode *inode)
- {
- struct proc_dir_entry *de;
-@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
- ns_ops = PROC_I(inode)->ns_ops;
- if (ns_ops && ns_ops->put)
- ns_ops->put(PROC_I(inode)->ns);
-+
-+#ifdef CONFIG_PROC_SYSCTL
-+ if (inode->i_op == &proc_sys_inode_operations ||
-+ inode->i_op == &proc_sys_dir_operations)
-+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
-+#endif
-+
- }
-
- static struct kmem_cache * proc_inode_cachep;
-@@ -438,7 +451,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
- if (de->mode) {
- inode->i_mode = de->mode;
- inode->i_uid = de->uid;
-+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
-+ inode->i_gid = grsec_proc_gid;
-+#else
- inode->i_gid = de->gid;
-+#endif
- }
- if (de->size)
- inode->i_size = de->size;
-diff --git a/fs/proc/internal.h b/fs/proc/internal.h
-index 7838e5c..9efa574 100644
---- a/fs/proc/internal.h
-+++ b/fs/proc/internal.h
-@@ -28,8 +28,6 @@ struct vmalloc_info {
- unsigned long largest_chunk;
- };
-
--extern struct mm_struct *mm_for_maps(struct task_struct *);
--
- #ifdef CONFIG_MMU
- #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
- extern void get_vmalloc_info(struct vmalloc_info *vmi);
-@@ -51,6 +49,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
- struct pid *pid, struct task_struct *task);
- extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
- struct pid *pid, struct task_struct *task);
-+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
-+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
-+#endif
- extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
-
- extern const struct file_operations proc_maps_operations;
-@@ -126,7 +127,9 @@ struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
- * of the /proc/<pid> subdirectories.
- */
- int proc_readdir(struct file *, void *, filldir_t);
-+int proc_readdir_restrict(struct file *, void *, filldir_t);
- struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *);
-+struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, struct nameidata *);
-
-
-
-diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
-index d245cb2..bf46f45 100644
---- a/fs/proc/kcore.c
-+++ b/fs/proc/kcore.c
-@@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
- * the addresses in the elf_phdr on our list.
- */
- start = kc_offset_to_vaddr(*fpos - elf_buflen);
-- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
-+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
-+ if (tsz > buflen)
- tsz = buflen;
--
-+
- while (buflen) {
- struct kcore_list *m;
-
-@@ -510,19 +511,20 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
- } else {
- if (kern_addr_valid(start)) {
- unsigned long n;
-+ char *elf_buf;
-+ mm_segment_t oldfs;
-
-- n = copy_to_user(buffer, (char *)start, tsz);
-- /*
-- * We cannot distingush between fault on source
-- * and fault on destination. When this happens
-- * we clear too and hope it will trigger the
-- * EFAULT again.
-- */
-- if (n) {
-- if (clear_user(buffer + tsz - n,
-- n))
-- return -EFAULT;
-- }
-+ elf_buf = kzalloc(tsz, GFP_KERNEL);
-+ if (!elf_buf)
-+ return -ENOMEM;
-+ oldfs = get_fs();
-+ set_fs(KERNEL_DS);
-+ n = __copy_from_user(elf_buf, (const void __user *)start, tsz);
-+ set_fs(oldfs);
-+ n = copy_to_user(buffer, elf_buf, tsz);
-+ kfree(elf_buf);
-+ if (n)
-+ return -EFAULT;
- } else {
- if (clear_user(buffer, tsz))
- return -EFAULT;
-@@ -542,6 +544,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
-
- static int open_kcore(struct inode *inode, struct file *filp)
- {
-+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
-+ return -EPERM;
-+#endif
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
- if (kcore_need_update)
-diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
-index 80e4645..53e5fcf 100644
---- a/fs/proc/meminfo.c
-+++ b/fs/proc/meminfo.c
-@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
- vmi.used >> 10,
- vmi.largest_chunk >> 10
- #ifdef CONFIG_MEMORY_FAILURE
-- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
-+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
- #endif
- #ifdef CONFIG_TRANSPARENT_HUGEPAGE
- ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
-diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
-index b1822dd..df622cb 100644
---- a/fs/proc/nommu.c
-+++ b/fs/proc/nommu.c
-@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
- if (len < 1)
- len = 1;
- seq_printf(m, "%*c", len, ' ');
-- seq_path(m, &file->f_path, "");
-+ seq_path(m, &file->f_path, "\n\\");
- }
-
- seq_putc(m, '\n');
-diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
-index f738024..c2f9e5e 100644
---- a/fs/proc/proc_net.c
-+++ b/fs/proc/proc_net.c
-@@ -23,15 +23,34 @@
- #include <linux/nsproxy.h>
- #include <net/net_namespace.h>
- #include <linux/seq_file.h>
-+#include <linux/grsecurity.h>
-
- #include "internal.h"
-
-+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-+static struct seq_operations *ipv6_seq_ops_addr;
-+
-+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
-+{
-+ ipv6_seq_ops_addr = addr;
-+}
-+
-+void unregister_ipv6_seq_ops_addr(void)
-+{
-+ ipv6_seq_ops_addr = NULL;
-+}
-+
-+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
-+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
-+#endif
-
- static struct net *get_proc_net(const struct inode *inode)
- {
- return maybe_get_net(PDE_NET(PDE(inode)));
- }
-
-+extern const struct seq_operations dev_seq_ops;
-+
- int seq_open_net(struct inode *ino, struct file *f,
- const struct seq_operations *ops, int size)
- {
-@@ -40,6 +59,14 @@ int seq_open_net(struct inode *ino, struct file *f,
-
- BUG_ON(size < sizeof(*p));
-
-+ /* only permit access to /proc/net/dev */
-+ if (
-+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-+ ops != ipv6_seq_ops_addr &&
-+#endif
-+ ops != &dev_seq_ops && gr_proc_is_restricted())
-+ return -EACCES;
-+
- net = get_proc_net(ino);
- if (net == NULL)
- return -ENXIO;
-@@ -62,6 +89,9 @@ int single_open_net(struct inode *inode, struct file *file,
- int err;
- struct net *net;
-
-+ if (gr_proc_is_restricted())
-+ return -EACCES;
-+
- err = -ENXIO;
- net = get_proc_net(inode);
- if (net == NULL)
-@@ -228,7 +258,7 @@ static __net_exit void proc_net_ns_exit(struct net *net)
- kfree(net->proc_net);
- }
-
--static struct pernet_operations __net_initdata proc_net_ns_ops = {
-+static struct pernet_operations __net_initconst proc_net_ns_ops = {
- .init = proc_net_ns_init,
- .exit = proc_net_ns_exit,
- };
-diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
-index 0be1aa4..21298e5 100644
---- a/fs/proc/proc_sysctl.c
-+++ b/fs/proc/proc_sysctl.c
-@@ -7,13 +7,16 @@
- #include <linux/proc_fs.h>
- #include <linux/security.h>
- #include <linux/namei.h>
-+#include <linux/nsproxy.h>
- #include "internal.h"
-
-+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
-+
- static const struct dentry_operations proc_sys_dentry_operations;
- static const struct file_operations proc_sys_file_operations;
--static const struct inode_operations proc_sys_inode_operations;
-+const struct inode_operations proc_sys_inode_operations;
- static const struct file_operations proc_sys_dir_file_operations;
--static const struct inode_operations proc_sys_dir_operations;
-+const struct inode_operations proc_sys_dir_operations;
-
- void proc_sys_poll_notify(struct ctl_table_poll *poll)
- {
-@@ -128,8 +131,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
-
- err = NULL;
- d_set_d_op(dentry, &proc_sys_dentry_operations);
-+
-+ gr_handle_proc_create(dentry, inode);
-+
- d_add(dentry, inode);
-
-+ if (gr_handle_sysctl(p, MAY_EXEC))
-+ err = ERR_PTR(-ENOENT);
-+
- out:
- if (h)
- sysctl_head_finish(h);
-@@ -162,6 +171,17 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
- if (!table->proc_handler)
- goto out;
-
-+#ifdef CONFIG_GRKERNSEC
-+ error = -EPERM;
-+ if (write) {
-+ if (current->nsproxy->net_ns != table->extra2) {
-+ if (!capable(CAP_SYS_ADMIN))
-+ goto out;
-+ } else if (!nsown_capable(CAP_NET_ADMIN))
-+ goto out;
-+ }
-+#endif
-+
- /* careful: calling conventions are nasty here */
- res = count;
- error = table->proc_handler(table, write, buf, &res, ppos);
-@@ -259,6 +279,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
- return -ENOMEM;
- } else {
- d_set_d_op(child, &proc_sys_dentry_operations);
-+
-+ gr_handle_proc_create(child, inode);
-+
- d_add(child, inode);
- }
- } else {
-@@ -287,6 +310,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
- if (*pos < file->f_pos)
- continue;
-
-+ if (gr_handle_sysctl(table, 0))
-+ continue;
-+
- res = proc_sys_fill_cache(file, dirent, filldir, head, table);
- if (res)
- return res;
-@@ -412,6 +438,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
- if (IS_ERR(head))
- return PTR_ERR(head);
-
-+ if (table && gr_handle_sysctl(table, MAY_EXEC))
-+ return -ENOENT;
-+
- generic_fillattr(inode, stat);
- if (table)
- stat->mode = (stat->mode & S_IFMT) | table->mode;
-@@ -434,13 +463,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
- .llseek = generic_file_llseek,
- };
-
--static const struct inode_operations proc_sys_inode_operations = {
-+const struct inode_operations proc_sys_inode_operations = {
- .permission = proc_sys_permission,
- .setattr = proc_sys_setattr,
- .getattr = proc_sys_getattr,
- };
-
--static const struct inode_operations proc_sys_dir_operations = {
-+const struct inode_operations proc_sys_dir_operations = {
- .lookup = proc_sys_lookup,
- .permission = proc_sys_permission,
- .setattr = proc_sys_setattr,
-diff --git a/fs/proc/root.c b/fs/proc/root.c
-index 03102d9..4ae347e 100644
---- a/fs/proc/root.c
-+++ b/fs/proc/root.c
-@@ -121,7 +121,15 @@ void __init proc_root_init(void)
- #ifdef CONFIG_PROC_DEVICETREE
- proc_device_tree_init();
- #endif
-+#ifdef CONFIG_GRKERNSEC_PROC_ADD
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
-+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
-+#endif
-+#else
- proc_mkdir("bus", NULL);
-+#endif
- proc_sys_init();
- }
-
-diff --git a/fs/proc/stat.c b/fs/proc/stat.c
-index 81a48d1..b2275ba 100644
---- a/fs/proc/stat.c
-+++ b/fs/proc/stat.c
-@@ -11,6 +11,7 @@
- #include <linux/irqnr.h>
- #include <asm/cputime.h>
- #include <linux/tick.h>
-+#include <linux/grsecurity.h>
-
- #ifndef arch_irq_stat_cpu
- #define arch_irq_stat_cpu(cpu) 0
-@@ -67,6 +68,18 @@ static int show_stat(struct seq_file *p, void *v)
- u64 sum_softirq = 0;
- unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
- struct timespec boottime;
-+ int unrestricted = 1;
-+
-+#ifdef CONFIG_GRKERNSEC_PROC_ADD
-+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ if (current_uid()
-+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
-+ && !in_group_p(grsec_proc_gid)
-+#endif
-+ )
-+ unrestricted = 0;
-+#endif
-+#endif
-
- user = nice = system = idle = iowait =
- irq = softirq = steal = cputime64_zero;
-@@ -79,24 +92,27 @@ static int show_stat(struct seq_file *p, void *v)
- nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
- system = cputime64_add(system, kstat_cpu(i).cpustat.system);
- idle = cputime64_add(idle, get_idle_time(i));
-- iowait = cputime64_add(iowait, get_iowait_time(i));
-- irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
-- softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
-- steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
-- guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
-- guest_nice = cputime64_add(guest_nice,
-- kstat_cpu(i).cpustat.guest_nice);
-- sum += kstat_cpu_irqs_sum(i);
-- sum += arch_irq_stat_cpu(i);
-+ if (unrestricted) {
-+ iowait = cputime64_add(iowait, get_iowait_time(i));
-+ irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
-+ softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
-+ steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
-+ guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
-+ guest_nice = cputime64_add(guest_nice,
-+ kstat_cpu(i).cpustat.guest_nice);
-+ sum += kstat_cpu_irqs_sum(i);
-+ sum += arch_irq_stat_cpu(i);
-
-- for (j = 0; j < NR_SOFTIRQS; j++) {
-- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
-+ for (j = 0; j < NR_SOFTIRQS; j++) {
-+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
-
-- per_softirq_sums[j] += softirq_stat;
-- sum_softirq += softirq_stat;
-+ per_softirq_sums[j] += softirq_stat;
-+ sum_softirq += softirq_stat;
-+ }
- }
- }
-- sum += arch_irq_stat();
-+ if (unrestricted)
-+ sum += arch_irq_stat();
-
- seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu "
- "%llu\n",
-@@ -116,12 +132,14 @@ static int show_stat(struct seq_file *p, void *v)
- nice = kstat_cpu(i).cpustat.nice;
- system = kstat_cpu(i).cpustat.system;
- idle = get_idle_time(i);
-- iowait = get_iowait_time(i);
-- irq = kstat_cpu(i).cpustat.irq;
-- softirq = kstat_cpu(i).cpustat.softirq;
-- steal = kstat_cpu(i).cpustat.steal;
-- guest = kstat_cpu(i).cpustat.guest;
-- guest_nice = kstat_cpu(i).cpustat.guest_nice;
-+ if (unrestricted) {
-+ iowait = get_iowait_time(i);
-+ irq = kstat_cpu(i).cpustat.irq;
-+ softirq = kstat_cpu(i).cpustat.softirq;
-+ steal = kstat_cpu(i).cpustat.steal;
-+ guest = kstat_cpu(i).cpustat.guest;
-+ guest_nice = kstat_cpu(i).cpustat.guest_nice;
-+ }
- seq_printf(p,
- "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu "
- "%llu\n",
-diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
-index d1bd6a9..872017c 100644
---- a/fs/proc/task_mmu.c
-+++ b/fs/proc/task_mmu.c
-@@ -11,12 +11,19 @@
- #include <linux/rmap.h>
- #include <linux/swap.h>
- #include <linux/swapops.h>
-+#include <linux/grsecurity.h>
-
- #include <asm/elf.h>
- #include <asm/uaccess.h>
- #include <asm/tlbflush.h>
- #include "internal.h"
-
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
-+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
-+ _mm->pax_flags & MF_PAX_SEGMEXEC))
-+#endif
-+
- void task_mem(struct seq_file *m, struct mm_struct *mm)
- {
- unsigned long data, text, lib, swap;
-@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
- "VmExe:\t%8lu kB\n"
- "VmLib:\t%8lu kB\n"
- "VmPTE:\t%8lu kB\n"
-- "VmSwap:\t%8lu kB\n",
-- hiwater_vm << (PAGE_SHIFT-10),
-+ "VmSwap:\t%8lu kB\n"
-+
-+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
-+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
-+#endif
-+
-+ ,hiwater_vm << (PAGE_SHIFT-10),
- (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
- mm->locked_vm << (PAGE_SHIFT-10),
- mm->pinned_vm << (PAGE_SHIFT-10),
-@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
- data << (PAGE_SHIFT-10),
- mm->stack_vm << (PAGE_SHIFT-10), text, lib,
- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
-- swap << (PAGE_SHIFT-10));
-+ swap << (PAGE_SHIFT-10)
-+
-+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
-+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
-+#else
-+ , mm->context.user_cs_base
-+ , mm->context.user_cs_limit
-+#endif
-+#endif
-+
-+ );
- }
-
- unsigned long task_vsize(struct mm_struct *mm)
-@@ -125,7 +149,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
- if (!priv->task)
- return ERR_PTR(-ESRCH);
-
-- mm = mm_for_maps(priv->task);
-+ mm = mm_access(priv->task, PTRACE_MODE_READ);
- if (!mm || IS_ERR(mm))
- return mm;
- down_read(&mm->mmap_sem);
-@@ -227,13 +251,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
- pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
- }
-
-- /* We don't show the stack guard page in /proc/maps */
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
-+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
-+#else
- start = vma->vm_start;
-- if (stack_guard_page_start(vma, start))
-- start += PAGE_SIZE;
- end = vma->vm_end;
-- if (stack_guard_page_end(vma, end))
-- end -= PAGE_SIZE;
-+#endif
-
- seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
- start,
-@@ -242,7 +266,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
- flags & VM_WRITE ? 'w' : '-',
- flags & VM_EXEC ? 'x' : '-',
- flags & VM_MAYSHARE ? 's' : 'p',
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
-+#else
- pgoff,
-+#endif
- MAJOR(dev), MINOR(dev), ino, &len);
-
- /*
-@@ -251,7 +279,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
- */
- if (file) {
- pad_len_spaces(m, len);
-- seq_path(m, &file->f_path, "\n");
-+ seq_path(m, &file->f_path, "\n\\");
- } else {
- const char *name = arch_vma_name(vma);
- if (!name) {
-@@ -259,8 +287,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
- if (vma->vm_start <= mm->brk &&
- vma->vm_end >= mm->start_brk) {
- name = "[heap]";
-- } else if (vma->vm_start <= mm->start_stack &&
-- vma->vm_end >= mm->start_stack) {
-+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
-+ (vma->vm_start <= mm->start_stack &&
-+ vma->vm_end >= mm->start_stack)) {
- name = "[stack]";
- }
- } else {
-@@ -281,6 +310,13 @@ static int show_map(struct seq_file *m, void *v)
- struct proc_maps_private *priv = m->private;
- struct task_struct *task = priv->task;
-
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ if (current->exec_id != m->exec_id) {
-+ gr_log_badprocpid("maps");
-+ return 0;
-+ }
-+#endif
-+
- show_map_vma(m, vma);
-
- if (m->count < m->size) /* vma is copied successfully */
-@@ -437,12 +473,23 @@ static int show_smap(struct seq_file *m, void *v)
- .private = &mss,
- };
-
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ if (current->exec_id != m->exec_id) {
-+ gr_log_badprocpid("smaps");
-+ return 0;
-+ }
-+#endif
- memset(&mss, 0, sizeof mss);
-- mss.vma = vma;
-- /* mmap_sem is held in m_start */
-- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
--
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
-+#endif
-+ mss.vma = vma;
-+ /* mmap_sem is held in m_start */
-+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ }
-+#endif
- show_map_vma(m, vma);
-
- seq_printf(m,
-@@ -460,7 +507,11 @@ static int show_smap(struct seq_file *m, void *v)
- "KernelPageSize: %8lu kB\n"
- "MMUPageSize: %8lu kB\n"
- "Locked: %8lu kB\n",
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
-+#else
- (vma->vm_end - vma->vm_start) >> 10,
-+#endif
- mss.resident >> 10,
- (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
- mss.shared_clean >> 10,
-@@ -798,7 +849,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
- if (!pm.buffer)
- goto out_task;
-
-- mm = mm_for_maps(task);
-+ mm = mm_access(task, PTRACE_MODE_READ);
- ret = PTR_ERR(mm);
- if (!mm || IS_ERR(mm))
- goto out_free;
-@@ -1034,6 +1085,13 @@ static int show_numa_map(struct seq_file *m, void *v)
- int n;
- char buffer[50];
-
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ if (current->exec_id != m->exec_id) {
-+ gr_log_badprocpid("numa_maps");
-+ return 0;
-+ }
-+#endif
-+
- if (!mm)
- return 0;
-
-@@ -1051,11 +1109,15 @@ static int show_numa_map(struct seq_file *m, void *v)
- mpol_to_str(buffer, sizeof(buffer), pol, 0);
- mpol_cond_put(pol);
-
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
-+#else
- seq_printf(m, "%08lx %s", vma->vm_start, buffer);
-+#endif
-
- if (file) {
- seq_printf(m, " file=");
-- seq_path(m, &file->f_path, "\n\t= ");
-+ seq_path(m, &file->f_path, "\n\t\\= ");
- } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
- seq_printf(m, " heap");
- } else if (vma->vm_start <= mm->start_stack &&
-diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
-index 980de54..78b2faa 100644
---- a/fs/proc/task_nommu.c
-+++ b/fs/proc/task_nommu.c
-@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
- else
- bytes += kobjsize(mm);
-
-- if (current->fs && current->fs->users > 1)
-+ if (current->fs && atomic_read(&current->fs->users) > 1)
- sbytes += kobjsize(current->fs);
- else
- bytes += kobjsize(current->fs);
-@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
-
- if (file) {
- pad_len_spaces(m, len);
-- seq_path(m, &file->f_path, "");
-+ seq_path(m, &file->f_path, "\n\\");
- } else if (mm) {
- if (vma->vm_start <= mm->start_stack &&
- vma->vm_end >= mm->start_stack) {
-@@ -201,7 +201,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
- if (!priv->task)
- return ERR_PTR(-ESRCH);
-
-- mm = mm_for_maps(priv->task);
-+ mm = mm_access(priv->task, PTRACE_MODE_READ);
- if (!mm || IS_ERR(mm)) {
- put_task_struct(priv->task);
- priv->task = NULL;
-diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
-index b0f450a..8ba3e5d 100644
---- a/fs/proc/vmcore.c
-+++ b/fs/proc/vmcore.c
-@@ -97,9 +97,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
- nr_bytes = count;
-
- /* If pfn is not ram, return zeros for sparse dump files */
-- if (pfn_is_ram(pfn) == 0)
-- memset(buf, 0, nr_bytes);
-- else {
-+ if (pfn_is_ram(pfn) == 0) {
-+ if (userbuf) {
-+ if (clear_user((char __force_user *)buf, nr_bytes))
-+ return -EFAULT;
-+ } else
-+ memset(buf, 0, nr_bytes);
-+ } else {
- tmp = copy_oldmem_page(pfn, buf, nr_bytes,
- offset, userbuf);
- if (tmp < 0)
-@@ -184,7 +188,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
- tsz = nr_bytes;
-
- while (buflen) {
-- tmp = read_from_oldmem(buffer, tsz, &start, 1);
-+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, 1);
- if (tmp < 0)
- return tmp;
- buflen -= tsz;
-diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
-index 3bdd214..e570832 100644
---- a/fs/qnx4/inode.c
-+++ b/fs/qnx4/inode.c
-@@ -473,6 +473,7 @@ static struct file_system_type qnx4_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("qnx4");
-
- static int __init init_qnx4_fs(void)
- {
-diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
-index d67908b..d13f6a6 100644
---- a/fs/quota/netlink.c
-+++ b/fs/quota/netlink.c
-@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
- void quota_send_warning(short type, unsigned int id, dev_t dev,
- const char warntype)
- {
-- static atomic_t seq;
-+ static atomic_unchecked_t seq;
- struct sk_buff *skb;
- void *msg_head;
- int ret;
-@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
- "VFS: Not enough memory to send quota warning.\n");
- return;
- }
-- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
-+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
- &quota_genl_family, 0, QUOTA_NL_C_WARNING);
- if (!msg_head) {
- printk(KERN_ERR
-diff --git a/fs/read_write.c b/fs/read_write.c
-index 5ad4248..492b277 100644
---- a/fs/read_write.c
-+++ b/fs/read_write.c
-@@ -956,6 +956,8 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
- if (retval > 0) {
- add_rchar(current, retval);
- add_wchar(current, retval);
-+ fsnotify_access(in_file);
-+ fsnotify_modify(out_file);
- }
-
- inc_syscr(current);
-diff --git a/fs/readdir.c b/fs/readdir.c
-index 356f715..c918d38 100644
---- a/fs/readdir.c
-+++ b/fs/readdir.c
-@@ -17,6 +17,7 @@
- #include <linux/security.h>
- #include <linux/syscalls.h>
- #include <linux/unistd.h>
-+#include <linux/namei.h>
-
- #include <asm/uaccess.h>
-
-@@ -67,6 +68,7 @@ struct old_linux_dirent {
-
- struct readdir_callback {
- struct old_linux_dirent __user * dirent;
-+ struct file * file;
- int result;
- };
-
-@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
- buf->result = -EOVERFLOW;
- return -EOVERFLOW;
- }
-+
-+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
-+ return 0;
-+
- buf->result++;
- dirent = buf->dirent;
- if (!access_ok(VERIFY_WRITE, dirent,
-@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
-
- buf.result = 0;
- buf.dirent = dirent;
-+ buf.file = file;
-
- error = vfs_readdir(file, fillonedir, &buf);
- if (buf.result)
-@@ -142,6 +149,7 @@ struct linux_dirent {
- struct getdents_callback {
- struct linux_dirent __user * current_dir;
- struct linux_dirent __user * previous;
-+ struct file * file;
- int count;
- int error;
- };
-@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
- buf->error = -EOVERFLOW;
- return -EOVERFLOW;
- }
-+
-+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
-+ return 0;
-+
- dirent = buf->previous;
- if (dirent) {
- if (__put_user(offset, &dirent->d_off))
-@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
- buf.previous = NULL;
- buf.count = count;
- buf.error = 0;
-+ buf.file = file;
-
- error = vfs_readdir(file, filldir, &buf);
- if (error >= 0)
-@@ -229,6 +242,7 @@ out:
- struct getdents_callback64 {
- struct linux_dirent64 __user * current_dir;
- struct linux_dirent64 __user * previous;
-+ struct file *file;
- int count;
- int error;
- };
-@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
- buf->error = -EINVAL; /* only used if we fail.. */
- if (reclen > buf->count)
- return -EINVAL;
-+
-+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
-+ return 0;
-+
- dirent = buf->previous;
- if (dirent) {
- if (__put_user(offset, &dirent->d_off))
-@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
-
- buf.current_dir = dirent;
- buf.previous = NULL;
-+ buf.file = file;
- buf.count = count;
- buf.error = 0;
-
-@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
- error = buf.error;
- lastdirent = buf.previous;
- if (lastdirent) {
-- typeof(lastdirent->d_off) d_off = file->f_pos;
-+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
- if (__put_user(d_off, &lastdirent->d_off))
- error = -EFAULT;
- else
-diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
-index 8048eea..ad851ae 100644
---- a/fs/reiserfs/dir.c
-+++ b/fs/reiserfs/dir.c
-@@ -206,6 +206,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
- next_pos = cur_pos + 1;
-
- if (item_moved(&tmp_ih, &path_to_entry)) {
-+ set_cpu_key_k_offset(&pos_key,
-+ next_pos);
- goto research;
- }
- } /* for */
-diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
-index 60c0804..d814f98 100644
---- a/fs/reiserfs/do_balan.c
-+++ b/fs/reiserfs/do_balan.c
-@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
- return;
- }
-
-- atomic_inc(&(fs_generation(tb->tb_sb)));
-+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
- do_balance_starts(tb);
-
- /* balance leaf returns 0 except if combining L R and S into
-diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
-index fcb07e5..973ddfc 100644
---- a/fs/reiserfs/inode.c
-+++ b/fs/reiserfs/inode.c
-@@ -1816,11 +1816,16 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
- TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
- memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE);
- args.dirid = le32_to_cpu(ih.ih_key.k_dir_id);
-- if (insert_inode_locked4(inode, args.objectid,
-- reiserfs_find_actor, &args) < 0) {
-+
-+ reiserfs_write_unlock(inode->i_sb);
-+ err = insert_inode_locked4(inode, args.objectid,
-+ reiserfs_find_actor, &args);
-+ reiserfs_write_lock(inode->i_sb);
-+ if (err) {
- err = -EINVAL;
- goto out_bad_inode;
- }
-+
- if (old_format_only(sb))
- /* not a perfect generation count, as object ids can be reused, but
- ** this is as good as reiserfs can do right now.
-diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
-index 72cb1cc..a7d36d3 100644
---- a/fs/reiserfs/item_ops.c
-+++ b/fs/reiserfs/item_ops.c
-@@ -725,18 +725,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
- }
-
- static struct item_operations errcatch_ops = {
-- errcatch_bytes_number,
-- errcatch_decrement_key,
-- errcatch_is_left_mergeable,
-- errcatch_print_item,
-- errcatch_check_item,
-+ .bytes_number = errcatch_bytes_number,
-+ .decrement_key = errcatch_decrement_key,
-+ .is_left_mergeable = errcatch_is_left_mergeable,
-+ .print_item = errcatch_print_item,
-+ .check_item = errcatch_check_item,
-
-- errcatch_create_vi,
-- errcatch_check_left,
-- errcatch_check_right,
-- errcatch_part_size,
-- errcatch_unit_num,
-- errcatch_print_vi
-+ .create_vi = errcatch_create_vi,
-+ .check_left = errcatch_check_left,
-+ .check_right = errcatch_check_right,
-+ .part_size = errcatch_part_size,
-+ .unit_num = errcatch_unit_num,
-+ .print_vi = errcatch_print_vi
- };
-
- //////////////////////////////////////////////////////////////////////////////
-diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
-index 7a99811..a7c96c4 100644
---- a/fs/reiserfs/procfs.c
-+++ b/fs/reiserfs/procfs.c
-@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
- "SMALL_TAILS " : "NO_TAILS ",
- replay_only(sb) ? "REPLAY_ONLY " : "",
- convert_reiserfs(sb) ? "CONV " : "",
-- atomic_read(&r->s_generation_counter),
-+ atomic_read_unchecked(&r->s_generation_counter),
- SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
- SF(s_do_balance), SF(s_unneeded_left_neighbor),
- SF(s_good_search_by_key_reada), SF(s_bmaps),
-diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
-index 569498a..636bb35 100644
---- a/fs/reiserfs/super.c
-+++ b/fs/reiserfs/super.c
-@@ -1664,6 +1664,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
- REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
- REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO);
- REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
-+#ifdef CONFIG_REISERFS_FS_XATTR
-+ /* turn on user xattrs by default */
-+ REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
-+#endif
- /* no preallocation minimum, be smart in
- reiserfs_file_write instead */
- REISERFS_SB(s)->s_alloc_options.preallocmin = 0;
-@@ -2295,6 +2299,7 @@ struct file_system_type reiserfs_fs_type = {
- .kill_sb = reiserfs_kill_sb,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("reiserfs");
-
- MODULE_DESCRIPTION("ReiserFS journaled filesystem");
- MODULE_AUTHOR("Hans Reiser <reiser@namesys.com>");
-diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
-index 04eecc4..33f74d0 100644
---- a/fs/reiserfs/xattr.c
-+++ b/fs/reiserfs/xattr.c
-@@ -318,7 +318,19 @@ static int delete_one_xattr(struct dentry *dentry, void *data)
- static int chown_one_xattr(struct dentry *dentry, void *data)
- {
- struct iattr *attrs = data;
-- return reiserfs_setattr(dentry, attrs);
-+ int ia_valid = attrs->ia_valid;
-+ int err;
-+
-+ /*
-+ * We only want the ownership bits. Otherwise, we'll do
-+ * things like change a directory to a regular file if
-+ * ATTR_MODE is set.
-+ */
-+ attrs->ia_valid &= (ATTR_UID|ATTR_GID);
-+ err = reiserfs_setattr(dentry, attrs);
-+ attrs->ia_valid = ia_valid;
-+
-+ return err;
- }
-
- /* No i_mutex, but the inode is unconnected. */
-diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
-index 6da0396..fc338f3 100644
---- a/fs/reiserfs/xattr_acl.c
-+++ b/fs/reiserfs/xattr_acl.c
-@@ -429,6 +429,9 @@ int reiserfs_acl_chmod(struct inode *inode)
- int depth;
- int error;
-
-+ if (IS_PRIVATE(inode))
-+ return 0;
-+
- if (S_ISLNK(inode->i_mode))
- return -EOPNOTSUPP;
-
-diff --git a/fs/romfs/super.c b/fs/romfs/super.c
-index 8b4089f..2575128 100644
---- a/fs/romfs/super.c
-+++ b/fs/romfs/super.c
-@@ -602,6 +602,7 @@ static struct file_system_type romfs_fs_type = {
- .kill_sb = romfs_kill_sb,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("romfs");
-
- /*
- * inode storage initialiser
-diff --git a/fs/select.c b/fs/select.c
-index d33418f..2a5345e 100644
---- a/fs/select.c
-+++ b/fs/select.c
-@@ -20,6 +20,7 @@
- #include <linux/module.h>
- #include <linux/slab.h>
- #include <linux/poll.h>
-+#include <linux/security.h>
- #include <linux/personality.h> /* for STICKY_TIMEOUTS */
- #include <linux/file.h>
- #include <linux/fdtable.h>
-@@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
- struct poll_list *walk = head;
- unsigned long todo = nfds;
-
-+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
- if (nfds > rlimit(RLIMIT_NOFILE))
- return -EINVAL;
-
-diff --git a/fs/seq_file.c b/fs/seq_file.c
-index dba43c3..cb3437c 100644
---- a/fs/seq_file.c
-+++ b/fs/seq_file.c
-@@ -9,6 +9,8 @@
- #include <linux/module.h>
- #include <linux/seq_file.h>
- #include <linux/slab.h>
-+#include <linux/sched.h>
-+#include <linux/grsecurity.h>
-
- #include <asm/uaccess.h>
- #include <asm/page.h>
-@@ -40,6 +42,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
- memset(p, 0, sizeof(*p));
- mutex_init(&p->lock);
- p->op = op;
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ p->exec_id = current->exec_id;
-+#endif
-
- /*
- * Wrappers around seq_open(e.g. swaps_open) need to be
-@@ -62,6 +67,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
- }
- EXPORT_SYMBOL(seq_open);
-
-+
-+int seq_open_restrict(struct file *file, const struct seq_operations *op)
-+{
-+ if (gr_proc_is_restricted())
-+ return -EACCES;
-+
-+ return seq_open(file, op);
-+}
-+EXPORT_SYMBOL(seq_open_restrict);
-+
- static int traverse(struct seq_file *m, loff_t offset)
- {
- loff_t pos = 0, index;
-@@ -76,7 +91,11 @@ static int traverse(struct seq_file *m, loff_t offset)
- return 0;
- }
- if (!m->buf) {
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
-+#else
- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
-+#endif
- if (!m->buf)
- return -ENOMEM;
- }
-@@ -116,7 +135,11 @@ static int traverse(struct seq_file *m, loff_t offset)
- Eoverflow:
- m->op->stop(m, p);
- kfree(m->buf);
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
-+#else
- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
-+#endif
- return !m->buf ? -ENOMEM : -EAGAIN;
- }
-
-@@ -132,7 +155,7 @@ Eoverflow:
- ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
- {
- struct seq_file *m = file->private_data;
-- size_t copied = 0;
-+ ssize_t copied = 0;
- loff_t pos;
- size_t n;
- void *p;
-@@ -169,7 +192,11 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
- m->version = file->f_version;
- /* grab buffer if we didn't have one */
- if (!m->buf) {
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
-+#else
- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
-+#endif
- if (!m->buf)
- goto Enomem;
- }
-@@ -210,7 +237,11 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
- goto Fill;
- m->op->stop(m, p);
- kfree(m->buf);
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
-+#else
- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
-+#endif
- if (!m->buf)
- goto Enomem;
- m->count = 0;
-@@ -549,7 +580,7 @@ static void single_stop(struct seq_file *p, void *v)
- int single_open(struct file *file, int (*show)(struct seq_file *, void *),
- void *data)
- {
-- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
-+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
- int res = -ENOMEM;
-
- if (op) {
-@@ -567,6 +598,17 @@ int single_open(struct file *file, int (*show)(struct seq_file *, void *),
- }
- EXPORT_SYMBOL(single_open);
-
-+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
-+ void *data)
-+{
-+ if (gr_proc_is_restricted())
-+ return -EACCES;
-+
-+ return single_open(file, show, data);
-+}
-+EXPORT_SYMBOL(single_open_restrict);
-+
-+
- int single_release(struct inode *inode, struct file *file)
- {
- const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
-diff --git a/fs/splice.c b/fs/splice.c
-index 34c2b2b..cb9a1ed 100644
---- a/fs/splice.c
-+++ b/fs/splice.c
-@@ -195,7 +195,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
- pipe_lock(pipe);
-
- for (;;) {
-- if (!pipe->readers) {
-+ if (!atomic_read(&pipe->readers)) {
- send_sig(SIGPIPE, current, 0);
- if (!ret)
- ret = -EPIPE;
-@@ -249,9 +249,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
- do_wakeup = 0;
- }
-
-- pipe->waiting_writers++;
-+ atomic_inc(&pipe->waiting_writers);
- pipe_wait(pipe);
-- pipe->waiting_writers--;
-+ atomic_dec(&pipe->waiting_writers);
- }
-
- pipe_unlock(pipe);
-@@ -582,7 +582,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
- old_fs = get_fs();
- set_fs(get_ds());
- /* The cast to a user pointer is valid due to the set_fs() */
-- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
-+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
- set_fs(old_fs);
-
- return res;
-@@ -597,7 +597,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
- old_fs = get_fs();
- set_fs(get_ds());
- /* The cast to a user pointer is valid due to the set_fs() */
-- res = vfs_write(file, (const char __user *)buf, count, &pos);
-+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
- set_fs(old_fs);
-
- return res;
-@@ -649,7 +649,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
- goto err;
-
- this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
-- vec[i].iov_base = (void __user *) page_address(page);
-+ vec[i].iov_base = (void __force_user *) page_address(page);
- vec[i].iov_len = this_len;
- spd.pages[i] = page;
- spd.nr_pages++;
-@@ -873,10 +873,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
- int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
- {
- while (!pipe->nrbufs) {
-- if (!pipe->writers)
-+ if (!atomic_read(&pipe->writers))
- return 0;
-
-- if (!pipe->waiting_writers && sd->num_spliced)
-+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
- return 0;
-
- if (sd->flags & SPLICE_F_NONBLOCK)
-@@ -1187,7 +1187,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
- long ret, bytes;
- umode_t i_mode;
- size_t len;
-- int i, flags;
-+ int i, flags, more;
-
- /*
- * We require the input being a regular file, as we don't want to
-@@ -1213,7 +1213,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
- * out of the pipe right after the splice_to_pipe(). So set
- * PIPE_READERS appropriately.
- */
-- pipe->readers = 1;
-+ atomic_set(&pipe->readers, 1);
-
- current->splice_pipe = pipe;
- }
-@@ -1230,6 +1230,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
- * Don't block on output, we have to drain the direct pipe.
- */
- sd->flags &= ~SPLICE_F_NONBLOCK;
-+ more = sd->flags & SPLICE_F_MORE;
-
- while (len) {
- size_t read_len;
-@@ -1243,6 +1244,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
- sd->total_len = read_len;
-
- /*
-+ * If more data is pending, set SPLICE_F_MORE
-+ * If this is the last data and SPLICE_F_MORE was not set
-+	 * initially, clear it.
-+ */
-+ if (read_len < len)
-+ sd->flags |= SPLICE_F_MORE;
-+ else if (!more)
-+ sd->flags &= ~SPLICE_F_MORE;
-+ /*
- * NOTE: nonblocking mode only applies to the input. We
- * must not do the output in nonblocking mode as then we
- * could get stuck data in the internal pipe:
-@@ -1481,6 +1491,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
-
- partial[buffers].offset = off;
- partial[buffers].len = plen;
-+ partial[buffers].private = 0;
-
- off = 0;
- len -= plen;
-@@ -1766,9 +1777,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
- ret = -ERESTARTSYS;
- break;
- }
-- if (!pipe->writers)
-+ if (!atomic_read(&pipe->writers))
- break;
-- if (!pipe->waiting_writers) {
-+ if (!atomic_read(&pipe->waiting_writers)) {
- if (flags & SPLICE_F_NONBLOCK) {
- ret = -EAGAIN;
- break;
-@@ -1800,7 +1811,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
- pipe_lock(pipe);
-
- while (pipe->nrbufs >= pipe->buffers) {
-- if (!pipe->readers) {
-+ if (!atomic_read(&pipe->readers)) {
- send_sig(SIGPIPE, current, 0);
- ret = -EPIPE;
- break;
-@@ -1813,9 +1824,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
- ret = -ERESTARTSYS;
- break;
- }
-- pipe->waiting_writers++;
-+ atomic_inc(&pipe->waiting_writers);
- pipe_wait(pipe);
-- pipe->waiting_writers--;
-+ atomic_dec(&pipe->waiting_writers);
- }
-
- pipe_unlock(pipe);
-@@ -1851,14 +1862,14 @@ retry:
- pipe_double_lock(ipipe, opipe);
-
- do {
-- if (!opipe->readers) {
-+ if (!atomic_read(&opipe->readers)) {
- send_sig(SIGPIPE, current, 0);
- if (!ret)
- ret = -EPIPE;
- break;
- }
-
-- if (!ipipe->nrbufs && !ipipe->writers)
-+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
- break;
-
- /*
-@@ -1955,7 +1966,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
- pipe_double_lock(ipipe, opipe);
-
- do {
-- if (!opipe->readers) {
-+ if (!atomic_read(&opipe->readers)) {
- send_sig(SIGPIPE, current, 0);
- if (!ret)
- ret = -EPIPE;
-@@ -2000,7 +2011,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
- * return EAGAIN if we have the potential of some data in the
- * future, otherwise just return 0
- */
-- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
-+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
- ret = -EAGAIN;
-
- pipe_unlock(ipipe);
-diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
-index 4619247..e3910db 100644
---- a/fs/squashfs/super.c
-+++ b/fs/squashfs/super.c
-@@ -481,6 +481,7 @@ static struct file_system_type squashfs_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV
- };
-+MODULE_ALIAS_FS("squashfs");
-
- static const struct super_operations squashfs_super_ops = {
- .alloc_inode = squashfs_alloc_inode,
-diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c
-index 92fcde7..1687329 100644
---- a/fs/squashfs/xattr.c
-+++ b/fs/squashfs/xattr.c
-@@ -46,8 +46,8 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
- + msblk->xattr_table;
- int offset = SQUASHFS_XATTR_OFFSET(squashfs_i(inode)->xattr);
- int count = squashfs_i(inode)->xattr_count;
-- size_t rest = buffer_size;
-- int err;
-+ size_t used = 0;
-+ ssize_t err;
-
- /* check that the file system has xattrs */
- if (msblk->xattr_id_table == NULL)
-@@ -68,11 +68,11 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
- name_size = le16_to_cpu(entry.size);
- handler = squashfs_xattr_handler(le16_to_cpu(entry.type));
- if (handler)
-- prefix_size = handler->list(d, buffer, rest, NULL,
-+ prefix_size = handler->list(d, buffer, buffer ? buffer_size - used : 0, NULL,
- name_size, handler->flags);
- if (prefix_size) {
- if (buffer) {
-- if (prefix_size + name_size + 1 > rest) {
-+ if (prefix_size + name_size + 1 > buffer_size - used) {
- err = -ERANGE;
- goto failed;
- }
-@@ -86,7 +86,7 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
- buffer[name_size] = '\0';
- buffer += name_size + 1;
- }
-- rest -= prefix_size + name_size + 1;
-+ used += prefix_size + name_size + 1;
- } else {
-			/* no handler or insufficient privileges, so skip */
- err = squashfs_read_metadata(sb, NULL, &start,
-@@ -107,7 +107,7 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
- if (err < 0)
- goto failed;
- }
-- err = buffer_size - rest;
-+ err = used;
-
- failed:
- return err;
-diff --git a/fs/stat.c b/fs/stat.c
-index 7b21801..ee8fe9b 100644
---- a/fs/stat.c
-+++ b/fs/stat.c
-@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
- stat->gid = inode->i_gid;
- stat->rdev = inode->i_rdev;
- stat->size = i_size_read(inode);
-- stat->atime = inode->i_atime;
-- stat->mtime = inode->i_mtime;
-+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
-+ stat->atime = inode->i_ctime;
-+ stat->mtime = inode->i_ctime;
-+ } else {
-+ stat->atime = inode->i_atime;
-+ stat->mtime = inode->i_mtime;
-+ }
- stat->ctime = inode->i_ctime;
- stat->blksize = (1 << inode->i_blkbits);
- stat->blocks = inode->i_blocks;
-@@ -46,8 +51,14 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
- if (retval)
- return retval;
-
-- if (inode->i_op->getattr)
-- return inode->i_op->getattr(mnt, dentry, stat);
-+ if (inode->i_op->getattr) {
-+ retval = inode->i_op->getattr(mnt, dentry, stat);
-+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
-+ stat->atime = stat->ctime;
-+ stat->mtime = stat->ctime;
-+ }
-+ return retval;
-+ }
-
- generic_fillattr(inode, stat);
- return 0;
-diff --git a/fs/super.c b/fs/super.c
-index 531de18..dfecd9e 100644
---- a/fs/super.c
-+++ b/fs/super.c
-@@ -297,19 +297,19 @@ EXPORT_SYMBOL(deactivate_super);
- * and want to turn it into a full-blown active reference. grab_super()
- * is called with sb_lock held and drops it. Returns 1 in case of
- * success, 0 if we had failed (superblock contents was already dead or
-- * dying when grab_super() had been called).
-+ * dying when grab_super() had been called). Note that this is only
-+ * called for superblocks not in rundown mode (== ones still on ->fs_supers
-+ * of their type), so increment of ->s_count is OK here.
- */
- static int grab_super(struct super_block *s) __releases(sb_lock)
- {
-- if (atomic_inc_not_zero(&s->s_active)) {
-- spin_unlock(&sb_lock);
-- return 1;
-- }
-- /* it's going away */
- s->s_count++;
- spin_unlock(&sb_lock);
-- /* wait for it to die */
- down_write(&s->s_umount);
-+ if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
-+ put_super(s);
-+ return 1;
-+ }
- up_write(&s->s_umount);
- put_super(s);
- return 0;
-@@ -438,11 +438,6 @@ retry:
- destroy_super(s);
- s = NULL;
- }
-- down_write(&old->s_umount);
-- if (unlikely(!(old->s_flags & MS_BORN))) {
-- deactivate_locked_super(old);
-- goto retry;
-- }
- return old;
- }
- }
-@@ -652,10 +647,10 @@ restart:
- if (list_empty(&sb->s_instances))
- continue;
- if (sb->s_bdev == bdev) {
-- if (grab_super(sb)) /* drops sb_lock */
-- return sb;
-- else
-+ if (!grab_super(sb))
- goto restart;
-+ up_write(&sb->s_umount);
-+ return sb;
- }
- }
- spin_unlock(&sb_lock);
-diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
-index a475983..3aab767 100644
---- a/fs/sysfs/bin.c
-+++ b/fs/sysfs/bin.c
-@@ -233,13 +233,13 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
- return ret;
- }
-
--static int bin_access(struct vm_area_struct *vma, unsigned long addr,
-- void *buf, int len, int write)
-+static ssize_t bin_access(struct vm_area_struct *vma, unsigned long addr,
-+ void *buf, size_t len, int write)
- {
- struct file *file = vma->vm_file;
- struct bin_buffer *bb = file->private_data;
- struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
-- int ret;
-+ ssize_t ret;
-
- if (!bb->vm_ops)
- return -EINVAL;
-diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
-index e756bc4..6e4ebbc 100644
---- a/fs/sysfs/dir.c
-+++ b/fs/sysfs/dir.c
-@@ -87,6 +87,10 @@ static void sysfs_link_sibling(struct sysfs_dirent *sd)
- rb_insert_color(&sd->name_node, &parent_sd->s_dir.name_tree);
- }
-
-+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
-+extern int grsec_enable_sysfs_restrict;
-+#endif
-+
- /**
- * sysfs_unlink_sibling - unlink sysfs_dirent from sibling list
- * @sd: sysfs_dirent of interest
-@@ -642,6 +646,20 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
- struct sysfs_dirent *sd;
- int rc;
-
-+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
-+ const char *parent_name = parent_sd->s_name;
-+
-+ mode = S_IFDIR | S_IRWXU;
-+
-+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
-+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
-+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
-+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
-+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
-+ if (!grsec_enable_sysfs_restrict)
-+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
-+#endif
-+
- /* allocate */
- sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
- if (!sd)
-diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
-index 779789a..f58193c 100644
---- a/fs/sysfs/file.c
-+++ b/fs/sysfs/file.c
-@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
-
- struct sysfs_open_dirent {
- atomic_t refcnt;
-- atomic_t event;
-+ atomic_unchecked_t event;
- wait_queue_head_t poll;
- struct list_head buffers; /* goes through sysfs_buffer.list */
- };
-@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
- if (!sysfs_get_active(attr_sd))
- return -ENODEV;
-
-- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
-+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
- count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
-
- sysfs_put_active(attr_sd);
-@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
- return -ENOMEM;
-
- atomic_set(&new_od->refcnt, 0);
-- atomic_set(&new_od->event, 1);
-+ atomic_set_unchecked(&new_od->event, 1);
- init_waitqueue_head(&new_od->poll);
- INIT_LIST_HEAD(&new_od->buffers);
- goto retry;
-@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
-
- sysfs_put_active(attr_sd);
-
-- if (buffer->event != atomic_read(&od->event))
-+ if (buffer->event != atomic_read_unchecked(&od->event))
- goto trigger;
-
- return DEFAULT_POLLMASK;
-@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
-
- od = sd->s_attr.open;
- if (od) {
-- atomic_inc(&od->event);
-+ atomic_inc_unchecked(&od->event);
- wake_up_interruptible(&od->poll);
- }
-
-diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
-index a7ac78f..02158e1 100644
---- a/fs/sysfs/symlink.c
-+++ b/fs/sysfs/symlink.c
-@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
-
- static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
- {
-- char *page = nd_get_link(nd);
-+ const char *page = nd_get_link(nd);
- if (!IS_ERR(page))
- free_page((unsigned long)page);
- }
-diff --git a/fs/sysv/super.c b/fs/sysv/super.c
-index f60c196..b2d8fdc 100644
---- a/fs/sysv/super.c
-+++ b/fs/sysv/super.c
-@@ -545,6 +545,7 @@ static struct file_system_type sysv_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("sysv");
-
- static struct file_system_type v7_fs_type = {
- .owner = THIS_MODULE,
-@@ -553,6 +554,8 @@ static struct file_system_type v7_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("v7");
-+MODULE_ALIAS("v7");
-
- static int __init init_sysv_fs(void)
- {
-@@ -586,5 +589,4 @@ static void __exit exit_sysv_fs(void)
-
- module_init(init_sysv_fs)
- module_exit(exit_sysv_fs)
--MODULE_ALIAS("v7");
- MODULE_LICENSE("GPL");
-diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
-index bb55cdb..e9ebb8a 100644
---- a/fs/sysv/sysv.h
-+++ b/fs/sysv/sysv.h
-@@ -189,7 +189,7 @@ static inline u32 PDP_swab(u32 x)
- #endif
- }
-
--static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
-+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
- {
- if (sbi->s_bytesex == BYTESEX_PDP)
- return PDP_swab((__force __u32)n);
-diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
-index 9228950..bbad895 100644
---- a/fs/ubifs/io.c
-+++ b/fs/ubifs/io.c
-@@ -156,7 +156,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len,
- return err;
- }
-
--int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
-+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
- {
- int err;
-
-diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
-index 201bcfc..cee4d16 100644
---- a/fs/ubifs/super.c
-+++ b/fs/ubifs/super.c
-@@ -2191,6 +2191,7 @@ static struct file_system_type ubifs_fs_type = {
- .mount = ubifs_mount,
- .kill_sb = kill_ubifs_super,
- };
-+MODULE_ALIAS_FS("ubifs");
-
- /*
- * Inode slab cache constructor.
-diff --git a/fs/udf/misc.c b/fs/udf/misc.c
-index c175b4d..8f36a16 100644
---- a/fs/udf/misc.c
-+++ b/fs/udf/misc.c
-@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
-
- u8 udf_tag_checksum(const struct tag *t)
- {
-- u8 *data = (u8 *)t;
-+ const u8 *data = (const u8 *)t;
- u8 checksum = 0;
- int i;
- for (i = 0; i < sizeof(struct tag); ++i)
-diff --git a/fs/udf/namei.c b/fs/udf/namei.c
-index 483d662..d86a93a 100644
---- a/fs/udf/namei.c
-+++ b/fs/udf/namei.c
-@@ -273,9 +273,8 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
- NULL, 0),
- };
- inode = udf_iget(dir->i_sb, lb);
-- if (!inode) {
-- return ERR_PTR(-EACCES);
-- }
-+ if (IS_ERR(inode))
-+ return inode;
- } else
- #endif /* UDF_RECOVERY */
-
-@@ -288,9 +287,8 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
-
- loc = lelb_to_cpu(cfi.icb.extLocation);
- inode = udf_iget(dir->i_sb, &loc);
-- if (!inode) {
-- return ERR_PTR(-EACCES);
-- }
-+ if (IS_ERR(inode))
-+ return ERR_CAST(inode);
- }
-
- return d_splice_alias(inode, dentry);
-@@ -1212,7 +1210,7 @@ static struct dentry *udf_get_parent(struct dentry *child)
- struct udf_fileident_bh fibh;
-
- if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi))
-- goto out_unlock;
-+ return ERR_PTR(-EACCES);
-
- if (fibh.sbh != fibh.ebh)
- brelse(fibh.ebh);
-@@ -1220,12 +1218,10 @@ static struct dentry *udf_get_parent(struct dentry *child)
-
- tloc = lelb_to_cpu(cfi.icb.extLocation);
- inode = udf_iget(child->d_inode->i_sb, &tloc);
-- if (!inode)
-- goto out_unlock;
-+ if (IS_ERR(inode))
-+ return ERR_CAST(inode);
-
- return d_obtain_alias(inode);
--out_unlock:
-- return ERR_PTR(-EACCES);
- }
-
-
-@@ -1242,8 +1238,8 @@ static struct dentry *udf_nfs_get_inode(struct super_block *sb, u32 block,
- loc.partitionReferenceNum = partref;
- inode = udf_iget(sb, &loc);
-
-- if (inode == NULL)
-- return ERR_PTR(-ENOMEM);
-+ if (IS_ERR(inode))
-+ return ERR_CAST(inode);
-
- if (generation && inode->i_generation != generation) {
- iput(inode);
-diff --git a/fs/udf/super.c b/fs/udf/super.c
-index f66439e..247cfef 100644
---- a/fs/udf/super.c
-+++ b/fs/udf/super.c
-@@ -116,6 +116,7 @@ static struct file_system_type udf_fstype = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("udf");
-
- static struct kmem_cache *udf_inode_cachep;
-
-@@ -838,12 +839,14 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
-
- metadata_fe = udf_iget(sb, &addr);
-
-- if (metadata_fe == NULL)
-+ if (IS_ERR(metadata_fe)) {
- udf_warn(sb, "metadata inode efe not found\n");
-- else if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
-+ return metadata_fe;
-+ }
-+ if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
- udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n");
- iput(metadata_fe);
-- metadata_fe = NULL;
-+ return ERR_PTR(-EIO);
- }
-
- return metadata_fe;
-@@ -855,6 +858,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
- struct udf_part_map *map;
- struct udf_meta_data *mdata;
- struct kernel_lb_addr addr;
-+ struct inode *fe;
-
- map = &sbi->s_partmaps[partition];
- mdata = &map->s_type_specific.s_metadata;
-@@ -863,22 +867,24 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
- udf_debug("Metadata file location: block = %d part = %d\n",
- mdata->s_meta_file_loc, map->s_partition_num);
-
-- mdata->s_metadata_fe = udf_find_metadata_inode_efe(sb,
-- mdata->s_meta_file_loc, map->s_partition_num);
--
-- if (mdata->s_metadata_fe == NULL) {
-+ fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
-+ map->s_partition_num);
-+ if (IS_ERR(fe)) {
- /* mirror file entry */
- udf_debug("Mirror metadata file location: block = %d part = %d\n",
- mdata->s_mirror_file_loc, map->s_partition_num);
-
-- mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
-- mdata->s_mirror_file_loc, map->s_partition_num);
-+ fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
-+ map->s_partition_num);
-
-- if (mdata->s_mirror_fe == NULL) {
-+ if (IS_ERR(fe)) {
- udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n");
-- goto error_exit;
-+ return PTR_ERR(fe);
- }
-- }
-+ mdata->s_mirror_fe = fe;
-+ } else
-+ mdata->s_metadata_fe = fe;
-+
-
- /*
- * bitmap file entry
-@@ -892,24 +898,21 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
- udf_debug("Bitmap file location: block = %d part = %d\n",
- addr.logicalBlockNum, addr.partitionReferenceNum);
-
-- mdata->s_bitmap_fe = udf_iget(sb, &addr);
--
-- if (mdata->s_bitmap_fe == NULL) {
-+ fe = udf_iget(sb, &addr);
-+ if (IS_ERR(fe)) {
- if (sb->s_flags & MS_RDONLY)
- udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
- else {
- udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
-- goto error_exit;
-+ return PTR_ERR(fe);
- }
-- }
-+ } else
-+ mdata->s_bitmap_fe = fe;
- }
-
- udf_debug("udf_load_metadata_files Ok\n");
-
- return 0;
--
--error_exit:
-- return 1;
- }
-
- static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
-@@ -997,13 +1000,15 @@ static int udf_fill_partdesc_info(struct super_block *sb,
- phd->unallocSpaceTable.extPosition),
- .partitionReferenceNum = p_index,
- };
-+ struct inode *inode;
-
-- map->s_uspace.s_table = udf_iget(sb, &loc);
-- if (!map->s_uspace.s_table) {
-+ inode = udf_iget(sb, &loc);
-+ if (IS_ERR(inode)) {
- udf_debug("cannot load unallocSpaceTable (part %d)\n",
- p_index);
-- return 1;
-+ return PTR_ERR(inode);
- }
-+ map->s_uspace.s_table = inode;
- map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
- udf_debug("unallocSpaceTable (part %d) @ %ld\n",
- p_index, map->s_uspace.s_table->i_ino);
-@@ -1032,14 +1037,15 @@ static int udf_fill_partdesc_info(struct super_block *sb,
- phd->freedSpaceTable.extPosition),
- .partitionReferenceNum = p_index,
- };
-+ struct inode *inode;
-
-- map->s_fspace.s_table = udf_iget(sb, &loc);
-- if (!map->s_fspace.s_table) {
-+ inode = udf_iget(sb, &loc);
-+ if (IS_ERR(inode)) {
- udf_debug("cannot load freedSpaceTable (part %d)\n",
- p_index);
-- return 1;
-+ return PTR_ERR(inode);
- }
--
-+ map->s_fspace.s_table = inode;
- map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
- udf_debug("freedSpaceTable (part %d) @ %ld\n",
- p_index, map->s_fspace.s_table->i_ino);
-@@ -1068,6 +1074,7 @@ static void udf_find_vat_block(struct super_block *sb, int p_index,
- struct udf_part_map *map = &sbi->s_partmaps[p_index];
- sector_t vat_block;
- struct kernel_lb_addr ino;
-+ struct inode *inode;
-
- /*
- * VAT file entry is in the last recorded block. Some broken disks have
-@@ -1076,10 +1083,13 @@ static void udf_find_vat_block(struct super_block *sb, int p_index,
- ino.partitionReferenceNum = type1_index;
- for (vat_block = start_block;
- vat_block >= map->s_partition_root &&
-- vat_block >= start_block - 3 &&
-- !sbi->s_vat_inode; vat_block--) {
-+ vat_block >= start_block - 3; vat_block--) {
- ino.logicalBlockNum = vat_block - map->s_partition_root;
-- sbi->s_vat_inode = udf_iget(sb, &ino);
-+ inode = udf_iget(sb, &ino);
-+ if (!IS_ERR(inode)) {
-+ sbi->s_vat_inode = inode;
-+ break;
-+ }
- }
- }
-
-@@ -2058,9 +2068,10 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
- /* assign inodes by physical block number */
- /* perhaps it's not extensible enough, but for now ... */
- inode = udf_iget(sb, &rootdir);
-- if (!inode) {
-+ if (IS_ERR(inode)) {
- udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n",
- rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
-+ ret = PTR_ERR(inode);
- goto error_out;
- }
-
-diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
-index 8775ab23..3156eb1 100644
---- a/fs/udf/udfdecl.h
-+++ b/fs/udf/udfdecl.h
-@@ -149,7 +149,6 @@ extern int udf_expand_file_adinicb(struct inode *);
- extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *);
- extern struct buffer_head *udf_bread(struct inode *, int, int, int *);
- extern int udf_setsize(struct inode *, loff_t);
--extern void udf_read_inode(struct inode *);
- extern void udf_evict_inode(struct inode *);
- extern int udf_write_inode(struct inode *, struct writeback_control *wbc);
- extern long udf_block_map(struct inode *, sector_t);
-diff --git a/fs/ufs/super.c b/fs/ufs/super.c
-index 3915ade..00fcbf4 100644
---- a/fs/ufs/super.c
-+++ b/fs/ufs/super.c
-@@ -1484,6 +1484,7 @@ static struct file_system_type ufs_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("ufs");
-
- static int __init init_ufs_fs(void)
- {
-diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
-index 8d974c4..4c19a7b 100644
---- a/fs/ufs/swab.h
-+++ b/fs/ufs/swab.h
-@@ -22,7 +22,7 @@ enum {
- BYTESEX_BE
- };
-
--static inline u64
-+static inline u64 __intentional_overflow(-1)
- fs64_to_cpu(struct super_block *sbp, __fs64 n)
- {
- if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
-@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
- return (__force __fs64)cpu_to_be64(n);
- }
-
--static inline u32
-+static inline u32 __intentional_overflow(-1)
- fs32_to_cpu(struct super_block *sbp, __fs32 n)
- {
- if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
-@@ -76,7 +76,7 @@ fs32_sub(struct super_block *sbp, __fs32 *n, int d)
- be32_add_cpu((__be32 *)n, -d);
- }
-
--static inline u16
-+static inline u16 __intentional_overflow(-1)
- fs16_to_cpu(struct super_block *sbp, __fs16 n)
- {
- if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
-diff --git a/fs/utimes.c b/fs/utimes.c
-index ba653f3..06ea4b1 100644
---- a/fs/utimes.c
-+++ b/fs/utimes.c
-@@ -1,6 +1,7 @@
- #include <linux/compiler.h>
- #include <linux/file.h>
- #include <linux/fs.h>
-+#include <linux/security.h>
- #include <linux/linkage.h>
- #include <linux/mount.h>
- #include <linux/namei.h>
-@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
- goto mnt_drop_write_and_out;
- }
- }
-+
-+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
-+ error = -EACCES;
-+ goto mnt_drop_write_and_out;
-+ }
-+
- mutex_lock(&inode->i_mutex);
- error = notify_change(path->dentry, &newattrs);
- mutex_unlock(&inode->i_mutex);
-diff --git a/fs/xattr.c b/fs/xattr.c
-index 67583de..328e065 100644
---- a/fs/xattr.c
-+++ b/fs/xattr.c
-@@ -225,6 +225,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
- return rc;
- }
-
-+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
-+ssize_t
-+pax_getxattr(struct dentry *dentry, void *value, size_t size)
-+{
-+ struct inode *inode = dentry->d_inode;
-+ ssize_t error;
-+
-+ error = inode_permission(inode, MAY_EXEC);
-+ if (error)
-+ return error;
-+
-+ if (inode->i_op->getxattr)
-+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
-+ else
-+ error = -EOPNOTSUPP;
-+
-+ return error;
-+}
-+EXPORT_SYMBOL(pax_getxattr);
-+#endif
-+
- ssize_t
- vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
- {
-@@ -315,7 +336,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
- * Extended attribute SET operations
- */
- static long
--setxattr(struct dentry *d, const char __user *name, const void __user *value,
-+setxattr(struct path *path, const char __user *name, const void __user *value,
- size_t size, int flags)
- {
- int error;
-@@ -339,7 +360,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
- return PTR_ERR(kvalue);
- }
-
-- error = vfs_setxattr(d, kname, kvalue, size, flags);
-+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
-+ error = -EACCES;
-+ goto out;
-+ }
-+
-+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
-+out:
- kfree(kvalue);
- return error;
- }
-@@ -356,7 +383,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
- return error;
- error = mnt_want_write(path.mnt);
- if (!error) {
-- error = setxattr(path.dentry, name, value, size, flags);
-+ error = setxattr(&path, name, value, size, flags);
- mnt_drop_write(path.mnt);
- }
- path_put(&path);
-@@ -375,7 +402,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
- return error;
- error = mnt_want_write(path.mnt);
- if (!error) {
-- error = setxattr(path.dentry, name, value, size, flags);
-+ error = setxattr(&path, name, value, size, flags);
- mnt_drop_write(path.mnt);
- }
- path_put(&path);
-@@ -386,17 +413,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
- const void __user *,value, size_t, size, int, flags)
- {
- struct file *f;
-- struct dentry *dentry;
- int error = -EBADF;
-
- f = fget(fd);
- if (!f)
- return error;
-- dentry = f->f_path.dentry;
-- audit_inode(NULL, dentry);
-+ audit_inode(NULL, f->f_path.dentry);
- error = mnt_want_write_file(f);
- if (!error) {
-- error = setxattr(dentry, name, value, size, flags);
-+ error = setxattr(&f->f_path, name, value, size, flags);
- mnt_drop_write(f->f_path.mnt);
- }
- fput(f);
-@@ -560,7 +585,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
- * Extended attribute REMOVE operations
- */
- static long
--removexattr(struct dentry *d, const char __user *name)
-+removexattr(struct path *path, const char __user *name)
- {
- int error;
- char kname[XATTR_NAME_MAX + 1];
-@@ -571,7 +596,10 @@ removexattr(struct dentry *d, const char __user *name)
- if (error < 0)
- return error;
-
-- return vfs_removexattr(d, kname);
-+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
-+ return -EACCES;
-+
-+ return vfs_removexattr(path->dentry, kname);
- }
-
- SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
-@@ -585,7 +613,7 @@ SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
- return error;
- error = mnt_want_write(path.mnt);
- if (!error) {
-- error = removexattr(path.dentry, name);
-+ error = removexattr(&path, name);
- mnt_drop_write(path.mnt);
- }
- path_put(&path);
-@@ -603,7 +631,7 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
- return error;
- error = mnt_want_write(path.mnt);
- if (!error) {
-- error = removexattr(path.dentry, name);
-+ error = removexattr(&path, name);
- mnt_drop_write(path.mnt);
- }
- path_put(&path);
-@@ -613,17 +641,17 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
- SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
- {
- struct file *f;
-- struct dentry *dentry;
-+ struct path *path;
- int error = -EBADF;
-
- f = fget(fd);
- if (!f)
- return error;
-- dentry = f->f_path.dentry;
-- audit_inode(NULL, dentry);
-+ path = &f->f_path;
-+ audit_inode(NULL, path->dentry);
- error = mnt_want_write_file(f);
- if (!error) {
-- error = removexattr(dentry, name);
-+ error = removexattr(path, name);
- mnt_drop_write(f->f_path.mnt);
- }
- fput(f);
-diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
-index 8d5a506..e4a8a5f 100644
---- a/fs/xattr_acl.c
-+++ b/fs/xattr_acl.c
-@@ -9,7 +9,7 @@
- #include <linux/fs.h>
- #include <linux/posix_acl_xattr.h>
- #include <linux/gfp.h>
--
-+#include <linux/grsecurity.h>
-
- /*
- * Convert from extended attribute to in-memory representation.
-@@ -17,11 +17,12 @@
- struct posix_acl *
- posix_acl_from_xattr(const void *value, size_t size)
- {
-- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
-- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
-+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
-+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
- int count;
- struct posix_acl *acl;
- struct posix_acl_entry *acl_e;
-+ umode_t umask = gr_acl_umask();
-
- if (!value)
- return NULL;
-@@ -47,14 +48,23 @@ posix_acl_from_xattr(const void *value, size_t size)
-
- switch(acl_e->e_tag) {
- case ACL_USER_OBJ:
-+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
-+ break;
- case ACL_GROUP_OBJ:
- case ACL_MASK:
-+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
-+ break;
- case ACL_OTHER:
-+ acl_e->e_perm &= ~(umask & S_IRWXO);
- acl_e->e_id = ACL_UNDEFINED_ID;
- break;
-
- case ACL_USER:
-+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
-+ acl_e->e_id = le32_to_cpu(entry->e_id);
-+ break;
- case ACL_GROUP:
-+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
- acl_e->e_id = le32_to_cpu(entry->e_id);
- break;
-
-diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
-index d0ab788..827999b 100644
---- a/fs/xfs/xfs_bmap.c
-+++ b/fs/xfs/xfs_bmap.c
-@@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
- int nmap,
- int ret_nmap);
- #else
--#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
-+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
- #endif /* DEBUG */
-
- STATIC int
-diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
-index 79d05e8..e3e5861 100644
---- a/fs/xfs/xfs_dir2_sf.c
-+++ b/fs/xfs/xfs_dir2_sf.c
-@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
- }
-
- ino = xfs_dir2_sfe_get_ino(sfp, sfep);
-- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
-+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
-+ char name[sfep->namelen];
-+ memcpy(name, sfep->name, sfep->namelen);
-+ if (filldir(dirent, name, sfep->namelen,
-+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
-+ *offset = off & 0x7fffffff;
-+ return 0;
-+ }
-+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
- off & 0x7fffffff, ino, DT_UNKNOWN)) {
- *offset = off & 0x7fffffff;
- return 0;
-diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
-index eb519de..a7569b5 100644
---- a/fs/xfs/xfs_ioctl.c
-+++ b/fs/xfs/xfs_ioctl.c
-@@ -128,7 +128,7 @@ xfs_find_handle(
- }
-
- error = -EFAULT;
-- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
-+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
- copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
- goto out_put;
-
-diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
-index 1c01f04..28fd95b 100644
---- a/fs/xfs/xfs_iops.c
-+++ b/fs/xfs/xfs_iops.c
-@@ -447,7 +447,7 @@ xfs_vn_put_link(
- struct nameidata *nd,
- void *p)
- {
-- char *s = nd_get_link(nd);
-+ const char *s = nd_get_link(nd);
-
- if (!IS_ERR(s))
- kfree(s);
-diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
-index 87323f1..dab9d00 100644
---- a/fs/xfs/xfs_rtalloc.c
-+++ b/fs/xfs/xfs_rtalloc.c
-@@ -858,7 +858,7 @@ xfs_rtbuf_get(
- xfs_buf_t *bp; /* block buffer, result */
- xfs_inode_t *ip; /* bitmap or summary inode */
- xfs_bmbt_irec_t map;
-- int nmap;
-+ int nmap = 1;
- int error; /* error value */
-
- ip = issum ? mp->m_rsumip : mp->m_rbmip;
-diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
-index 8a89949..6776861 100644
---- a/fs/xfs/xfs_super.c
-+++ b/fs/xfs/xfs_super.c
-@@ -1474,6 +1474,7 @@ static struct file_system_type xfs_fs_type = {
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
- };
-+MODULE_ALIAS_FS("xfs");
-
- STATIC int __init
- xfs_init_zones(void)
-diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
-new file mode 100644
-index 0000000..1d38334
---- /dev/null
-+++ b/grsecurity/Kconfig
-@@ -0,0 +1,1170 @@
-+#
-+# grsecurity configuration
-+#
-+menu "Memory Protections"
-+depends on GRKERNSEC
-+
-+config GRKERNSEC_KMEM
-+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
-+ help
-+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
-+ be written to or read from to modify or leak the contents of the running
-+ kernel. /dev/port will also not be allowed to be opened, writing to
-+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
-+ If you have module support disabled, enabling this will close up several
-+ ways that are currently used to insert malicious code into the running
-+ kernel.
-+
-+ Even with this feature enabled, we still highly recommend that
-+ you use the RBAC system, as it is still possible for an attacker to
-+ modify the running kernel through other more obscure methods.
-+
-+ It is highly recommended that you say Y here if you meet all the
-+ conditions above.
-+
-+config GRKERNSEC_VM86
-+ bool "Restrict VM86 mode"
-+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
-+ depends on X86_32
-+
-+ help
-+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
-+ make use of a special execution mode on 32bit x86 processors called
-+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
-+ video cards and will still work with this option enabled. The purpose
-+ of the option is to prevent exploitation of emulation errors in
-+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
-+ Nearly all users should be able to enable this option.
-+
-+config GRKERNSEC_IO
-+ bool "Disable privileged I/O"
-+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
-+ depends on X86
-+ select RTC_CLASS
-+ select RTC_INTF_DEV
-+ select RTC_DRV_CMOS
-+
-+ help
-+ If you say Y here, all ioperm and iopl calls will return an error.
-+ Ioperm and iopl can be used to modify the running kernel.
-+ Unfortunately, some programs need this access to operate properly,
-+ the most notable of which are XFree86 and hwclock. hwclock can be
-+ remedied by having RTC support in the kernel, so real-time
-+ clock support is enabled if this option is enabled, to ensure
-+ that hwclock operates correctly. If hwclock still does not work,
-+ either update udev or symlink /dev/rtc to /dev/rtc0.
-+
-+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
-+ you may not be able to boot into a graphical environment with this
-+ option enabled. In this case, you should use the RBAC system instead.
-+
-+config GRKERNSEC_JIT_HARDEN
-+ bool "Harden BPF JIT against spray attacks"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on BPF_JIT && X86
-+ help
-+ If you say Y here, the native code generated by the kernel's Berkeley
-+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
-+ attacks that attempt to fit attacker-beneficial instructions in
-+ 32bit immediate fields of JIT-generated native instructions. The
-+ attacker will generally aim to cause an unintended instruction sequence
-+ of JIT-generated native code to execute by jumping into the middle of
-+ a generated instruction. This feature effectively randomizes the 32bit
-+ immediate constants present in the generated code to thwart such attacks.
-+
-+ If you're using KERNEXEC, it's recommended that you enable this option
-+ to supplement the hardening of the kernel.
-+
-+config GRKERNSEC_PERF_HARDEN
-+ bool "Disable unprivileged PERF_EVENTS usage by default"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on PERF_EVENTS
-+ help
-+ If you say Y here, the range of acceptable values for the
-+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
-+ default to a new value: 3. When the sysctl is set to this value, no
-+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
-+
-+ Though PERF_EVENTS can be used legitimately for performance monitoring
-+ and low-level application profiling, it is forced on regardless of
-+ configuration, has been at fault for several vulnerabilities, and
-+ creates new opportunities for side channels and other information leaks.
-+
-+ This feature puts PERF_EVENTS into a secure default state and permits
-+ the administrator to change out of it temporarily if unprivileged
-+ application profiling is needed.
-+
-+config GRKERNSEC_RAND_THREADSTACK
-+ bool "Insert random gaps between thread stacks"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on PAX_RANDMMAP && !PPC
-+ help
-+ If you say Y here, a random-sized gap will be enforced between allocated
-+ thread stacks. Glibc's NPTL and other threading libraries that
-+ pass MAP_STACK to the kernel for thread stack allocation are supported.
-+ The implementation currently provides 8 bits of entropy for the gap.
-+
-+ Many distributions do not compile threaded remote services with the
-+ -fstack-check argument to GCC, causing the variable-sized stack-based
-+ allocator, alloca(), to not probe the stack on allocation. This
-+ permits an unbounded alloca() to skip over any guard page and potentially
-+ modify another thread's stack reliably. An enforced random gap
-+ reduces the reliability of such an attack and increases the chance
-+ that such a read/write to another thread's stack instead lands in
-+ an unmapped area, causing a crash and triggering grsecurity's
-+ anti-bruteforcing logic.
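For reference, the allocation pattern the random gap is applied to looks roughly like the sketch below; threading libraries pass the Linux-specific MAP_STACK flag so the kernel can recognize the mapping as a thread stack (illustrative only, not any particular libc's code):

    #include <stddef.h>
    #include <sys/mman.h>

    void *alloc_thread_stack(size_t size)
    {
        /* MAP_STACK is the hint this feature keys on when deciding
         * whether to insert a random gap before the mapping. */
        return mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
    }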
-+
-+config GRKERNSEC_PROC_MEMMAP
-+ bool "Harden ASLR against information leaks and entropy reduction"
-+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
-+ depends on PAX_NOEXEC || PAX_ASLR
-+ help
-+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
-+ give no information about the addresses of the task's mappings if
-+ PaX features that rely on random addresses are enabled on the task.
-+ In addition to sanitizing this information and disabling other
-+ dangerous sources of information, this option restricts reads of
-+ sensitive /proc/<pid> entries where the file descriptor was opened in
-+ a different task than the one performing the read. Such attempts are
-+ logged.
-+ This option also limits argv/env strings for suid/sgid binaries
-+ to 512KB to prevent a complete exhaustion of the stack entropy provided
-+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
-+ binaries to prevent alternative mmap layouts from being abused.
-+
-+ If you use PaX it is essential that you say Y here as it closes up
-+ several holes that make full ASLR useless locally.
-+
-+config GRKERNSEC_KSTACKOVERFLOW
-+ bool "Prevent kernel stack overflows"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on !IA64 && 64BIT
-+ help
-+ If you say Y here, the kernel's process stacks will be allocated
-+ with vmalloc instead of the kernel's default allocator. This
-+ introduces guard pages that in combination with the alloca checking
-+ of the STACKLEAK feature prevents all forms of kernel process stack
-+ overflow abuse. Note that this is different from kernel stack
-+ buffer overflows.
-+
-+config GRKERNSEC_BRUTE
-+ bool "Deter exploit bruteforcing"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ help
-+ If you say Y here, attempts to bruteforce exploits against forking
-+ daemons such as apache or sshd, as well as against suid/sgid binaries
-+ will be deterred. When a child of a forking daemon is killed by PaX
-+ or crashes due to an illegal instruction or other suspicious signal,
-+ the parent process will be delayed 30 seconds upon every subsequent
-+ fork until the administrator is able to assess the situation and
-+ restart the daemon.
-+ In the suid/sgid case, the attempt is logged, the user has all their
-+ existing instances of the suid/sgid binary terminated and will
-+ be unable to execute any suid/sgid binaries for 15 minutes.
-+
-+ It is recommended that you also enable signal logging in the auditing
-+ section so that logs are generated when a process triggers a suspicious
-+ signal.
-+ If the sysctl option is enabled, a sysctl option with name
-+ "deter_bruteforce" is created.
-+
-+config GRKERNSEC_MODHARDEN
-+ bool "Harden module auto-loading"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on MODULES
-+ help
-+ If you say Y here, module auto-loading in response to use of some
-+ feature implemented by an unloaded module will be restricted to
-+ root users. Enabling this option helps defend against attacks
-+ by unprivileged users who abuse the auto-loading behavior to
-+ cause a vulnerable module to load that is then exploited.
-+
-+ If this option prevents a legitimate use of auto-loading for a
-+ non-root user, the administrator can execute modprobe manually
-+ with the exact name of the module mentioned in the alert log.
-+ Alternatively, the administrator can add the module to the list
-+ of modules loaded at boot by modifying init scripts.
-+
-+ Modification of init scripts will most likely be needed on
-+ Ubuntu servers with encrypted home directory support enabled,
-+ as the first non-root user logging in will cause the ecb(aes),
-+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
-+
-+config GRKERNSEC_HIDESYM
-+ bool "Hide kernel symbols"
-+ select PAX_USERCOPY_SLABS
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ help
-+ If you say Y here, getting information on loaded modules, and
-+ displaying all kernel symbols through a syscall will be restricted
-+ to users with CAP_SYS_MODULE. For software compatibility reasons,
-+ /proc/kallsyms will be restricted to the root user. The RBAC
-+ system can hide that entry even from root.
-+
-+ This option also prevents leaking of kernel addresses through
-+ several /proc entries.
-+
-+ Note that this option is only effective provided the following
-+ conditions are met:
-+ 1) The kernel using grsecurity is not precompiled by some distribution
-+ 2) You have also enabled GRKERNSEC_DMESG
-+ 3) You are using the RBAC system and hiding other files such as your
-+ kernel image and System.map. Alternatively, enabling this option
-+ causes the permissions on /boot, /lib/modules, and the kernel
-+ source directory to change at compile time to prevent
-+ reading by non-root users.
-+ If the above conditions are met, this option will aid in providing a
-+ useful protection against local kernel exploitation of overflows
-+ and arbitrary read/write vulnerabilities.
-+
-+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
-+ in addition to this feature.
-+
-+config GRKERNSEC_RANDSTRUCT
-+ bool "Randomize layout of sensitive kernel structures"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ select GRKERNSEC_HIDESYM
-+ select MODVERSIONS if MODULES
-+ help
-+ If you say Y here, the layouts of a number of sensitive kernel
-+ structures (task, fs, cred, etc) and all structures composed entirely
-+ of function pointers (aka "ops" structs) will be randomized at compile-time.
-+ This can introduce the requirement of an additional infoleak
-+ vulnerability for exploits targeting these structure types.
-+
-+ Enabling this feature will introduce some performance impact, slightly
-+ increase memory usage, and prevent the use of forensic tools like
-+ Volatility against the system (unless the kernel source tree isn't
-+ cleaned after kernel installation).
-+
-+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
-+ It remains after a make clean to allow for external modules to be compiled
-+ with the existing seed and will be removed by a make mrproper or
-+ make distclean.
-+
-+ Note that the implementation requires gcc 4.6.4 or newer. You may need
-+ to install the supporting headers explicitly in addition to the normal
-+ gcc package.
-+
-+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
-+ bool "Use cacheline-aware structure randomization"
-+ depends on GRKERNSEC_RANDSTRUCT
-+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
-+ help
-+ If you say Y here, the RANDSTRUCT randomization will make a best effort
-+ at restricting randomization to cacheline-sized groups of elements. It
-+ will further not randomize bitfields in structures. This reduces the
-+ performance hit of RANDSTRUCT at the cost of weakened randomization.
-+
-+config GRKERNSEC_KERN_LOCKOUT
-+ bool "Active kernel exploit response"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on X86 || ARM || PPC || SPARC
-+ help
-+ If you say Y here, when a PaX alert is triggered due to suspicious
-+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
-+ or an OOPS occurs due to bad memory accesses, instead of just
-+ terminating the offending process (and potentially allowing
-+ a subsequent exploit from the same user), we will take one of two
-+ actions:
-+ If the user was root, we will panic the system
-+ If the user was non-root, we will log the attempt, terminate
-+ all processes owned by the user, then prevent them from creating
-+ any new processes until the system is restarted
-+ This deters repeated kernel exploitation/bruteforcing attempts
-+ and is useful for later forensics.
-+
-+endmenu
-+menu "Role Based Access Control Options"
-+depends on GRKERNSEC
-+
-+config GRKERNSEC_RBAC_DEBUG
-+ bool
-+
-+config GRKERNSEC_NO_RBAC
-+ bool "Disable RBAC system"
-+ help
-+ If you say Y here, the /dev/grsec device will be removed from the kernel,
-+ preventing the RBAC system from being enabled. You should only say Y
-+ here if you have no intention of using the RBAC system, so as to prevent
-+ an attacker with root access from misusing the RBAC system to hide files
-+ and processes when loadable module support and /dev/[k]mem have been
-+ locked down.
-+
-+config GRKERNSEC_ACL_HIDEKERN
-+ bool "Hide kernel processes"
-+ help
-+ If you say Y here, all kernel threads will be hidden from all
-+ processes but those whose subject has the "view hidden processes"
-+ flag.
-+
-+config GRKERNSEC_ACL_MAXTRIES
-+ int "Maximum tries before password lockout"
-+ default 3
-+ help
-+ This option enforces the maximum number of times a user can attempt
-+ to authorize themselves with the grsecurity RBAC system before being
-+ denied the ability to attempt authorization again for a specified time.
-+ The lower the number, the harder it will be to brute-force a password.
-+
-+config GRKERNSEC_ACL_TIMEOUT
-+ int "Time to wait after max password tries, in seconds"
-+ default 30
-+ help
-+ This option specifies the time the user must wait after attempting to
-+ authorize to the RBAC system with the maximum number of invalid
-+ passwords. The higher the number, the harder it will be to brute-force
-+ a password.
-+
-+endmenu
-+menu "Filesystem Protections"
-+depends on GRKERNSEC
-+
-+config GRKERNSEC_PROC
-+ bool "Proc restrictions"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ help
-+ If you say Y here, the permissions of the /proc filesystem
-+ will be altered to enhance system security and privacy. You MUST
-+ choose either a user only restriction or a user and group restriction.
-+ Depending upon the option you choose, you can either restrict all
-+ non-root users to seeing only the processes they themselves run, or
-+ designate a special group that retains the ability to view all processes
-+ and files normally restricted to root. NOTE: If you're running identd or
-+ ntpd as a non-root user, you will have to run it as the group you
-+ specify here.
-+
-+config GRKERNSEC_PROC_USER
-+ bool "Restrict /proc to user only"
-+ depends on GRKERNSEC_PROC
-+ help
-+ If you say Y here, non-root users will only be able to view their own
-+ processes, and will be restricted from viewing network-related
-+ information and from viewing kernel symbol and module information.
-+
-+config GRKERNSEC_PROC_USERGROUP
-+ bool "Allow special group"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
-+ help
-+ If you say Y here, you will be able to select a group that will be
-+ able to view all processes and network-related information. If you've
-+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
-+ remain hidden. This option is useful if you want to run identd as
-+ a non-root user. The group you select may also be chosen at boot time
-+ via "grsec_proc_gid=" on the kernel commandline.
-+
-+config GRKERNSEC_PROC_GID
-+ int "GID for special group"
-+ depends on GRKERNSEC_PROC_USERGROUP
-+ default 1001
-+
-+config GRKERNSEC_PROC_ADD
-+ bool "Additional restrictions"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
-+ help
-+ If you say Y here, additional restrictions will be placed on
-+ /proc that keep normal users from viewing device information and
-+ slabinfo information that could be useful for exploits.
-+
-+config GRKERNSEC_LINK
-+ bool "Linking restrictions"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ help
-+ If you say Y here, /tmp race exploits will be prevented, since users
-+ will no longer be able to follow symlinks owned by other users in
-+ world-writable +t directories (e.g. /tmp), unless the owner of the
-+ symlink is the owner of the directory. Users will also not be
-+ able to hardlink to files they do not own. If the sysctl option is
-+ enabled, a sysctl option with name "linking_restrictions" is created.
-+
-+config GRKERNSEC_SYMLINKOWN
-+ bool "Kernel-enforced SymlinksIfOwnerMatch"
-+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
-+ help
-+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
-+ that prevents it from being used as a security feature. As Apache
-+ verifies the symlink by performing a stat() against the target of
-+ the symlink before it is followed, an attacker can set up a symlink
-+ to point to a same-owned file, then replace the symlink with one
-+ that targets another user's file just after Apache "validates" the
-+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
-+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
-+ will be in place for the group you specify. If the sysctl option
-+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
-+ created.
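The userspace pattern being replaced looks roughly like the sketch below (a hypothetical helper, not Apache's actual code): the owner is checked with stat(), and by the time open() runs the link may already point somewhere else.

    #include <fcntl.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    int open_if_same_owner(const char *link, uid_t expected_owner)
    {
        struct stat st;

        if (stat(link, &st) != 0 || st.st_uid != expected_owner)
            return -1;                /* "validation" step */
        /* race window: the symlink can be swapped right here */
        return open(link, O_RDONLY);  /* may follow a different target */
    }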
-+
-+config GRKERNSEC_SYMLINKOWN_GID
-+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
-+ depends on GRKERNSEC_SYMLINKOWN
-+ default 1006
-+ help
-+ Setting this GID determines what group kernel-enforced
-+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
-+ is enabled, a sysctl option with name "symlinkown_gid" is created.
-+
-+config GRKERNSEC_FIFO
-+ bool "FIFO restrictions"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ help
-+ If you say Y here, users will not be able to write to FIFOs they don't
-+ own in world-writable +t directories (e.g. /tmp), unless the owner of
-+ the FIFO is also the owner of the directory it's held in. If the sysctl
-+ option is enabled, a sysctl option with name "fifo_restrictions" is
-+ created.
-+
-+config GRKERNSEC_SYSFS_RESTRICT
-+ bool "Sysfs/debugfs restriction"
-+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
-+ depends on SYSFS
-+ help
-+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
-+ any filesystem normally mounted under it (e.g. debugfs) will be
-+ mostly accessible only by root. These filesystems generally provide access
-+ to hardware and debug information that isn't appropriate for unprivileged
-+ users of the system. Sysfs and debugfs have also become a large source
-+ of new vulnerabilities, ranging from infoleaks to local compromise.
-+ There has been very little oversight with an eye toward security involved
-+ in adding new exporters of information to these filesystems, so their
-+ use is discouraged.
-+ For reasons of compatibility, a few directories have been whitelisted
-+ for access by non-root users:
-+ /sys/fs/selinux
-+ /sys/fs/fuse
-+ /sys/devices/system/cpu
-+
-+config GRKERNSEC_ROFS
-+ bool "Runtime read-only mount protection"
-+ depends on SYSCTL
-+ help
-+ If you say Y here, a sysctl option with name "romount_protect" will
-+ be created. By setting this option to 1 at runtime, filesystems
-+ will be protected in the following ways:
-+ * No new writable mounts will be allowed
-+ * Existing read-only mounts won't be able to be remounted read/write
-+ * Write operations will be denied on all block devices
-+ This option acts independently of grsec_lock: once it is set to 1,
-+ it cannot be turned off. Therefore, please be mindful of the resulting
-+ behavior if this option is enabled in an init script on a read-only
-+ filesystem.
-+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
-+ and GRKERNSEC_IO should be enabled and module loading disabled via
-+ config or at runtime.
-+ This feature is mainly intended for secure embedded systems.
-+
-+config GRKERNSEC_DEVICE_SIDECHANNEL
-+ bool "Eliminate stat/notify-based device sidechannels"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ help
-+ If you say Y here, timing analyses on block or character
-+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
-+ will be thwarted for unprivileged users. If a process without
-+ CAP_MKNOD stats such a device, the last access and last modify times
-+ will match the device's create time. No access or modify events
-+ will be triggered through inotify/dnotify/fanotify for such devices.
-+ This feature will prevent attacks that may at a minimum
-+ allow an attacker to determine the administrator's password length.
-+
-+config GRKERNSEC_CHROOT
-+ bool "Chroot jail restrictions"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ help
-+ If you say Y here, you will be able to choose several options that will
-+ make breaking out of a chrooted jail much more difficult. If you
-+ encounter no software incompatibilities with the following options, it
-+ is recommended that you enable each one.
-+
-+ Note that the chroot restrictions are not intended to apply to "chroots"
-+ to directories that are simple bind mounts of the global root filesystem.
-+ For several other reasons, a user shouldn't expect any significant
-+ security by performing such a chroot.
-+
-+config GRKERNSEC_CHROOT_MOUNT
-+ bool "Deny mounts"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC_CHROOT
-+ help
-+ If you say Y here, processes inside a chroot will not be able to
-+ mount or remount filesystems. If the sysctl option is enabled, a
-+ sysctl option with name "chroot_deny_mount" is created.
-+
-+config GRKERNSEC_CHROOT_DOUBLE
-+ bool "Deny double-chroots"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC_CHROOT
-+ help
-+ If you say Y here, processes inside a chroot will not be able to chroot
-+ again outside the chroot. This is a widely used method of breaking
-+ out of a chroot jail and should not be allowed. If the sysctl
-+ option is enabled, a sysctl option with name
-+ "chroot_deny_chroot" is created.
-+
-+config GRKERNSEC_CHROOT_PIVOT
-+ bool "Deny pivot_root in chroot"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC_CHROOT
-+ help
-+ If you say Y here, processes inside a chroot will not be able to use
-+ a function called pivot_root() that was introduced in Linux 2.3.41. It
-+ works similarly to chroot in that it changes the root filesystem. This
-+ function could be misused in a chrooted process to attempt to break out
-+ of the chroot, and therefore should not be allowed. If the sysctl
-+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
-+ created.
-+
-+config GRKERNSEC_CHROOT_CHDIR
-+ bool "Enforce chdir(\"/\") on all chroots"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC_CHROOT
-+ help
-+ If you say Y here, the current working directory of all newly-chrooted
-+ applications will be set to the root directory of the chroot.
-+ The man page on chroot(2) states:
-+ Note that this call does not change the current working
-+ directory, so that `.' can be outside the tree rooted at
-+ `/'. In particular, the super-user can escape from a
-+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
-+
-+ It is recommended that you say Y here, since it's not known to break
-+ any software. If the sysctl option is enabled, a sysctl option with
-+ name "chroot_enforce_chdir" is created.
-+
-+config GRKERNSEC_CHROOT_CHMOD
-+ bool "Deny (f)chmod +s"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC_CHROOT
-+ help
-+ If you say Y here, processes inside a chroot will not be able to chmod
-+ or fchmod files to make them have suid or sgid bits. This protects
-+ against another published method of breaking a chroot. If the sysctl
-+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
-+ created.
-+
-+config GRKERNSEC_CHROOT_FCHDIR
-+ bool "Deny fchdir and fhandle out of chroot"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC_CHROOT
-+ help
-+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
-+ to a file descriptor of the chrooting process that points to a directory
-+ outside the filesystem will be stopped. Additionally, this option prevents
-+ use of the recently-created syscall for opening files by a guessable "file
-+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
-+ with name "chroot_deny_fchdir" is created.
-+
-+config GRKERNSEC_CHROOT_MKNOD
-+ bool "Deny mknod"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC_CHROOT
-+ help
-+ If you say Y here, processes inside a chroot will not be allowed to
-+ mknod. The problem with using mknod inside a chroot is that it
-+ would allow an attacker to create a device entry that is the same
-+ as one on the physical root of your system, which could be anything
-+ from the console device to a device for your hard drive (which
-+ they could then use to wipe the drive or steal data). It is recommended
-+ that you say Y here, unless you run into software incompatibilities.
-+ If the sysctl option is enabled, a sysctl option with name
-+ "chroot_deny_mknod" is created.
-+
-+config GRKERNSEC_CHROOT_SHMAT
-+ bool "Deny shmat() out of chroot"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC_CHROOT
-+ help
-+ If you say Y here, processes inside a chroot will not be able to attach
-+ to shared memory segments that were created outside of the chroot jail.
-+ It is recommended that you say Y here. If the sysctl option is enabled,
-+ a sysctl option with name "chroot_deny_shmat" is created.
-+
-+config GRKERNSEC_CHROOT_UNIX
-+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC_CHROOT
-+ help
-+ If you say Y here, processes inside a chroot will not be able to
-+ connect to abstract (meaning not belonging to a filesystem) Unix
-+ domain sockets that were bound outside of a chroot. It is recommended
-+ that you say Y here. If the sysctl option is enabled, a sysctl option
-+ with name "chroot_deny_unix" is created.
-+
-+config GRKERNSEC_CHROOT_FINDTASK
-+ bool "Protect outside processes"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC_CHROOT
-+ help
-+ If you say Y here, processes inside a chroot will not be able to
-+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
-+ getsid, or view any process outside of the chroot. If the sysctl
-+ option is enabled, a sysctl option with name "chroot_findtask" is
-+ created.
-+
-+config GRKERNSEC_CHROOT_NICE
-+ bool "Restrict priority changes"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC_CHROOT
-+ help
-+ If you say Y here, processes inside a chroot will not be able to raise
-+ the priority of processes in the chroot, or alter the priority of
-+ processes outside the chroot. This provides more security than simply
-+ removing CAP_SYS_NICE from the process' capability set. If the
-+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
-+ is created.
-+
-+config GRKERNSEC_CHROOT_SYSCTL
-+ bool "Deny sysctl writes"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC_CHROOT
-+ help
-+ If you say Y here, an attacker in a chroot will not be able to
-+ write to sysctl entries, either by sysctl(2) or through a /proc
-+ interface. It is strongly recommended that you say Y here. If the
-+ sysctl option is enabled, a sysctl option with name
-+ "chroot_deny_sysctl" is created.
-+
-+config GRKERNSEC_CHROOT_RENAME
-+ bool "Deny bad renames"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC_CHROOT
-+ help
-+ If you say Y here, an attacker in a chroot will not be able to
-+ abuse the ability to create double chroots to break out of the
-+ chroot by exploiting a race condition between a rename of a directory
-+ within a chroot and an open of a symlink with relative path
-+ components. This feature will likewise prevent an accomplice outside
-+ a chroot from enabling a user inside the chroot to break out and make
-+ use of their credentials on the global filesystem. Enabling this
-+ feature is essential to prevent root users from breaking out of a
-+ chroot. If the sysctl option is enabled, a sysctl option with name
-+ "chroot_deny_bad_rename" is created.
-+
-+config GRKERNSEC_CHROOT_CAPS
-+ bool "Capability restrictions"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC_CHROOT
-+ help
-+ If you say Y here, the capabilities on all processes within a
-+ chroot jail will be lowered to stop module insertion, raw i/o,
-+ system and net admin tasks, rebooting the system, modifying immutable
-+ files, modifying IPC owned by another, and changing the system time.
-+ This is left an option because it can break some apps. Disable this
-+ if your chrooted apps are having problems performing those kinds of
-+ tasks. If the sysctl option is enabled, a sysctl option with
-+ name "chroot_caps" is created.
-+
-+config GRKERNSEC_CHROOT_INITRD
-+ bool "Exempt initrd tasks from restrictions"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
-+ help
-+ If you say Y here, tasks started prior to init will be exempted from
-+ grsecurity's chroot restrictions. This option is mainly meant to
-+ resolve Plymouth's performing privileged operations unnecessarily
-+ in a chroot.
-+
-+endmenu
-+menu "Kernel Auditing"
-+depends on GRKERNSEC
-+
-+config GRKERNSEC_AUDIT_GROUP
-+ bool "Single group for auditing"
-+ help
-+ If you say Y here, the exec and chdir logging features will only operate
-+ on a group you specify. This option is recommended if you only want to
-+ watch certain users instead of having a large amount of logs from the
-+ entire system. If the sysctl option is enabled, a sysctl option with
-+ name "audit_group" is created.
-+
-+config GRKERNSEC_AUDIT_GID
-+ int "GID for auditing"
-+ depends on GRKERNSEC_AUDIT_GROUP
-+ default 1007
-+
-+config GRKERNSEC_EXECLOG
-+ bool "Exec logging"
-+ help
-+ If you say Y here, all execve() calls will be logged (since the
-+ other exec*() calls are frontends to execve(), all execution
-+ will be logged). Useful for shell-servers that like to keep track
-+ of their users. If the sysctl option is enabled, a sysctl option with
-+ name "exec_logging" is created.
-+ WARNING: This option when enabled will produce a LOT of logs, especially
-+ on an active system.
-+
-+config GRKERNSEC_RESLOG
-+ bool "Resource logging"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ help
-+ If you say Y here, all attempts to overstep resource limits will
-+ be logged with the resource name, the requested size, and the current
-+ limit. It is highly recommended that you say Y here. If the sysctl
-+ option is enabled, a sysctl option with name "resource_logging" is
-+ created. If the RBAC system is enabled, the sysctl value is ignored.
-+
-+config GRKERNSEC_CHROOT_EXECLOG
-+ bool "Log execs within chroot"
-+ help
-+ If you say Y here, all executions inside a chroot jail will be logged
-+ to syslog. This can cause a large amount of logs if certain
-+ applications (eg. djb's daemontools) are installed on the system, and
-+ is therefore left as an option. If the sysctl option is enabled, a
-+ sysctl option with name "chroot_execlog" is created.
-+
-+config GRKERNSEC_AUDIT_PTRACE
-+ bool "Ptrace logging"
-+ help
-+ If you say Y here, all attempts to attach to a process via ptrace
-+ will be logged. If the sysctl option is enabled, a sysctl option
-+ with name "audit_ptrace" is created.
-+
-+config GRKERNSEC_AUDIT_CHDIR
-+ bool "Chdir logging"
-+ help
-+ If you say Y here, all chdir() calls will be logged. If the sysctl
-+ option is enabled, a sysctl option with name "audit_chdir" is created.
-+
-+config GRKERNSEC_AUDIT_MOUNT
-+ bool "(Un)Mount logging"
-+ help
-+ If you say Y here, all mounts and unmounts will be logged. If the
-+ sysctl option is enabled, a sysctl option with name "audit_mount" is
-+ created.
-+
-+config GRKERNSEC_SIGNAL
-+ bool "Signal logging"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ help
-+ If you say Y here, certain important signals will be logged, such as
-+ SIGSEGV, which will as a result inform you when an error in a program
-+ occurred, which in some cases could mean a possible exploit attempt.
-+ If the sysctl option is enabled, a sysctl option with name
-+ "signal_logging" is created.
-+
-+config GRKERNSEC_FORKFAIL
-+ bool "Fork failure logging"
-+ help
-+ If you say Y here, all failed fork() attempts will be logged.
-+ This could suggest a fork bomb, or someone attempting to overstep
-+ their process limit. If the sysctl option is enabled, a sysctl option
-+ with name "forkfail_logging" is created.
-+
-+config GRKERNSEC_TIME
-+ bool "Time change logging"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ help
-+ If you say Y here, any changes of the system clock will be logged.
-+ If the sysctl option is enabled, a sysctl option with name
-+ "timechange_logging" is created.
-+
-+config GRKERNSEC_PROC_IPADDR
-+ bool "/proc/<pid>/ipaddr support"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ help
-+ If you say Y here, a new entry will be added to each /proc/<pid>
-+ directory that contains the IP address of the person using the task.
-+ The IP is carried across local TCP and AF_UNIX stream sockets.
-+ This information can be useful for IDS/IPSes to perform remote response
-+ to a local attack. The entry is readable by only the owner of the
-+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
-+ the RBAC system), and thus does not create privacy concerns.
-+
-+config GRKERNSEC_RWXMAP_LOG
-+ bool "Denied RWX mmap/mprotect logging"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
-+ help
-+ If you say Y here, calls to mmap() and mprotect() with explicit
-+ usage of PROT_WRITE and PROT_EXEC together will be logged when
-+ denied by the PAX_MPROTECT feature. This feature will also
-+ log other problematic scenarios that can occur when PAX_MPROTECT
-+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
-+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
-+ is created.
-+
-+endmenu
-+
-+menu "Executable Protections"
-+depends on GRKERNSEC
-+
-+config GRKERNSEC_DMESG
-+ bool "Dmesg(8) restriction"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ help
-+ If you say Y here, non-root users will not be able to use dmesg(8)
-+ to view the contents of the kernel's circular log buffer.
-+ The kernel's log buffer often contains kernel addresses and other
-+ identifying information useful to an attacker in fingerprinting a
-+ system for a targeted exploit.
-+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
-+ created.
-+
-+config GRKERNSEC_HARDEN_PTRACE
-+ bool "Deter ptrace-based process snooping"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ help
-+ If you say Y here, TTY sniffers and other malicious monitoring
-+ programs implemented through ptrace will be defeated. If you
-+ have been using the RBAC system, this option has already been
-+ enabled for several years for all users, with the ability to make
-+ fine-grained exceptions.
-+
-+ This option only affects the ability of non-root users to ptrace
-+ processes that are not descendants of the ptracing process.
-+ This means that strace ./binary and gdb ./binary will still work,
-+ but attaching to arbitrary processes will not. If the sysctl
-+ option is enabled, a sysctl option with name "harden_ptrace" is
-+ created.
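In concrete terms (an illustrative snippet, not from the patch), a non-root attach to an unrelated pid is what starts failing:

    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    long try_attach(pid_t pid)
    {
        /* fails with EPERM for a non-root caller when the target is not
         * a descendant of this process and harden_ptrace is enabled */
        long ret = ptrace(PTRACE_ATTACH, pid, NULL, NULL);

        if (ret != 0)
            perror("ptrace(PTRACE_ATTACH)");
        return ret;
    }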
-+
-+config GRKERNSEC_PTRACE_READEXEC
-+ bool "Require read access to ptrace sensitive binaries"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ help
-+ If you say Y here, unprivileged users will not be able to ptrace unreadable
-+ binaries. This option is useful in environments that
-+ remove the read bits (e.g. file mode 4711) from suid binaries to
-+ prevent infoleaking of their contents. This option adds
-+ consistency to the use of that file mode, as the binary could normally
-+ be read out when run without privileges while ptracing.
-+
-+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
-+ is created.
-+
-+config GRKERNSEC_SETXID
-+ bool "Enforce consistent multithreaded privileges"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
-+ help
-+ If you say Y here, a change from a root uid to a non-root uid
-+ in a multithreaded application will cause the resulting uids,
-+ gids, supplementary groups, and capabilities in that thread
-+ to be propagated to the other threads of the process. In most
-+ cases this is unnecessary, as glibc will emulate this behavior
-+ on behalf of the application. Other libcs do not act in the
-+ same way, allowing the other threads of the process to continue
-+ running with root privileges. If the sysctl option is enabled,
-+ a sysctl option with name "consistent_setxid" is created.
-+
-+config GRKERNSEC_HARDEN_IPC
-+ bool "Disallow access to overly-permissive IPC objects"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on SYSVIPC
-+ help
-+ If you say Y here, access to overly-permissive IPC objects (shared
-+ memory, message queues, and semaphores) will be denied for processes
-+ meeting either of the following criteria, beyond normal permission checks:
-+ 1) If the IPC object is world-accessible and the euid doesn't match
-+ that of the creator or current uid for the IPC object
-+ 2) If the IPC object is group-accessible and the egid doesn't
-+ match that of the creator or current gid for the IPC object
-+ It's a common error to grant too much permission to these objects,
-+ with impact ranging from denial of service and information leaking to
-+ privilege escalation. This feature was developed in response to
-+ research by Tim Brown:
-+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
-+ who found hundreds of such insecure usages. Processes with
-+ CAP_IPC_OWNER are still permitted to access these IPC objects.
-+ If the sysctl option is enabled, a sysctl option with name
-+ "harden_ipc" is created.
-+
-+config GRKERNSEC_TPE
-+ bool "Trusted Path Execution (TPE)"
-+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
-+ help
-+ If you say Y here, you will be able to choose a gid to add to the
-+ supplementary groups of users you want to mark as "untrusted."
-+ These users will not be able to execute any files that are not in
-+ root-owned directories writable only by root. If the sysctl option
-+ is enabled, a sysctl option with name "tpe" is created.
-+
-+config GRKERNSEC_TPE_ALL
-+ bool "Partially restrict all non-root users"
-+ depends on GRKERNSEC_TPE
-+ help
-+ If you say Y here, all non-root users will be covered under
-+ a weaker TPE restriction. This is separate from, and in addition to,
-+ the main TPE options that you have selected elsewhere. Thus, if a
-+ "trusted" GID is chosen, this restriction applies to even that GID.
-+ Under this restriction, all non-root users will only be allowed to
-+ execute files in directories they own that are not group or
-+ world-writable, or in directories owned by root and writable only by
-+ root. If the sysctl option is enabled, a sysctl option with name
-+ "tpe_restrict_all" is created.
-+
-+config GRKERNSEC_TPE_INVERT
-+ bool "Invert GID option"
-+ depends on GRKERNSEC_TPE
-+ help
-+ If you say Y here, the group you specify in the TPE configuration will
-+ decide what group TPE restrictions will be *disabled* for. This
-+ option is useful if you want TPE restrictions to be applied to most
-+ users on the system. If the sysctl option is enabled, a sysctl option
-+ with name "tpe_invert" is created. Unlike other sysctl options, this
-+ entry will default to on for backward-compatibility.
-+
-+config GRKERNSEC_TPE_GID
-+ int
-+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
-+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
-+
-+config GRKERNSEC_TPE_UNTRUSTED_GID
-+ int "GID for TPE-untrusted users"
-+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
-+ default 1005
-+ help
-+ Setting this GID determines what group TPE restrictions will be
-+ *enabled* for. If the sysctl option is enabled, a sysctl option
-+ with name "tpe_gid" is created.
-+
-+config GRKERNSEC_TPE_TRUSTED_GID
-+ int "GID for TPE-trusted users"
-+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
-+ default 1005
-+ help
-+ Setting this GID determines what group TPE restrictions will be
-+ *disabled* for. If the sysctl option is enabled, a sysctl option
-+ with name "tpe_gid" is created.
-+
-+endmenu
-+menu "Network Protections"
-+depends on GRKERNSEC
-+
-+config GRKERNSEC_BLACKHOLE
-+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on NET
-+ help
-+ If you say Y here, neither TCP resets nor ICMP
-+ destination-unreachable packets will be sent in response to packets
-+ sent to ports for which no associated listening process exists.
-+ It will also prevent the sending of ICMP protocol unreachable packets
-+ in response to packets with unknown protocols.
-+ This feature supports both IPV4 and IPV6 and exempts the
-+ loopback interface from blackholing. Enabling this feature
-+ makes a host more resilient to DoS attacks and reduces network
-+ visibility against scanners.
-+
-+ The blackhole feature as-implemented is equivalent to the FreeBSD
-+ blackhole feature, as it prevents RST responses to all packets, not
-+ just SYNs. Under most application behavior this causes no
-+ problems, but applications (like haproxy) may not close certain
-+ connections in a way that cleanly terminates them on the remote
-+ end, leaving the remote host in LAST_ACK state. Because of this
-+ side-effect and to prevent intentional LAST_ACK DoSes, this
-+ feature also adds automatic mitigation against such attacks.
-+ The mitigation drastically reduces the amount of time a socket
-+ can spend in LAST_ACK state. If you're using haproxy and not
-+ all servers it connects to have this option enabled, consider
-+ disabling this feature on the haproxy host.
-+
-+ If the sysctl option is enabled, two sysctl options with names
-+ "ip_blackhole" and "lastack_retries" will be created.
-+ While "ip_blackhole" takes the standard zero/non-zero on/off
-+ toggle, "lastack_retries" uses the same kinds of values as
-+ "tcp_retries1" and "tcp_retries2". The default value of 4
-+ prevents a socket from lasting more than 45 seconds in LAST_ACK
-+ state.
-+
-+config GRKERNSEC_NO_SIMULT_CONNECT
-+ bool "Disable TCP Simultaneous Connect"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on NET
-+ help
-+ If you say Y here, a feature by Willy Tarreau will be enabled that
-+ removes a weakness in Linux's strict implementation of TCP that
-+ allows two clients to connect to each other without either entering
-+ a listening state. The weakness allows an attacker to easily prevent
-+ a client from connecting to a known server provided the source port
-+ for the connection is guessed correctly.
-+
-+ As the weakness could be used to prevent an antivirus or IPS from
-+ fetching updates, or prevent an SSL gateway from fetching a CRL,
-+ it should be eliminated by enabling this option. Though Linux is
-+ one of few operating systems supporting simultaneous connect, it
-+ has no legitimate use in practice and is rarely supported by firewalls.
-+
-+config GRKERNSEC_SOCKET
-+ bool "Socket restrictions"
-+ depends on NET
-+ help
-+ If you say Y here, you will be able to choose from several options.
-+ If you assign a GID on your system and add it to the supplementary
-+ groups of users you want to restrict socket access to, this patch
-+ will perform up to three things, based on the option(s) you choose.
-+
-+config GRKERNSEC_SOCKET_ALL
-+ bool "Deny any sockets to group"
-+ depends on GRKERNSEC_SOCKET
-+ help
-+ If you say Y here, you will be able to choose a GID whose users will
-+ be unable to connect to other hosts from your machine or run server
-+ applications from your machine. If the sysctl option is enabled, a
-+ sysctl option with name "socket_all" is created.
-+
-+config GRKERNSEC_SOCKET_ALL_GID
-+ int "GID to deny all sockets for"
-+ depends on GRKERNSEC_SOCKET_ALL
-+ default 1004
-+ help
-+ Here you can choose the GID to disable socket access for. Remember to
-+ add the users you want socket access disabled for to the GID
-+ specified here. If the sysctl option is enabled, a sysctl option
-+ with name "socket_all_gid" is created.
-+
-+config GRKERNSEC_SOCKET_CLIENT
-+ bool "Deny client sockets to group"
-+ depends on GRKERNSEC_SOCKET
-+ help
-+ If you say Y here, you will be able to choose a GID whose users will
-+ be unable to connect to other hosts from your machine, but will be
-+ able to run servers. If this option is enabled, all users in the group
-+ you specify will have to use passive mode when initiating ftp transfers
-+ from the shell on your machine. If the sysctl option is enabled, a
-+ sysctl option with name "socket_client" is created.
-+
-+config GRKERNSEC_SOCKET_CLIENT_GID
-+ int "GID to deny client sockets for"
-+ depends on GRKERNSEC_SOCKET_CLIENT
-+ default 1003
-+ help
-+ Here you can choose the GID to disable client socket access for.
-+ Remember to add the users you want client socket access disabled for to
-+ the GID specified here. If the sysctl option is enabled, a sysctl
-+ option with name "socket_client_gid" is created.
-+
-+config GRKERNSEC_SOCKET_SERVER
-+ bool "Deny server sockets to group"
-+ depends on GRKERNSEC_SOCKET
-+ help
-+ If you say Y here, you will be able to choose a GID whose users will
-+ be unable to run server applications from your machine. If the sysctl
-+ option is enabled, a sysctl option with name "socket_server" is created.
-+
-+config GRKERNSEC_SOCKET_SERVER_GID
-+ int "GID to deny server sockets for"
-+ depends on GRKERNSEC_SOCKET_SERVER
-+ default 1002
-+ help
-+ Here you can choose the GID to disable server socket access for.
-+ Remember to add the users you want server socket access disabled for to
-+ the GID specified here. If the sysctl option is enabled, a sysctl
-+ option with name "socket_server_gid" is created.
-+
-+endmenu
-+
-+menu "Physical Protections"
-+depends on GRKERNSEC
-+
-+config GRKERNSEC_DENYUSB
-+ bool "Deny new USB connections after toggle"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on SYSCTL && USB_SUPPORT
-+ help
-+ If you say Y here, a new sysctl option with name "deny_new_usb"
-+ will be created. Setting its value to 1 will prevent any new
-+ USB devices from being recognized by the OS. Any attempted USB
-+ device insertion will be logged. This option is intended to be
-+ used against custom USB devices designed to exploit vulnerabilities
-+ in various USB device drivers.
-+
-+ For greatest effectiveness, this sysctl should be set after any
-+ relevant init scripts. This option is safe to enable in distros
-+ as each user can choose whether or not to toggle the sysctl.
-+
-+config GRKERNSEC_DENYUSB_FORCE
-+ bool "Reject all USB devices not connected at boot"
-+ select USB
-+ depends on GRKERNSEC_DENYUSB
-+ help
-+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
-+ that doesn't involve a sysctl entry. This option should only be
-+ enabled if you're sure you want to deny all new USB connections
-+ at runtime and don't want to modify init scripts. This should not
-+ be enabled by distros. It forces the core USB code to be built
-+ into the kernel image so that all devices connected at boot time
-+ can be recognized and new USB device connections can be prevented
-+ prior to init running.
-+
-+endmenu
-+
-+menu "Sysctl Support"
-+depends on GRKERNSEC && SYSCTL
-+
-+config GRKERNSEC_SYSCTL
-+ bool "Sysctl support"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ help
-+ If you say Y here, you will be able to change the options that
-+ grsecurity runs with at bootup, without having to recompile your
-+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
-+ to enable (1) or disable (0) various features. All the sysctl entries
-+ are mutable until the "grsec_lock" entry is set to a non-zero value.
-+ All features enabled in the kernel configuration are disabled at boot
-+ if you do not say Y to the "Turn on features by default" option.
-+ All options should be set at startup, and the grsec_lock entry should
-+ be set to a non-zero value after all the options are set.
-+ *THIS IS EXTREMELY IMPORTANT*
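A minimal sketch of the startup sequence described above (the feature names are only examples; use whichever entries your configuration actually provides): set the desired toggles, then set grsec_lock last.

    #include <stdio.h>

    static int grsec_set(const char *name, const char *value)
    {
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fprintf(f, "%s\n", value);
        return fclose(f);
    }

    /*
     * e.g. run once from an init script:
     *   grsec_set("deter_bruteforce", "1");
     *   ...
     *   grsec_set("grsec_lock", "1");   // must come last
     */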
-+
-+config GRKERNSEC_SYSCTL_DISTRO
-+ bool "Extra sysctl support for distro makers (READ HELP)"
-+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
-+ help
-+ If you say Y here, additional sysctl options will be created
-+ for features that affect processes running as root. Therefore,
-+ it is critical when using this option that the grsec_lock entry be
-+ enabled after boot. Only distros with prebuilt kernel packages
-+ with this option enabled that can ensure grsec_lock is enabled
-+ after boot should use this option.
-+ *Failure to set grsec_lock after boot makes all grsec features
-+ this option covers useless*
-+
-+ Currently this option creates the following sysctl entries:
-+ "Disable Privileged I/O": "disable_priv_io"
-+
-+config GRKERNSEC_SYSCTL_ON
-+ bool "Turn on features by default"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC_SYSCTL
-+ help
-+ If you say Y here, instead of having all features enabled in the
-+ kernel configuration disabled at boot time, the features will be
-+ enabled at boot time. It is recommended you say Y here unless
-+ there is some reason you would want all sysctl-tunable features to
-+ be disabled by default. As mentioned elsewhere, it is important
-+ to enable the grsec_lock entry once you have finished modifying
-+ the sysctl entries.
-+
-+endmenu
-+menu "Logging Options"
-+depends on GRKERNSEC
-+
-+config GRKERNSEC_FLOODTIME
-+ int "Seconds in between log messages (minimum)"
-+ default 10
-+ help
-+ This option allows you to enforce the number of seconds between
-+ grsecurity log messages. The default should be suitable for most
-+ people, however, if you choose to change it, choose a value small enough
-+ to allow informative logs to be produced, but large enough to
-+ prevent flooding.
-+
-+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
-+ any rate limiting on grsecurity log messages.
-+
-+config GRKERNSEC_FLOODBURST
-+ int "Number of messages in a burst (maximum)"
-+ default 6
-+ help
-+ This option allows you to choose the maximum number of messages allowed
-+ within the flood time interval you chose in a separate option. The
-+ default should be suitable for most people, however if you find that
-+ many of your logs are being interpreted as flooding, you may want to
-+ raise this value.
-+
-+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
-+ any rate limiting on grsecurity log messages.
-+
-+endmenu
-diff --git a/grsecurity/Makefile b/grsecurity/Makefile
-new file mode 100644
-index 0000000..30ababb
---- /dev/null
-+++ b/grsecurity/Makefile
-@@ -0,0 +1,54 @@
-+# grsecurity – access control and security hardening for Linux
-+# All code in this directory and various hooks located throughout the Linux kernel are
-+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
-+# http://www.grsecurity.net spender@grsecurity.net
-+#
-+# This program is free software; you can redistribute it and/or
-+# modify it under the terms of the GNU General Public License version 2
-+# as published by the Free Software Foundation.
-+#
-+# This program is distributed in the hope that it will be useful,
-+# but WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+# GNU General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program; if not, write to the Free Software
-+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-+
-+KBUILD_CFLAGS += -Werror
-+
-+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
-+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
-+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
-+ grsec_usb.o grsec_ipc.o grsec_proc.o
-+
-+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
-+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
-+ gracl_learn.o grsec_log.o gracl_policy.o
-+ifdef CONFIG_COMPAT
-+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
-+endif
-+
-+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
-+
-+ifdef CONFIG_NET
-+obj-y += grsec_sock.o
-+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
-+endif
-+
-+ifndef CONFIG_GRKERNSEC
-+obj-y += grsec_disabled.o
-+endif
-+
-+ifdef CONFIG_GRKERNSEC_HIDESYM
-+extra-y := grsec_hidesym.o
-+$(obj)/grsec_hidesym.o:
-+ @-chmod -f 500 /boot
-+ @-chmod -f 500 /lib/modules
-+ @-chmod -f 500 /lib64/modules
-+ @-chmod -f 500 /lib32/modules
-+ @-chmod -f 700 .
-+ @-chmod -f 700 $(objtree)
-+ @echo ' grsec: protected kernel image paths'
-+endif
-diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
-new file mode 100644
-index 0000000..18911e9
---- /dev/null
-+++ b/grsecurity/gracl.c
-@@ -0,0 +1,2873 @@
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/namei.h>
-+#include <linux/mount.h>
-+#include <linux/tty.h>
-+#include <linux/proc_fs.h>
-+#include <linux/lglock.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <linux/types.h>
-+#include <linux/sysctl.h>
-+#include <linux/netdevice.h>
-+#include <linux/ptrace.h>
-+#include <linux/gracl.h>
-+#include <linux/gralloc.h>
-+#include <linux/security.h>
-+#include <linux/grinternal.h>
-+#include <linux/pid_namespace.h>
-+#include <linux/stop_machine.h>
-+#include <linux/fdtable.h>
-+#include <linux/percpu.h>
-+#include <linux/posix-timers.h>
-+#include <linux/prefetch.h>
-+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
-+#include <linux/magic.h>
-+#include <linux/pagemap.h>
-+#include "../fs/btrfs/async-thread.h"
-+#include "../fs/btrfs/ctree.h"
-+#include "../fs/btrfs/btrfs_inode.h"
-+#endif
-+
-+#include <asm/uaccess.h>
-+#include <asm/errno.h>
-+#include <asm/mman.h>
-+
-+#define FOR_EACH_ROLE_START(role) \
-+ role = running_polstate.role_list; \
-+ while (role) {
-+
-+#define FOR_EACH_ROLE_END(role) \
-+ role = role->prev; \
-+ }
-+
-+extern struct path gr_real_root;
-+
-+static struct gr_policy_state running_polstate;
-+struct gr_policy_state *polstate = &running_polstate;
-+extern struct gr_alloc_state *current_alloc_state;
-+
-+extern char *gr_shared_page[4];
-+static DEFINE_MUTEX(gr_dev_mutex);
-+DEFINE_RWLOCK(gr_inode_lock);
-+
-+static unsigned int gr_status __read_only = GR_STATUS_INIT;
-+
-+#ifdef CONFIG_GRKERNSEC_RESLOG
-+extern void gr_log_resource(const struct task_struct *task,
-+ const int res, const unsigned long wanted, const int gt);
-+#endif
-+
-+#ifdef CONFIG_NET
-+extern struct vfsmount *sock_mnt;
-+#endif
-+
-+extern struct vfsmount *pipe_mnt;
-+extern struct vfsmount *shm_mnt;
-+#ifdef CONFIG_HUGETLBFS
-+extern struct vfsmount *hugetlbfs_vfsmount;
-+#endif
-+
-+DECLARE_BRLOCK(vfsmount_lock);
-+
-+extern u16 acl_sp_role_value;
-+extern struct acl_object_label *fakefs_obj_rw;
-+extern struct acl_object_label *fakefs_obj_rwx;
-+
-+int gr_acl_is_enabled(void)
-+{
-+ return (gr_status & GR_READY);
-+}
-+
-+void gr_enable_rbac_system(void)
-+{
-+ pax_open_kernel();
-+ gr_status |= GR_READY;
-+ pax_close_kernel();
-+}
-+
-+int gr_rbac_disable(void *unused)
-+{
-+ pax_open_kernel();
-+ gr_status &= ~GR_READY;
-+ pax_close_kernel();
-+
-+ return 0;
-+}
-+
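-+/* btrfs reports the same s_dev for every subvolume and reuses inode
-+ * numbers across subvolumes, so use the subvolume's anonymous device
-+ * (and btrfs_ino() below) to keep (device, inode) pairs unique.
-+ */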
-+static inline dev_t __get_dev(const struct dentry *dentry)
-+{
-+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
-+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
-+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
-+ else
-+#endif
-+ return dentry->d_sb->s_dev;
-+}
-+
-+static inline u64 __get_ino(const struct dentry *dentry)
-+{
-+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
-+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
-+ return btrfs_ino(dentry->d_inode);
-+ else
-+#endif
-+ return dentry->d_inode->i_ino;
-+}
-+
-+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
-+{
-+ return __get_dev(dentry);
-+}
-+
-+u64 gr_get_ino_from_dentry(struct dentry *dentry)
-+{
-+ return __get_ino(dentry);
-+}
-+
-+static char gr_task_roletype_to_char(struct task_struct *task)
-+{
-+ switch (task->role->roletype &
-+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
-+ GR_ROLE_SPECIAL)) {
-+ case GR_ROLE_DEFAULT:
-+ return 'D';
-+ case GR_ROLE_USER:
-+ return 'U';
-+ case GR_ROLE_GROUP:
-+ return 'G';
-+ case GR_ROLE_SPECIAL:
-+ return 'S';
-+ }
-+
-+ return 'X';
-+}
-+
-+char gr_roletype_to_char(void)
-+{
-+ return gr_task_roletype_to_char(current);
-+}
-+
-+int
-+gr_acl_tpe_check(void)
-+{
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return 0;
-+ if (current->role->roletype & GR_ROLE_TPE)
-+ return 1;
-+ else
-+ return 0;
-+}
-+
-+int
-+gr_handle_rawio(const struct inode *inode)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
-+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
-+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
-+ !capable(CAP_SYS_RAWIO))
-+ return 1;
-+#endif
-+ return 0;
-+}
-+
-+int
-+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
-+{
-+ if (likely(lena != lenb))
-+ return 0;
-+
-+ return !memcmp(a, b, lena);
-+}
-+
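-+/* The helpers below build the path right to left from the end of the
-+ * supplied buffer, prepending one name component (and its leading '/')
-+ * at a time while walking up toward the given root.
-+ */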
-+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
-+{
-+ *buflen -= namelen;
-+ if (*buflen < 0)
-+ return -ENAMETOOLONG;
-+ *buffer -= namelen;
-+ memcpy(*buffer, str, namelen);
-+ return 0;
-+}
-+
-+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
-+{
-+ return prepend(buffer, buflen, name->name, name->len);
-+}
-+
-+static int prepend_path(const struct path *path, struct path *root,
-+ char **buffer, int *buflen)
-+{
-+ struct dentry *dentry = path->dentry;
-+ struct vfsmount *vfsmnt = path->mnt;
-+ bool slash = false;
-+ int error = 0;
-+
-+ while (dentry != root->dentry || vfsmnt != root->mnt) {
-+ struct dentry * parent;
-+
-+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
-+ /* Global root? */
-+ if (vfsmnt->mnt_parent == vfsmnt) {
-+ goto out;
-+ }
-+ dentry = vfsmnt->mnt_mountpoint;
-+ vfsmnt = vfsmnt->mnt_parent;
-+ continue;
-+ }
-+ parent = dentry->d_parent;
-+ prefetch(parent);
-+ spin_lock(&dentry->d_lock);
-+ error = prepend_name(buffer, buflen, &dentry->d_name);
-+ spin_unlock(&dentry->d_lock);
-+ if (!error)
-+ error = prepend(buffer, buflen, "/", 1);
-+ if (error)
-+ break;
-+
-+ slash = true;
-+ dentry = parent;
-+ }
-+
-+out:
-+ if (!error && !slash)
-+ error = prepend(buffer, buflen, "/", 1);
-+
-+ return error;
-+}
-+
-+/* this must be called with vfsmount_lock and rename_lock held */
-+
-+static char *__our_d_path(const struct path *path, struct path *root,
-+ char *buf, int buflen)
-+{
-+ char *res = buf + buflen;
-+ int error;
-+
-+ prepend(&res, &buflen, "\0", 1);
-+ error = prepend_path(path, root, &res, &buflen);
-+ if (error)
-+ return ERR_PTR(error);
-+
-+ return res;
-+}
-+
-+static char *
-+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
-+{
-+ char *retval;
-+
-+ retval = __our_d_path(path, root, buf, buflen);
-+ if (unlikely(IS_ERR(retval)))
-+ retval = strcpy(buf, "<path too long>");
-+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
-+ retval[1] = '\0';
-+
-+ return retval;
-+}
-+
-+static char *
-+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
-+ char *buf, int buflen)
-+{
-+ struct path path;
-+ char *res;
-+
-+ path.dentry = (struct dentry *)dentry;
-+ path.mnt = (struct vfsmount *)vfsmnt;
-+
-+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
-+ by the RBAC system */
-+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
-+
-+ return res;
-+}
-+
-+static char *
-+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
-+ char *buf, int buflen)
-+{
-+ char *res;
-+ struct path path;
-+ struct path root;
-+ struct task_struct *reaper = &init_task;
-+
-+ path.dentry = (struct dentry *)dentry;
-+ path.mnt = (struct vfsmount *)vfsmnt;
-+
-+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
-+ get_fs_root(reaper->fs, &root);
-+
-+ br_read_lock(vfsmount_lock);
-+ write_seqlock(&rename_lock);
-+ res = gen_full_path(&path, &root, buf, buflen);
-+ write_sequnlock(&rename_lock);
-+ br_read_unlock(vfsmount_lock);
-+
-+ path_put(&root);
-+ return res;
-+}
-+
-+char *
-+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+ char *ret;
-+ br_read_lock(vfsmount_lock);
-+ write_seqlock(&rename_lock);
-+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
-+ PAGE_SIZE);
-+ write_sequnlock(&rename_lock);
-+ br_read_unlock(vfsmount_lock);
-+ return ret;
-+}
-+
-+static char *
-+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+ char *ret;
-+ char *buf;
-+ int buflen;
-+
-+ br_read_lock(vfsmount_lock);
-+ write_seqlock(&rename_lock);
-+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
-+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
-+ buflen = (int)(ret - buf);
-+ if (buflen >= 5)
-+ prepend(&ret, &buflen, "/proc", 5);
-+ else
-+ ret = strcpy(buf, "<path too long>");
-+ write_sequnlock(&rename_lock);
-+ br_read_unlock(vfsmount_lock);
-+ return ret;
-+}
-+
-+char *
-+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
-+ PAGE_SIZE);
-+}
-+
-+char *
-+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
-+ PAGE_SIZE);
-+}
-+
-+char *
-+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
-+ PAGE_SIZE);
-+}
-+
-+char *
-+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
-+ PAGE_SIZE);
-+}
-+
-+char *
-+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
-+ PAGE_SIZE);
-+}
-+
-+__u32
-+to_gr_audit(const __u32 reqmode)
-+{
-+ /* masks off auditable permission flags, then shifts them to create
-+ auditing flags, and adds the special case of append auditing if
-+ we're requesting write */
-+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
-+}
-+
-+struct acl_role_label *
-+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
-+ const gid_t gid)
-+{
-+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
-+ struct acl_role_label *match;
-+ struct role_allowed_ip *ipp;
-+ unsigned int x;
-+ u32 curr_ip = task->signal->saved_ip;
-+
-+ match = state->acl_role_set.r_hash[index];
-+
-+ while (match) {
-+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
-+ for (x = 0; x < match->domain_child_num; x++) {
-+ if (match->domain_children[x] == uid)
-+ goto found;
-+ }
-+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
-+ break;
-+ match = match->next;
-+ }
-+found:
-+ if (match == NULL) {
-+ try_group:
-+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
-+ match = state->acl_role_set.r_hash[index];
-+
-+ while (match) {
-+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
-+ for (x = 0; x < match->domain_child_num; x++) {
-+ if (match->domain_children[x] == gid)
-+ goto found2;
-+ }
-+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
-+ break;
-+ match = match->next;
-+ }
-+found2:
-+ if (match == NULL)
-+ match = state->default_role;
-+ if (match->allowed_ips == NULL)
-+ return match;
-+ else {
-+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
-+ if (likely
-+ ((ntohl(curr_ip) & ipp->netmask) ==
-+ (ntohl(ipp->addr) & ipp->netmask)))
-+ return match;
-+ }
-+ match = state->default_role;
-+ }
-+ } else if (match->allowed_ips == NULL) {
-+ return match;
-+ } else {
-+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
-+ if (likely
-+ ((ntohl(curr_ip) & ipp->netmask) ==
-+ (ntohl(ipp->addr) & ipp->netmask)))
-+ return match;
-+ }
-+ goto try_group;
-+ }
-+
-+ return match;
-+}
-+
-+static struct acl_role_label *
-+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
-+ const gid_t gid)
-+{
-+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
-+}
-+
-+struct acl_subject_label *
-+lookup_acl_subj_label(const u64 ino, const dev_t dev,
-+ const struct acl_role_label *role)
-+{
-+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
-+ struct acl_subject_label *match;
-+
-+ match = role->subj_hash[index];
-+
-+ while (match && (match->inode != ino || match->device != dev ||
-+ (match->mode & GR_DELETED))) {
-+ match = match->next;
-+ }
-+
-+ if (match && !(match->mode & GR_DELETED))
-+ return match;
-+ else
-+ return NULL;
-+}
-+
-+struct acl_subject_label *
-+lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev,
-+ const struct acl_role_label *role)
-+{
-+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
-+ struct acl_subject_label *match;
-+
-+ match = role->subj_hash[index];
-+
-+ while (match && (match->inode != ino || match->device != dev ||
-+ !(match->mode & GR_DELETED))) {
-+ match = match->next;
-+ }
-+
-+ if (match && (match->mode & GR_DELETED))
-+ return match;
-+ else
-+ return NULL;
-+}
-+
-+static struct acl_object_label *
-+lookup_acl_obj_label(const u64 ino, const dev_t dev,
-+ const struct acl_subject_label *subj)
-+{
-+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
-+ struct acl_object_label *match;
-+
-+ match = subj->obj_hash[index];
-+
-+ while (match && (match->inode != ino || match->device != dev ||
-+ (match->mode & GR_DELETED))) {
-+ match = match->next;
-+ }
-+
-+ if (match && !(match->mode & GR_DELETED))
-+ return match;
-+ else
-+ return NULL;
-+}
-+
-+static struct acl_object_label *
-+lookup_acl_obj_label_create(const u64 ino, const dev_t dev,
-+ const struct acl_subject_label *subj)
-+{
-+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
-+ struct acl_object_label *match;
-+
-+ match = subj->obj_hash[index];
-+
-+ while (match && (match->inode != ino || match->device != dev ||
-+ !(match->mode & GR_DELETED))) {
-+ match = match->next;
-+ }
-+
-+ if (match && (match->mode & GR_DELETED))
-+ return match;
-+
-+ match = subj->obj_hash[index];
-+
-+ while (match && (match->inode != ino || match->device != dev ||
-+ (match->mode & GR_DELETED))) {
-+ match = match->next;
-+ }
-+
-+ if (match && !(match->mode & GR_DELETED))
-+ return match;
-+ else
-+ return NULL;
-+}
-+
-+struct name_entry *
-+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
-+{
-+ unsigned int len = strlen(name);
-+ unsigned int key = full_name_hash(name, len);
-+ unsigned int index = key % state->name_set.n_size;
-+ struct name_entry *match;
-+
-+ match = state->name_set.n_hash[index];
-+
-+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
-+ match = match->next;
-+
-+ return match;
-+}
-+
-+static struct name_entry *
-+lookup_name_entry(const char *name)
-+{
-+ return __lookup_name_entry(&running_polstate, name);
-+}
-+
-+static struct name_entry *
-+lookup_name_entry_create(const char *name)
-+{
-+ unsigned int len = strlen(name);
-+ unsigned int key = full_name_hash(name, len);
-+ unsigned int index = key % running_polstate.name_set.n_size;
-+ struct name_entry *match;
-+
-+ match = running_polstate.name_set.n_hash[index];
-+
-+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
-+ !match->deleted))
-+ match = match->next;
-+
-+ if (match && match->deleted)
-+ return match;
-+
-+ match = running_polstate.name_set.n_hash[index];
-+
-+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
-+ match->deleted))
-+ match = match->next;
-+
-+ if (match && !match->deleted)
-+ return match;
-+ else
-+ return NULL;
-+}
-+
-+static struct inodev_entry *
-+lookup_inodev_entry(const u64 ino, const dev_t dev)
-+{
-+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
-+ struct inodev_entry *match;
-+
-+ match = running_polstate.inodev_set.i_hash[index];
-+
-+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
-+ match = match->next;
-+
-+ return match;
-+}
-+
-+void
-+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
-+{
-+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
-+ state->inodev_set.i_size);
-+ struct inodev_entry **curr;
-+
-+ entry->prev = NULL;
-+
-+ curr = &state->inodev_set.i_hash[index];
-+ if (*curr != NULL)
-+ (*curr)->prev = entry;
-+
-+ entry->next = *curr;
-+ *curr = entry;
-+
-+ return;
-+}
-+
-+static void
-+insert_inodev_entry(struct inodev_entry *entry)
-+{
-+ __insert_inodev_entry(&running_polstate, entry);
-+}
-+
-+void
-+insert_acl_obj_label(struct acl_object_label *obj,
-+ struct acl_subject_label *subj)
-+{
-+ unsigned int index =
-+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
-+ struct acl_object_label **curr;
-+
-+ obj->prev = NULL;
-+
-+ curr = &subj->obj_hash[index];
-+ if (*curr != NULL)
-+ (*curr)->prev = obj;
-+
-+ obj->next = *curr;
-+ *curr = obj;
-+
-+ return;
-+}
-+
-+void
-+insert_acl_subj_label(struct acl_subject_label *obj,
-+ struct acl_role_label *role)
-+{
-+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
-+ struct acl_subject_label **curr;
-+
-+ obj->prev = NULL;
-+
-+ curr = &role->subj_hash[index];
-+ if (*curr != NULL)
-+ (*curr)->prev = obj;
-+
-+ obj->next = *curr;
-+ *curr = obj;
-+
-+ return;
-+}
-+
-+/* derived from glibc fnmatch() 0: match, 1: no match*/
-+
-+static int
-+glob_match(const char *p, const char *n)
-+{
-+ char c;
-+
-+ while ((c = *p++) != '\0') {
-+ switch (c) {
-+ case '?':
-+ if (*n == '\0')
-+ return 1;
-+ else if (*n == '/')
-+ return 1;
-+ break;
-+ case '\\':
-+ if (*n != c)
-+ return 1;
-+ break;
-+ case '*':
-+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
-+ if (*n == '/')
-+ return 1;
-+ else if (c == '?') {
-+ if (*n == '\0')
-+ return 1;
-+ else
-+ ++n;
-+ }
-+ }
-+ if (c == '\0') {
-+ return 0;
-+ } else {
-+ const char *endp;
-+
-+ if ((endp = strchr(n, '/')) == NULL)
-+ endp = n + strlen(n);
-+
-+ if (c == '[') {
-+ for (--p; n < endp; ++n)
-+ if (!glob_match(p, n))
-+ return 0;
-+ } else if (c == '/') {
-+ while (*n != '\0' && *n != '/')
-+ ++n;
-+ if (*n == '/' && !glob_match(p, n + 1))
-+ return 0;
-+ } else {
-+ for (--p; n < endp; ++n)
-+ if (*n == c && !glob_match(p, n))
-+ return 0;
-+ }
-+
-+ return 1;
-+ }
-+ case '[':
-+ {
-+ int not;
-+ char cold;
-+
-+ if (*n == '\0' || *n == '/')
-+ return 1;
-+
-+ not = (*p == '!' || *p == '^');
-+ if (not)
-+ ++p;
-+
-+ c = *p++;
-+ for (;;) {
-+ unsigned char fn = (unsigned char)*n;
-+
-+ if (c == '\0')
-+ return 1;
-+ else {
-+ if (c == fn)
-+ goto matched;
-+ cold = c;
-+ c = *p++;
-+
-+ if (c == '-' && *p != ']') {
-+ unsigned char cend = *p++;
-+
-+ if (cend == '\0')
-+ return 1;
-+
-+ if (cold <= fn && fn <= cend)
-+ goto matched;
-+
-+ c = *p++;
-+ }
-+ }
-+
-+ if (c == ']')
-+ break;
-+ }
-+ if (!not)
-+ return 1;
-+ break;
-+ matched:
-+ while (c != ']') {
-+ if (c == '\0')
-+ return 1;
-+
-+ c = *p++;
-+ }
-+ if (not)
-+ return 1;
-+ }
-+ break;
-+ default:
-+ if (c != *n)
-+ return 1;
-+ }
-+
-+ ++n;
-+ }
-+
-+ if (*n == '\0')
-+ return 0;
-+
-+ if (*n == '/')
-+ return 0;
-+
-+ return 1;
-+}
-+
-+static struct acl_object_label *
-+chk_glob_label(struct acl_object_label *globbed,
-+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
-+{
-+ struct acl_object_label *tmp;
-+
-+ if (*path == NULL)
-+ *path = gr_to_filename_nolock(dentry, mnt);
-+
-+ tmp = globbed;
-+
-+ while (tmp) {
-+ if (!glob_match(tmp->filename, *path))
-+ return tmp;
-+ tmp = tmp->next;
-+ }
-+
-+ return NULL;
-+}
-+
-+static struct acl_object_label *
-+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
-+ const u64 curr_ino, const dev_t curr_dev,
-+ const struct acl_subject_label *subj, char **path, const int checkglob)
-+{
-+ struct acl_subject_label *tmpsubj;
-+ struct acl_object_label *retval;
-+ struct acl_object_label *retval2;
-+
-+ tmpsubj = (struct acl_subject_label *) subj;
-+ read_lock(&gr_inode_lock);
-+ do {
-+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
-+ if (retval) {
-+ if (checkglob && retval->globbed) {
-+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
-+ if (retval2)
-+ retval = retval2;
-+ }
-+ break;
-+ }
-+ } while ((tmpsubj = tmpsubj->parent_subject));
-+ read_unlock(&gr_inode_lock);
-+
-+ return retval;
-+}
-+
-+static struct acl_object_label *
-+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
-+ struct dentry *curr_dentry,
-+ const struct acl_subject_label *subj, char **path, const int checkglob)
-+{
-+ int newglob = checkglob;
-+ u64 inode;
-+ dev_t device;
-+
-+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
-+ as we don't want a / * rule to match instead of the / object
-+ don't do this for create lookups that call this function though, since they're looking up
-+ on the parent and thus need globbing checks on all paths
-+ */
-+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
-+ newglob = GR_NO_GLOB;
-+
-+ spin_lock(&curr_dentry->d_lock);
-+ inode = __get_ino(curr_dentry);
-+ device = __get_dev(curr_dentry);
-+ spin_unlock(&curr_dentry->d_lock);
-+
-+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
-+}
-+
-+static struct acl_object_label *
-+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
-+ const struct acl_subject_label *subj, char *path, const int checkglob)
-+{
-+ struct dentry *dentry = (struct dentry *) l_dentry;
-+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
-+ struct acl_object_label *retval;
-+ struct dentry *parent;
-+
-+ br_read_lock(vfsmount_lock);
-+ write_seqlock(&rename_lock);
-+
-+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
-+#ifdef CONFIG_NET
-+ mnt == sock_mnt ||
-+#endif
-+#ifdef CONFIG_HUGETLBFS
-+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
-+#endif
-+ /* ignore Eric Biederman */
-+ IS_PRIVATE(l_dentry->d_inode))) {
-+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
-+ goto out;
-+ }
-+
-+ for (;;) {
-+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
-+ break;
-+
-+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
-+ if (mnt->mnt_parent == mnt)
-+ break;
-+
-+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
-+ if (retval != NULL)
-+ goto out;
-+
-+ dentry = mnt->mnt_mountpoint;
-+ mnt = mnt->mnt_parent;
-+ continue;
-+ }
-+
-+ parent = dentry->d_parent;
-+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
-+ if (retval != NULL)
-+ goto out;
-+
-+ dentry = parent;
-+ }
-+
-+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
-+
-+ /* gr_real_root is pinned so we don't have to hold a reference */
-+ if (retval == NULL)
-+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
-+out:
-+ write_sequnlock(&rename_lock);
-+ br_read_unlock(vfsmount_lock);
-+
-+ BUG_ON(retval == NULL);
-+
-+ return retval;
-+}
-+
-+static struct acl_object_label *
-+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
-+ const struct acl_subject_label *subj)
-+{
-+ char *path = NULL;
-+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
-+}
-+
-+static struct acl_object_label *
-+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
-+ const struct acl_subject_label *subj)
-+{
-+ char *path = NULL;
-+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
-+}
-+
-+static struct acl_object_label *
-+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
-+ const struct acl_subject_label *subj, char *path)
-+{
-+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
-+}
-+
-+struct acl_subject_label *
-+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
-+ const struct acl_role_label *role)
-+{
-+ struct dentry *dentry = (struct dentry *) l_dentry;
-+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
-+ struct acl_subject_label *retval;
-+ struct dentry *parent;
-+
-+ br_read_lock(vfsmount_lock);
-+ write_seqlock(&rename_lock);
-+
-+ for (;;) {
-+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
-+ break;
-+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
-+ if (mnt->mnt_parent == mnt)
-+ break;
-+
-+ spin_lock(&dentry->d_lock);
-+ read_lock(&gr_inode_lock);
-+ retval =
-+ lookup_acl_subj_label(__get_ino(dentry),
-+ __get_dev(dentry), role);
-+ read_unlock(&gr_inode_lock);
-+ spin_unlock(&dentry->d_lock);
-+ if (retval != NULL)
-+ goto out;
-+
-+ dentry = mnt->mnt_mountpoint;
-+ mnt = mnt->mnt_parent;
-+ continue;
-+ }
-+
-+ spin_lock(&dentry->d_lock);
-+ read_lock(&gr_inode_lock);
-+ retval = lookup_acl_subj_label(__get_ino(dentry),
-+ __get_dev(dentry), role);
-+ read_unlock(&gr_inode_lock);
-+ parent = dentry->d_parent;
-+ spin_unlock(&dentry->d_lock);
-+
-+ if (retval != NULL)
-+ goto out;
-+
-+ dentry = parent;
-+ }
-+
-+ spin_lock(&dentry->d_lock);
-+ read_lock(&gr_inode_lock);
-+ retval = lookup_acl_subj_label(__get_ino(dentry),
-+ __get_dev(dentry), role);
-+ read_unlock(&gr_inode_lock);
-+ spin_unlock(&dentry->d_lock);
-+
-+ if (unlikely(retval == NULL)) {
-+ /* gr_real_root is pinned, we don't need to hold a reference */
-+ read_lock(&gr_inode_lock);
-+ retval = lookup_acl_subj_label(__get_ino(gr_real_root.dentry),
-+ __get_dev(gr_real_root.dentry), role);
-+ read_unlock(&gr_inode_lock);
-+ }
-+out:
-+ write_sequnlock(&rename_lock);
-+ br_read_unlock(vfsmount_lock);
-+
-+ BUG_ON(retval == NULL);
-+
-+ return retval;
-+}
-+
-+void
-+assign_special_role(const char *rolename)
-+{
-+ struct acl_object_label *obj;
-+ struct acl_role_label *r;
-+ struct acl_role_label *assigned = NULL;
-+ struct task_struct *tsk;
-+ struct file *filp;
-+
-+ FOR_EACH_ROLE_START(r)
-+ if (!strcmp(rolename, r->rolename) &&
-+ (r->roletype & GR_ROLE_SPECIAL)) {
-+ assigned = r;
-+ break;
-+ }
-+ FOR_EACH_ROLE_END(r)
-+
-+ if (!assigned)
-+ return;
-+
-+ read_lock(&tasklist_lock);
-+ read_lock(&grsec_exec_file_lock);
-+
-+ tsk = current->real_parent;
-+ if (tsk == NULL)
-+ goto out_unlock;
-+
-+ filp = tsk->exec_file;
-+ if (filp == NULL)
-+ goto out_unlock;
-+
-+ tsk->is_writable = 0;
-+ tsk->inherited = 0;
-+
-+ tsk->acl_sp_role = 1;
-+ tsk->acl_role_id = ++acl_sp_role_value;
-+ tsk->role = assigned;
-+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
-+
-+ /* ignore additional mmap checks for processes that are writable
-+ by the default ACL */
-+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
-+ if (unlikely(obj->mode & GR_WRITE))
-+ tsk->is_writable = 1;
-+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
-+ if (unlikely(obj->mode & GR_WRITE))
-+ tsk->is_writable = 1;
-+
-+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
-+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
-+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
-+#endif
-+
-+out_unlock:
-+ read_unlock(&grsec_exec_file_lock);
-+ read_unlock(&tasklist_lock);
-+ return;
-+}
-+
-+
-+static void
-+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
-+{
-+ struct task_struct *task = current;
-+ const struct cred *cred = current_cred();
-+
-+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
-+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
-+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
-+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
-+
-+ return;
-+}
-+
-+static void
-+gr_log_learn_sysctl(const char *path, const __u32 mode)
-+{
-+ struct task_struct *task = current;
-+ const struct cred *cred = current_cred();
-+
-+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
-+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
-+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
-+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
-+
-+ return;
-+}
-+
-+static void
-+gr_log_learn_id_change(const char type, const unsigned int real,
-+ const unsigned int effective, const unsigned int fs)
-+{
-+ struct task_struct *task = current;
-+ const struct cred *cred = current_cred();
-+
-+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
-+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
-+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
-+ type, real, effective, fs, &task->signal->saved_ip);
-+
-+ return;
-+}
-+
-+static void
-+gr_set_proc_res(struct task_struct *task)
-+{
-+ struct acl_subject_label *proc;
-+ unsigned short i;
-+
-+ proc = task->acl;
-+
-+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
-+ return;
-+
-+ for (i = 0; i < RLIM_NLIMITS; i++) {
-+ unsigned long rlim_cur, rlim_max;
-+
-+ if (!(proc->resmask & (1U << i)))
-+ continue;
-+
-+ rlim_cur = proc->res[i].rlim_cur;
-+ rlim_max = proc->res[i].rlim_max;
-+
-+ if (i == RLIMIT_NOFILE) {
-+ unsigned long saved_sysctl_nr_open = sysctl_nr_open;
-+ if (rlim_cur > saved_sysctl_nr_open)
-+ rlim_cur = saved_sysctl_nr_open;
-+ if (rlim_max > saved_sysctl_nr_open)
-+ rlim_max = saved_sysctl_nr_open;
-+ }
-+
-+ task->signal->rlim[i].rlim_cur = rlim_cur;
-+ task->signal->rlim[i].rlim_max = rlim_max;
-+
-+ if (i == RLIMIT_CPU)
-+ update_rlimit_cpu(task, rlim_cur);
-+ }
-+
-+ return;
-+}
-+
-+/* both of the below must be called with
-+ rcu_read_lock();
-+ read_lock(&tasklist_lock);
-+ read_lock(&grsec_exec_file_lock);
-+ except in the case of gr_set_role_label() (for __gr_get_subject_for_task)
-+*/
-+
-+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback)
-+{
-+ char *tmpname;
-+ struct acl_subject_label *tmpsubj;
-+ struct file *filp;
-+ struct name_entry *nmatch;
-+
-+ filp = task->exec_file;
-+ if (filp == NULL)
-+ return NULL;
-+
-+ /* the following is to apply the correct subject
-+ on binaries running when the RBAC system
-+ is enabled, when the binaries have been
-+ replaced or deleted since their execution
-+ -----
-+ when the RBAC system starts, the inode/dev
-+ from exec_file will be one the RBAC system
-+ is unaware of. It only knows the inode/dev
-+ of the present file on disk, or the absence
-+ of it.
-+ */
-+
-+ if (filename)
-+ nmatch = __lookup_name_entry(state, filename);
-+ else {
-+ preempt_disable();
-+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
-+
-+ nmatch = __lookup_name_entry(state, tmpname);
-+ preempt_enable();
-+ }
-+ tmpsubj = NULL;
-+ if (nmatch) {
-+ if (nmatch->deleted)
-+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
-+ else
-+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
-+ }
-+ /* this also works for the reload case -- if we don't match a potentially inherited subject
-+ then we fall back to a normal lookup based on the binary's ino/dev
-+ */
-+ if (tmpsubj == NULL && fallback)
-+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
-+
-+ return tmpsubj;
-+}
-+
-+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename, int fallback)
-+{
-+ return __gr_get_subject_for_task(&running_polstate, task, filename, fallback);
-+}
-+
-+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
-+{
-+ struct acl_object_label *obj;
-+ struct file *filp;
-+
-+ filp = task->exec_file;
-+
-+ task->acl = subj;
-+ task->is_writable = 0;
-+ /* ignore additional mmap checks for processes that are writable
-+ by the default ACL */
-+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
-+ if (unlikely(obj->mode & GR_WRITE))
-+ task->is_writable = 1;
-+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
-+ if (unlikely(obj->mode & GR_WRITE))
-+ task->is_writable = 1;
-+
-+ gr_set_proc_res(task);
-+
-+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
-+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
-+#endif
-+}
-+
-+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
-+{
-+ __gr_apply_subject_to_task(&running_polstate, task, subj);
-+}
-+
-+__u32
-+gr_search_file(const struct dentry * dentry, const __u32 mode,
-+ const struct vfsmount * mnt)
-+{
-+ __u32 retval = mode;
-+ struct acl_subject_label *curracl;
-+ struct acl_object_label *currobj;
-+
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return (mode & ~GR_AUDITS);
-+
-+ curracl = current->acl;
-+
-+ currobj = chk_obj_label(dentry, mnt, curracl);
-+ retval = currobj->mode & mode;
-+
-+ /* if we're opening a specified transfer file for writing
-+ (e.g. /dev/initctl), then transfer our role to init
-+ */
-+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
-+ current->role->roletype & GR_ROLE_PERSIST)) {
-+ struct task_struct *task = init_pid_ns.child_reaper;
-+
-+ if (task->role != current->role) {
-+ struct acl_subject_label *subj;
-+
-+ task->acl_sp_role = 0;
-+ task->acl_role_id = current->acl_role_id;
-+ task->role = current->role;
-+ rcu_read_lock();
-+ read_lock(&grsec_exec_file_lock);
-+ subj = gr_get_subject_for_task(task, NULL, 1);
-+ gr_apply_subject_to_task(task, subj);
-+ read_unlock(&grsec_exec_file_lock);
-+ rcu_read_unlock();
-+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
-+ }
-+ }
-+
-+ if (unlikely
-+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
-+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
-+ __u32 new_mode = mode;
-+
-+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
-+
-+ retval = new_mode;
-+
-+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
-+ new_mode |= GR_INHERIT;
-+
-+ if (!(mode & GR_NOLEARN))
-+ gr_log_learn(dentry, mnt, new_mode);
-+ }
-+
-+ return retval;
-+}
-+
-+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
-+ const struct dentry *parent,
-+ const struct vfsmount *mnt)
-+{
-+ struct name_entry *match;
-+ struct acl_object_label *matchpo;
-+ struct acl_subject_label *curracl;
-+ char *path;
-+
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return NULL;
-+
-+ preempt_disable();
-+ path = gr_to_filename_rbac(new_dentry, mnt);
-+ match = lookup_name_entry_create(path);
-+
-+ curracl = current->acl;
-+
-+ if (match) {
-+ read_lock(&gr_inode_lock);
-+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
-+ read_unlock(&gr_inode_lock);
-+
-+ if (matchpo) {
-+ preempt_enable();
-+ return matchpo;
-+ }
-+ }
-+
-+ // lookup parent
-+
-+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
-+
-+ preempt_enable();
-+ return matchpo;
-+}
-+
-+__u32
-+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
-+ const struct vfsmount * mnt, const __u32 mode)
-+{
-+ struct acl_object_label *matchpo;
-+ __u32 retval;
-+
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return (mode & ~GR_AUDITS);
-+
-+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
-+
-+ retval = matchpo->mode & mode;
-+
-+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
-+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
-+ __u32 new_mode = mode;
-+
-+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
-+
-+ gr_log_learn(new_dentry, mnt, new_mode);
-+ return new_mode;
-+ }
-+
-+ return retval;
-+}
-+
-+__u32
-+gr_check_link(const struct dentry * new_dentry,
-+ const struct dentry * parent_dentry,
-+ const struct vfsmount * parent_mnt,
-+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
-+{
-+ struct acl_object_label *obj;
-+ __u32 oldmode, newmode;
-+ __u32 needmode;
-+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
-+ GR_DELETE | GR_INHERIT;
-+
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return (GR_CREATE | GR_LINK);
-+
-+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
-+ oldmode = obj->mode;
-+
-+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
-+ newmode = obj->mode;
-+
-+ needmode = newmode & checkmodes;
-+
-+ // old name for hardlink must have at least the permissions of the new name
-+ if ((oldmode & needmode) != needmode)
-+ goto bad;
-+
-+ // if old name had restrictions/auditing, make sure the new name does as well
-+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
-+
-+ // don't allow hardlinking of suid/sgid/fcapped files without permission
-+ if (is_privileged_binary(old_dentry))
-+ needmode |= GR_SETID;
-+
-+ if ((newmode & needmode) != needmode)
-+ goto bad;
-+
-+ // enforce minimum permissions
-+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
-+ return newmode;
-+bad:
-+ needmode = oldmode;
-+ if (is_privileged_binary(old_dentry))
-+ needmode |= GR_SETID;
-+
-+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
-+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
-+ return (GR_CREATE | GR_LINK);
-+ } else if (newmode & GR_SUPPRESS)
-+ return GR_SUPPRESS;
-+ else
-+ return 0;
-+}
-+
-+int
-+gr_check_hidden_task(const struct task_struct *task)
-+{
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return 0;
-+
-+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
-+ return 1;
-+
-+ return 0;
-+}
-+
-+int
-+gr_check_protected_task(const struct task_struct *task)
-+{
-+ if (unlikely(!(gr_status & GR_READY) || !task))
-+ return 0;
-+
-+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
-+ task->acl != current->acl)
-+ return 1;
-+
-+ return 0;
-+}
-+
-+int
-+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
-+{
-+ struct task_struct *p;
-+ int ret = 0;
-+
-+ if (unlikely(!(gr_status & GR_READY) || !pid))
-+ return ret;
-+
-+ read_lock(&tasklist_lock);
-+ do_each_pid_task(pid, type, p) {
-+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
-+ p->acl != current->acl) {
-+ ret = 1;
-+ goto out;
-+ }
-+ } while_each_pid_task(pid, type, p);
-+out:
-+ read_unlock(&tasklist_lock);
-+
-+ return ret;
-+}
-+
-+void
-+gr_copy_label(struct task_struct *tsk)
-+{
-+ struct task_struct *p = current;
-+
-+ tsk->inherited = p->inherited;
-+ tsk->acl_sp_role = 0;
-+ tsk->acl_role_id = p->acl_role_id;
-+ tsk->acl = p->acl;
-+ tsk->role = p->role;
-+ tsk->signal->used_accept = 0;
-+ tsk->signal->curr_ip = p->signal->curr_ip;
-+ tsk->signal->saved_ip = p->signal->saved_ip;
-+ if (p->exec_file)
-+ get_file(p->exec_file);
-+ tsk->exec_file = p->exec_file;
-+ tsk->is_writable = p->is_writable;
-+ if (unlikely(p->signal->used_accept)) {
-+ p->signal->curr_ip = 0;
-+ p->signal->saved_ip = 0;
-+ }
-+
-+ return;
-+}
-+
-+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
-+
-+int
-+gr_check_user_change(int real, int effective, int fs)
-+{
-+ unsigned int i;
-+ __u16 num;
-+ uid_t *uidlist;
-+ int curuid;
-+ int realok = 0;
-+ int effectiveok = 0;
-+ int fsok = 0;
-+
-+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
-+ struct user_struct *user;
-+
-+ if (real == -1)
-+ goto skipit;
-+
-+ user = find_user(real);
-+ if (user == NULL)
-+ goto skipit;
-+
-+ if (gr_process_kernel_setuid_ban(user)) {
-+ /* for find_user */
-+ free_uid(user);
-+ return 1;
-+ }
-+
-+ /* for find_user */
-+ free_uid(user);
-+
-+skipit:
-+#endif
-+
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return 0;
-+
-+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
-+ gr_log_learn_id_change('u', real, effective, fs);
-+
-+ num = current->acl->user_trans_num;
-+ uidlist = current->acl->user_transitions;
-+
-+ if (uidlist == NULL)
-+ return 0;
-+
-+ if (real == -1)
-+ realok = 1;
-+ if (effective == -1)
-+ effectiveok = 1;
-+ if (fs == -1)
-+ fsok = 1;
-+
-+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
-+ for (i = 0; i < num; i++) {
-+ curuid = (int)uidlist[i];
-+ if (real == curuid)
-+ realok = 1;
-+ if (effective == curuid)
-+ effectiveok = 1;
-+ if (fs == curuid)
-+ fsok = 1;
-+ }
-+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
-+ for (i = 0; i < num; i++) {
-+ curuid = (int)uidlist[i];
-+ if (real == curuid)
-+ break;
-+ if (effective == curuid)
-+ break;
-+ if (fs == curuid)
-+ break;
-+ }
-+ /* not in deny list */
-+ if (i == num) {
-+ realok = 1;
-+ effectiveok = 1;
-+ fsok = 1;
-+ }
-+ }
-+
-+ if (realok && effectiveok && fsok)
-+ return 0;
-+ else {
-+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
-+ return 1;
-+ }
-+}
-+
-+int
-+gr_check_group_change(int real, int effective, int fs)
-+{
-+ unsigned int i;
-+ __u16 num;
-+ gid_t *gidlist;
-+ int curgid;
-+ int realok = 0;
-+ int effectiveok = 0;
-+ int fsok = 0;
-+
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return 0;
-+
-+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
-+ gr_log_learn_id_change('g', real, effective, fs);
-+
-+ num = current->acl->group_trans_num;
-+ gidlist = current->acl->group_transitions;
-+
-+ if (gidlist == NULL)
-+ return 0;
-+
-+ if (real == -1)
-+ realok = 1;
-+ if (effective == -1)
-+ effectiveok = 1;
-+ if (fs == -1)
-+ fsok = 1;
-+
-+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
-+ for (i = 0; i < num; i++) {
-+ curgid = (int)gidlist[i];
-+ if (real == curgid)
-+ realok = 1;
-+ if (effective == curgid)
-+ effectiveok = 1;
-+ if (fs == curgid)
-+ fsok = 1;
-+ }
-+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
-+ for (i = 0; i < num; i++) {
-+ curgid = (int)gidlist[i];
-+ if (real == curgid)
-+ break;
-+ if (effective == curgid)
-+ break;
-+ if (fs == curgid)
-+ break;
-+ }
-+ /* not in deny list */
-+ if (i == num) {
-+ realok = 1;
-+ effectiveok = 1;
-+ fsok = 1;
-+ }
-+ }
-+
-+ if (realok && effectiveok && fsok)
-+ return 0;
-+ else {
-+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
-+ return 1;
-+ }
-+}
-+
-+extern int gr_acl_is_capable(const int cap);
-+
-+void
-+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
-+{
-+ struct acl_role_label *role = task->role;
-+ struct acl_role_label *origrole = role;
-+ struct acl_subject_label *subj = NULL;
-+ struct acl_object_label *obj;
-+ struct file *filp;
-+
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return;
-+
-+ filp = task->exec_file;
-+
-+ /* kernel process, we'll give them the kernel role */
-+ if (unlikely(!filp)) {
-+ task->role = running_polstate.kernel_role;
-+ task->acl = running_polstate.kernel_role->root_label;
-+ return;
-+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
-+ /* save the current ip at time of role lookup so that the proper
-+ IP will be learned for role_allowed_ip */
-+ task->signal->saved_ip = task->signal->curr_ip;
-+ role = lookup_acl_role_label(task, uid, gid);
-+ }
-+
-+ /* don't change the role if we're not a privileged process */
-+ if (role && task->role != role &&
-+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
-+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
-+ return;
-+
-+ task->role = role;
-+
-+ if (task->inherited) {
-+ /* if we reached our subject through inheritance, then first see
-+ if there's a subject of the same name in the new role that has
-+ an object that would result in the same inherited subject
-+ */
-+ subj = gr_get_subject_for_task(task, task->acl->filename, 0);
-+ if (subj) {
-+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, subj);
-+ if (!(obj->mode & GR_INHERIT))
-+ subj = NULL;
-+ }
-+
-+ }
-+ if (subj == NULL) {
-+ /* otherwise:
-+ perform subject lookup in possibly new role
-+ we can use this result below in the case where role == task->role
-+ */
-+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
-+ }
-+
-+ /* if we changed uid/gid, but result in the same role
-+ and are using inheritance, don't lose the inherited subject
-+ if current subject is other than what normal lookup
-+ would result in, we arrived via inheritance, don't
-+ lose subject
-+ */
-+ if (role != origrole || (!(task->acl->mode & GR_INHERITLEARN) &&
-+ (subj == task->acl)))
-+ task->acl = subj;
-+
-+ /* leave task->inherited unaffected */
-+
-+ task->is_writable = 0;
-+
-+ /* ignore additional mmap checks for processes that are writable
-+ by the default ACL */
-+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
-+ if (unlikely(obj->mode & GR_WRITE))
-+ task->is_writable = 1;
-+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
-+ if (unlikely(obj->mode & GR_WRITE))
-+ task->is_writable = 1;
-+
-+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
-+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
-+#endif
-+
-+ gr_set_proc_res(task);
-+
-+ return;
-+}
-+
-+int
-+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
-+ const int unsafe_flags)
-+{
-+ struct task_struct *task = current;
-+ struct acl_subject_label *newacl;
-+ struct acl_object_label *obj;
-+ __u32 retmode;
-+
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return 0;
-+
-+ newacl = chk_subj_label(dentry, mnt, task->role);
-+
-+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
-+ did an exec
-+ */
-+ rcu_read_lock();
-+ read_lock(&tasklist_lock);
-+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
-+ (task->parent->acl->mode & GR_POVERRIDE))) {
-+ read_unlock(&tasklist_lock);
-+ rcu_read_unlock();
-+ goto skip_check;
-+ }
-+ read_unlock(&tasklist_lock);
-+ rcu_read_unlock();
-+
-+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
-+ !(task->role->roletype & GR_ROLE_GOD) &&
-+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
-+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
-+ if (unsafe_flags & LSM_UNSAFE_SHARE)
-+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
-+ else
-+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
-+ return -EACCES;
-+ }
-+
-+skip_check:
-+
-+ obj = chk_obj_label(dentry, mnt, task->acl);
-+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
-+
-+ if (!(task->acl->mode & GR_INHERITLEARN) &&
-+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
-+ if (obj->nested)
-+ task->acl = obj->nested;
-+ else
-+ task->acl = newacl;
-+ task->inherited = 0;
-+ } else {
-+ task->inherited = 1;
-+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
-+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
-+ }
-+
-+ task->is_writable = 0;
-+
-+ /* ignore additional mmap checks for processes that are writable
-+ by the default ACL */
-+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
-+ if (unlikely(obj->mode & GR_WRITE))
-+ task->is_writable = 1;
-+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
-+ if (unlikely(obj->mode & GR_WRITE))
-+ task->is_writable = 1;
-+
-+ gr_set_proc_res(task);
-+
-+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
-+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
-+#endif
-+ return 0;
-+}
-+
-+/* always called with valid inodev ptr */
-+static void
-+do_handle_delete(struct inodev_entry *inodev, const u64 ino, const dev_t dev)
-+{
-+ struct acl_object_label *matchpo;
-+ struct acl_subject_label *matchps;
-+ struct acl_subject_label *subj;
-+ struct acl_role_label *role;
-+ unsigned int x;
-+
-+ FOR_EACH_ROLE_START(role)
-+ FOR_EACH_SUBJECT_START(role, subj, x)
-+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
-+ matchpo->mode |= GR_DELETED;
-+ FOR_EACH_SUBJECT_END(subj,x)
-+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
-+ /* nested subjects aren't in the role's subj_hash table */
-+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
-+ matchpo->mode |= GR_DELETED;
-+ FOR_EACH_NESTED_SUBJECT_END(subj)
-+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
-+ matchps->mode |= GR_DELETED;
-+ FOR_EACH_ROLE_END(role)
-+
-+ inodev->nentry->deleted = 1;
-+
-+ return;
-+}
-+
-+void
-+gr_handle_delete(const u64 ino, const dev_t dev)
-+{
-+ struct inodev_entry *inodev;
-+
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return;
-+
-+ write_lock(&gr_inode_lock);
-+ inodev = lookup_inodev_entry(ino, dev);
-+ if (inodev != NULL)
-+ do_handle_delete(inodev, ino, dev);
-+ write_unlock(&gr_inode_lock);
-+
-+ return;
-+}
-+
-+static void
-+update_acl_obj_label(const u64 oldinode, const dev_t olddevice,
-+ const u64 newinode, const dev_t newdevice,
-+ struct acl_subject_label *subj)
-+{
-+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
-+ struct acl_object_label *match;
-+
-+ match = subj->obj_hash[index];
-+
-+ while (match && (match->inode != oldinode ||
-+ match->device != olddevice ||
-+ !(match->mode & GR_DELETED)))
-+ match = match->next;
-+
-+ if (match && (match->inode == oldinode)
-+ && (match->device == olddevice)
-+ && (match->mode & GR_DELETED)) {
-+ if (match->prev == NULL) {
-+ subj->obj_hash[index] = match->next;
-+ if (match->next != NULL)
-+ match->next->prev = NULL;
-+ } else {
-+ match->prev->next = match->next;
-+ if (match->next != NULL)
-+ match->next->prev = match->prev;
-+ }
-+ match->prev = NULL;
-+ match->next = NULL;
-+ match->inode = newinode;
-+ match->device = newdevice;
-+ match->mode &= ~GR_DELETED;
-+
-+ insert_acl_obj_label(match, subj);
-+ }
-+
-+ return;
-+}
-+
-+static void
-+update_acl_subj_label(const u64 oldinode, const dev_t olddevice,
-+ const u64 newinode, const dev_t newdevice,
-+ struct acl_role_label *role)
-+{
-+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
-+ struct acl_subject_label *match;
-+
-+ match = role->subj_hash[index];
-+
-+ while (match && (match->inode != oldinode ||
-+ match->device != olddevice ||
-+ !(match->mode & GR_DELETED)))
-+ match = match->next;
-+
-+ if (match && (match->inode == oldinode)
-+ && (match->device == olddevice)
-+ && (match->mode & GR_DELETED)) {
-+ if (match->prev == NULL) {
-+ role->subj_hash[index] = match->next;
-+ if (match->next != NULL)
-+ match->next->prev = NULL;
-+ } else {
-+ match->prev->next = match->next;
-+ if (match->next != NULL)
-+ match->next->prev = match->prev;
-+ }
-+ match->prev = NULL;
-+ match->next = NULL;
-+ match->inode = newinode;
-+ match->device = newdevice;
-+ match->mode &= ~GR_DELETED;
-+
-+ insert_acl_subj_label(match, role);
-+ }
-+
-+ return;
-+}
-+
-+static void
-+update_inodev_entry(const u64 oldinode, const dev_t olddevice,
-+ const u64 newinode, const dev_t newdevice)
-+{
-+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
-+ struct inodev_entry *match;
-+
-+ match = running_polstate.inodev_set.i_hash[index];
-+
-+ while (match && (match->nentry->inode != oldinode ||
-+ match->nentry->device != olddevice || !match->nentry->deleted))
-+ match = match->next;
-+
-+ if (match && (match->nentry->inode == oldinode)
-+ && (match->nentry->device == olddevice) &&
-+ match->nentry->deleted) {
-+ if (match->prev == NULL) {
-+ running_polstate.inodev_set.i_hash[index] = match->next;
-+ if (match->next != NULL)
-+ match->next->prev = NULL;
-+ } else {
-+ match->prev->next = match->next;
-+ if (match->next != NULL)
-+ match->next->prev = match->prev;
-+ }
-+ match->prev = NULL;
-+ match->next = NULL;
-+ match->nentry->inode = newinode;
-+ match->nentry->device = newdevice;
-+ match->nentry->deleted = 0;
-+
-+ insert_inodev_entry(match);
-+ }
-+
-+ return;
-+}
-+
-+static void
-+__do_handle_create(const struct name_entry *matchn, u64 ino, dev_t dev)
-+{
-+ struct acl_subject_label *subj;
-+ struct acl_role_label *role;
-+ unsigned int x;
-+
-+ FOR_EACH_ROLE_START(role)
-+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
-+
-+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
-+ if ((subj->inode == ino) && (subj->device == dev)) {
-+ subj->inode = ino;
-+ subj->device = dev;
-+ }
-+ /* nested subjects aren't in the role's subj_hash table */
-+ update_acl_obj_label(matchn->inode, matchn->device,
-+ ino, dev, subj);
-+ FOR_EACH_NESTED_SUBJECT_END(subj)
-+ FOR_EACH_SUBJECT_START(role, subj, x)
-+ update_acl_obj_label(matchn->inode, matchn->device,
-+ ino, dev, subj);
-+ FOR_EACH_SUBJECT_END(subj,x)
-+ FOR_EACH_ROLE_END(role)
-+
-+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
-+
-+ return;
-+}
-+
-+static void
-+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
-+ const struct vfsmount *mnt)
-+{
-+ u64 ino = __get_ino(dentry);
-+ dev_t dev = __get_dev(dentry);
-+
-+ __do_handle_create(matchn, ino, dev);
-+
-+ return;
-+}
-+
-+void
-+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+ struct name_entry *matchn;
-+
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return;
-+
-+ preempt_disable();
-+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
-+
-+ if (unlikely((unsigned long)matchn)) {
-+ write_lock(&gr_inode_lock);
-+ do_handle_create(matchn, dentry, mnt);
-+ write_unlock(&gr_inode_lock);
-+ }
-+ preempt_enable();
-+
-+ return;
-+}
-+
-+void
-+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
-+{
-+ struct name_entry *matchn;
-+
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return;
-+
-+ preempt_disable();
-+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
-+
-+ if (unlikely((unsigned long)matchn)) {
-+ write_lock(&gr_inode_lock);
-+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
-+ write_unlock(&gr_inode_lock);
-+ }
-+ preempt_enable();
-+
-+ return;
-+}
-+
-+void
-+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
-+ struct dentry *old_dentry,
-+ struct dentry *new_dentry,
-+ struct vfsmount *mnt, const __u8 replace)
-+{
-+ struct name_entry *matchn;
-+ struct inodev_entry *inodev;
-+ struct inode *inode = new_dentry->d_inode;
-+ u64 old_ino = __get_ino(old_dentry);
-+ dev_t old_dev = __get_dev(old_dentry);
-+
-+ /* vfs_rename swaps the name and parent link for old_dentry and
-+ new_dentry
-+ at this point, old_dentry has the new name, parent link, and inode
-+ for the renamed file
-+ if a file is being replaced by a rename, new_dentry has the inode
-+ and name for the replaced file
-+ */
-+
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return;
-+
-+ preempt_disable();
-+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
-+
-+ /* we wouldn't have to check d_inode if it weren't for
-+ NFS silly-renaming
-+ */
-+
-+ write_lock(&gr_inode_lock);
-+ if (unlikely(replace && inode)) {
-+ u64 new_ino = __get_ino(new_dentry);
-+ dev_t new_dev = __get_dev(new_dentry);
-+
-+ inodev = lookup_inodev_entry(new_ino, new_dev);
-+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
-+ do_handle_delete(inodev, new_ino, new_dev);
-+ }
-+
-+ inodev = lookup_inodev_entry(old_ino, old_dev);
-+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
-+ do_handle_delete(inodev, old_ino, old_dev);
-+
-+ if (unlikely((unsigned long)matchn))
-+ do_handle_create(matchn, old_dentry, mnt);
-+
-+ write_unlock(&gr_inode_lock);
-+ preempt_enable();
-+
-+ return;
-+}
-+
-+void
-+gr_learn_resource(const struct task_struct *task,
-+ const int res, const unsigned long wanted, const int gt)
-+{
-+ struct acl_subject_label *acl;
-+ const struct cred *cred;
-+
-+ if (unlikely((gr_status & GR_READY) &&
-+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
-+ goto skip_reslog;
-+
-+#ifdef CONFIG_GRKERNSEC_RESLOG
-+ gr_log_resource(task, res, wanted, gt);
-+#endif
-+ skip_reslog:
-+
-+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
-+ return;
-+
-+ acl = task->acl;
-+
-+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
-+ !(acl->resmask & (1U << (unsigned short) res))))
-+ return;
-+
-+ if (wanted >= acl->res[res].rlim_cur) {
-+ unsigned long res_add;
-+
-+ res_add = wanted;
-+ switch (res) {
-+ case RLIMIT_CPU:
-+ res_add += GR_RLIM_CPU_BUMP;
-+ break;
-+ case RLIMIT_FSIZE:
-+ res_add += GR_RLIM_FSIZE_BUMP;
-+ break;
-+ case RLIMIT_DATA:
-+ res_add += GR_RLIM_DATA_BUMP;
-+ break;
-+ case RLIMIT_STACK:
-+ res_add += GR_RLIM_STACK_BUMP;
-+ break;
-+ case RLIMIT_CORE:
-+ res_add += GR_RLIM_CORE_BUMP;
-+ break;
-+ case RLIMIT_RSS:
-+ res_add += GR_RLIM_RSS_BUMP;
-+ break;
-+ case RLIMIT_NPROC:
-+ res_add += GR_RLIM_NPROC_BUMP;
-+ break;
-+ case RLIMIT_NOFILE:
-+ res_add += GR_RLIM_NOFILE_BUMP;
-+ break;
-+ case RLIMIT_MEMLOCK:
-+ res_add += GR_RLIM_MEMLOCK_BUMP;
-+ break;
-+ case RLIMIT_AS:
-+ res_add += GR_RLIM_AS_BUMP;
-+ break;
-+ case RLIMIT_LOCKS:
-+ res_add += GR_RLIM_LOCKS_BUMP;
-+ break;
-+ case RLIMIT_SIGPENDING:
-+ res_add += GR_RLIM_SIGPENDING_BUMP;
-+ break;
-+ case RLIMIT_MSGQUEUE:
-+ res_add += GR_RLIM_MSGQUEUE_BUMP;
-+ break;
-+ case RLIMIT_NICE:
-+ res_add += GR_RLIM_NICE_BUMP;
-+ break;
-+ case RLIMIT_RTPRIO:
-+ res_add += GR_RLIM_RTPRIO_BUMP;
-+ break;
-+ case RLIMIT_RTTIME:
-+ res_add += GR_RLIM_RTTIME_BUMP;
-+ break;
-+ }
-+
-+ acl->res[res].rlim_cur = res_add;
-+
-+ if (wanted > acl->res[res].rlim_max)
-+ acl->res[res].rlim_max = res_add;
-+
-+ /* only log the subject filename, since resource logging is supported for
-+ single-subject learning only */
-+ rcu_read_lock();
-+ cred = __task_cred(task);
-+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
-+ task->role->roletype, cred->uid, cred->gid, acl->filename,
-+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
-+ "", (unsigned long) res, &task->signal->saved_ip);
-+ rcu_read_unlock();
-+ }
-+
-+ return;
-+}
-+
-+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
-+void
-+pax_set_initial_flags(struct linux_binprm *bprm)
-+{
-+ struct task_struct *task = current;
-+ struct acl_subject_label *proc;
-+ unsigned long flags;
-+
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return;
-+
-+ flags = pax_get_flags(task);
-+
-+ proc = task->acl;
-+
-+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
-+ flags &= ~MF_PAX_PAGEEXEC;
-+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
-+ flags &= ~MF_PAX_SEGMEXEC;
-+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
-+ flags &= ~MF_PAX_RANDMMAP;
-+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
-+ flags &= ~MF_PAX_EMUTRAMP;
-+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
-+ flags &= ~MF_PAX_MPROTECT;
-+
-+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
-+ flags |= MF_PAX_PAGEEXEC;
-+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
-+ flags |= MF_PAX_SEGMEXEC;
-+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
-+ flags |= MF_PAX_RANDMMAP;
-+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
-+ flags |= MF_PAX_EMUTRAMP;
-+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
-+ flags |= MF_PAX_MPROTECT;
-+
-+ pax_set_flags(task, flags);
-+
-+ return;
-+}
-+#endif
-+
-+#ifdef CONFIG_SYSCTL
-+/* Eric Biederman likes breaking userland ABI and every inode-based security
-+ system to save 35kb of memory */
-+
-+/* we modify the passed in filename, but adjust it back before returning */
-+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
-+{
-+ struct name_entry *nmatch;
-+ char *p, *lastp = NULL;
-+ struct acl_object_label *obj = NULL, *tmp;
-+ struct acl_subject_label *tmpsubj;
-+ char c = '\0';
-+
-+ read_lock(&gr_inode_lock);
-+
-+ p = name + len - 1;
-+ do {
-+ nmatch = lookup_name_entry(name);
-+ if (lastp != NULL)
-+ *lastp = c;
-+
-+ if (nmatch == NULL)
-+ goto next_component;
-+ tmpsubj = current->acl;
-+ do {
-+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
-+ if (obj != NULL) {
-+ tmp = obj->globbed;
-+ while (tmp) {
-+ if (!glob_match(tmp->filename, name)) {
-+ obj = tmp;
-+ goto found_obj;
-+ }
-+ tmp = tmp->next;
-+ }
-+ goto found_obj;
-+ }
-+ } while ((tmpsubj = tmpsubj->parent_subject));
-+next_component:
-+ /* end case */
-+ if (p == name)
-+ break;
-+
-+ while (*p != '/')
-+ p--;
-+ if (p == name)
-+ lastp = p + 1;
-+ else {
-+ lastp = p;
-+ p--;
-+ }
-+ c = *lastp;
-+ *lastp = '\0';
-+ } while (1);
-+found_obj:
-+ read_unlock(&gr_inode_lock);
-+ /* obj returned will always be non-null */
-+ return obj;
-+}
-+
-+/* returns 0 when allowing, non-zero on error
-+ op of 0 is used for readdir, so we don't log the names of hidden files
-+*/
-+__u32
-+gr_handle_sysctl(const struct ctl_table *table, const int op)
-+{
-+ struct ctl_table *tmp;
-+ const char *proc_sys = "/proc/sys";
-+ char *path;
-+ struct acl_object_label *obj;
-+ unsigned short len = 0, pos = 0, depth = 0, i;
-+ __u32 err = 0;
-+ __u32 mode = 0;
-+
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return 0;
-+
-+ /* for now, ignore operations on non-sysctl entries if it's not a
-+ readdir*/
-+ if (table->child != NULL && op != 0)
-+ return 0;
-+
-+ mode |= GR_FIND;
-+ /* it's only a read if it's an entry, read on dirs is for readdir */
-+ if (op & MAY_READ)
-+ mode |= GR_READ;
-+ if (op & MAY_WRITE)
-+ mode |= GR_WRITE;
-+
-+ preempt_disable();
-+
-+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
-+
-+ /* it's only a read/write if it's an actual entry, not a dir
-+ (which are opened for readdir)
-+ */
-+
-+ /* convert the requested sysctl entry into a pathname */
-+
-+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
-+ len += strlen(tmp->procname);
-+ len++;
-+ depth++;
-+ }
-+
-+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
-+ /* deny */
-+ goto out;
-+ }
-+
-+ memset(path, 0, PAGE_SIZE);
-+
-+ memcpy(path, proc_sys, strlen(proc_sys));
-+
-+ pos += strlen(proc_sys);
-+
-+ for (; depth > 0; depth--) {
-+ path[pos] = '/';
-+ pos++;
-+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
-+ if (depth == i) {
-+ memcpy(path + pos, tmp->procname,
-+ strlen(tmp->procname));
-+ pos += strlen(tmp->procname);
-+ }
-+ i++;
-+ }
-+ }
-+
-+ obj = gr_lookup_by_name(path, pos);
-+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
-+
-+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
-+ ((err & mode) != mode))) {
-+ __u32 new_mode = mode;
-+
-+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
-+
-+ err = 0;
-+ gr_log_learn_sysctl(path, new_mode);
-+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
-+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
-+ err = -ENOENT;
-+ } else if (!(err & GR_FIND)) {
-+ err = -ENOENT;
-+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
-+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
-+ path, (mode & GR_READ) ? " reading" : "",
-+ (mode & GR_WRITE) ? " writing" : "");
-+ err = -EACCES;
-+ } else if ((err & mode) != mode) {
-+ err = -EACCES;
-+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
-+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
-+ path, (mode & GR_READ) ? " reading" : "",
-+ (mode & GR_WRITE) ? " writing" : "");
-+ err = 0;
-+ } else
-+ err = 0;
-+
-+ out:
-+ preempt_enable();
-+
-+ return err;
-+}
-+#endif
-+
-+int
-+gr_handle_proc_ptrace(struct task_struct *task)
-+{
-+ struct file *filp;
-+ struct task_struct *tmp = task;
-+ struct task_struct *curtemp = current;
-+ __u32 retmode;
-+
-+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return 0;
-+#endif
-+
-+ read_lock(&tasklist_lock);
-+ read_lock(&grsec_exec_file_lock);
-+ filp = task->exec_file;
-+
-+ while (tmp->pid > 0) {
-+ if (tmp == curtemp)
-+ break;
-+ tmp = tmp->real_parent;
-+ }
-+
-+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
-+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
-+ read_unlock(&grsec_exec_file_lock);
-+ read_unlock(&tasklist_lock);
-+ return 1;
-+ }
-+
-+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
-+ if (!(gr_status & GR_READY)) {
-+ read_unlock(&grsec_exec_file_lock);
-+ read_unlock(&tasklist_lock);
-+ return 0;
-+ }
-+#endif
-+
-+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
-+ read_unlock(&grsec_exec_file_lock);
-+ read_unlock(&tasklist_lock);
-+
-+ if (retmode & GR_NOPTRACE)
-+ return 1;
-+
-+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
-+ && (current->acl != task->acl || (current->acl != current->role->root_label
-+ && current->pid != task->pid)))
-+ return 1;
-+
-+ return 0;
-+}
-+
-+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
-+{
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return;
-+
-+ if (!(current->role->roletype & GR_ROLE_GOD))
-+ return;
-+
-+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
-+ p->role->rolename, gr_task_roletype_to_char(p),
-+ p->acl->filename);
-+}
-+
-+int
-+gr_handle_ptrace(struct task_struct *task, const long request)
-+{
-+ struct task_struct *tmp = task;
-+ struct task_struct *curtemp = current;
-+ __u32 retmode;
-+
-+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return 0;
-+#endif
-+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
-+ read_lock(&tasklist_lock);
-+ while (tmp->pid > 0) {
-+ if (tmp == curtemp)
-+ break;
-+ tmp = tmp->real_parent;
-+ }
-+
-+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
-+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
-+ read_unlock(&tasklist_lock);
-+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
-+ return 1;
-+ }
-+ read_unlock(&tasklist_lock);
-+ }
-+
-+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
-+ if (!(gr_status & GR_READY))
-+ return 0;
-+#endif
-+
-+ read_lock(&grsec_exec_file_lock);
-+ if (unlikely(!task->exec_file)) {
-+ read_unlock(&grsec_exec_file_lock);
-+ return 0;
-+ }
-+
-+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
-+ read_unlock(&grsec_exec_file_lock);
-+
-+ if (retmode & GR_NOPTRACE) {
-+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
-+ return 1;
-+ }
-+
-+ if (retmode & GR_PTRACERD) {
-+ switch (request) {
-+ case PTRACE_SEIZE:
-+ case PTRACE_POKETEXT:
-+ case PTRACE_POKEDATA:
-+ case PTRACE_POKEUSR:
-+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
-+ case PTRACE_SETREGS:
-+ case PTRACE_SETFPREGS:
-+#endif
-+#ifdef CONFIG_X86
-+ case PTRACE_SETFPXREGS:
-+#endif
-+#ifdef CONFIG_ALTIVEC
-+ case PTRACE_SETVRREGS:
-+#endif
-+ return 1;
-+ default:
-+ return 0;
-+ }
-+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
-+ !(current->role->roletype & GR_ROLE_GOD) &&
-+ (current->acl != task->acl)) {
-+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
-+ return 1;
-+ }
-+
-+ return 0;
-+}
-+
-+static int is_writable_mmap(const struct file *filp)
-+{
-+ struct task_struct *task = current;
-+ struct acl_object_label *obj, *obj2;
-+
-+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
-+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
-+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
-+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
-+ task->role->root_label);
-+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
-+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
-+ return 1;
-+ }
-+ }
-+ return 0;
-+}
-+
-+int
-+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
-+{
-+ __u32 mode;
-+
-+ if (unlikely(!file || !(prot & PROT_EXEC)))
-+ return 1;
-+
-+ if (is_writable_mmap(file))
-+ return 0;
-+
-+ mode =
-+ gr_search_file(file->f_path.dentry,
-+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
-+ file->f_path.mnt);
-+
-+ if (!gr_tpe_allow(file))
-+ return 0;
-+
-+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
-+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
-+ return 0;
-+ } else if (unlikely(!(mode & GR_EXEC))) {
-+ return 0;
-+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
-+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
-+ return 1;
-+ }
-+
-+ return 1;
-+}
-+
-+int
-+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
-+{
-+ __u32 mode;
-+
-+ if (unlikely(!file || !(prot & PROT_EXEC)))
-+ return 1;
-+
-+ if (is_writable_mmap(file))
-+ return 0;
-+
-+ mode =
-+ gr_search_file(file->f_path.dentry,
-+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
-+ file->f_path.mnt);
-+
-+ if (!gr_tpe_allow(file))
-+ return 0;
-+
-+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
-+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
-+ return 0;
-+ } else if (unlikely(!(mode & GR_EXEC))) {
-+ return 0;
-+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
-+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
-+ return 1;
-+ }
-+
-+ return 1;
-+}
-+
-+void
-+gr_acl_handle_psacct(struct task_struct *task, const long code)
-+{
-+ unsigned long runtime, cputime;
-+ cputime_t utime, stime;
-+ unsigned int wday, cday;
-+ __u8 whr, chr;
-+ __u8 wmin, cmin;
-+ __u8 wsec, csec;
-+ struct timespec timeval;
-+
-+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
-+ !(task->acl->mode & GR_PROCACCT)))
-+ return;
-+
-+ do_posix_clock_monotonic_gettime(&timeval);
-+ runtime = timeval.tv_sec - task->start_time.tv_sec;
-+ wday = runtime / (60 * 60 * 24);
-+ runtime -= wday * (60 * 60 * 24);
-+ whr = runtime / (60 * 60);
-+ runtime -= whr * (60 * 60);
-+ wmin = runtime / 60;
-+ runtime -= wmin * 60;
-+ wsec = runtime;
-+
-+ task_times(task, &utime, &stime);
-+ cputime = cputime_to_secs(utime + stime);
-+ cday = cputime / (60 * 60 * 24);
-+ cputime -= cday * (60 * 60 * 24);
-+ chr = cputime / (60 * 60);
-+ cputime -= chr * (60 * 60);
-+ cmin = cputime / 60;
-+ cputime -= cmin * 60;
-+ csec = cputime;
-+
-+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
-+
-+ return;
-+}
-+
-+#ifdef CONFIG_TASKSTATS
-+int gr_is_taskstats_denied(int pid)
-+{
-+ struct task_struct *task;
-+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ const struct cred *cred;
-+#endif
-+ int ret = 0;
-+
-+ /* restrict taskstats viewing to un-chrooted root users
-+ who have the 'view' subject flag if the RBAC system is enabled
-+ */
-+
-+ rcu_read_lock();
-+ read_lock(&tasklist_lock);
-+ task = find_task_by_vpid(pid);
-+ if (task) {
-+#ifdef CONFIG_GRKERNSEC_CHROOT
-+ if (proc_is_chrooted(task))
-+ ret = -EACCES;
-+#endif
-+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ cred = __task_cred(task);
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+ if (cred->uid != 0)
-+ ret = -EACCES;
-+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ if (cred->uid != 0 && !groups_search(cred->group_info, grsec_proc_gid))
-+ ret = -EACCES;
-+#endif
-+#endif
-+ if (gr_status & GR_READY) {
-+ if (!(task->acl->mode & GR_VIEW))
-+ ret = -EACCES;
-+ }
-+ } else
-+ ret = -ENOENT;
-+
-+ read_unlock(&tasklist_lock);
-+ rcu_read_unlock();
-+
-+ return ret;
-+}
-+#endif
-+
-+/* AUXV entries are filled via a descendant of search_binary_handler
-+ after we've already applied the subject for the target
-+*/
-+int gr_acl_enable_at_secure(void)
-+{
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return 0;
-+
-+ if (current->acl->mode & GR_ATSECURE)
-+ return 1;
-+
-+ return 0;
-+}
-+
-+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const u64 ino)
-+{
-+ struct task_struct *task = current;
-+ struct dentry *dentry = file->f_path.dentry;
-+ struct vfsmount *mnt = file->f_path.mnt;
-+ struct acl_object_label *obj, *tmp;
-+ struct acl_subject_label *subj;
-+ unsigned int bufsize;
-+ int is_not_root;
-+ char *path;
-+ dev_t dev = __get_dev(dentry);
-+
-+ if (unlikely(!(gr_status & GR_READY)))
-+ return 1;
-+
-+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
-+ return 1;
-+
-+ /* ignore Eric Biederman */
-+ if (IS_PRIVATE(dentry->d_inode))
-+ return 1;
-+
-+ subj = task->acl;
-+ read_lock(&gr_inode_lock);
-+ do {
-+ obj = lookup_acl_obj_label(ino, dev, subj);
-+ if (obj != NULL) {
-+ read_unlock(&gr_inode_lock);
-+ return (obj->mode & GR_FIND) ? 1 : 0;
-+ }
-+ } while ((subj = subj->parent_subject));
-+ read_unlock(&gr_inode_lock);
-+
-+ /* this is purely an optimization since we're looking for an object
-+ for the directory we're doing a readdir on.
-+ if it's possible for any globbed object to match the entry we're
-+ filling into the directory, then the object we find here will be
-+ an anchor point with attached globbed objects
-+ */
-+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
-+ if (obj->globbed == NULL)
-+ return (obj->mode & GR_FIND) ? 1 : 0;
-+
-+ is_not_root = ((obj->filename[0] == '/') &&
-+ (obj->filename[1] == '\0')) ? 0 : 1;
-+ bufsize = PAGE_SIZE - namelen - is_not_root;
-+
-+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
-+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
-+ return 1;
-+
-+ preempt_disable();
-+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
-+ bufsize);
-+
-+ bufsize = strlen(path);
-+
-+ /* if base is "/", don't append an additional slash */
-+ if (is_not_root)
-+ *(path + bufsize) = '/';
-+ memcpy(path + bufsize + is_not_root, name, namelen);
-+ *(path + bufsize + namelen + is_not_root) = '\0';
-+
-+ tmp = obj->globbed;
-+ while (tmp) {
-+ if (!glob_match(tmp->filename, path)) {
-+ preempt_enable();
-+ return (tmp->mode & GR_FIND) ? 1 : 0;
-+ }
-+ tmp = tmp->next;
-+ }
-+ preempt_enable();
-+ return (obj->mode & GR_FIND) ? 1 : 0;
-+}
-+
-+void gr_put_exec_file(struct task_struct *task)
-+{
-+ struct file *filp;
-+
-+ write_lock(&grsec_exec_file_lock);
-+ filp = task->exec_file;
-+ task->exec_file = NULL;
-+ write_unlock(&grsec_exec_file_lock);
-+
-+ if (filp)
-+ fput(filp);
-+
-+ return;
-+}
-+
-+
-+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
-+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
-+#endif
-+EXPORT_SYMBOL_GPL(gr_learn_resource);
-+#ifdef CONFIG_SECURITY
-+EXPORT_SYMBOL_GPL(gr_check_user_change);
-+EXPORT_SYMBOL_GPL(gr_check_group_change);
-+#endif
-+
-diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
-new file mode 100644
-index 0000000..9adc75c
---- /dev/null
-+++ b/grsecurity/gracl_alloc.c
-@@ -0,0 +1,105 @@
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <linux/gracl.h>
-+#include <linux/grsecurity.h>
-+
-+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
-+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
-+
-+static int
-+alloc_pop(void)
-+{
-+ if (current_alloc_state->alloc_stack_next == 1)
-+ return 0;
-+
-+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
-+
-+ current_alloc_state->alloc_stack_next--;
-+
-+ return 1;
-+}
-+
-+static int
-+alloc_push(void *buf)
-+{
-+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
-+ return 1;
-+
-+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
-+
-+ current_alloc_state->alloc_stack_next++;
-+
-+ return 0;
-+}
-+
-+void *
-+acl_alloc(unsigned long len)
-+{
-+ void *ret = NULL;
-+
-+ if (!len || len > PAGE_SIZE)
-+ goto out;
-+
-+ ret = kmalloc(len, GFP_KERNEL);
-+
-+ if (ret) {
-+ if (alloc_push(ret)) {
-+ kfree(ret);
-+ ret = NULL;
-+ }
-+ }
-+
-+out:
-+ return ret;
-+}
-+
-+void *
-+acl_alloc_num(unsigned long num, unsigned long len)
-+{
-+ if (!len || (num > (PAGE_SIZE / len)))
-+ return NULL;
-+
-+ return acl_alloc(num * len);
-+}
-+
-+void
-+acl_free_all(void)
-+{
-+ if (!current_alloc_state->alloc_stack)
-+ return;
-+
-+ while (alloc_pop()) ;
-+
-+ if (current_alloc_state->alloc_stack) {
-+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
-+ kfree(current_alloc_state->alloc_stack);
-+ else
-+ vfree(current_alloc_state->alloc_stack);
-+ }
-+
-+ current_alloc_state->alloc_stack = NULL;
-+ current_alloc_state->alloc_stack_size = 1;
-+ current_alloc_state->alloc_stack_next = 1;
-+
-+ return;
-+}
-+
-+int
-+acl_alloc_stack_init(unsigned long size)
-+{
-+ if ((size * sizeof (void *)) <= PAGE_SIZE)
-+ current_alloc_state->alloc_stack =
-+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
-+ else
-+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
-+
-+ current_alloc_state->alloc_stack_size = size;
-+ current_alloc_state->alloc_stack_next = 1;
-+
-+ if (!current_alloc_state->alloc_stack)
-+ return 0;
-+ else
-+ return 1;
-+}
-diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
-new file mode 100644
-index 0000000..b2ec14c
---- /dev/null
-+++ b/grsecurity/gracl_cap.c
-@@ -0,0 +1,118 @@
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/gracl.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+extern const char *captab_log[];
-+extern int captab_log_entries;
-+
-+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
-+{
-+ struct acl_subject_label *curracl;
-+
-+ if (!gr_acl_is_enabled())
-+ return 1;
-+
-+ curracl = task->acl;
-+
-+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
-+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
-+ task->role->roletype, cred->uid,
-+ cred->gid, task->exec_file ?
-+ gr_to_filename(task->exec_file->f_path.dentry,
-+ task->exec_file->f_path.mnt) : curracl->filename,
-+ curracl->filename, 0UL,
-+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
-+ return 1;
-+ }
-+
-+ return 0;
-+}
-+
-+int
-+gr_acl_is_capable(const int cap)
-+{
-+ struct task_struct *task = current;
-+ const struct cred *cred = current_cred();
-+ struct acl_subject_label *curracl;
-+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
-+ kernel_cap_t cap_audit = __cap_empty_set;
-+
-+ if (!gr_acl_is_enabled())
-+ return 1;
-+
-+ curracl = task->acl;
-+
-+ cap_drop = curracl->cap_lower;
-+ cap_mask = curracl->cap_mask;
-+ cap_audit = curracl->cap_invert_audit;
-+
-+ while ((curracl = curracl->parent_subject)) {
-+ /* if the cap isn't specified in the current computed mask but is specified in the
-+ current level subject, and is lowered in the current level subject, then add
-+ it to the set of dropped capabilities;
-+ otherwise, add the current level subject's mask to the current computed mask
-+ */
-+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
-+ cap_raise(cap_mask, cap);
-+ if (cap_raised(curracl->cap_lower, cap))
-+ cap_raise(cap_drop, cap);
-+ if (cap_raised(curracl->cap_invert_audit, cap))
-+ cap_raise(cap_audit, cap);
-+ }
-+ }
-+
-+ if (!cap_raised(cap_drop, cap)) {
-+ if (cap_raised(cap_audit, cap))
-+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
-+ return 1;
-+ }
-+
-+ /* only learn the capability use if the process has the capability in the
-+ general case; the two uses in sys.c of gr_learn_cap are an exception
-+ to this rule, to ensure any role transition involves what the full-learned
-+ policy believes in a privileged process
-+ */
-+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
-+ return 1;
-+
-+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
-+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
-+ return 0;
-+}
-+
-+int
-+gr_acl_is_capable_nolog(const int cap)
-+{
-+ struct acl_subject_label *curracl;
-+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
-+
-+ if (!gr_acl_is_enabled())
-+ return 1;
-+
-+ curracl = current->acl;
-+
-+ cap_drop = curracl->cap_lower;
-+ cap_mask = curracl->cap_mask;
-+
-+ while ((curracl = curracl->parent_subject)) {
-+ /* if the cap isn't specified in the current computed mask but is specified in the
-+ current level subject, and is lowered in the current level subject, then add
-+ it to the set of dropped capabilities;
-+ otherwise, add the current level subject's mask to the current computed mask
-+ */
-+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
-+ cap_raise(cap_mask, cap);
-+ if (cap_raised(curracl->cap_lower, cap))
-+ cap_raise(cap_drop, cap);
-+ }
-+ }
-+
-+ if (!cap_raised(cap_drop, cap))
-+ return 1;
-+
-+ return 0;
-+}
-+
-diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
-new file mode 100644
-index 0000000..a43dd06
---- /dev/null
-+++ b/grsecurity/gracl_compat.c
-@@ -0,0 +1,269 @@
-+#include <linux/kernel.h>
-+#include <linux/gracl.h>
-+#include <linux/compat.h>
-+#include <linux/gracl_compat.h>
-+
-+#include <asm/uaccess.h>
-+
-+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
-+{
-+ struct gr_arg_wrapper_compat uwrapcompat;
-+
-+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
-+ return -EFAULT;
-+
-+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
-+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
-+ return -EINVAL;
-+
-+ uwrap->arg = compat_ptr(uwrapcompat.arg);
-+ uwrap->version = uwrapcompat.version;
-+ uwrap->size = sizeof(struct gr_arg);
-+
-+ return 0;
-+}
-+
-+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
-+{
-+ struct gr_arg_compat argcompat;
-+
-+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
-+ return -EFAULT;
-+
-+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
-+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
-+ arg->role_db.num_roles = argcompat.role_db.num_roles;
-+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
-+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
-+ arg->role_db.num_objects = argcompat.role_db.num_objects;
-+
-+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
-+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
-+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
-+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
-+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
-+ arg->segv_device = argcompat.segv_device;
-+ arg->segv_inode = argcompat.segv_inode;
-+ arg->segv_uid = argcompat.segv_uid;
-+ arg->num_sprole_pws = argcompat.num_sprole_pws;
-+ arg->mode = argcompat.mode;
-+
-+ return 0;
-+}
-+
-+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
-+{
-+ struct acl_object_label_compat objcompat;
-+
-+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
-+ return -EFAULT;
-+
-+ obj->filename = compat_ptr(objcompat.filename);
-+ obj->inode = objcompat.inode;
-+ obj->device = objcompat.device;
-+ obj->mode = objcompat.mode;
-+
-+ obj->nested = compat_ptr(objcompat.nested);
-+ obj->globbed = compat_ptr(objcompat.globbed);
-+
-+ obj->prev = compat_ptr(objcompat.prev);
-+ obj->next = compat_ptr(objcompat.next);
-+
-+ return 0;
-+}
-+
-+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
-+{
-+ unsigned int i;
-+ struct acl_subject_label_compat subjcompat;
-+
-+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
-+ return -EFAULT;
-+
-+ subj->filename = compat_ptr(subjcompat.filename);
-+ subj->inode = subjcompat.inode;
-+ subj->device = subjcompat.device;
-+ subj->mode = subjcompat.mode;
-+ subj->cap_mask = subjcompat.cap_mask;
-+ subj->cap_lower = subjcompat.cap_lower;
-+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
-+
-+ for (i = 0; i < GR_NLIMITS; i++) {
-+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
-+ subj->res[i].rlim_cur = RLIM_INFINITY;
-+ else
-+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
-+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
-+ subj->res[i].rlim_max = RLIM_INFINITY;
-+ else
-+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
-+ }
-+ subj->resmask = subjcompat.resmask;
-+
-+ subj->user_trans_type = subjcompat.user_trans_type;
-+ subj->group_trans_type = subjcompat.group_trans_type;
-+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
-+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
-+ subj->user_trans_num = subjcompat.user_trans_num;
-+ subj->group_trans_num = subjcompat.group_trans_num;
-+
-+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
-+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
-+ subj->ip_type = subjcompat.ip_type;
-+ subj->ips = compat_ptr(subjcompat.ips);
-+ subj->ip_num = subjcompat.ip_num;
-+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
-+
-+ subj->crashes = subjcompat.crashes;
-+ subj->expires = subjcompat.expires;
-+
-+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
-+ subj->hash = compat_ptr(subjcompat.hash);
-+ subj->prev = compat_ptr(subjcompat.prev);
-+ subj->next = compat_ptr(subjcompat.next);
-+
-+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
-+ subj->obj_hash_size = subjcompat.obj_hash_size;
-+ subj->pax_flags = subjcompat.pax_flags;
-+
-+ return 0;
-+}
-+
-+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
-+{
-+ struct acl_role_label_compat rolecompat;
-+
-+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
-+ return -EFAULT;
-+
-+ role->rolename = compat_ptr(rolecompat.rolename);
-+ role->uidgid = rolecompat.uidgid;
-+ role->roletype = rolecompat.roletype;
-+
-+ role->auth_attempts = rolecompat.auth_attempts;
-+ role->expires = rolecompat.expires;
-+
-+ role->root_label = compat_ptr(rolecompat.root_label);
-+ role->hash = compat_ptr(rolecompat.hash);
-+
-+ role->prev = compat_ptr(rolecompat.prev);
-+ role->next = compat_ptr(rolecompat.next);
-+
-+ role->transitions = compat_ptr(rolecompat.transitions);
-+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
-+ role->domain_children = compat_ptr(rolecompat.domain_children);
-+ role->domain_child_num = rolecompat.domain_child_num;
-+
-+ role->umask = rolecompat.umask;
-+
-+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
-+ role->subj_hash_size = rolecompat.subj_hash_size;
-+
-+ return 0;
-+}
-+
-+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
-+{
-+ struct role_allowed_ip_compat roleip_compat;
-+
-+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
-+ return -EFAULT;
-+
-+ roleip->addr = roleip_compat.addr;
-+ roleip->netmask = roleip_compat.netmask;
-+
-+ roleip->prev = compat_ptr(roleip_compat.prev);
-+ roleip->next = compat_ptr(roleip_compat.next);
-+
-+ return 0;
-+}
-+
-+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
-+{
-+ struct role_transition_compat trans_compat;
-+
-+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
-+ return -EFAULT;
-+
-+ trans->rolename = compat_ptr(trans_compat.rolename);
-+
-+ trans->prev = compat_ptr(trans_compat.prev);
-+ trans->next = compat_ptr(trans_compat.next);
-+
-+ return 0;
-+
-+}
-+
-+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
-+{
-+ struct gr_hash_struct_compat hash_compat;
-+
-+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
-+ return -EFAULT;
-+
-+ hash->table = compat_ptr(hash_compat.table);
-+ hash->nametable = compat_ptr(hash_compat.nametable);
-+ hash->first = compat_ptr(hash_compat.first);
-+
-+ hash->table_size = hash_compat.table_size;
-+ hash->used_size = hash_compat.used_size;
-+
-+ hash->type = hash_compat.type;
-+
-+ return 0;
-+}
-+
-+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
-+{
-+ compat_uptr_t ptrcompat;
-+
-+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
-+ return -EFAULT;
-+
-+ *(void **)ptr = compat_ptr(ptrcompat);
-+
-+ return 0;
-+}
-+
-+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
-+{
-+ struct acl_ip_label_compat ip_compat;
-+
-+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
-+ return -EFAULT;
-+
-+ ip->iface = compat_ptr(ip_compat.iface);
-+ ip->addr = ip_compat.addr;
-+ ip->netmask = ip_compat.netmask;
-+ ip->low = ip_compat.low;
-+ ip->high = ip_compat.high;
-+ ip->mode = ip_compat.mode;
-+ ip->type = ip_compat.type;
-+
-+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
-+
-+ ip->prev = compat_ptr(ip_compat.prev);
-+ ip->next = compat_ptr(ip_compat.next);
-+
-+ return 0;
-+}
-+
-+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
-+{
-+ struct sprole_pw_compat pw_compat;
-+
-+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
-+ return -EFAULT;
-+
-+ pw->rolename = compat_ptr(pw_compat.rolename);
-+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
-+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
-+
-+ return 0;
-+}
-+
-+size_t get_gr_arg_wrapper_size_compat(void)
-+{
-+ return sizeof(struct gr_arg_wrapper_compat);
-+}
-+
-diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
-new file mode 100644
-index 0000000..b916759
---- /dev/null
-+++ b/grsecurity/gracl_fs.c
-@@ -0,0 +1,439 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/types.h>
-+#include <linux/fs.h>
-+#include <linux/file.h>
-+#include <linux/stat.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+#include <linux/gracl.h>
-+
-+umode_t
-+gr_acl_umask(void)
-+{
-+ if (unlikely(!gr_acl_is_enabled()))
-+ return 0;
-+
-+ return current->role->umask;
-+}
-+
-+__u32
-+gr_acl_handle_hidden_file(const struct dentry * dentry,
-+ const struct vfsmount * mnt)
-+{
-+ __u32 mode;
-+
-+ if (unlikely(!dentry->d_inode))
-+ return GR_FIND;
-+
-+ mode =
-+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
-+
-+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
-+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
-+ return mode;
-+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
-+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
-+ return 0;
-+ } else if (unlikely(!(mode & GR_FIND)))
-+ return 0;
-+
-+ return GR_FIND;
-+}
-+
-+__u32
-+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
-+ int acc_mode)
-+{
-+ __u32 reqmode = GR_FIND;
-+ __u32 mode;
-+
-+ if (unlikely(!dentry->d_inode))
-+ return reqmode;
-+
-+ if (acc_mode & MAY_APPEND)
-+ reqmode |= GR_APPEND;
-+ else if (acc_mode & MAY_WRITE)
-+ reqmode |= GR_WRITE;
-+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
-+ reqmode |= GR_READ;
-+
-+ mode =
-+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
-+ mnt);
-+
-+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
-+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
-+ reqmode & GR_READ ? " reading" : "",
-+ reqmode & GR_WRITE ? " writing" : reqmode &
-+ GR_APPEND ? " appending" : "");
-+ return reqmode;
-+ } else
-+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
-+ {
-+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
-+ reqmode & GR_READ ? " reading" : "",
-+ reqmode & GR_WRITE ? " writing" : reqmode &
-+ GR_APPEND ? " appending" : "");
-+ return 0;
-+ } else if (unlikely((mode & reqmode) != reqmode))
-+ return 0;
-+
-+ return reqmode;
-+}
-+
-+__u32
-+gr_acl_handle_creat(const struct dentry * dentry,
-+ const struct dentry * p_dentry,
-+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
-+ const int imode)
-+{
-+ __u32 reqmode = GR_WRITE | GR_CREATE;
-+ __u32 mode;
-+
-+ if (acc_mode & MAY_APPEND)
-+ reqmode |= GR_APPEND;
-+ // if a directory was required or the directory already exists, then
-+ // don't count this open as a read
-+ if ((acc_mode & MAY_READ) &&
-+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
-+ reqmode |= GR_READ;
-+ if ((open_flags & O_CREAT) &&
-+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
-+ reqmode |= GR_SETID;
-+
-+ mode =
-+ gr_check_create(dentry, p_dentry, p_mnt,
-+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
-+
-+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
-+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
-+ reqmode & GR_READ ? " reading" : "",
-+ reqmode & GR_WRITE ? " writing" : reqmode &
-+ GR_APPEND ? " appending" : "");
-+ return reqmode;
-+ } else
-+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
-+ {
-+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
-+ reqmode & GR_READ ? " reading" : "",
-+ reqmode & GR_WRITE ? " writing" : reqmode &
-+ GR_APPEND ? " appending" : "");
-+ return 0;
-+ } else if (unlikely((mode & reqmode) != reqmode))
-+ return 0;
-+
-+ return reqmode;
-+}
-+
-+__u32
-+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
-+ const int fmode)
-+{
-+ __u32 mode, reqmode = GR_FIND;
-+
-+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
-+ reqmode |= GR_EXEC;
-+ if (fmode & S_IWOTH)
-+ reqmode |= GR_WRITE;
-+ if (fmode & S_IROTH)
-+ reqmode |= GR_READ;
-+
-+ mode =
-+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
-+ mnt);
-+
-+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
-+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
-+ reqmode & GR_READ ? " reading" : "",
-+ reqmode & GR_WRITE ? " writing" : "",
-+ reqmode & GR_EXEC ? " executing" : "");
-+ return reqmode;
-+ } else
-+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
-+ {
-+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
-+ reqmode & GR_READ ? " reading" : "",
-+ reqmode & GR_WRITE ? " writing" : "",
-+ reqmode & GR_EXEC ? " executing" : "");
-+ return 0;
-+ } else if (unlikely((mode & reqmode) != reqmode))
-+ return 0;
-+
-+ return reqmode;
-+}
-+
-+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
-+{
-+ __u32 mode;
-+
-+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
-+
-+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
-+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
-+ return mode;
-+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
-+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
-+ return 0;
-+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
-+ return 0;
-+
-+ return (reqmode);
-+}
-+
-+__u32
-+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
-+{
-+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
-+}
-+
-+__u32
-+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
-+}
-+
-+__u32
-+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
-+}
-+
-+__u32
-+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
-+}
-+
-+__u32
-+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
-+ umode_t *modeptr)
-+{
-+ umode_t mode;
-+
-+ *modeptr &= ~gr_acl_umask();
-+ mode = *modeptr;
-+
-+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
-+ return 1;
-+
-+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
-+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
-+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
-+ GR_CHMOD_ACL_MSG);
-+ } else {
-+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
-+ }
-+}
-+
-+__u32
-+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
-+}
-+
-+__u32
-+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
-+}
-+
-+__u32
-+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
-+}
-+
-+__u32
-+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
-+}
-+
-+__u32
-+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
-+ GR_UNIXCONNECT_ACL_MSG);
-+}
-+
-+/* hardlinks require at minimum create and link permission,
-+ any additional privilege required is based on the
-+ privilege of the file being linked to
-+*/
-+__u32
-+gr_acl_handle_link(const struct dentry * new_dentry,
-+ const struct dentry * parent_dentry,
-+ const struct vfsmount * parent_mnt,
-+ const struct dentry * old_dentry,
-+ const struct vfsmount * old_mnt, const char *to)
-+{
-+ __u32 mode;
-+ __u32 needmode = GR_CREATE | GR_LINK;
-+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
-+
-+ mode =
-+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
-+ old_mnt);
-+
-+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
-+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
-+ return mode;
-+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
-+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
-+ return 0;
-+ } else if (unlikely((mode & needmode) != needmode))
-+ return 0;
-+
-+ return 1;
-+}
-+
-+__u32
-+gr_acl_handle_symlink(const struct dentry * new_dentry,
-+ const struct dentry * parent_dentry,
-+ const struct vfsmount * parent_mnt, const char *from)
-+{
-+ __u32 needmode = GR_WRITE | GR_CREATE;
-+ __u32 mode;
-+
-+ mode =
-+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
-+ GR_CREATE | GR_AUDIT_CREATE |
-+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
-+
-+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
-+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
-+ return mode;
-+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
-+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
-+ return 0;
-+ } else if (unlikely((mode & needmode) != needmode))
-+ return 0;
-+
-+ return (GR_WRITE | GR_CREATE);
-+}
-+
-+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
-+{
-+ __u32 mode;
-+
-+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
-+
-+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
-+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
-+ return mode;
-+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
-+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
-+ return 0;
-+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
-+ return 0;
-+
-+ return (reqmode);
-+}
-+
-+__u32
-+gr_acl_handle_mknod(const struct dentry * new_dentry,
-+ const struct dentry * parent_dentry,
-+ const struct vfsmount * parent_mnt,
-+ const int mode)
-+{
-+ __u32 reqmode = GR_WRITE | GR_CREATE;
-+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
-+ reqmode |= GR_SETID;
-+
-+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
-+ reqmode, GR_MKNOD_ACL_MSG);
-+}
-+
-+__u32
-+gr_acl_handle_mkdir(const struct dentry *new_dentry,
-+ const struct dentry *parent_dentry,
-+ const struct vfsmount *parent_mnt)
-+{
-+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
-+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
-+}
-+
-+#define RENAME_CHECK_SUCCESS(old, new) \
-+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
-+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
-+
-+int
-+gr_acl_handle_rename(struct dentry *new_dentry,
-+ struct dentry *parent_dentry,
-+ const struct vfsmount *parent_mnt,
-+ struct dentry *old_dentry,
-+ struct inode *old_parent_inode,
-+ struct vfsmount *old_mnt, const char *newname)
-+{
-+ __u32 comp1, comp2;
-+ int error = 0;
-+
-+ if (unlikely(!gr_acl_is_enabled()))
-+ return 0;
-+
-+ if (!new_dentry->d_inode) {
-+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
-+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
-+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
-+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
-+ GR_DELETE | GR_AUDIT_DELETE |
-+ GR_AUDIT_READ | GR_AUDIT_WRITE |
-+ GR_SUPPRESS, old_mnt);
-+ } else {
-+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
-+ GR_CREATE | GR_DELETE |
-+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
-+ GR_AUDIT_READ | GR_AUDIT_WRITE |
-+ GR_SUPPRESS, parent_mnt);
-+ comp2 =
-+ gr_search_file(old_dentry,
-+ GR_READ | GR_WRITE | GR_AUDIT_READ |
-+ GR_DELETE | GR_AUDIT_DELETE |
-+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
-+ }
-+
-+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
-+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
-+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
-+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
-+ && !(comp2 & GR_SUPPRESS)) {
-+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
-+ error = -EACCES;
-+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
-+ error = -EACCES;
-+
-+ return error;
-+}
-+
-+void
-+gr_acl_handle_exit(void)
-+{
-+ u16 id;
-+ char *rolename;
-+
-+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
-+ !(current->role->roletype & GR_ROLE_PERSIST))) {
-+ id = current->acl_role_id;
-+ rolename = current->role->rolename;
-+ gr_set_acls(1);
-+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
-+ }
-+
-+ gr_put_exec_file(current);
-+ return;
-+}
-+
-+int
-+gr_acl_handle_procpidmem(const struct task_struct *task)
-+{
-+ if (unlikely(!gr_acl_is_enabled()))
-+ return 0;
-+
-+ if (task != current && (task->acl->mode & GR_PROTPROCFD) &&
-+ !(current->acl->mode & GR_POVERRIDE) &&
-+ !(current->role->roletype & GR_ROLE_GOD))
-+ return -EACCES;
-+
-+ return 0;
-+}
-diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
-new file mode 100644
-index 0000000..e0bbcf4
---- /dev/null
-+++ b/grsecurity/gracl_ip.c
-@@ -0,0 +1,386 @@
-+#include <linux/kernel.h>
-+#include <asm/uaccess.h>
-+#include <asm/errno.h>
-+#include <net/sock.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/net.h>
-+#include <linux/in.h>
-+#include <linux/skbuff.h>
-+#include <linux/ip.h>
-+#include <linux/udp.h>
-+#include <linux/types.h>
-+#include <linux/sched.h>
-+#include <linux/netdevice.h>
-+#include <linux/inetdevice.h>
-+#include <linux/gracl.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+#define GR_BIND 0x01
-+#define GR_CONNECT 0x02
-+#define GR_INVERT 0x04
-+#define GR_BINDOVERRIDE 0x08
-+#define GR_CONNECTOVERRIDE 0x10
-+#define GR_SOCK_FAMILY 0x20
-+
-+static const char * gr_protocols[IPPROTO_MAX] = {
-+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
-+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
-+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
-+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
-+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
-+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
-+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
-+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
-+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
-+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
-+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
-+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
-+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
-+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
-+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
-+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
-+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
-+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
-+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
-+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
-+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
-+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
-+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
-+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
-+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
-+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
-+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
-+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
-+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
-+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
-+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
-+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
-+ };
-+
-+static const char * gr_socktypes[SOCK_MAX] = {
-+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
-+ "unknown:7", "unknown:8", "unknown:9", "packet"
-+ };
-+
-+static const char * gr_sockfamilies[AF_MAX+1] = {
-+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
-+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
-+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
-+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf", "alg", "nfc"
-+ };
-+
-+const char *
-+gr_proto_to_name(unsigned char proto)
-+{
-+ return gr_protocols[proto];
-+}
-+
-+const char *
-+gr_socktype_to_name(unsigned char type)
-+{
-+ return gr_socktypes[type];
-+}
-+
-+const char *
-+gr_sockfamily_to_name(unsigned char family)
-+{
-+ return gr_sockfamilies[family];
-+}
-+
-+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
-+
-+int
-+gr_search_socket(const int domain, const int type, const int protocol)
-+{
-+ struct acl_subject_label *curr;
-+ const struct cred *cred = current_cred();
-+
-+ if (unlikely(!gr_acl_is_enabled()))
-+ goto exit;
-+
-+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
-+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
-+ goto exit; // let the kernel handle it
-+
-+ curr = current->acl;
-+
-+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
-+ /* the family is allowed; if this is PF_INET, allow it only if
-+ the extra sock type/protocol checks pass */
-+ if (domain == PF_INET)
-+ goto inet_check;
-+ goto exit;
-+ } else {
-+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
-+ __u32 fakeip = 0;
-+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
-+ current->role->roletype, cred->uid,
-+ cred->gid, current->exec_file ?
-+ gr_to_filename(current->exec_file->f_path.dentry,
-+ current->exec_file->f_path.mnt) :
-+ curr->filename, curr->filename,
-+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
-+ &current->signal->saved_ip);
-+ goto exit;
-+ }
-+ goto exit_fail;
-+ }
-+
-+inet_check:
-+ /* the rest of this checking is for IPv4 only */
-+ if (!curr->ips)
-+ goto exit;
-+
-+ if ((curr->ip_type & (1U << type)) &&
-+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
-+ goto exit;
-+
-+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
-+ /* we don't place acls on raw sockets, and sometimes
-+ dgram/ip sockets are opened for ioctl and not
-+ bind/connect, so we'll fake a bind learn log */
-+ if (type == SOCK_RAW || type == SOCK_PACKET) {
-+ __u32 fakeip = 0;
-+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
-+ current->role->roletype, cred->uid,
-+ cred->gid, current->exec_file ?
-+ gr_to_filename(current->exec_file->f_path.dentry,
-+ current->exec_file->f_path.mnt) :
-+ curr->filename, curr->filename,
-+ &fakeip, 0, type,
-+ protocol, GR_CONNECT, &current->signal->saved_ip);
-+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
-+ __u32 fakeip = 0;
-+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
-+ current->role->roletype, cred->uid,
-+ cred->gid, current->exec_file ?
-+ gr_to_filename(current->exec_file->f_path.dentry,
-+ current->exec_file->f_path.mnt) :
-+ curr->filename, curr->filename,
-+ &fakeip, 0, type,
-+ protocol, GR_BIND, &current->signal->saved_ip);
-+ }
-+ /* we'll log when they use connect or bind */
-+ goto exit;
-+ }
-+
-+exit_fail:
-+ if (domain == PF_INET)
-+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
-+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
-+ else if (rcu_access_pointer(net_families[domain]) != NULL)
-+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
-+ gr_socktype_to_name(type), protocol);
-+
-+ return 0;
-+exit:
-+ return 1;
-+}
-+
-+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
-+{
-+ if ((ip->mode & mode) &&
-+ (ip_port >= ip->low) &&
-+ (ip_port <= ip->high) &&
-+ ((ntohl(ip_addr) & our_netmask) ==
-+ (ntohl(our_addr) & our_netmask))
-+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
-+ && (ip->type & (1U << type))) {
-+ if (ip->mode & GR_INVERT)
-+ return 2; // specifically denied
-+ else
-+ return 1; // allowed
-+ }
-+
-+ return 0; // not specifically allowed, may continue parsing
-+}
-+
-+static int
-+gr_search_connectbind(const int full_mode, struct sock *sk,
-+ struct sockaddr_in *addr, const int type)
-+{
-+ char iface[IFNAMSIZ] = {0};
-+ struct acl_subject_label *curr;
-+ struct acl_ip_label *ip;
-+ struct inet_sock *isk;
-+ struct net_device *dev;
-+ struct in_device *idev;
-+ unsigned long i;
-+ int ret;
-+ int mode = full_mode & (GR_BIND | GR_CONNECT);
-+ __u32 ip_addr = 0;
-+ __u32 our_addr;
-+ __u32 our_netmask;
-+ char *p;
-+ __u16 ip_port = 0;
-+ const struct cred *cred = current_cred();
-+
-+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
-+ return 0;
-+
-+ curr = current->acl;
-+ isk = inet_sk(sk);
-+
-+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
-+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
-+ addr->sin_addr.s_addr = curr->inaddr_any_override;
-+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
-+ struct sockaddr_in saddr;
-+ int err;
-+
-+ saddr.sin_family = AF_INET;
-+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
-+ saddr.sin_port = isk->inet_sport;
-+
-+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
-+ if (err)
-+ return err;
-+
-+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
-+ if (err)
-+ return err;
-+ }
-+
-+ if (!curr->ips)
-+ return 0;
-+
-+ ip_addr = addr->sin_addr.s_addr;
-+ ip_port = ntohs(addr->sin_port);
-+
-+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
-+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
-+ current->role->roletype, cred->uid,
-+ cred->gid, current->exec_file ?
-+ gr_to_filename(current->exec_file->f_path.dentry,
-+ current->exec_file->f_path.mnt) :
-+ curr->filename, curr->filename,
-+ &ip_addr, ip_port, type,
-+ sk->sk_protocol, mode, &current->signal->saved_ip);
-+ return 0;
-+ }
-+
-+ for (i = 0; i < curr->ip_num; i++) {
-+ ip = *(curr->ips + i);
-+ if (ip->iface != NULL) {
-+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
-+ p = strchr(iface, ':');
-+ if (p != NULL)
-+ *p = '\0';
-+ dev = dev_get_by_name(sock_net(sk), iface);
-+ if (dev == NULL)
-+ continue;
-+ idev = in_dev_get(dev);
-+ if (idev == NULL) {
-+ dev_put(dev);
-+ continue;
-+ }
-+ rcu_read_lock();
-+ for_ifa(idev) {
-+ if (!strcmp(ip->iface, ifa->ifa_label)) {
-+ our_addr = ifa->ifa_address;
-+ our_netmask = 0xffffffff;
-+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
-+ if (ret == 1) {
-+ rcu_read_unlock();
-+ in_dev_put(idev);
-+ dev_put(dev);
-+ return 0;
-+ } else if (ret == 2) {
-+ rcu_read_unlock();
-+ in_dev_put(idev);
-+ dev_put(dev);
-+ goto denied;
-+ }
-+ }
-+ } endfor_ifa(idev);
-+ rcu_read_unlock();
-+ in_dev_put(idev);
-+ dev_put(dev);
-+ } else {
-+ our_addr = ip->addr;
-+ our_netmask = ip->netmask;
-+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
-+ if (ret == 1)
-+ return 0;
-+ else if (ret == 2)
-+ goto denied;
-+ }
-+ }
-+
-+denied:
-+ if (mode == GR_BIND)
-+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
-+ else if (mode == GR_CONNECT)
-+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
-+
-+ return -EACCES;
-+}
-+
-+int
-+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
-+{
-+ /* always allow disconnection of dgram sockets with connect */
-+ if (addr->sin_family == AF_UNSPEC)
-+ return 0;
-+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
-+}
-+
-+int
-+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
-+{
-+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
-+}
-+
-+int gr_search_listen(struct socket *sock)
-+{
-+ struct sock *sk = sock->sk;
-+ struct sockaddr_in addr;
-+
-+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
-+ addr.sin_port = inet_sk(sk)->inet_sport;
-+
-+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
-+}
-+
-+int gr_search_accept(struct socket *sock)
-+{
-+ struct sock *sk = sock->sk;
-+ struct sockaddr_in addr;
-+
-+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
-+ addr.sin_port = inet_sk(sk)->inet_sport;
-+
-+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
-+}
-+
-+int
-+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
-+{
-+ if (addr)
-+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
-+ else {
-+ struct sockaddr_in sin;
-+ const struct inet_sock *inet = inet_sk(sk);
-+
-+ sin.sin_addr.s_addr = inet->inet_daddr;
-+ sin.sin_port = inet->inet_dport;
-+
-+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
-+ }
-+}
-+
-+int
-+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
-+{
-+ struct sockaddr_in sin;
-+
-+ if (unlikely(skb->len < sizeof (struct udphdr)))
-+ return 0; // skip this packet
-+
-+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
-+ sin.sin_port = udp_hdr(skb)->source;
-+
-+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
-+}
-diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
-new file mode 100644
-index 0000000..25f54ef
---- /dev/null
-+++ b/grsecurity/gracl_learn.c
-@@ -0,0 +1,207 @@
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/sched.h>
-+#include <linux/poll.h>
-+#include <linux/string.h>
-+#include <linux/file.h>
-+#include <linux/types.h>
-+#include <linux/vmalloc.h>
-+#include <linux/grinternal.h>
-+
-+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
-+ size_t count, loff_t *ppos);
-+extern int gr_acl_is_enabled(void);
-+
-+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
-+static int gr_learn_attached;
-+
-+/* use a 512k buffer */
-+#define LEARN_BUFFER_SIZE (512 * 1024)
-+
-+static DEFINE_SPINLOCK(gr_learn_lock);
-+static DEFINE_MUTEX(gr_learn_user_mutex);
-+
-+/* we need to maintain two buffers, so that the kernel context of grlearn
-+ uses a semaphore around the userspace copying, and the other kernel contexts
-+ use a spinlock when copying into the buffer, since they cannot sleep
-+*/
-+static char *learn_buffer;
-+static char *learn_buffer_user;
-+static int learn_buffer_len;
-+static int learn_buffer_user_len;
-+
-+static ssize_t
-+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
-+{
-+ DECLARE_WAITQUEUE(wait, current);
-+ ssize_t retval = 0;
-+
-+ add_wait_queue(&learn_wait, &wait);
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ do {
-+ mutex_lock(&gr_learn_user_mutex);
-+ spin_lock(&gr_learn_lock);
-+ if (learn_buffer_len)
-+ break;
-+ spin_unlock(&gr_learn_lock);
-+ mutex_unlock(&gr_learn_user_mutex);
-+ if (file->f_flags & O_NONBLOCK) {
-+ retval = -EAGAIN;
-+ goto out;
-+ }
-+ if (signal_pending(current)) {
-+ retval = -ERESTARTSYS;
-+ goto out;
-+ }
-+
-+ schedule();
-+ } while (1);
-+
-+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
-+ learn_buffer_user_len = learn_buffer_len;
-+ retval = learn_buffer_len;
-+ learn_buffer_len = 0;
-+
-+ spin_unlock(&gr_learn_lock);
-+
-+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
-+ retval = -EFAULT;
-+
-+ mutex_unlock(&gr_learn_user_mutex);
-+out:
-+ set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&learn_wait, &wait);
-+ return retval;
-+}
-+
-+static unsigned int
-+poll_learn(struct file * file, poll_table * wait)
-+{
-+ poll_wait(file, &learn_wait, wait);
-+
-+ if (learn_buffer_len)
-+ return (POLLIN | POLLRDNORM);
-+
-+ return 0;
-+}
-+
-+void
-+gr_clear_learn_entries(void)
-+{
-+ char *tmp;
-+
-+ mutex_lock(&gr_learn_user_mutex);
-+ spin_lock(&gr_learn_lock);
-+ tmp = learn_buffer;
-+ learn_buffer = NULL;
-+ spin_unlock(&gr_learn_lock);
-+ if (tmp)
-+ vfree(tmp);
-+ if (learn_buffer_user != NULL) {
-+ vfree(learn_buffer_user);
-+ learn_buffer_user = NULL;
-+ }
-+ learn_buffer_len = 0;
-+ mutex_unlock(&gr_learn_user_mutex);
-+
-+ return;
-+}
-+
-+void
-+gr_add_learn_entry(const char *fmt, ...)
-+{
-+ va_list args;
-+ unsigned int len;
-+
-+ if (!gr_learn_attached)
-+ return;
-+
-+ spin_lock(&gr_learn_lock);
-+
-+ /* leave a gap at the end so we know when it's "full" but don't have to
-+ compute the exact length of the string we're trying to append
-+ */
-+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
-+ spin_unlock(&gr_learn_lock);
-+ wake_up_interruptible(&learn_wait);
-+ return;
-+ }
-+ if (learn_buffer == NULL) {
-+ spin_unlock(&gr_learn_lock);
-+ return;
-+ }
-+
-+ va_start(args, fmt);
-+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
-+ va_end(args);
-+
-+ learn_buffer_len += len + 1;
-+
-+ spin_unlock(&gr_learn_lock);
-+ wake_up_interruptible(&learn_wait);
-+
-+ return;
-+}
-+
-+static int
-+open_learn(struct inode *inode, struct file *file)
-+{
-+ if (file->f_mode & FMODE_READ && gr_learn_attached)
-+ return -EBUSY;
-+ if (file->f_mode & FMODE_READ) {
-+ int retval = 0;
-+ mutex_lock(&gr_learn_user_mutex);
-+ if (learn_buffer == NULL)
-+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
-+ if (learn_buffer_user == NULL)
-+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
-+ if (learn_buffer == NULL) {
-+ retval = -ENOMEM;
-+ goto out_error;
-+ }
-+ if (learn_buffer_user == NULL) {
-+ retval = -ENOMEM;
-+ goto out_error;
-+ }
-+ learn_buffer_len = 0;
-+ learn_buffer_user_len = 0;
-+ gr_learn_attached = 1;
-+out_error:
-+ mutex_unlock(&gr_learn_user_mutex);
-+ return retval;
-+ }
-+ return 0;
-+}
-+
-+static int
-+close_learn(struct inode *inode, struct file *file)
-+{
-+ if (file->f_mode & FMODE_READ) {
-+ char *tmp = NULL;
-+ mutex_lock(&gr_learn_user_mutex);
-+ spin_lock(&gr_learn_lock);
-+ tmp = learn_buffer;
-+ learn_buffer = NULL;
-+ spin_unlock(&gr_learn_lock);
-+ if (tmp)
-+ vfree(tmp);
-+ if (learn_buffer_user != NULL) {
-+ vfree(learn_buffer_user);
-+ learn_buffer_user = NULL;
-+ }
-+ learn_buffer_len = 0;
-+ learn_buffer_user_len = 0;
-+ gr_learn_attached = 0;
-+ mutex_unlock(&gr_learn_user_mutex);
-+ }
-+
-+ return 0;
-+}
-+
-+const struct file_operations grsec_fops = {
-+ .read = read_learn,
-+ .write = write_grsec_handler,
-+ .open = open_learn,
-+ .release = close_learn,
-+ .poll = poll_learn,
-+};
-diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
-new file mode 100644
-index 0000000..edcb09b
---- /dev/null
-+++ b/grsecurity/gracl_policy.c
-@@ -0,0 +1,1780 @@
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/namei.h>
-+#include <linux/mount.h>
-+#include <linux/tty.h>
-+#include <linux/proc_fs.h>
-+#include <linux/lglock.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <linux/types.h>
-+#include <linux/sysctl.h>
-+#include <linux/netdevice.h>
-+#include <linux/ptrace.h>
-+#include <linux/gracl.h>
-+#include <linux/gralloc.h>
-+#include <linux/security.h>
-+#include <linux/grinternal.h>
-+#include <linux/pid_namespace.h>
-+#include <linux/stop_machine.h>
-+#include <linux/fdtable.h>
-+#include <linux/percpu.h>
-+#include <linux/lglock.h>
-+#include <linux/hugetlb.h>
-+#include <linux/posix-timers.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/errno.h>
-+#include <asm/mman.h>
-+
-+extern struct gr_policy_state *polstate;
-+
-+#define FOR_EACH_ROLE_START(role) \
-+ role = polstate->role_list; \
-+ while (role) {
-+
-+#define FOR_EACH_ROLE_END(role) \
-+ role = role->prev; \
-+ }
-+
-+struct path gr_real_root;
-+
-+extern struct gr_alloc_state *current_alloc_state;
-+
-+u16 acl_sp_role_value;
-+
-+static DEFINE_MUTEX(gr_dev_mutex);
-+
-+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
-+extern void gr_clear_learn_entries(void);
-+
-+struct gr_arg *gr_usermode __read_only;
-+unsigned char *gr_system_salt __read_only;
-+unsigned char *gr_system_sum __read_only;
-+
-+static unsigned int gr_auth_attempts = 0;
-+static unsigned long gr_auth_expires = 0UL;
-+
-+struct acl_object_label *fakefs_obj_rw;
-+struct acl_object_label *fakefs_obj_rwx;
-+
-+extern int gr_init_uidset(void);
-+extern void gr_free_uidset(void);
-+extern void gr_remove_uid(uid_t uid);
-+extern int gr_find_uid(uid_t uid);
-+
-+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback);
-+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
-+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
-+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
-+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
-+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
-+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
-+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
-+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
-+extern struct acl_subject_label *lookup_acl_subj_label(const u64 ino, const dev_t dev, const struct acl_role_label *role);
-+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev, const struct acl_role_label *role);
-+extern void assign_special_role(const char *rolename);
-+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
-+extern int gr_rbac_disable(void *unused);
-+extern void gr_enable_rbac_system(void);
-+
-+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
-+{
-+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
-+ return -EFAULT;
-+
-+ return 0;
-+}
-+
-+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
-+{
-+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
-+ return -EFAULT;
-+
-+ return 0;
-+}
-+
-+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
-+{
-+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
-+ return -EFAULT;
-+
-+ return 0;
-+}
-+
-+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
-+{
-+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
-+ return -EFAULT;
-+
-+ return 0;
-+}
-+
-+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
-+{
-+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
-+ return -EFAULT;
-+
-+ return 0;
-+}
-+
-+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
-+{
-+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
-+ return -EFAULT;
-+
-+ return 0;
-+}
-+
-+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
-+{
-+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
-+ return -EFAULT;
-+
-+ return 0;
-+}
-+
-+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
-+{
-+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
-+ return -EFAULT;
-+
-+ return 0;
-+}
-+
-+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
-+{
-+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
-+ return -EFAULT;
-+
-+ return 0;
-+}
-+
-+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
-+{
-+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
-+ return -EFAULT;
-+
-+ if ((uwrap->version != GRSECURITY_VERSION) ||
-+ (uwrap->size != sizeof(struct gr_arg)))
-+ return -EINVAL;
-+
-+ return 0;
-+}
-+
-+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
-+{
-+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
-+ return -EFAULT;
-+
-+ return 0;
-+}
-+
-+static size_t get_gr_arg_wrapper_size_normal(void)
-+{
-+ return sizeof(struct gr_arg_wrapper);
-+}
-+
-+#ifdef CONFIG_COMPAT
-+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
-+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
-+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
-+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
-+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
-+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
-+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
-+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
-+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
-+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
-+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
-+extern size_t get_gr_arg_wrapper_size_compat(void);
-+
-+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
-+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
-+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
-+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
-+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
-+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
-+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
-+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
-+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
-+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
-+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
-+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
-+
-+#else
-+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
-+#define copy_gr_arg copy_gr_arg_normal
-+#define copy_gr_hash_struct copy_gr_hash_struct_normal
-+#define copy_acl_object_label copy_acl_object_label_normal
-+#define copy_acl_subject_label copy_acl_subject_label_normal
-+#define copy_acl_role_label copy_acl_role_label_normal
-+#define copy_acl_ip_label copy_acl_ip_label_normal
-+#define copy_pointer_from_array copy_pointer_from_array_normal
-+#define copy_sprole_pw copy_sprole_pw_normal
-+#define copy_role_transition copy_role_transition_normal
-+#define copy_role_allowed_ip copy_role_allowed_ip_normal
-+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
-+#endif
-+
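-+/* map a userspace subject pointer to the kernel copy made earlier, if any */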
-+static struct acl_subject_label *
-+lookup_subject_map(const struct acl_subject_label *userp)
-+{
-+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
-+ struct subject_map *match;
-+
-+ match = polstate->subj_map_set.s_hash[index];
-+
-+ while (match && match->user != userp)
-+ match = match->next;
-+
-+ if (match != NULL)
-+ return match->kernel;
-+ else
-+ return NULL;
-+}
-+
-+static void
-+insert_subj_map_entry(struct subject_map *subjmap)
-+{
-+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
-+ struct subject_map **curr;
-+
-+ subjmap->prev = NULL;
-+
-+ curr = &polstate->subj_map_set.s_hash[index];
-+ if (*curr != NULL)
-+ (*curr)->prev = subjmap;
-+
-+ subjmap->next = *curr;
-+ *curr = subjmap;
-+
-+ return;
-+}
-+
-+static void
-+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
-+{
-+ unsigned int index =
-+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
-+ struct acl_role_label **curr;
-+ struct acl_role_label *tmp, *tmp2;
-+
-+ curr = &polstate->acl_role_set.r_hash[index];
-+
-+ /* simple case, slot is empty, just set it to our role */
-+ if (*curr == NULL) {
-+ *curr = role;
-+ } else {
-+ /* example:
-+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
-+ 2 -> 3
-+ */
-+ /* first check to see if we can already be reached via this slot */
-+ tmp = *curr;
-+ while (tmp && tmp != role)
-+ tmp = tmp->next;
-+ if (tmp == role) {
-+ /* we don't need to add ourselves to this slot's chain */
-+ return;
-+ }
-+ /* we need to add ourselves to this chain, two cases */
-+ if (role->next == NULL) {
-+ /* simple case, append the current chain to our role */
-+ role->next = *curr;
-+ *curr = role;
-+ } else {
-+ /* 1 -> 2 -> 3 -> 4
-+ 2 -> 3 -> 4
-+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
-+ */
-+ /* trickier case: walk our role's chain until we find
-+ the role for the start of the current slot's chain */
-+ tmp = role;
-+ tmp2 = *curr;
-+ while (tmp->next && tmp->next != tmp2)
-+ tmp = tmp->next;
-+ if (tmp->next == tmp2) {
-+ /* from example above, we found 3, so just
-+ replace this slot's chain with ours */
-+ *curr = role;
-+ } else {
-+ /* we didn't find a subset of our role's chain
-+ in the current slot's chain, so append their
-+ chain to ours, and set us as the first role in
-+ the slot's chain
-+
-+ we could fold this case with the case above,
-+ but making it explicit for clarity
-+ */
-+ tmp->next = tmp2;
-+ *curr = role;
-+ }
-+ }
-+ }
-+
-+ return;
-+}
-+
-+static void
-+insert_acl_role_label(struct acl_role_label *role)
-+{
-+ int i;
-+
-+ if (polstate->role_list == NULL) {
-+ polstate->role_list = role;
-+ role->prev = NULL;
-+ } else {
-+ role->prev = polstate->role_list;
-+ polstate->role_list = role;
-+ }
-+
-+ /* used for hash chains */
-+ role->next = NULL;
-+
-+ if (role->roletype & GR_ROLE_DOMAIN) {
-+ for (i = 0; i < role->domain_child_num; i++)
-+ __insert_acl_role_label(role, role->domain_children[i]);
-+ } else
-+ __insert_acl_role_label(role, role->uidgid);
-+}
-+
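-+/* add a pathname to the name hash and the inode/device lookup table;
-+   returns 0 only on allocation failure */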
-+static int
-+insert_name_entry(char *name, const u64 inode, const dev_t device, __u8 deleted)
-+{
-+ struct name_entry **curr, *nentry;
-+ struct inodev_entry *ientry;
-+ unsigned int len = strlen(name);
-+ unsigned int key = full_name_hash(name, len);
-+ unsigned int index = key % polstate->name_set.n_size;
-+
-+ curr = &polstate->name_set.n_hash[index];
-+
-+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
-+ curr = &((*curr)->next);
-+
-+ if (*curr != NULL)
-+ return 1;
-+
-+ nentry = acl_alloc(sizeof (struct name_entry));
-+ if (nentry == NULL)
-+ return 0;
-+ ientry = acl_alloc(sizeof (struct inodev_entry));
-+ if (ientry == NULL)
-+ return 0;
-+ ientry->nentry = nentry;
-+
-+ nentry->key = key;
-+ nentry->name = name;
-+ nentry->inode = inode;
-+ nentry->device = device;
-+ nentry->len = len;
-+ nentry->deleted = deleted;
-+
-+ nentry->prev = NULL;
-+ curr = &polstate->name_set.n_hash[index];
-+ if (*curr != NULL)
-+ (*curr)->prev = nentry;
-+ nentry->next = *curr;
-+ *curr = nentry;
-+
-+ /* insert us into the table searchable by inode/dev */
-+ __insert_inodev_entry(polstate, ientry);
-+
-+ return 1;
-+}
-+
-+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
-+
-+static void *
-+create_table(__u32 * len, int elementsize)
-+{
-+ unsigned int table_sizes[] = {
-+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
-+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
-+ 4194301, 8388593, 16777213, 33554393, 67108859
-+ };
-+ void *newtable = NULL;
-+ unsigned int pwr = 0;
-+
-+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
-+ table_sizes[pwr] <= *len)
-+ pwr++;
-+
-+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
-+ return newtable;
-+
-+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
-+ newtable =
-+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
-+ else
-+ newtable = vmalloc(table_sizes[pwr] * elementsize);
-+
-+ *len = table_sizes[pwr];
-+
-+ return newtable;
-+}
-+
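-+/* allocate the policy hash tables and supporting state, sized from the counts
-+   supplied in the gr_arg header; returns 1 on failure */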
-+static int
-+init_variables(const struct gr_arg *arg, bool reload)
-+{
-+ struct task_struct *reaper = init_pid_ns.child_reaper;
-+ unsigned int stacksize;
-+
-+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
-+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
-+ polstate->name_set.n_size = arg->role_db.num_objects;
-+ polstate->inodev_set.i_size = arg->role_db.num_objects;
-+
-+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
-+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
-+ return 1;
-+
-+ if (!reload) {
-+ if (!gr_init_uidset())
-+ return 1;
-+ }
-+
-+ /* set up the stack that holds allocation info */
-+
-+ stacksize = arg->role_db.num_pointers + 5;
-+
-+ if (!acl_alloc_stack_init(stacksize))
-+ return 1;
-+
-+ if (!reload) {
-+ /* grab reference for the real root dentry and vfsmount */
-+ get_fs_root(reaper->fs, &gr_real_root);
-+
-+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
-+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", gr_get_dev_from_dentry(gr_real_root.dentry), gr_get_ino_from_dentry(gr_real_root.dentry));
-+#endif
-+
-+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
-+ if (fakefs_obj_rw == NULL)
-+ return 1;
-+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
-+
-+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
-+ if (fakefs_obj_rwx == NULL)
-+ return 1;
-+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
-+ }
-+
-+ polstate->subj_map_set.s_hash =
-+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
-+ polstate->acl_role_set.r_hash =
-+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
-+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
-+ polstate->inodev_set.i_hash =
-+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
-+
-+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
-+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
-+ return 1;
-+
-+ memset(polstate->subj_map_set.s_hash, 0,
-+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
-+ memset(polstate->acl_role_set.r_hash, 0,
-+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
-+ memset(polstate->name_set.n_hash, 0,
-+ sizeof (struct name_entry *) * polstate->name_set.n_size);
-+ memset(polstate->inodev_set.i_hash, 0,
-+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
-+
-+ return 0;
-+}
-+
-+/* free information not needed after startup;
-+   currently this is the user->kernel pointer mappings for subjects
-+*/
-+
-+static void
-+free_init_variables(void)
-+{
-+ __u32 i;
-+
-+ if (polstate->subj_map_set.s_hash) {
-+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
-+ if (polstate->subj_map_set.s_hash[i]) {
-+ kfree(polstate->subj_map_set.s_hash[i]);
-+ polstate->subj_map_set.s_hash[i] = NULL;
-+ }
-+ }
-+
-+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
-+ PAGE_SIZE)
-+ kfree(polstate->subj_map_set.s_hash);
-+ else
-+ vfree(polstate->subj_map_set.s_hash);
-+ }
-+
-+ return;
-+}
-+
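-+/* tear down the in-kernel policy: per-subject object hashes, per-role subject
-+   hashes, the top-level tables, and (on full shutdown) task state and the
-+   reference to the real root */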
-+static void
-+free_variables(bool reload)
-+{
-+ struct acl_subject_label *s;
-+ struct acl_role_label *r;
-+ struct task_struct *task, *task2;
-+ unsigned int x;
-+
-+ if (!reload) {
-+ gr_clear_learn_entries();
-+
-+ read_lock(&tasklist_lock);
-+ do_each_thread(task2, task) {
-+ task->acl_sp_role = 0;
-+ task->acl_role_id = 0;
-+ task->inherited = 0;
-+ task->acl = NULL;
-+ task->role = NULL;
-+ } while_each_thread(task2, task);
-+ read_unlock(&tasklist_lock);
-+
-+ kfree(fakefs_obj_rw);
-+ fakefs_obj_rw = NULL;
-+ kfree(fakefs_obj_rwx);
-+ fakefs_obj_rwx = NULL;
-+
-+ /* release the reference to the real root dentry and vfsmount */
-+ path_put(&gr_real_root);
-+ memset(&gr_real_root, 0, sizeof(gr_real_root));
-+ }
-+
-+ /* free all object hash tables */
-+
-+ FOR_EACH_ROLE_START(r)
-+ if (r->subj_hash == NULL)
-+ goto next_role;
-+ FOR_EACH_SUBJECT_START(r, s, x)
-+ if (s->obj_hash == NULL)
-+ break;
-+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
-+ kfree(s->obj_hash);
-+ else
-+ vfree(s->obj_hash);
-+ FOR_EACH_SUBJECT_END(s, x)
-+ FOR_EACH_NESTED_SUBJECT_START(r, s)
-+ if (s->obj_hash == NULL)
-+ break;
-+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
-+ kfree(s->obj_hash);
-+ else
-+ vfree(s->obj_hash);
-+ FOR_EACH_NESTED_SUBJECT_END(s)
-+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
-+ kfree(r->subj_hash);
-+ else
-+ vfree(r->subj_hash);
-+ r->subj_hash = NULL;
-+next_role:
-+ FOR_EACH_ROLE_END(r)
-+
-+ acl_free_all();
-+
-+ if (polstate->acl_role_set.r_hash) {
-+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
-+ PAGE_SIZE)
-+ kfree(polstate->acl_role_set.r_hash);
-+ else
-+ vfree(polstate->acl_role_set.r_hash);
-+ }
-+ if (polstate->name_set.n_hash) {
-+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
-+ PAGE_SIZE)
-+ kfree(polstate->name_set.n_hash);
-+ else
-+ vfree(polstate->name_set.n_hash);
-+ }
-+
-+ if (polstate->inodev_set.i_hash) {
-+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
-+ PAGE_SIZE)
-+ kfree(polstate->inodev_set.i_hash);
-+ else
-+ vfree(polstate->inodev_set.i_hash);
-+ }
-+
-+ if (!reload)
-+ gr_free_uidset();
-+
-+ memset(&polstate->name_set, 0, sizeof (struct name_db));
-+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
-+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
-+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
-+
-+ polstate->default_role = NULL;
-+ polstate->kernel_role = NULL;
-+ polstate->role_list = NULL;
-+
-+ return;
-+}
-+
-+static struct acl_subject_label *
-+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
-+
-+static int alloc_and_copy_string(char **name, unsigned int maxlen)
-+{
-+ unsigned int len = strnlen_user(*name, maxlen);
-+ char *tmp;
-+
-+ if (!len || len >= maxlen)
-+ return -EINVAL;
-+
-+ if ((tmp = (char *) acl_alloc(len)) == NULL)
-+ return -ENOMEM;
-+
-+ if (copy_from_user(tmp, *name, len))
-+ return -EFAULT;
-+
-+ tmp[len-1] = '\0';
-+ *name = tmp;
-+
-+ return 0;
-+}
-+
-+static int
-+copy_user_glob(struct acl_object_label *obj)
-+{
-+ struct acl_object_label *g_tmp, **guser;
-+ int error;
-+
-+ if (obj->globbed == NULL)
-+ return 0;
-+
-+ guser = &obj->globbed;
-+ while (*guser) {
-+ g_tmp = (struct acl_object_label *)
-+ acl_alloc(sizeof (struct acl_object_label));
-+ if (g_tmp == NULL)
-+ return -ENOMEM;
-+
-+ if (copy_acl_object_label(g_tmp, *guser))
-+ return -EFAULT;
-+
-+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
-+ if (error)
-+ return error;
-+
-+ *guser = g_tmp;
-+ guser = &(g_tmp->next);
-+ }
-+
-+ return 0;
-+}
-+
-+static int
-+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
-+ struct acl_role_label *role)
-+{
-+ struct acl_object_label *o_tmp;
-+ int ret;
-+
-+ while (userp) {
-+ if ((o_tmp = (struct acl_object_label *)
-+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
-+ return -ENOMEM;
-+
-+ if (copy_acl_object_label(o_tmp, userp))
-+ return -EFAULT;
-+
-+ userp = o_tmp->prev;
-+
-+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
-+ if (ret)
-+ return ret;
-+
-+ insert_acl_obj_label(o_tmp, subj);
-+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
-+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
-+ return -ENOMEM;
-+
-+ ret = copy_user_glob(o_tmp);
-+ if (ret)
-+ return ret;
-+
-+ if (o_tmp->nested) {
-+ int already_copied;
-+
-+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
-+ if (IS_ERR(o_tmp->nested))
-+ return PTR_ERR(o_tmp->nested);
-+
-+ /* insert into nested subject list if we haven't copied this one yet
-+ to prevent duplicate entries */
-+ if (!already_copied) {
-+ o_tmp->nested->next = role->hash->first;
-+ role->hash->first = o_tmp->nested;
-+ }
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+static __u32
-+count_user_subjs(struct acl_subject_label *userp)
-+{
-+ struct acl_subject_label s_tmp;
-+ __u32 num = 0;
-+
-+ while (userp) {
-+ if (copy_acl_subject_label(&s_tmp, userp))
-+ break;
-+
-+ userp = s_tmp.prev;
-+ }
-+
-+ return num;
-+}
-+
-+static int
-+copy_user_allowedips(struct acl_role_label *rolep)
-+{
-+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
-+
-+ ruserip = rolep->allowed_ips;
-+
-+ while (ruserip) {
-+ rlast = rtmp;
-+
-+ if ((rtmp = (struct role_allowed_ip *)
-+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
-+ return -ENOMEM;
-+
-+ if (copy_role_allowed_ip(rtmp, ruserip))
-+ return -EFAULT;
-+
-+ ruserip = rtmp->prev;
-+
-+ if (!rlast) {
-+ rtmp->prev = NULL;
-+ rolep->allowed_ips = rtmp;
-+ } else {
-+ rlast->next = rtmp;
-+ rtmp->prev = rlast;
-+ }
-+
-+ if (!ruserip)
-+ rtmp->next = NULL;
-+ }
-+
-+ return 0;
-+}
-+
-+static int
-+copy_user_transitions(struct acl_role_label *rolep)
-+{
-+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
-+ int error;
-+
-+ rusertp = rolep->transitions;
-+
-+ while (rusertp) {
-+ rlast = rtmp;
-+
-+ if ((rtmp = (struct role_transition *)
-+ acl_alloc(sizeof (struct role_transition))) == NULL)
-+ return -ENOMEM;
-+
-+ if (copy_role_transition(rtmp, rusertp))
-+ return -EFAULT;
-+
-+ rusertp = rtmp->prev;
-+
-+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
-+ if (error)
-+ return error;
-+
-+ if (!rlast) {
-+ rtmp->prev = NULL;
-+ rolep->transitions = rtmp;
-+ } else {
-+ rlast->next = rtmp;
-+ rtmp->prev = rlast;
-+ }
-+
-+ if (!rusertp)
-+ rtmp->next = NULL;
-+ }
-+
-+ return 0;
-+}
-+
-+static __u32 count_user_objs(const struct acl_object_label __user *userp)
-+{
-+ struct acl_object_label o_tmp;
-+ __u32 num = 0;
-+
-+ while (userp) {
-+ if (copy_acl_object_label(&o_tmp, userp))
-+ break;
-+
-+ userp = o_tmp.prev;
-+ num++;
-+ }
-+
-+ return num;
-+}
-+
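-+/* deep-copy a single userspace subject (objects, transition tables, ip labels,
-+   parent subject) into kernel memory, memoizing the result in the subject map */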
-+static struct acl_subject_label *
-+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
-+{
-+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
-+ __u32 num_objs;
-+ struct acl_ip_label **i_tmp, *i_utmp2;
-+ struct gr_hash_struct ghash;
-+ struct subject_map *subjmap;
-+ unsigned int i_num;
-+ int err;
-+
-+ if (already_copied != NULL)
-+ *already_copied = 0;
-+
-+ s_tmp = lookup_subject_map(userp);
-+
-+ /* we've already copied this subject into the kernel, just return
-+ the reference to it, and don't copy it over again
-+ */
-+ if (s_tmp) {
-+ if (already_copied != NULL)
-+ *already_copied = 1;
-+ return(s_tmp);
-+ }
-+
-+ if ((s_tmp = (struct acl_subject_label *)
-+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
-+ return ERR_PTR(-ENOMEM);
-+
-+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
-+ if (subjmap == NULL)
-+ return ERR_PTR(-ENOMEM);
-+
-+ subjmap->user = userp;
-+ subjmap->kernel = s_tmp;
-+ insert_subj_map_entry(subjmap);
-+
-+ if (copy_acl_subject_label(s_tmp, userp))
-+ return ERR_PTR(-EFAULT);
-+
-+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
-+ if (err)
-+ return ERR_PTR(err);
-+
-+ if (!strcmp(s_tmp->filename, "/"))
-+ role->root_label = s_tmp;
-+
-+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
-+ return ERR_PTR(-EFAULT);
-+
-+ /* copy user and group transition tables */
-+
-+ if (s_tmp->user_trans_num) {
-+ uid_t *uidlist;
-+
-+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
-+ if (uidlist == NULL)
-+ return ERR_PTR(-ENOMEM);
-+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
-+ return ERR_PTR(-EFAULT);
-+
-+ s_tmp->user_transitions = uidlist;
-+ }
-+
-+ if (s_tmp->group_trans_num) {
-+ gid_t *gidlist;
-+
-+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
-+ if (gidlist == NULL)
-+ return ERR_PTR(-ENOMEM);
-+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
-+ return ERR_PTR(-EFAULT);
-+
-+ s_tmp->group_transitions = gidlist;
-+ }
-+
-+ /* set up object hash table */
-+ num_objs = count_user_objs(ghash.first);
-+
-+ s_tmp->obj_hash_size = num_objs;
-+ s_tmp->obj_hash =
-+ (struct acl_object_label **)
-+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
-+
-+ if (!s_tmp->obj_hash)
-+ return ERR_PTR(-ENOMEM);
-+
-+ memset(s_tmp->obj_hash, 0,
-+ s_tmp->obj_hash_size *
-+ sizeof (struct acl_object_label *));
-+
-+ /* add in objects */
-+ err = copy_user_objs(ghash.first, s_tmp, role);
-+
-+ if (err)
-+ return ERR_PTR(err);
-+
-+ /* set pointer for parent subject */
-+ if (s_tmp->parent_subject) {
-+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
-+
-+ if (IS_ERR(s_tmp2))
-+ return s_tmp2;
-+
-+ s_tmp->parent_subject = s_tmp2;
-+ }
-+
-+ /* add in ip acls */
-+
-+ if (!s_tmp->ip_num) {
-+ s_tmp->ips = NULL;
-+ goto insert;
-+ }
-+
-+ i_tmp =
-+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
-+ sizeof (struct acl_ip_label *));
-+
-+ if (!i_tmp)
-+ return ERR_PTR(-ENOMEM);
-+
-+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
-+ *(i_tmp + i_num) =
-+ (struct acl_ip_label *)
-+ acl_alloc(sizeof (struct acl_ip_label));
-+ if (!*(i_tmp + i_num))
-+ return ERR_PTR(-ENOMEM);
-+
-+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
-+ return ERR_PTR(-EFAULT);
-+
-+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
-+ return ERR_PTR(-EFAULT);
-+
-+ if ((*(i_tmp + i_num))->iface == NULL)
-+ continue;
-+
-+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
-+ if (err)
-+ return ERR_PTR(err);
-+ }
-+
-+ s_tmp->ips = i_tmp;
-+
-+insert:
-+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
-+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
-+ return ERR_PTR(-ENOMEM);
-+
-+ return s_tmp;
-+}
-+
-+static int
-+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
-+{
-+ struct acl_subject_label s_pre;
-+ struct acl_subject_label * ret;
-+ int err;
-+
-+ while (userp) {
-+ if (copy_acl_subject_label(&s_pre, userp))
-+ return -EFAULT;
-+
-+ ret = do_copy_user_subj(userp, role, NULL);
-+
-+ err = PTR_ERR(ret);
-+ if (IS_ERR(ret))
-+ return err;
-+
-+ insert_acl_subj_label(ret, role);
-+
-+ userp = s_pre.prev;
-+ }
-+
-+ return 0;
-+}
-+
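-+/* top-level policy copy: special role authentication info first, then every
-+   role along with its allowed IPs, transitions, domain info and subjects */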
-+static int
-+copy_user_acl(struct gr_arg *arg)
-+{
-+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
-+ struct acl_subject_label *subj_list;
-+ struct sprole_pw *sptmp;
-+ struct gr_hash_struct *ghash;
-+ uid_t *domainlist;
-+ unsigned int r_num;
-+ int err = 0;
-+ __u16 i;
-+ __u32 num_subjs;
-+
-+ /* we need a default and kernel role */
-+ if (arg->role_db.num_roles < 2)
-+ return -EINVAL;
-+
-+ /* copy special role authentication info from userspace */
-+
-+ polstate->num_sprole_pws = arg->num_sprole_pws;
-+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
-+
-+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
-+ return -ENOMEM;
-+
-+ for (i = 0; i < polstate->num_sprole_pws; i++) {
-+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
-+ if (!sptmp)
-+ return -ENOMEM;
-+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
-+ return -EFAULT;
-+
-+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
-+ if (err)
-+ return err;
-+
-+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
-+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
-+#endif
-+
-+ polstate->acl_special_roles[i] = sptmp;
-+ }
-+
-+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
-+
-+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
-+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
-+
-+ if (!r_tmp)
-+ return -ENOMEM;
-+
-+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
-+ return -EFAULT;
-+
-+ if (copy_acl_role_label(r_tmp, r_utmp2))
-+ return -EFAULT;
-+
-+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
-+ if (err)
-+ return err;
-+
-+ if (!strcmp(r_tmp->rolename, "default")
-+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
-+ polstate->default_role = r_tmp;
-+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
-+ polstate->kernel_role = r_tmp;
-+ }
-+
-+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
-+ return -ENOMEM;
-+
-+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
-+ return -EFAULT;
-+
-+ r_tmp->hash = ghash;
-+
-+ num_subjs = count_user_subjs(r_tmp->hash->first);
-+
-+ r_tmp->subj_hash_size = num_subjs;
-+ r_tmp->subj_hash =
-+ (struct acl_subject_label **)
-+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
-+
-+ if (!r_tmp->subj_hash)
-+ return -ENOMEM;
-+
-+ err = copy_user_allowedips(r_tmp);
-+ if (err)
-+ return err;
-+
-+ /* copy domain info */
-+ if (r_tmp->domain_children != NULL) {
-+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
-+ if (domainlist == NULL)
-+ return -ENOMEM;
-+
-+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
-+ return -EFAULT;
-+
-+ r_tmp->domain_children = domainlist;
-+ }
-+
-+ err = copy_user_transitions(r_tmp);
-+ if (err)
-+ return err;
-+
-+ memset(r_tmp->subj_hash, 0,
-+ r_tmp->subj_hash_size *
-+ sizeof (struct acl_subject_label *));
-+
-+ /* acquire the list of subjects, then NULL out
-+ the list prior to parsing the subjects for this role,
-+ as during this parsing the list is replaced with a list
-+ of *nested* subjects for the role
-+ */
-+ subj_list = r_tmp->hash->first;
-+
-+ /* set nested subject list to null */
-+ r_tmp->hash->first = NULL;
-+
-+ err = copy_user_subjs(subj_list, r_tmp);
-+
-+ if (err)
-+ return err;
-+
-+ insert_acl_role_label(r_tmp);
-+ }
-+
-+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
-+ return -EINVAL;
-+
-+ return err;
-+}
-+
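-+/* runs under stop_machine(): first verify every task can be resolved against
-+   the new policy, then switch all tasks' roles and subjects over to it */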
-+static int gracl_reload_apply_policies(void *reload)
-+{
-+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
-+ struct task_struct *task, *task2;
-+ struct acl_role_label *role, *rtmp;
-+ struct acl_subject_label *subj;
-+ const struct cred *cred;
-+ int role_applied;
-+ int ret = 0;
-+
-+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
-+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
-+
-+ /* first make sure we'll be able to apply the new policy cleanly */
-+ do_each_thread(task2, task) {
-+ if (task->exec_file == NULL)
-+ continue;
-+ role_applied = 0;
-+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
-+ /* preserve special roles */
-+ FOR_EACH_ROLE_START(role)
-+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
-+ rtmp = task->role;
-+ task->role = role;
-+ role_applied = 1;
-+ break;
-+ }
-+ FOR_EACH_ROLE_END(role)
-+ }
-+ if (!role_applied) {
-+ cred = __task_cred(task);
-+ rtmp = task->role;
-+ task->role = __lookup_acl_role_label(polstate, task, cred->uid, cred->gid);
-+ }
-+		/* this handles non-nested inherited subjects; nested subjects will still
-+		   be dropped currently */
-+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
-+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL, 1);
-+ /* change the role back so that we've made no modifications to the policy */
-+ task->role = rtmp;
-+
-+ if (subj == NULL || task->tmpacl == NULL) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+ } while_each_thread(task2, task);
-+
-+ /* now actually apply the policy */
-+
-+ do_each_thread(task2, task) {
-+ if (task->exec_file) {
-+ role_applied = 0;
-+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
-+ /* preserve special roles */
-+ FOR_EACH_ROLE_START(role)
-+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
-+ task->role = role;
-+ role_applied = 1;
-+ break;
-+ }
-+ FOR_EACH_ROLE_END(role)
-+ }
-+ if (!role_applied) {
-+ cred = __task_cred(task);
-+ task->role = __lookup_acl_role_label(polstate, task, cred->uid, cred->gid);
-+ }
-+			/* this handles non-nested inherited subjects; nested subjects will still
-+			   be dropped currently */
-+ if (!reload_state->oldmode && task->inherited)
-+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
-+ else {
-+ /* looked up and tagged to the task previously */
-+ subj = task->tmpacl;
-+ }
-+ /* subj will be non-null */
-+ __gr_apply_subject_to_task(polstate, task, subj);
-+ if (reload_state->oldmode) {
-+ task->acl_role_id = 0;
-+ task->acl_sp_role = 0;
-+ task->inherited = 0;
-+ }
-+ } else {
-+ // it's a kernel process
-+ task->role = polstate->kernel_role;
-+ task->acl = polstate->kernel_role->root_label;
-+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
-+ task->acl->mode &= ~GR_PROCFIND;
-+#endif
-+ }
-+ } while_each_thread(task2, task);
-+
-+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
-+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
-+
-+out:
-+
-+ return ret;
-+}
-+
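-+/* swap in a new policy without disabling the RBAC system; if anything fails,
-+   the previously loaded policy remains in effect */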
-+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
-+{
-+ struct gr_reload_state new_reload_state = { };
-+ int err;
-+
-+ new_reload_state.oldpolicy_ptr = polstate;
-+ new_reload_state.oldalloc_ptr = current_alloc_state;
-+ new_reload_state.oldmode = oldmode;
-+
-+ current_alloc_state = &new_reload_state.newalloc;
-+ polstate = &new_reload_state.newpolicy;
-+
-+ /* everything relevant is now saved off, copy in the new policy */
-+ if (init_variables(args, true)) {
-+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
-+ err = -ENOMEM;
-+ goto error;
-+ }
-+
-+ err = copy_user_acl(args);
-+ free_init_variables();
-+ if (err)
-+ goto error;
-+	/* the new policy is now copied in, with the old policy available via saved_state.
-+	   First go through applying roles, making sure to preserve special roles;
-+	   then apply new subjects, making sure to preserve inherited and nested subjects,
-+	   though currently only inherited subjects will be preserved
-+	*/
-+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
-+ if (err)
-+ goto error;
-+
-+ /* we've now applied the new policy, so restore the old policy state to free it */
-+ polstate = &new_reload_state.oldpolicy;
-+ current_alloc_state = &new_reload_state.oldalloc;
-+ free_variables(true);
-+
-+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
-+ to running_polstate/current_alloc_state inside stop_machine
-+ */
-+ err = 0;
-+ goto out;
-+error:
-+ /* on error of loading the new policy, we'll just keep the previous
-+ policy set around
-+ */
-+ free_variables(true);
-+
-+ /* doesn't affect runtime, but maintains consistent state */
-+out:
-+ polstate = new_reload_state.oldpolicy_ptr;
-+ current_alloc_state = new_reload_state.oldalloc_ptr;
-+
-+ return err;
-+}
-+
-+static int
-+gracl_init(struct gr_arg *args)
-+{
-+ int error = 0;
-+
-+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
-+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
-+
-+ if (init_variables(args, false)) {
-+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
-+ error = -ENOMEM;
-+ goto out;
-+ }
-+
-+ error = copy_user_acl(args);
-+ free_init_variables();
-+ if (error)
-+ goto out;
-+
-+ error = gr_set_acls(0);
-+ if (error)
-+ goto out;
-+
-+ gr_enable_rbac_system();
-+
-+ return 0;
-+
-+out:
-+ free_variables(false);
-+ return error;
-+}
-+
-+static int
-+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
-+ unsigned char **sum)
-+{
-+ struct acl_role_label *r;
-+ struct role_allowed_ip *ipp;
-+ struct role_transition *trans;
-+ unsigned int i;
-+ int found = 0;
-+ u32 curr_ip = current->signal->curr_ip;
-+
-+ current->signal->saved_ip = curr_ip;
-+
-+ /* check transition table */
-+
-+ for (trans = current->role->transitions; trans; trans = trans->next) {
-+ if (!strcmp(rolename, trans->rolename)) {
-+ found = 1;
-+ break;
-+ }
-+ }
-+
-+ if (!found)
-+ return 0;
-+
-+ /* handle special roles that do not require authentication
-+ and check ip */
-+
-+ FOR_EACH_ROLE_START(r)
-+ if (!strcmp(rolename, r->rolename) &&
-+ (r->roletype & GR_ROLE_SPECIAL)) {
-+ found = 0;
-+ if (r->allowed_ips != NULL) {
-+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
-+ if ((ntohl(curr_ip) & ipp->netmask) ==
-+ (ntohl(ipp->addr) & ipp->netmask))
-+ found = 1;
-+ }
-+ } else
-+ found = 2;
-+ if (!found)
-+ return 0;
-+
-+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
-+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
-+ *salt = NULL;
-+ *sum = NULL;
-+ return 1;
-+ }
-+ }
-+ FOR_EACH_ROLE_END(r)
-+
-+ for (i = 0; i < polstate->num_sprole_pws; i++) {
-+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
-+ *salt = polstate->acl_special_roles[i]->salt;
-+ *sum = polstate->acl_special_roles[i]->sum;
-+ return 1;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
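-+/* returns 0 if an unrelated (non-ancestor) process holds the task's
-+   controlling terminal open, i.e. the terminal may be sniffed; 1 otherwise */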
-+int gr_check_secure_terminal(struct task_struct *task)
-+{
-+ struct task_struct *p, *p2, *p3;
-+ struct files_struct *files;
-+ struct fdtable *fdt;
-+ struct file *our_file = NULL, *file;
-+ int i;
-+
-+ if (task->signal->tty == NULL)
-+ return 1;
-+
-+ files = get_files_struct(task);
-+ if (files != NULL) {
-+ rcu_read_lock();
-+ fdt = files_fdtable(files);
-+ for (i=0; i < fdt->max_fds; i++) {
-+ file = fcheck_files(files, i);
-+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
-+ get_file(file);
-+ our_file = file;
-+ }
-+ }
-+ rcu_read_unlock();
-+ put_files_struct(files);
-+ }
-+
-+ if (our_file == NULL)
-+ return 1;
-+
-+ read_lock(&tasklist_lock);
-+ do_each_thread(p2, p) {
-+ files = get_files_struct(p);
-+ if (files == NULL ||
-+ (p->signal && p->signal->tty == task->signal->tty)) {
-+ if (files != NULL)
-+ put_files_struct(files);
-+ continue;
-+ }
-+ rcu_read_lock();
-+ fdt = files_fdtable(files);
-+ for (i=0; i < fdt->max_fds; i++) {
-+ file = fcheck_files(files, i);
-+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
-+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
-+ p3 = task;
-+ while (task_pid_nr(p3) > 0) {
-+ if (p3 == p)
-+ break;
-+ p3 = p3->real_parent;
-+ }
-+ if (p3 == p)
-+ break;
-+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
-+ gr_handle_alertkill(p);
-+ rcu_read_unlock();
-+ put_files_struct(files);
-+ read_unlock(&tasklist_lock);
-+ fput(our_file);
-+ return 0;
-+ }
-+ }
-+ rcu_read_unlock();
-+ put_files_struct(files);
-+ } while_each_thread(p2, p);
-+ read_unlock(&tasklist_lock);
-+
-+ fput(our_file);
-+ return 1;
-+}
-+
-+ssize_t
-+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
-+{
-+ struct gr_arg_wrapper uwrap;
-+ unsigned char *sprole_salt = NULL;
-+ unsigned char *sprole_sum = NULL;
-+ int error = 0;
-+ int error2 = 0;
-+ size_t req_count = 0;
-+ unsigned char oldmode = 0;
-+
-+ mutex_lock(&gr_dev_mutex);
-+
-+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
-+ error = -EPERM;
-+ goto out;
-+ }
-+
-+#ifdef CONFIG_COMPAT
-+ pax_open_kernel();
-+ if (is_compat_task()) {
-+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
-+ copy_gr_arg = &copy_gr_arg_compat;
-+ copy_acl_object_label = &copy_acl_object_label_compat;
-+ copy_acl_subject_label = &copy_acl_subject_label_compat;
-+ copy_acl_role_label = &copy_acl_role_label_compat;
-+ copy_acl_ip_label = &copy_acl_ip_label_compat;
-+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
-+ copy_role_transition = &copy_role_transition_compat;
-+ copy_sprole_pw = &copy_sprole_pw_compat;
-+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
-+ copy_pointer_from_array = &copy_pointer_from_array_compat;
-+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
-+ } else {
-+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
-+ copy_gr_arg = &copy_gr_arg_normal;
-+ copy_acl_object_label = &copy_acl_object_label_normal;
-+ copy_acl_subject_label = &copy_acl_subject_label_normal;
-+ copy_acl_role_label = &copy_acl_role_label_normal;
-+ copy_acl_ip_label = &copy_acl_ip_label_normal;
-+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
-+ copy_role_transition = &copy_role_transition_normal;
-+ copy_sprole_pw = &copy_sprole_pw_normal;
-+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
-+ copy_pointer_from_array = &copy_pointer_from_array_normal;
-+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
-+ }
-+ pax_close_kernel();
-+#endif
-+
-+ req_count = get_gr_arg_wrapper_size();
-+
-+ if (count != req_count) {
-+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
-+ error = -EINVAL;
-+ goto out;
-+ }
-+
-+
-+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
-+ gr_auth_expires = 0;
-+ gr_auth_attempts = 0;
-+ }
-+
-+ error = copy_gr_arg_wrapper(buf, &uwrap);
-+ if (error)
-+ goto out;
-+
-+ error = copy_gr_arg(uwrap.arg, gr_usermode);
-+ if (error)
-+ goto out;
-+
-+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
-+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
-+ time_after(gr_auth_expires, get_seconds())) {
-+ error = -EBUSY;
-+ goto out;
-+ }
-+
-+	/* if a non-root user is trying to do anything other than use a special
-+	   role, do not attempt authentication and do not count it towards
-+	   authentication lockout
-+	 */
-+
-+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
-+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
-+ current_uid()) {
-+ error = -EPERM;
-+ goto out;
-+ }
-+
-+ /* ensure pw and special role name are null terminated */
-+
-+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
-+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
-+
-+	/* Okay.
-+	 * We have enough of the argument structure (we have yet to
-+	 * copy_from_user the tables themselves).  Copy the tables
-+	 * only if we need them, i.e. for loading operations. */
-+
-+ switch (gr_usermode->mode) {
-+ case GR_STATUS:
-+ if (gr_acl_is_enabled()) {
-+ error = 1;
-+ if (!gr_check_secure_terminal(current))
-+ error = 3;
-+ } else
-+ error = 2;
-+ goto out;
-+ case GR_SHUTDOWN:
-+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
-+ stop_machine(gr_rbac_disable, NULL, NULL);
-+ free_variables(false);
-+ memset(gr_usermode, 0, sizeof(struct gr_arg));
-+ memset(gr_system_salt, 0, GR_SALT_LEN);
-+ memset(gr_system_sum, 0, GR_SHA_LEN);
-+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
-+ } else if (gr_acl_is_enabled()) {
-+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
-+ error = -EPERM;
-+ } else {
-+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
-+ error = -EAGAIN;
-+ }
-+ break;
-+ case GR_ENABLE:
-+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
-+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
-+ else {
-+ if (gr_acl_is_enabled())
-+ error = -EAGAIN;
-+ else
-+ error = error2;
-+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
-+ }
-+ break;
-+ case GR_OLDRELOAD:
-+ oldmode = 1;
-+ case GR_RELOAD:
-+ if (!gr_acl_is_enabled()) {
-+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
-+ error = -EAGAIN;
-+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
-+ error2 = gracl_reload(gr_usermode, oldmode);
-+ if (!error2)
-+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
-+ else {
-+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
-+ error = error2;
-+ }
-+ } else {
-+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
-+ error = -EPERM;
-+ }
-+ break;
-+ case GR_SEGVMOD:
-+ if (unlikely(!gr_acl_is_enabled())) {
-+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
-+ error = -EAGAIN;
-+ break;
-+ }
-+
-+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
-+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
-+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
-+ struct acl_subject_label *segvacl;
-+ segvacl =
-+ lookup_acl_subj_label(gr_usermode->segv_inode,
-+ gr_usermode->segv_device,
-+ current->role);
-+ if (segvacl) {
-+ segvacl->crashes = 0;
-+ segvacl->expires = 0;
-+ }
-+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
-+ gr_remove_uid(gr_usermode->segv_uid);
-+ }
-+ } else {
-+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
-+ error = -EPERM;
-+ }
-+ break;
-+ case GR_SPROLE:
-+ case GR_SPROLEPAM:
-+ if (unlikely(!gr_acl_is_enabled())) {
-+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
-+ error = -EAGAIN;
-+ break;
-+ }
-+
-+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
-+ current->role->expires = 0;
-+ current->role->auth_attempts = 0;
-+ }
-+
-+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
-+ time_after(current->role->expires, get_seconds())) {
-+ error = -EBUSY;
-+ goto out;
-+ }
-+
-+ if (lookup_special_role_auth
-+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
-+ && ((!sprole_salt && !sprole_sum)
-+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
-+ char *p = "";
-+ assign_special_role(gr_usermode->sp_role);
-+ read_lock(&tasklist_lock);
-+ if (current->real_parent)
-+ p = current->real_parent->role->rolename;
-+ read_unlock(&tasklist_lock);
-+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
-+ p, acl_sp_role_value);
-+ } else {
-+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
-+ error = -EPERM;
-+ if(!(current->role->auth_attempts++))
-+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
-+
-+ goto out;
-+ }
-+ break;
-+ case GR_UNSPROLE:
-+ if (unlikely(!gr_acl_is_enabled())) {
-+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
-+ error = -EAGAIN;
-+ break;
-+ }
-+
-+ if (current->role->roletype & GR_ROLE_SPECIAL) {
-+ char *p = "";
-+ int i = 0;
-+
-+ read_lock(&tasklist_lock);
-+ if (current->real_parent) {
-+ p = current->real_parent->role->rolename;
-+ i = current->real_parent->acl_role_id;
-+ }
-+ read_unlock(&tasklist_lock);
-+
-+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
-+ gr_set_acls(1);
-+ } else {
-+ error = -EPERM;
-+ goto out;
-+ }
-+ break;
-+ default:
-+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
-+ error = -EINVAL;
-+ break;
-+ }
-+
-+ if (error != -EPERM)
-+ goto out;
-+
-+ if(!(gr_auth_attempts++))
-+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
-+
-+ out:
-+ mutex_unlock(&gr_dev_mutex);
-+
-+ if (!error)
-+ error = req_count;
-+
-+ return error;
-+}
-+
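-+/* re-resolve role and subject for every task; a non-zero type restricts this
-+   to tasks still carrying the caller's current role and role id */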
-+int
-+gr_set_acls(const int type)
-+{
-+ struct task_struct *task, *task2;
-+ struct acl_role_label *role = current->role;
-+ struct acl_subject_label *subj;
-+ __u16 acl_role_id = current->acl_role_id;
-+ const struct cred *cred;
-+ int ret;
-+
-+ rcu_read_lock();
-+ read_lock(&tasklist_lock);
-+ read_lock(&grsec_exec_file_lock);
-+ do_each_thread(task2, task) {
-+		/* check to see if we're called from the exit handler;
-+		   if so, only replace ACLs that have inherited the admin
-+		   ACL */
-+
-+ if (type && (task->role != role ||
-+ task->acl_role_id != acl_role_id))
-+ continue;
-+
-+ task->acl_role_id = 0;
-+ task->acl_sp_role = 0;
-+ task->inherited = 0;
-+
-+ if (task->exec_file) {
-+ cred = __task_cred(task);
-+ task->role = __lookup_acl_role_label(polstate, task, cred->uid, cred->gid);
-+ subj = __gr_get_subject_for_task(polstate, task, NULL, 1);
-+ if (subj == NULL) {
-+ ret = -EINVAL;
-+ read_unlock(&grsec_exec_file_lock);
-+ read_unlock(&tasklist_lock);
-+ rcu_read_unlock();
-+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
-+ return ret;
-+ }
-+ __gr_apply_subject_to_task(polstate, task, subj);
-+ } else {
-+ // it's a kernel process
-+ task->role = polstate->kernel_role;
-+ task->acl = polstate->kernel_role->root_label;
-+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
-+ task->acl->mode &= ~GR_PROCFIND;
-+#endif
-+ }
-+ } while_each_thread(task2, task);
-+ read_unlock(&grsec_exec_file_lock);
-+ read_unlock(&tasklist_lock);
-+ rcu_read_unlock();
-+
-+ return 0;
-+}
-diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
-new file mode 100644
-index 0000000..39645c9
---- /dev/null
-+++ b/grsecurity/gracl_res.c
-@@ -0,0 +1,68 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/gracl.h>
-+#include <linux/grinternal.h>
-+
-+static const char *restab_log[] = {
-+ [RLIMIT_CPU] = "RLIMIT_CPU",
-+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
-+ [RLIMIT_DATA] = "RLIMIT_DATA",
-+ [RLIMIT_STACK] = "RLIMIT_STACK",
-+ [RLIMIT_CORE] = "RLIMIT_CORE",
-+ [RLIMIT_RSS] = "RLIMIT_RSS",
-+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
-+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
-+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
-+ [RLIMIT_AS] = "RLIMIT_AS",
-+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
-+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
-+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
-+ [RLIMIT_NICE] = "RLIMIT_NICE",
-+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
-+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
-+ [GR_CRASH_RES] = "RLIMIT_CRASH"
-+};
-+
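-+/* log an attempt to exceed a resource limit, unless the task holds a
-+   capability that legitimately allows it (e.g. CAP_SYS_RESOURCE for RLIMIT_NPROC) */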
-+void
-+gr_log_resource(const struct task_struct *task,
-+ const int res, const unsigned long wanted, const int gt)
-+{
-+ const struct cred *cred;
-+ unsigned long rlim;
-+
-+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
-+ return;
-+
-+ // not yet supported resource
-+ if (unlikely(!restab_log[res]))
-+ return;
-+
-+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
-+ rlim = task_rlimit_max(task, res);
-+ else
-+ rlim = task_rlimit(task, res);
-+
-+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
-+ return;
-+
-+ rcu_read_lock();
-+ cred = __task_cred(task);
-+
-+ if (res == RLIMIT_NPROC &&
-+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
-+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
-+ goto out_rcu_unlock;
-+ else if (res == RLIMIT_MEMLOCK &&
-+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
-+ goto out_rcu_unlock;
-+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
-+ goto out_rcu_unlock;
-+ rcu_read_unlock();
-+
-+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
-+
-+ return;
-+out_rcu_unlock:
-+ rcu_read_unlock();
-+ return;
-+}
-diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
-new file mode 100644
-index 0000000..8769655
---- /dev/null
-+++ b/grsecurity/gracl_segv.c
-@@ -0,0 +1,320 @@
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <asm/uaccess.h>
-+#include <asm/errno.h>
-+#include <asm/mman.h>
-+#include <net/sock.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/net.h>
-+#include <linux/in.h>
-+#include <linux/slab.h>
-+#include <linux/types.h>
-+#include <linux/sched.h>
-+#include <linux/timer.h>
-+#include <linux/gracl.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
-+#include <linux/magic.h>
-+#include <linux/pagemap.h>
-+#include "../fs/btrfs/async-thread.h"
-+#include "../fs/btrfs/ctree.h"
-+#include "../fs/btrfs/btrfs_inode.h"
-+#endif
-+
-+static struct crash_uid *uid_set;
-+static unsigned short uid_used;
-+static DEFINE_SPINLOCK(gr_uid_lock);
-+extern rwlock_t gr_inode_lock;
-+extern struct acl_subject_label *
-+ lookup_acl_subj_label(const u64 inode, const dev_t dev,
-+ struct acl_role_label *role);
-+
-+static inline dev_t __get_dev(const struct dentry *dentry)
-+{
-+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
-+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
-+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
-+ else
-+#endif
-+ return dentry->d_sb->s_dev;
-+}
-+
-+static inline u64 __get_ino(const struct dentry *dentry)
-+{
-+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
-+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
-+ return btrfs_ino(dentry->d_inode);
-+ else
-+#endif
-+ return dentry->d_inode->i_ino;
-+}
-+
-+int
-+gr_init_uidset(void)
-+{
-+ uid_set =
-+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
-+ uid_used = 0;
-+
-+ return uid_set ? 1 : 0;
-+}
-+
-+void
-+gr_free_uidset(void)
-+{
-+ if (uid_set) {
-+ struct crash_uid *tmpset;
-+ spin_lock(&gr_uid_lock);
-+ tmpset = uid_set;
-+ uid_set = NULL;
-+ uid_used = 0;
-+ spin_unlock(&gr_uid_lock);
-+ if (tmpset)
-+ kfree(tmpset);
-+ }
-+
-+ return;
-+}
-+
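-+/* binary search of the sorted crash-uid table; returns the index or -1 */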
-+int
-+gr_find_uid(const uid_t uid)
-+{
-+ struct crash_uid *tmp = uid_set;
-+ uid_t buid;
-+ int low = 0, high = uid_used - 1, mid;
-+
-+ while (high >= low) {
-+ mid = (low + high) >> 1;
-+ buid = tmp[mid].uid;
-+ if (buid == uid)
-+ return mid;
-+ if (buid > uid)
-+ high = mid - 1;
-+ if (buid < uid)
-+ low = mid + 1;
-+ }
-+
-+ return -1;
-+}
-+
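-+/* insertion sort keeping uid_set ordered by uid so gr_find_uid() can binary search */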
-+static void
-+gr_insertsort(void)
-+{
-+ unsigned short i, j;
-+ struct crash_uid index;
-+
-+ for (i = 1; i < uid_used; i++) {
-+ index = uid_set[i];
-+ j = i;
-+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
-+ uid_set[j] = uid_set[j - 1];
-+ j--;
-+ }
-+ uid_set[j] = index;
-+ }
-+
-+ return;
-+}
-+
-+static void
-+gr_insert_uid(const uid_t uid, const unsigned long expires)
-+{
-+ int loc;
-+
-+ if (uid_used == GR_UIDTABLE_MAX)
-+ return;
-+
-+ loc = gr_find_uid(uid);
-+
-+ if (loc >= 0) {
-+ uid_set[loc].expires = expires;
-+ return;
-+ }
-+
-+ uid_set[uid_used].uid = uid;
-+ uid_set[uid_used].expires = expires;
-+ uid_used++;
-+
-+ gr_insertsort();
-+
-+ return;
-+}
-+
-+void
-+gr_remove_uid(const unsigned short loc)
-+{
-+ unsigned short i;
-+
-+ for (i = loc + 1; i < uid_used; i++)
-+ uid_set[i - 1] = uid_set[i];
-+
-+ uid_used--;
-+
-+ return;
-+}
-+
-+int
-+gr_check_crash_uid(const uid_t uid)
-+{
-+ int loc;
-+ int ret = 0;
-+
-+ if (unlikely(!gr_acl_is_enabled()))
-+ return 0;
-+
-+ spin_lock(&gr_uid_lock);
-+ loc = gr_find_uid(uid);
-+
-+ if (loc < 0)
-+ goto out_unlock;
-+
-+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
-+ gr_remove_uid(loc);
-+ else
-+ ret = 1;
-+
-+out_unlock:
-+ spin_unlock(&gr_uid_lock);
-+ return ret;
-+}
-+
-+static int
-+proc_is_setxid(const struct cred *cred)
-+{
-+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
-+ cred->uid != cred->fsuid)
-+ return 1;
-+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
-+ cred->gid != cred->fsgid)
-+ return 1;
-+
-+ return 0;
-+}
-+
-+extern int gr_fake_force_sig(int sig, struct task_struct *t);
-+
-+void
-+gr_handle_crash(struct task_struct *task, const int sig)
-+{
-+ struct acl_subject_label *curr;
-+ struct task_struct *tsk, *tsk2;
-+ const struct cred *cred;
-+ const struct cred *cred2;
-+
-+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
-+ return;
-+
-+ if (unlikely(!gr_acl_is_enabled()))
-+ return;
-+
-+ curr = task->acl;
-+
-+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
-+ return;
-+
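-+	/* RES_CRASH policy: rlim_cur crashes within rlim_max seconds trigger
-+	 * the response below, either a temporary uid ban for setxid processes
-+	 * or killing every task running the same subject and binary
-+	 */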
-+ if (time_before_eq(curr->expires, get_seconds())) {
-+ curr->expires = 0;
-+ curr->crashes = 0;
-+ }
-+
-+ curr->crashes++;
-+
-+ if (!curr->expires)
-+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
-+
-+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
-+ time_after(curr->expires, get_seconds())) {
-+ rcu_read_lock();
-+ cred = __task_cred(task);
-+ if (cred->uid && proc_is_setxid(cred)) {
-+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
-+ spin_lock(&gr_uid_lock);
-+ gr_insert_uid(cred->uid, curr->expires);
-+ spin_unlock(&gr_uid_lock);
-+ curr->expires = 0;
-+ curr->crashes = 0;
-+ read_lock(&tasklist_lock);
-+ do_each_thread(tsk2, tsk) {
-+ cred2 = __task_cred(tsk);
-+ if (tsk != task && cred2->uid == cred->uid)
-+ gr_fake_force_sig(SIGKILL, tsk);
-+ } while_each_thread(tsk2, tsk);
-+ read_unlock(&tasklist_lock);
-+ } else {
-+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
-+ read_lock(&tasklist_lock);
-+ read_lock(&grsec_exec_file_lock);
-+ do_each_thread(tsk2, tsk) {
-+ if (likely(tsk != task)) {
-+ // if this thread has the same subject as the one that triggered
-+ // RES_CRASH and it's the same binary, kill it
-+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
-+ gr_fake_force_sig(SIGKILL, tsk);
-+ }
-+ } while_each_thread(tsk2, tsk);
-+ read_unlock(&grsec_exec_file_lock);
-+ read_unlock(&tasklist_lock);
-+ }
-+ rcu_read_unlock();
-+ }
-+
-+ return;
-+}
-+
-+int
-+gr_check_crash_exec(const struct file *filp)
-+{
-+ struct acl_subject_label *curr;
-+ struct dentry *dentry;
-+
-+ if (unlikely(!gr_acl_is_enabled()))
-+ return 0;
-+
-+ read_lock(&gr_inode_lock);
-+ dentry = filp->f_path.dentry;
-+ curr = lookup_acl_subj_label(__get_ino(dentry), __get_dev(dentry),
-+ current->role);
-+ read_unlock(&gr_inode_lock);
-+
-+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
-+ (!curr->crashes && !curr->expires))
-+ return 0;
-+
-+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
-+ time_after(curr->expires, get_seconds()))
-+ return 1;
-+ else if (time_before_eq(curr->expires, get_seconds())) {
-+ curr->crashes = 0;
-+ curr->expires = 0;
-+ }
-+
-+ return 0;
-+}
-+
-+void
-+gr_handle_alertkill(struct task_struct *task)
-+{
-+ struct acl_subject_label *curracl;
-+ __u32 curr_ip;
-+ struct task_struct *p, *p2;
-+
-+ if (unlikely(!gr_acl_is_enabled()))
-+ return;
-+
-+ curracl = task->acl;
-+ curr_ip = task->signal->curr_ip;
-+
-+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
-+ read_lock(&tasklist_lock);
-+ do_each_thread(p2, p) {
-+ if (p->signal->curr_ip == curr_ip)
-+ gr_fake_force_sig(SIGKILL, p);
-+ } while_each_thread(p2, p);
-+ read_unlock(&tasklist_lock);
-+ } else if (curracl->mode & GR_KILLPROC)
-+ gr_fake_force_sig(SIGKILL, task);
-+
-+ return;
-+}
-diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
-new file mode 100644
-index 0000000..9d83a69
---- /dev/null
-+++ b/grsecurity/gracl_shm.c
-@@ -0,0 +1,40 @@
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/sched.h>
-+#include <linux/file.h>
-+#include <linux/ipc.h>
-+#include <linux/gracl.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+int
-+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
-+ const time_t shm_createtime, const uid_t cuid, const int shmid)
-+{
-+ struct task_struct *task;
-+
-+ if (!gr_acl_is_enabled())
-+ return 1;
-+
-+ rcu_read_lock();
-+ read_lock(&tasklist_lock);
-+
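-+	/* deny the attach if the task that created (or last attached) the
-+	 * segment still exists (checked via start time to guard against pid
-+	 * reuse), is protected by GR_PROTSHM, and runs under a different
-+	 * subject than the caller
-+	 */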
-+ task = find_task_by_vpid(shm_cprid);
-+
-+ if (unlikely(!task))
-+ task = find_task_by_vpid(shm_lapid);
-+
-+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
-+ (task->pid == shm_lapid)) &&
-+ (task->acl->mode & GR_PROTSHM) &&
-+ (task->acl != current->acl))) {
-+ read_unlock(&tasklist_lock);
-+ rcu_read_unlock();
-+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
-+ return 0;
-+ }
-+ read_unlock(&tasklist_lock);
-+ rcu_read_unlock();
-+
-+ return 1;
-+}
-diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
-new file mode 100644
-index 0000000..bc0be01
---- /dev/null
-+++ b/grsecurity/grsec_chdir.c
-@@ -0,0 +1,19 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/fs.h>
-+#include <linux/file.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+void
-+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
-+ if ((grsec_enable_chdir && grsec_enable_group &&
-+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
-+ !grsec_enable_group)) {
-+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
-+ }
-+#endif
-+ return;
-+}
-diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
-new file mode 100644
-index 0000000..bf944ab
---- /dev/null
-+++ b/grsecurity/grsec_chroot.c
-@@ -0,0 +1,455 @@
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/lglock.h>
-+#include <linux/mount.h>
-+#include <linux/types.h>
-+#include <linux/pid_namespace.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
-+int gr_init_ran;
-+#endif
-+
-+DECLARE_BRLOCK(vfsmount_lock);
-+
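-+/* CHROOT_RENAME support: every dentry between a chroot root and its mount
-+ * root carries a chroot_refcnt, letting gr_bad_chroot_rename() detect a
-+ * rename that would move a directory out from under an active chroot.
-+ */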
-+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
-+ struct dentry *tmpd = dentry;
-+
-+ br_read_lock(vfsmount_lock);
-+ write_seqlock(&rename_lock);
-+
-+ while (tmpd != mnt->mnt_root) {
-+ atomic_inc(&tmpd->chroot_refcnt);
-+ tmpd = tmpd->d_parent;
-+ }
-+ atomic_inc(&tmpd->chroot_refcnt);
-+
-+ write_sequnlock(&rename_lock);
-+ br_read_unlock(vfsmount_lock);
-+#endif
-+}
-+
-+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
-+ struct dentry *tmpd = dentry;
-+
-+ br_read_lock(vfsmount_lock);
-+ write_seqlock(&rename_lock);
-+
-+ while (tmpd != mnt->mnt_root) {
-+ atomic_dec(&tmpd->chroot_refcnt);
-+ tmpd = tmpd->d_parent;
-+ }
-+ atomic_dec(&tmpd->chroot_refcnt);
-+
-+ write_sequnlock(&rename_lock);
-+ br_read_unlock(vfsmount_lock);
-+#endif
-+}
-+
-+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
-+static struct dentry *get_closest_chroot(struct dentry *dentry)
-+{
-+ write_seqlock(&rename_lock);
-+ do {
-+ if (atomic_read(&dentry->chroot_refcnt)) {
-+ write_sequnlock(&rename_lock);
-+ return dentry;
-+ }
-+ dentry = dentry->d_parent;
-+ } while (!IS_ROOT(dentry));
-+ write_sequnlock(&rename_lock);
-+ return NULL;
-+}
-+#endif
-+
-+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
-+ struct dentry *newdentry, struct vfsmount *newmnt)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
-+ struct dentry *chroot;
-+
-+ if (unlikely(!grsec_enable_chroot_rename))
-+ return 0;
-+
-+ if (likely(!proc_is_chrooted(current) && !current_uid()))
-+ return 0;
-+
-+ chroot = get_closest_chroot(olddentry);
-+
-+ if (chroot == NULL)
-+ return 0;
-+
-+ if (is_subdir(newdentry, chroot))
-+ return 0;
-+
-+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_RENAME_MSG, olddentry, oldmnt);
-+
-+ return 1;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
-+{
-+#ifdef CONFIG_GRKERNSEC
-+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
-+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root
-+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
-+ && gr_init_ran
-+#endif
-+ )
-+ task->gr_is_chrooted = 1;
-+ else {
-+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
-+ if (task->pid == 1 && !gr_init_ran)
-+ gr_init_ran = 1;
-+#endif
-+ task->gr_is_chrooted = 0;
-+ }
-+
-+ task->gr_chroot_dentry = path->dentry;
-+#endif
-+ return;
-+}
-+
-+void gr_clear_chroot_entries(struct task_struct *task)
-+{
-+#ifdef CONFIG_GRKERNSEC
-+ task->gr_is_chrooted = 0;
-+ task->gr_chroot_dentry = NULL;
-+#endif
-+ return;
-+}
-+
-+int
-+gr_handle_chroot_unix(const pid_t pid)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
-+ struct task_struct *p;
-+
-+ if (unlikely(!grsec_enable_chroot_unix))
-+ return 1;
-+
-+ if (likely(!proc_is_chrooted(current)))
-+ return 1;
-+
-+ rcu_read_lock();
-+ read_lock(&tasklist_lock);
-+ p = find_task_by_vpid_unrestricted(pid);
-+ if (unlikely(p && !have_same_root(current, p))) {
-+ read_unlock(&tasklist_lock);
-+ rcu_read_unlock();
-+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
-+ return 0;
-+ }
-+ read_unlock(&tasklist_lock);
-+ rcu_read_unlock();
-+#endif
-+ return 1;
-+}
-+
-+int
-+gr_handle_chroot_nice(void)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
-+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
-+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
-+ return -EPERM;
-+ }
-+#endif
-+ return 0;
-+}
-+
-+int
-+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
-+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
-+ && proc_is_chrooted(current)) {
-+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
-+ return -EACCES;
-+ }
-+#endif
-+ return 0;
-+}
-+
-+int
-+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
-+ struct task_struct *p;
-+ int ret = 0;
-+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
-+ return ret;
-+
-+ read_lock(&tasklist_lock);
-+ do_each_pid_task(pid, type, p) {
-+ if (!have_same_root(current, p)) {
-+ ret = 1;
-+ goto out;
-+ }
-+ } while_each_pid_task(pid, type, p);
-+out:
-+ read_unlock(&tasklist_lock);
-+ return ret;
-+#endif
-+ return 0;
-+}
-+
-+int
-+gr_pid_is_chrooted(struct task_struct *p)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
-+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
-+ return 0;
-+
-+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
-+ !have_same_root(current, p)) {
-+ return 1;
-+ }
-+#endif
-+ return 0;
-+}
-+
-+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
-+
-+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
-+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
-+{
-+ struct path path, currentroot;
-+ int ret = 0;
-+
-+ path.dentry = (struct dentry *)u_dentry;
-+ path.mnt = (struct vfsmount *)u_mnt;
-+ get_fs_root(current->fs, &currentroot);
-+ if (path_is_under(&path, &currentroot))
-+ ret = 1;
-+ path_put(&currentroot);
-+
-+ return ret;
-+}
-+#endif
-+
-+int
-+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
-+ if (!grsec_enable_chroot_fchdir)
-+ return 1;
-+
-+ if (!proc_is_chrooted(current))
-+ return 1;
-+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
-+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
-+ return 0;
-+ }
-+#endif
-+ return 1;
-+}
-+
-+int
-+gr_chroot_fhandle(void)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
-+ if (!grsec_enable_chroot_fchdir)
-+ return 1;
-+
-+ if (!proc_is_chrooted(current))
-+ return 1;
-+ else {
-+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
-+ return 0;
-+ }
-+#endif
-+ return 1;
-+}
-+
-+int
-+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
-+ const time_t shm_createtime)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
-+ struct task_struct *p;
-+ time_t starttime;
-+
-+ if (unlikely(!grsec_enable_chroot_shmat))
-+ return 1;
-+
-+ if (likely(!proc_is_chrooted(current)))
-+ return 1;
-+
-+ rcu_read_lock();
-+ read_lock(&tasklist_lock);
-+
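-+	/* refuse to attach a segment whose creator (or last attacher) does not
-+	 * share the caller's root, i.e. lives outside the current chroot
-+	 */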
-+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
-+ starttime = p->start_time.tv_sec;
-+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
-+ if (have_same_root(current, p)) {
-+ goto allow;
-+ } else {
-+ read_unlock(&tasklist_lock);
-+ rcu_read_unlock();
-+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
-+ return 0;
-+ }
-+ }
-+ /* creator exited, pid reuse, fall through to next check */
-+ }
-+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
-+ if (unlikely(!have_same_root(current, p))) {
-+ read_unlock(&tasklist_lock);
-+ rcu_read_unlock();
-+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
-+ return 0;
-+ }
-+ }
-+
-+allow:
-+ read_unlock(&tasklist_lock);
-+ rcu_read_unlock();
-+#endif
-+ return 1;
-+}
-+
-+void
-+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
-+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
-+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
-+#endif
-+ return;
-+}
-+
-+int
-+gr_handle_chroot_mknod(const struct dentry *dentry,
-+ const struct vfsmount *mnt, const int mode)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
-+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
-+ proc_is_chrooted(current)) {
-+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
-+ return -EPERM;
-+ }
-+#endif
-+ return 0;
-+}
-+
-+int
-+gr_handle_chroot_mount(const struct dentry *dentry,
-+ const struct vfsmount *mnt, const char *dev_name)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
-+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
-+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
-+ return -EPERM;
-+ }
-+#endif
-+ return 0;
-+}
-+
-+int
-+gr_handle_chroot_pivot(void)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
-+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
-+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
-+ return -EPERM;
-+ }
-+#endif
-+ return 0;
-+}
-+
-+int
-+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
-+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
-+ !gr_is_outside_chroot(dentry, mnt)) {
-+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
-+ return -EPERM;
-+ }
-+#endif
-+ return 0;
-+}
-+
-+extern const char *captab_log[];
-+extern int captab_log_entries;
-+
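-+/* GR_CHROOT_CAPS is the mask of capabilities stripped inside a chroot: a
-+ * raised bit means the capability is denied (and, in the logging variant,
-+ * reported) for chrooted processes
-+ */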
-+int
-+gr_chroot_is_capable(const int cap)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
-+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
-+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
-+ if (cap_raised(chroot_caps, cap)) {
-+ const struct cred *creds = current_cred();
-+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
-+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
-+ }
-+ return 0;
-+ }
-+ }
-+#endif
-+ return 1;
-+}
-+
-+int
-+gr_chroot_is_capable_nolog(const int cap)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
-+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
-+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
-+ if (cap_raised(chroot_caps, cap)) {
-+ return 0;
-+ }
-+ }
-+#endif
-+ return 1;
-+}
-+
-+int
-+gr_handle_chroot_sysctl(const int op)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
-+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
-+ proc_is_chrooted(current))
-+ return -EACCES;
-+#endif
-+ return 0;
-+}
-+
-+void
-+gr_handle_chroot_chdir(struct path *path)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
-+ if (grsec_enable_chroot_chdir)
-+ set_fs_pwd(current->fs, path);
-+#endif
-+ return;
-+}
-+
-+int
-+gr_handle_chroot_chmod(const struct dentry *dentry,
-+ const struct vfsmount *mnt, const int mode)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
-+ /* allow chmod +s on directories, but not files */
-+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
-+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
-+ proc_is_chrooted(current)) {
-+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
-+ return -EPERM;
-+ }
-+#endif
-+ return 0;
-+}
-diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
-new file mode 100644
-index 0000000..7ef20f0
---- /dev/null
-+++ b/grsecurity/grsec_disabled.c
-@@ -0,0 +1,452 @@
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/kdev_t.h>
-+#include <linux/net.h>
-+#include <linux/in.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/sysctl.h>
-+
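-+/* no-op stubs used when CONFIG_GRKERNSEC is disabled: every hook permits
-+ * the operation and the RBAC system reports itself as disabled
-+ */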
-+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
-+void
-+pax_set_initial_flags(struct linux_binprm *bprm)
-+{
-+ return;
-+}
-+#endif
-+
-+#ifdef CONFIG_SYSCTL
-+__u32
-+gr_handle_sysctl(const struct ctl_table * table, const int op)
-+{
-+ return 0;
-+}
-+#endif
-+
-+#ifdef CONFIG_TASKSTATS
-+int gr_is_taskstats_denied(int pid)
-+{
-+ return 0;
-+}
-+#endif
-+
-+int
-+gr_acl_is_enabled(void)
-+{
-+ return 0;
-+}
-+
-+int
-+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
-+{
-+ return 0;
-+}
-+
-+void
-+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
-+{
-+ return;
-+}
-+
-+int
-+gr_handle_rawio(const struct inode *inode)
-+{
-+ return 0;
-+}
-+
-+void
-+gr_acl_handle_psacct(struct task_struct *task, const long code)
-+{
-+ return;
-+}
-+
-+int
-+gr_handle_ptrace(struct task_struct *task, const long request)
-+{
-+ return 0;
-+}
-+
-+int
-+gr_handle_proc_ptrace(struct task_struct *task)
-+{
-+ return 0;
-+}
-+
-+void
-+gr_learn_resource(const struct task_struct *task,
-+ const int res, const unsigned long wanted, const int gt)
-+{
-+ return;
-+}
-+
-+int
-+gr_set_acls(const int type)
-+{
-+ return 0;
-+}
-+
-+int
-+gr_check_hidden_task(const struct task_struct *tsk)
-+{
-+ return 0;
-+}
-+
-+int
-+gr_check_protected_task(const struct task_struct *task)
-+{
-+ return 0;
-+}
-+
-+int
-+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
-+{
-+ return 0;
-+}
-+
-+void
-+gr_copy_label(struct task_struct *tsk)
-+{
-+ return;
-+}
-+
-+void
-+gr_set_pax_flags(struct task_struct *task)
-+{
-+ return;
-+}
-+
-+int
-+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
-+ const int unsafe_share)
-+{
-+ return 0;
-+}
-+
-+void
-+gr_handle_delete(const u64 ino, const dev_t dev)
-+{
-+ return;
-+}
-+
-+void
-+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+ return;
-+}
-+
-+void
-+gr_handle_crash(struct task_struct *task, const int sig)
-+{
-+ return;
-+}
-+
-+int
-+gr_check_crash_exec(const struct file *filp)
-+{
-+ return 0;
-+}
-+
-+int
-+gr_check_crash_uid(const uid_t uid)
-+{
-+ return 0;
-+}
-+
-+void
-+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
-+ struct dentry *old_dentry,
-+ struct dentry *new_dentry,
-+ struct vfsmount *mnt, const __u8 replace)
-+{
-+ return;
-+}
-+
-+int
-+gr_search_socket(const int family, const int type, const int protocol)
-+{
-+ return 1;
-+}
-+
-+int
-+gr_search_connectbind(const int mode, const struct socket *sock,
-+ const struct sockaddr_in *addr)
-+{
-+ return 0;
-+}
-+
-+void
-+gr_handle_alertkill(struct task_struct *task)
-+{
-+ return;
-+}
-+
-+__u32
-+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
-+{
-+ return 1;
-+}
-+
-+__u32
-+gr_acl_handle_hidden_file(const struct dentry * dentry,
-+ const struct vfsmount * mnt)
-+{
-+ return 1;
-+}
-+
-+__u32
-+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
-+ int acc_mode)
-+{
-+ return 1;
-+}
-+
-+__u32
-+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
-+{
-+ return 1;
-+}
-+
-+__u32
-+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
-+{
-+ return 1;
-+}
-+
-+int
-+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
-+ unsigned int *vm_flags)
-+{
-+ return 1;
-+}
-+
-+__u32
-+gr_acl_handle_truncate(const struct dentry * dentry,
-+ const struct vfsmount * mnt)
-+{
-+ return 1;
-+}
-+
-+__u32
-+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
-+{
-+ return 1;
-+}
-+
-+__u32
-+gr_acl_handle_access(const struct dentry * dentry,
-+ const struct vfsmount * mnt, const int fmode)
-+{
-+ return 1;
-+}
-+
-+__u32
-+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
-+ umode_t *mode)
-+{
-+ return 1;
-+}
-+
-+__u32
-+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
-+{
-+ return 1;
-+}
-+
-+__u32
-+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
-+{
-+ return 1;
-+}
-+
-+__u32
-+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
-+{
-+ return 1;
-+}
-+
-+void
-+grsecurity_init(void)
-+{
-+ return;
-+}
-+
-+umode_t gr_acl_umask(void)
-+{
-+ return 0;
-+}
-+
-+__u32
-+gr_acl_handle_mknod(const struct dentry * new_dentry,
-+ const struct dentry * parent_dentry,
-+ const struct vfsmount * parent_mnt,
-+ const int mode)
-+{
-+ return 1;
-+}
-+
-+__u32
-+gr_acl_handle_mkdir(const struct dentry * new_dentry,
-+ const struct dentry * parent_dentry,
-+ const struct vfsmount * parent_mnt)
-+{
-+ return 1;
-+}
-+
-+__u32
-+gr_acl_handle_symlink(const struct dentry * new_dentry,
-+ const struct dentry * parent_dentry,
-+ const struct vfsmount * parent_mnt, const char *from)
-+{
-+ return 1;
-+}
-+
-+__u32
-+gr_acl_handle_link(const struct dentry * new_dentry,
-+ const struct dentry * parent_dentry,
-+ const struct vfsmount * parent_mnt,
-+ const struct dentry * old_dentry,
-+ const struct vfsmount * old_mnt, const char *to)
-+{
-+ return 1;
-+}
-+
-+int
-+gr_acl_handle_rename(const struct dentry *new_dentry,
-+ const struct dentry *parent_dentry,
-+ const struct vfsmount *parent_mnt,
-+ const struct dentry *old_dentry,
-+ const struct inode *old_parent_inode,
-+ const struct vfsmount *old_mnt, const char *newname)
-+{
-+ return 0;
-+}
-+
-+int
-+gr_acl_handle_filldir(const struct file *file, const char *name,
-+ const int namelen, const u64 ino)
-+{
-+ return 1;
-+}
-+
-+int
-+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
-+ const time_t shm_createtime, const uid_t cuid, const int shmid)
-+{
-+ return 1;
-+}
-+
-+int
-+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
-+{
-+ return 0;
-+}
-+
-+int
-+gr_search_accept(const struct socket *sock)
-+{
-+ return 0;
-+}
-+
-+int
-+gr_search_listen(const struct socket *sock)
-+{
-+ return 0;
-+}
-+
-+int
-+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
-+{
-+ return 0;
-+}
-+
-+__u32
-+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
-+{
-+ return 1;
-+}
-+
-+__u32
-+gr_acl_handle_creat(const struct dentry * dentry,
-+ const struct dentry * p_dentry,
-+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
-+ const int imode)
-+{
-+ return 1;
-+}
-+
-+void
-+gr_acl_handle_exit(void)
-+{
-+ return;
-+}
-+
-+int
-+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
-+{
-+ return 1;
-+}
-+
-+void
-+gr_set_role_label(const uid_t uid, const gid_t gid)
-+{
-+ return;
-+}
-+
-+int
-+gr_acl_handle_procpidmem(const struct task_struct *task)
-+{
-+ return 0;
-+}
-+
-+int
-+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
-+{
-+ return 0;
-+}
-+
-+int
-+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
-+{
-+ return 0;
-+}
-+
-+int
-+gr_check_user_change(int real, int effective, int fs)
-+{
-+ return 0;
-+}
-+
-+int
-+gr_check_group_change(int real, int effective, int fs)
-+{
-+ return 0;
-+}
-+
-+int gr_acl_enable_at_secure(void)
-+{
-+ return 0;
-+}
-+
-+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
-+{
-+ return dentry->d_sb->s_dev;
-+}
-+
-+u64 gr_get_ino_from_dentry(struct dentry *dentry)
-+{
-+ return dentry->d_inode->i_ino;
-+}
-+
-+void gr_put_exec_file(struct task_struct *task)
-+{
-+ return;
-+}
-+
-+EXPORT_SYMBOL_GPL(gr_learn_resource);
-+#ifdef CONFIG_SECURITY
-+EXPORT_SYMBOL_GPL(gr_check_user_change);
-+EXPORT_SYMBOL_GPL(gr_check_group_change);
-+#endif
-diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
-new file mode 100644
-index 0000000..c6db3ee
---- /dev/null
-+++ b/grsecurity/grsec_exec.c
-@@ -0,0 +1,159 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/file.h>
-+#include <linux/binfmts.h>
-+#include <linux/fs.h>
-+#include <linux/types.h>
-+#include <linux/grdefs.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+#include <linux/capability.h>
-+#include <linux/module.h>
-+#include <linux/compat.h>
-+
-+#include <asm/uaccess.h>
-+
-+#ifdef CONFIG_GRKERNSEC_EXECLOG
-+static char gr_exec_arg_buf[132];
-+static DEFINE_MUTEX(gr_exec_arg_mutex);
-+#endif
-+
-+struct user_arg_ptr {
-+#ifdef CONFIG_COMPAT
-+ bool is_compat;
-+#endif
-+ union {
-+ const char __user *const __user *native;
-+#ifdef CONFIG_COMPAT
-+ const compat_uptr_t __user *compat;
-+#endif
-+ } ptr;
-+};
-+
-+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
-+
-+void
-+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
-+{
-+#ifdef CONFIG_GRKERNSEC_EXECLOG
-+ char *grarg = gr_exec_arg_buf;
-+ unsigned int i, x, execlen = 0;
-+ char c;
-+
-+ if (!((grsec_enable_execlog && grsec_enable_group &&
-+ in_group_p(grsec_audit_gid))
-+ || (grsec_enable_execlog && !grsec_enable_group)))
-+ return;
-+
-+ mutex_lock(&gr_exec_arg_mutex);
-+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
-+
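-+	/* copy at most the first ~128 bytes of the argv vector into the shared
-+	 * buffer, space-separated; gr_exec_arg_mutex serializes access to it
-+	 */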
-+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
-+ const char __user *p;
-+ unsigned int len;
-+
-+ p = get_user_arg_ptr(argv, i);
-+ if (IS_ERR(p))
-+ goto log;
-+
-+ len = strnlen_user(p, 128 - execlen);
-+ if (len > 128 - execlen)
-+ len = 128 - execlen;
-+ else if (len > 0)
-+ len--;
-+ if (copy_from_user(grarg + execlen, p, len))
-+ goto log;
-+
-+ /* rewrite unprintable characters */
-+ for (x = 0; x < len; x++) {
-+ c = *(grarg + execlen + x);
-+ if (c < 32 || c > 126)
-+ *(grarg + execlen + x) = ' ';
-+ }
-+
-+ execlen += len;
-+ *(grarg + execlen) = ' ';
-+ *(grarg + execlen + 1) = '\0';
-+ execlen++;
-+ }
-+
-+ log:
-+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
-+ bprm->file->f_path.mnt, grarg);
-+ mutex_unlock(&gr_exec_arg_mutex);
-+#endif
-+ return;
-+}
-+
-+#ifdef CONFIG_GRKERNSEC
-+extern int gr_acl_is_capable(const int cap);
-+extern int gr_acl_is_capable_nolog(const int cap);
-+extern int gr_chroot_is_capable(const int cap);
-+extern int gr_chroot_is_capable_nolog(const int cap);
-+#endif
-+
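-+/* capability names indexed by capability number, used when logging denied
-+ * capability use; captab_log_entries bounds lookups into this table
-+ */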
-+const char *captab_log[] = {
-+ "CAP_CHOWN",
-+ "CAP_DAC_OVERRIDE",
-+ "CAP_DAC_READ_SEARCH",
-+ "CAP_FOWNER",
-+ "CAP_FSETID",
-+ "CAP_KILL",
-+ "CAP_SETGID",
-+ "CAP_SETUID",
-+ "CAP_SETPCAP",
-+ "CAP_LINUX_IMMUTABLE",
-+ "CAP_NET_BIND_SERVICE",
-+ "CAP_NET_BROADCAST",
-+ "CAP_NET_ADMIN",
-+ "CAP_NET_RAW",
-+ "CAP_IPC_LOCK",
-+ "CAP_IPC_OWNER",
-+ "CAP_SYS_MODULE",
-+ "CAP_SYS_RAWIO",
-+ "CAP_SYS_CHROOT",
-+ "CAP_SYS_PTRACE",
-+ "CAP_SYS_PACCT",
-+ "CAP_SYS_ADMIN",
-+ "CAP_SYS_BOOT",
-+ "CAP_SYS_NICE",
-+ "CAP_SYS_RESOURCE",
-+ "CAP_SYS_TIME",
-+ "CAP_SYS_TTY_CONFIG",
-+ "CAP_MKNOD",
-+ "CAP_LEASE",
-+ "CAP_AUDIT_WRITE",
-+ "CAP_AUDIT_CONTROL",
-+ "CAP_SETFCAP",
-+ "CAP_MAC_OVERRIDE",
-+ "CAP_MAC_ADMIN",
-+ "CAP_SYSLOG",
-+ "CAP_WAKE_ALARM"
-+};
-+
-+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
-+
-+int gr_is_capable(const int cap)
-+{
-+#ifdef CONFIG_GRKERNSEC
-+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
-+ return 1;
-+ return 0;
-+#else
-+ return 1;
-+#endif
-+}
-+
-+int gr_is_capable_nolog(const int cap)
-+{
-+#ifdef CONFIG_GRKERNSEC
-+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
-+ return 1;
-+ return 0;
-+#else
-+ return 1;
-+#endif
-+}
-+
-+EXPORT_SYMBOL_GPL(gr_is_capable);
-+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
-diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
-new file mode 100644
-index 0000000..d3ee748
---- /dev/null
-+++ b/grsecurity/grsec_fifo.c
-@@ -0,0 +1,24 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/fs.h>
-+#include <linux/file.h>
-+#include <linux/grinternal.h>
-+
-+int
-+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
-+ const struct dentry *dir, const int flag, const int acc_mode)
-+{
-+#ifdef CONFIG_GRKERNSEC_FIFO
-+ const struct cred *cred = current_cred();
-+
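-+	/* deny opening a FIFO in a sticky directory when it is owned by
-+	 * neither the opener nor the directory owner (the classic /tmp FIFO
-+	 * attack)
-+	 */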
-+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
-+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
-+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
-+ (cred->fsuid != dentry->d_inode->i_uid)) {
-+ if (!inode_permission(dentry->d_inode, acc_mode))
-+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
-+ return -EACCES;
-+ }
-+#endif
-+ return 0;
-+}
-diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
-new file mode 100644
-index 0000000..8ca18bf
---- /dev/null
-+++ b/grsecurity/grsec_fork.c
-@@ -0,0 +1,23 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+#include <linux/errno.h>
-+
-+void
-+gr_log_forkfail(const int retval)
-+{
-+#ifdef CONFIG_GRKERNSEC_FORKFAIL
-+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
-+ switch (retval) {
-+ case -EAGAIN:
-+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
-+ break;
-+ case -ENOMEM:
-+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
-+ break;
-+ }
-+ }
-+#endif
-+ return;
-+}
-diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
-new file mode 100644
-index 0000000..68121e2
---- /dev/null
-+++ b/grsecurity/grsec_init.c
-@@ -0,0 +1,290 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/gracl.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <linux/percpu.h>
-+#include <linux/module.h>
-+
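-+/* policy toggles initialized below from their CONFIG_* defaults; __read_only
-+ * presumably places them in memory that PaX write-protects after init, with
-+ * the sysctl interface (when enabled) toggling them through that protection
-+ */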
-+int grsec_enable_ptrace_readexec __read_only;
-+int grsec_enable_setxid __read_only;
-+int grsec_enable_symlinkown __read_only;
-+int grsec_symlinkown_gid __read_only;
-+int grsec_enable_brute __read_only;
-+int grsec_enable_link __read_only;
-+int grsec_enable_dmesg __read_only;
-+int grsec_enable_harden_ptrace __read_only;
-+int grsec_enable_harden_ipc __read_only;
-+int grsec_enable_fifo __read_only;
-+int grsec_enable_execlog __read_only;
-+int grsec_enable_signal __read_only;
-+int grsec_enable_forkfail __read_only;
-+int grsec_enable_audit_ptrace __read_only;
-+int grsec_enable_time __read_only;
-+int grsec_enable_group __read_only;
-+int grsec_audit_gid __read_only;
-+int grsec_enable_chdir __read_only;
-+int grsec_enable_mount __read_only;
-+int grsec_enable_rofs __read_only;
-+int grsec_deny_new_usb __read_only;
-+int grsec_enable_chroot_findtask __read_only;
-+int grsec_enable_chroot_mount __read_only;
-+int grsec_enable_chroot_shmat __read_only;
-+int grsec_enable_chroot_fchdir __read_only;
-+int grsec_enable_chroot_double __read_only;
-+int grsec_enable_chroot_pivot __read_only;
-+int grsec_enable_chroot_chdir __read_only;
-+int grsec_enable_chroot_chmod __read_only;
-+int grsec_enable_chroot_mknod __read_only;
-+int grsec_enable_chroot_nice __read_only;
-+int grsec_enable_chroot_execlog __read_only;
-+int grsec_enable_chroot_caps __read_only;
-+int grsec_enable_chroot_rename __read_only;
-+int grsec_enable_chroot_sysctl __read_only;
-+int grsec_enable_chroot_unix __read_only;
-+int grsec_enable_tpe __read_only;
-+int grsec_tpe_gid __read_only;
-+int grsec_enable_blackhole __read_only;
-+#ifdef CONFIG_IPV6_MODULE
-+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
-+#endif
-+int grsec_lastack_retries __read_only;
-+int grsec_enable_tpe_all __read_only;
-+int grsec_enable_tpe_invert __read_only;
-+int grsec_enable_socket_all __read_only;
-+int grsec_socket_all_gid __read_only;
-+int grsec_enable_socket_client __read_only;
-+int grsec_socket_client_gid __read_only;
-+int grsec_enable_socket_server __read_only;
-+int grsec_socket_server_gid __read_only;
-+int grsec_resource_logging __read_only;
-+int grsec_disable_privio __read_only;
-+int grsec_enable_log_rwxmaps __read_only;
-+int grsec_lock __read_only;
-+
-+DEFINE_SPINLOCK(grsec_alert_lock);
-+unsigned long grsec_alert_wtime = 0;
-+unsigned long grsec_alert_fyet = 0;
-+
-+DEFINE_SPINLOCK(grsec_audit_lock);
-+
-+DEFINE_RWLOCK(grsec_exec_file_lock);
-+
-+char *gr_shared_page[4];
-+
-+char *gr_alert_log_fmt;
-+char *gr_audit_log_fmt;
-+char *gr_alert_log_buf;
-+char *gr_audit_log_buf;
-+
-+extern struct gr_arg *gr_usermode;
-+extern unsigned char *gr_system_salt;
-+extern unsigned char *gr_system_sum;
-+
-+void __init
-+grsecurity_init(void)
-+{
-+ int j;
-+ /* create the per-cpu shared pages */
-+
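-+	/* presumably wipes the BIOS keyboard buffer in the BIOS data area so
-+	 * boot-time keystrokes (e.g. disk passphrases) are not left readable
-+	 * in low memory
-+	 */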
-+#ifdef CONFIG_X86
-+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
-+#endif
-+
-+ for (j = 0; j < 4; j++) {
-+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
-+ if (gr_shared_page[j] == NULL) {
-+ panic("Unable to allocate grsecurity shared page");
-+ return;
-+ }
-+ }
-+
-+ /* allocate log buffers */
-+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
-+ if (!gr_alert_log_fmt) {
-+ panic("Unable to allocate grsecurity alert log format buffer");
-+ return;
-+ }
-+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
-+ if (!gr_audit_log_fmt) {
-+ panic("Unable to allocate grsecurity audit log format buffer");
-+ return;
-+ }
-+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
-+ if (!gr_alert_log_buf) {
-+ panic("Unable to allocate grsecurity alert log buffer");
-+ return;
-+ }
-+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
-+ if (!gr_audit_log_buf) {
-+ panic("Unable to allocate grsecurity audit log buffer");
-+ return;
-+ }
-+
-+ /* allocate memory for authentication structure */
-+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
-+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
-+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
-+
-+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
-+ panic("Unable to allocate grsecurity authentication structure");
-+ return;
-+ }
-+
-+#ifdef CONFIG_GRKERNSEC_IO
-+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
-+ grsec_disable_privio = 1;
-+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
-+ grsec_disable_privio = 1;
-+#else
-+ grsec_disable_privio = 0;
-+#endif
-+#endif
-+
-+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
-+ /* for backward compatibility, tpe_invert always defaults to on if
-+ enabled in the kernel
-+ */
-+ grsec_enable_tpe_invert = 1;
-+#endif
-+
-+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
-+#ifndef CONFIG_GRKERNSEC_SYSCTL
-+ grsec_lock = 1;
-+#endif
-+
-+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
-+ grsec_enable_log_rwxmaps = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
-+ grsec_enable_group = 1;
-+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
-+ grsec_enable_ptrace_readexec = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
-+ grsec_enable_chdir = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
-+ grsec_enable_harden_ptrace = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
-+ grsec_enable_harden_ipc = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
-+ grsec_enable_mount = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_LINK
-+ grsec_enable_link = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_BRUTE
-+ grsec_enable_brute = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_DMESG
-+ grsec_enable_dmesg = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+ grsec_enable_blackhole = 1;
-+ grsec_lastack_retries = 4;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_FIFO
-+ grsec_enable_fifo = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_EXECLOG
-+ grsec_enable_execlog = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ grsec_enable_setxid = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SIGNAL
-+ grsec_enable_signal = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_FORKFAIL
-+ grsec_enable_forkfail = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_TIME
-+ grsec_enable_time = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_RESLOG
-+ grsec_resource_logging = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
-+ grsec_enable_chroot_findtask = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
-+ grsec_enable_chroot_unix = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
-+ grsec_enable_chroot_mount = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
-+ grsec_enable_chroot_fchdir = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
-+ grsec_enable_chroot_shmat = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
-+ grsec_enable_audit_ptrace = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
-+ grsec_enable_chroot_double = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
-+ grsec_enable_chroot_pivot = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
-+ grsec_enable_chroot_chdir = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
-+ grsec_enable_chroot_chmod = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
-+ grsec_enable_chroot_mknod = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
-+ grsec_enable_chroot_nice = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
-+ grsec_enable_chroot_execlog = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
-+ grsec_enable_chroot_caps = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
-+ grsec_enable_chroot_rename = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
-+ grsec_enable_chroot_sysctl = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
-+ grsec_enable_symlinkown = 1;
-+ grsec_symlinkown_gid = CONFIG_GRKERNSEC_SYMLINKOWN_GID;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_TPE
-+ grsec_enable_tpe = 1;
-+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
-+#ifdef CONFIG_GRKERNSEC_TPE_ALL
-+ grsec_enable_tpe_all = 1;
-+#endif
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
-+ grsec_enable_socket_all = 1;
-+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
-+ grsec_enable_socket_client = 1;
-+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
-+ grsec_enable_socket_server = 1;
-+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
-+#endif
-+#endif
-+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
-+ grsec_deny_new_usb = 1;
-+#endif
-+
-+ return;
-+}
-diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
-new file mode 100644
-index 0000000..28dbb82
---- /dev/null
-+++ b/grsecurity/grsec_ipc.c
-@@ -0,0 +1,48 @@
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/sched.h>
-+#include <linux/file.h>
-+#include <linux/ipc.h>
-+#include <linux/ipc_namespace.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+int
-+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
-+{
-+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
-+ int write;
-+ int orig_granted_mode;
-+ uid_t euid;
-+ gid_t egid;
-+
-+ if (!grsec_enable_harden_ipc)
-+ return 1;
-+
-+ euid = current_euid();
-+ egid = current_egid();
-+
-+ write = requested_mode & 00002;
-+ orig_granted_mode = ipcp->mode;
-+
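-+	/* recompute the mode the caller would be granted from the owner and
-+	 * group bits alone; world-accessible objects owned by someone else are
-+	 * locked down, so requests satisfied only by the world bits are denied
-+	 * below unless the caller holds CAP_IPC_OWNER
-+	 */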
-+ if ((euid == ipcp->cuid) || (euid == ipcp->uid))
-+ orig_granted_mode >>= 6;
-+ else {
-+ /* if likely wrong permissions, lock to user */
-+ if (orig_granted_mode & 0007)
-+ orig_granted_mode = 0;
-+		/* otherwise do an egid-only check */
-+ else if ((egid == ipcp->cgid) || (egid == ipcp->gid))
-+ orig_granted_mode >>= 3;
-+ /* otherwise, no access */
-+ else
-+ orig_granted_mode = 0;
-+ }
-+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
-+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
-+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", ipcp->cuid);
-+ return 0;
-+ }
-+#endif
-+ return 1;
-+}
-diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
-new file mode 100644
-index 0000000..8598e7f
---- /dev/null
-+++ b/grsecurity/grsec_link.c
-@@ -0,0 +1,58 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/fs.h>
-+#include <linux/file.h>
-+#include <linux/grinternal.h>
-+
-+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
-+{
-+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
-+ const struct inode *link_inode = link->dentry->d_inode;
-+
-+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
-+ /* ignore root-owned links, e.g. /proc/self */
-+ link_inode->i_uid && target &&
-+ link_inode->i_uid != target->i_uid) {
-+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
-+ return 1;
-+ }
-+#endif
-+ return 0;
-+}
-+
-+int
-+gr_handle_follow_link(const struct inode *parent,
-+ const struct inode *inode,
-+ const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+#ifdef CONFIG_GRKERNSEC_LINK
-+ const struct cred *cred = current_cred();
-+
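-+	/* refuse to follow a symlink in a world-writable sticky directory when
-+	 * the link is owned by neither the directory owner nor the follower
-+	 * (the classic /tmp symlink attack)
-+	 */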
-+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
-+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
-+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
-+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
-+ return -EACCES;
-+ }
-+#endif
-+ return 0;
-+}
-+
-+int
-+gr_handle_hardlink(const struct dentry *dentry,
-+ const struct vfsmount *mnt,
-+ struct inode *inode, const int mode, const char *to)
-+{
-+#ifdef CONFIG_GRKERNSEC_LINK
-+ const struct cred *cred = current_cred();
-+
-+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
-+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
-+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
-+ !capable(CAP_FOWNER) && cred->uid) {
-+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
-+ return -EPERM;
-+ }
-+#endif
-+ return 0;
-+}
-diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
-new file mode 100644
-index 0000000..56b5e9d
---- /dev/null
-+++ b/grsecurity/grsec_log.c
-@@ -0,0 +1,337 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/file.h>
-+#include <linux/tty.h>
-+#include <linux/fs.h>
-+#include <linux/mm.h>
-+#include <linux/grinternal.h>
-+
-+#ifdef CONFIG_TREE_PREEMPT_RCU
-+#define DISABLE_PREEMPT() preempt_disable()
-+#define ENABLE_PREEMPT() preempt_enable()
-+#else
-+#define DISABLE_PREEMPT()
-+#define ENABLE_PREEMPT()
-+#endif
-+
-+#define BEGIN_LOCKS(x) \
-+ DISABLE_PREEMPT(); \
-+ rcu_read_lock(); \
-+ read_lock(&tasklist_lock); \
-+ read_lock(&grsec_exec_file_lock); \
-+ if (x != GR_DO_AUDIT) \
-+ spin_lock(&grsec_alert_lock); \
-+ else \
-+ spin_lock(&grsec_audit_lock)
-+
-+#define END_LOCKS(x) \
-+ if (x != GR_DO_AUDIT) \
-+ spin_unlock(&grsec_alert_lock); \
-+ else \
-+ spin_unlock(&grsec_audit_lock); \
-+ read_unlock(&grsec_exec_file_lock); \
-+ read_unlock(&tasklist_lock); \
-+ rcu_read_unlock(); \
-+ ENABLE_PREEMPT(); \
-+ if (x == GR_DONT_AUDIT) \
-+ gr_handle_alertkill(current)
-+
-+enum {
-+ FLOODING,
-+ NO_FLOODING
-+};
-+
-+extern char *gr_alert_log_fmt;
-+extern char *gr_audit_log_fmt;
-+extern char *gr_alert_log_buf;
-+extern char *gr_audit_log_buf;
-+
-+static int gr_log_start(int audit)
-+{
-+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
-+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
-+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
-+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
-+ unsigned long curr_secs = get_seconds();
-+
-+ if (audit == GR_DO_AUDIT)
-+ goto set_fmt;
-+
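-+	/* alert flood control: allow up to CONFIG_GRKERNSEC_FLOODBURST alerts
-+	 * per CONFIG_GRKERNSEC_FLOODTIME-second window, then suppress logging
-+	 * until the window expires
-+	 */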
-+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
-+ grsec_alert_wtime = curr_secs;
-+ grsec_alert_fyet = 0;
-+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
-+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
-+ grsec_alert_fyet++;
-+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
-+ grsec_alert_wtime = curr_secs;
-+ grsec_alert_fyet++;
-+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
-+ return FLOODING;
-+ }
-+ else return FLOODING;
-+
-+set_fmt:
-+#endif
-+ memset(buf, 0, PAGE_SIZE);
-+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
-+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
-+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
-+ } else if (current->signal->curr_ip) {
-+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
-+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
-+ } else if (gr_acl_is_enabled()) {
-+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
-+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
-+ } else {
-+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
-+ strcpy(buf, fmt);
-+ }
-+
-+ return NO_FLOODING;
-+}
-+
-+static void gr_log_middle(int audit, const char *msg, va_list ap)
-+ __attribute__ ((format (printf, 2, 0)));
-+
-+static void gr_log_middle(int audit, const char *msg, va_list ap)
-+{
-+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
-+ unsigned int len = strlen(buf);
-+
-+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
-+
-+ return;
-+}
-+
-+static void gr_log_middle_varargs(int audit, const char *msg, ...)
-+ __attribute__ ((format (printf, 2, 3)));
-+
-+static void gr_log_middle_varargs(int audit, const char *msg, ...)
-+{
-+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
-+ unsigned int len = strlen(buf);
-+ va_list ap;
-+
-+ va_start(ap, msg);
-+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
-+ va_end(ap);
-+
-+ return;
-+}
-+
-+static void gr_log_end(int audit, int append_default)
-+{
-+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
-+
-+ if (append_default) {
-+ unsigned int len = strlen(buf);
-+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
-+ }
-+
-+ printk("%s\n", buf);
-+
-+ return;
-+}
-+
-+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
-+{
-+ int logtype;
-+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
-+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
-+ void *voidptr = NULL;
-+ int num1 = 0, num2 = 0;
-+ unsigned long ulong1 = 0, ulong2 = 0;
-+ struct dentry *dentry = NULL;
-+ struct vfsmount *mnt = NULL;
-+ struct file *file = NULL;
-+ struct task_struct *task = NULL;
-+ struct vm_area_struct *vma = NULL;
-+ const struct cred *cred, *pcred;
-+ va_list ap;
-+
-+ BEGIN_LOCKS(audit);
-+ logtype = gr_log_start(audit);
-+ if (logtype == FLOODING) {
-+ END_LOCKS(audit);
-+ return;
-+ }
-+ va_start(ap, argtypes);
-+ switch (argtypes) {
-+ case GR_TTYSNIFF:
-+ task = va_arg(ap, struct task_struct *);
-+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
-+ break;
-+ case GR_SYSCTL_HIDDEN:
-+ str1 = va_arg(ap, char *);
-+ gr_log_middle_varargs(audit, msg, result, str1);
-+ break;
-+ case GR_RBAC:
-+ dentry = va_arg(ap, struct dentry *);
-+ mnt = va_arg(ap, struct vfsmount *);
-+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
-+ break;
-+ case GR_RBAC_STR:
-+ dentry = va_arg(ap, struct dentry *);
-+ mnt = va_arg(ap, struct vfsmount *);
-+ str1 = va_arg(ap, char *);
-+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
-+ break;
-+ case GR_STR_RBAC:
-+ str1 = va_arg(ap, char *);
-+ dentry = va_arg(ap, struct dentry *);
-+ mnt = va_arg(ap, struct vfsmount *);
-+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
-+ break;
-+ case GR_RBAC_MODE2:
-+ dentry = va_arg(ap, struct dentry *);
-+ mnt = va_arg(ap, struct vfsmount *);
-+ str1 = va_arg(ap, char *);
-+ str2 = va_arg(ap, char *);
-+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
-+ break;
-+ case GR_RBAC_MODE3:
-+ dentry = va_arg(ap, struct dentry *);
-+ mnt = va_arg(ap, struct vfsmount *);
-+ str1 = va_arg(ap, char *);
-+ str2 = va_arg(ap, char *);
-+ str3 = va_arg(ap, char *);
-+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
-+ break;
-+ case GR_FILENAME:
-+ dentry = va_arg(ap, struct dentry *);
-+ mnt = va_arg(ap, struct vfsmount *);
-+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
-+ break;
-+ case GR_STR_FILENAME:
-+ str1 = va_arg(ap, char *);
-+ dentry = va_arg(ap, struct dentry *);
-+ mnt = va_arg(ap, struct vfsmount *);
-+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
-+ break;
-+ case GR_FILENAME_STR:
-+ dentry = va_arg(ap, struct dentry *);
-+ mnt = va_arg(ap, struct vfsmount *);
-+ str1 = va_arg(ap, char *);
-+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
-+ break;
-+ case GR_FILENAME_TWO_INT:
-+ dentry = va_arg(ap, struct dentry *);
-+ mnt = va_arg(ap, struct vfsmount *);
-+ num1 = va_arg(ap, int);
-+ num2 = va_arg(ap, int);
-+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
-+ break;
-+ case GR_FILENAME_TWO_INT_STR:
-+ dentry = va_arg(ap, struct dentry *);
-+ mnt = va_arg(ap, struct vfsmount *);
-+ num1 = va_arg(ap, int);
-+ num2 = va_arg(ap, int);
-+ str1 = va_arg(ap, char *);
-+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
-+ break;
-+ case GR_TEXTREL:
-+ file = va_arg(ap, struct file *);
-+ ulong1 = va_arg(ap, unsigned long);
-+ ulong2 = va_arg(ap, unsigned long);
-+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
-+ break;
-+ case GR_PTRACE:
-+ task = va_arg(ap, struct task_struct *);
-+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
-+ break;
-+ case GR_RESOURCE:
-+ task = va_arg(ap, struct task_struct *);
-+ cred = __task_cred(task);
-+ pcred = __task_cred(task->real_parent);
-+ ulong1 = va_arg(ap, unsigned long);
-+ str1 = va_arg(ap, char *);
-+ ulong2 = va_arg(ap, unsigned long);
-+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
-+ break;
-+ case GR_CAP:
-+ task = va_arg(ap, struct task_struct *);
-+ cred = __task_cred(task);
-+ pcred = __task_cred(task->real_parent);
-+ str1 = va_arg(ap, char *);
-+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
-+ break;
-+ case GR_SIG:
-+ str1 = va_arg(ap, char *);
-+ voidptr = va_arg(ap, void *);
-+ gr_log_middle_varargs(audit, msg, str1, voidptr);
-+ break;
-+ case GR_SIG2:
-+ task = va_arg(ap, struct task_struct *);
-+ cred = __task_cred(task);
-+ pcred = __task_cred(task->real_parent);
-+ num1 = va_arg(ap, int);
-+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
-+ break;
-+ case GR_CRASH1:
-+ task = va_arg(ap, struct task_struct *);
-+ cred = __task_cred(task);
-+ pcred = __task_cred(task->real_parent);
-+ ulong1 = va_arg(ap, unsigned long);
-+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
-+ break;
-+ case GR_CRASH2:
-+ task = va_arg(ap, struct task_struct *);
-+ cred = __task_cred(task);
-+ pcred = __task_cred(task->real_parent);
-+ ulong1 = va_arg(ap, unsigned long);
-+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
-+ break;
-+ case GR_RWXMAP:
-+ file = va_arg(ap, struct file *);
-+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
-+ break;
-+ case GR_RWXMAPVMA:
-+ vma = va_arg(ap, struct vm_area_struct *);
-+ if (vma->vm_file)
-+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
-+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
-+ str1 = "<stack>";
-+ else if (vma->vm_start <= current->mm->brk &&
-+ vma->vm_end >= current->mm->start_brk)
-+ str1 = "<heap>";
-+ else
-+ str1 = "<anonymous mapping>";
-+ gr_log_middle_varargs(audit, msg, str1);
-+ break;
-+ case GR_PSACCT:
-+ {
-+ unsigned int wday, cday;
-+ __u8 whr, chr;
-+ __u8 wmin, cmin;
-+ __u8 wsec, csec;
-+ char cur_tty[64] = { 0 };
-+ char parent_tty[64] = { 0 };
-+
-+ task = va_arg(ap, struct task_struct *);
-+ wday = va_arg(ap, unsigned int);
-+ cday = va_arg(ap, unsigned int);
-+ whr = va_arg(ap, int);
-+ chr = va_arg(ap, int);
-+ wmin = va_arg(ap, int);
-+ cmin = va_arg(ap, int);
-+ wsec = va_arg(ap, int);
-+ csec = va_arg(ap, int);
-+ ulong1 = va_arg(ap, unsigned long);
-+ cred = __task_cred(task);
-+ pcred = __task_cred(task->real_parent);
-+
-+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
-+ }
-+ break;
-+ default:
-+ gr_log_middle(audit, msg, ap);
-+ }
-+ va_end(ap);
-+ // these don't need DEFAULTSECARGS printed on the end
-+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
-+ gr_log_end(audit, 0);
-+ else
-+ gr_log_end(audit, 1);
-+ END_LOCKS(audit);
-+}
-diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
-new file mode 100644
-index 0000000..0e39d8c
---- /dev/null
-+++ b/grsecurity/grsec_mem.c
-@@ -0,0 +1,48 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/mman.h>
-+#include <linux/module.h>
-+#include <linux/grinternal.h>
-+
-+void gr_handle_msr_write(void)
-+{
-+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
-+ return;
-+}
-+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
-+
-+void
-+gr_handle_ioperm(void)
-+{
-+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
-+ return;
-+}
-+
-+void
-+gr_handle_iopl(void)
-+{
-+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
-+ return;
-+}
-+
-+void
-+gr_handle_mem_readwrite(u64 from, u64 to)
-+{
-+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
-+ return;
-+}
-+
-+void
-+gr_handle_vm86(void)
-+{
-+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
-+ return;
-+}
-+
-+void
-+gr_log_badprocpid(const char *entry)
-+{
-+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
-+ return;
-+}
-diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
-new file mode 100644
-index 0000000..cd9e124
---- /dev/null
-+++ b/grsecurity/grsec_mount.c
-@@ -0,0 +1,65 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/mount.h>
-+#include <linux/major.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+void
-+gr_log_remount(const char *devname, const int retval)
-+{
-+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
-+ if (grsec_enable_mount && (retval >= 0))
-+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
-+#endif
-+ return;
-+}
-+
-+void
-+gr_log_unmount(const char *devname, const int retval)
-+{
-+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
-+ if (grsec_enable_mount && (retval >= 0))
-+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
-+#endif
-+ return;
-+}
-+
-+void
-+gr_log_mount(const char *from, const char *to, const int retval)
-+{
-+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
-+ if (grsec_enable_mount && (retval >= 0))
-+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
-+#endif
-+ return;
-+}
-+
-+int
-+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
-+{
-+#ifdef CONFIG_GRKERNSEC_ROFS
-+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
-+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
-+ return -EPERM;
-+ } else
-+ return 0;
-+#endif
-+ return 0;
-+}
-+
-+int
-+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
-+{
-+#ifdef CONFIG_GRKERNSEC_ROFS
-+ struct inode *inode = dentry->d_inode;
-+
-+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
-+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
-+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
-+ return -EPERM;
-+ } else
-+ return 0;
-+#endif
-+ return 0;
-+}
-diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
-new file mode 100644
-index 0000000..6ee9d50
---- /dev/null
-+++ b/grsecurity/grsec_pax.c
-@@ -0,0 +1,45 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/file.h>
-+#include <linux/grinternal.h>
-+#include <linux/grsecurity.h>
-+
-+void
-+gr_log_textrel(struct vm_area_struct * vma)
-+{
-+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
-+ if (grsec_enable_log_rwxmaps)
-+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
-+#endif
-+ return;
-+}
-+
-+void gr_log_ptgnustack(struct file *file)
-+{
-+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
-+ if (grsec_enable_log_rwxmaps)
-+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
-+#endif
-+ return;
-+}
-+
-+void
-+gr_log_rwxmmap(struct file *file)
-+{
-+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
-+ if (grsec_enable_log_rwxmaps)
-+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
-+#endif
-+ return;
-+}
-+
-+void
-+gr_log_rwxmprotect(struct vm_area_struct *vma)
-+{
-+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
-+ if (grsec_enable_log_rwxmaps)
-+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
-+#endif
-+ return;
-+}
-diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
-new file mode 100644
-index 0000000..381864d
---- /dev/null
-+++ b/grsecurity/grsec_proc.c
-@@ -0,0 +1,20 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+int gr_proc_is_restricted(void)
-+{
-+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ const struct cred *cred = current_cred();
-+#endif
-+
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+ if (cred->fsuid)
-+ return -EACCES;
-+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ if (cred->fsuid && !in_group_p(grsec_proc_gid))
-+ return -EACCES;
-+#endif
-+ return 0;
-+}
-diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
-new file mode 100644
-index 0000000..f7f29aa
---- /dev/null
-+++ b/grsecurity/grsec_ptrace.c
-@@ -0,0 +1,30 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/grinternal.h>
-+#include <linux/security.h>
-+
-+void
-+gr_audit_ptrace(struct task_struct *task)
-+{
-+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
-+ if (grsec_enable_audit_ptrace)
-+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
-+#endif
-+ return;
-+}
-+
-+int
-+gr_ptrace_readexec(struct file *file, int unsafe_flags)
-+{
-+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
-+ const struct dentry *dentry = file->f_path.dentry;
-+ const struct vfsmount *mnt = file->f_path.mnt;
-+
-+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
-+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
-+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
-+ return -EACCES;
-+ }
-+#endif
-+ return 0;
-+}
-diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
-new file mode 100644
-index 0000000..c6a07aa
---- /dev/null
-+++ b/grsecurity/grsec_sig.c
-@@ -0,0 +1,245 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/delay.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+#include <linux/hardirq.h>
-+
-+char *signames[] = {
-+ [SIGSEGV] = "Segmentation fault",
-+ [SIGILL] = "Illegal instruction",
-+ [SIGABRT] = "Abort",
-+ [SIGBUS] = "Invalid alignment/Bus error"
-+};
-+
-+void
-+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
-+{
-+#ifdef CONFIG_GRKERNSEC_SIGNAL
-+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
-+ (sig == SIGABRT) || (sig == SIGBUS))) {
-+ if (t->pid == current->pid) {
-+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
-+ } else {
-+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
-+ }
-+ }
-+#endif
-+ return;
-+}
-+
-+int
-+gr_handle_signal(const struct task_struct *p, const int sig)
-+{
-+#ifdef CONFIG_GRKERNSEC
-+ /* ignore the 0 signal for protected task checks */
-+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
-+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
-+ return -EPERM;
-+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
-+ return -EPERM;
-+ }
-+#endif
-+ return 0;
-+}
-+
-+#ifdef CONFIG_GRKERNSEC
-+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
-+
-+int gr_fake_force_sig(int sig, struct task_struct *t)
-+{
-+ unsigned long int flags;
-+ int ret, blocked, ignored;
-+ struct k_sigaction *action;
-+
-+ spin_lock_irqsave(&t->sighand->siglock, flags);
-+ action = &t->sighand->action[sig-1];
-+ ignored = action->sa.sa_handler == SIG_IGN;
-+ blocked = sigismember(&t->blocked, sig);
-+ if (blocked || ignored) {
-+ action->sa.sa_handler = SIG_DFL;
-+ if (blocked) {
-+ sigdelset(&t->blocked, sig);
-+ recalc_sigpending_and_wake(t);
-+ }
-+ }
-+ if (action->sa.sa_handler == SIG_DFL)
-+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
-+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
-+
-+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
-+
-+ return ret;
-+}
-+#endif
-+
-+#ifdef CONFIG_GRKERNSEC_BRUTE
-+#define GR_USER_BAN_TIME (15 * 60)
-+#define GR_DAEMON_BRUTE_TIME (30 * 60)
-+
-+static int __get_dumpable(unsigned long mm_flags)
-+{
-+ int ret;
-+
-+ ret = mm_flags & MMF_DUMPABLE_MASK;
-+ return (ret > SUID_DUMPABLE_ENABLED) ? SUID_DUMPABLE_SAFE : ret;
-+}
-+#endif
-+
-+void gr_handle_brute_attach(unsigned long mm_flags)
-+{
-+#ifdef CONFIG_GRKERNSEC_BRUTE
-+ struct task_struct *p = current;
-+ uid_t uid = 0;
-+ int daemon = 0;
-+
-+ if (!grsec_enable_brute)
-+ return;
-+
-+ rcu_read_lock();
-+ read_lock(&tasklist_lock);
-+ read_lock(&grsec_exec_file_lock);
-+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
-+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
-+ p->real_parent->brute = 1;
-+ daemon = 1;
-+ } else {
-+ const struct cred *cred = __task_cred(p), *cred2;
-+ struct task_struct *tsk, *tsk2;
-+ int dumpable = __get_dumpable(mm_flags);
-+
-+ if (dumpable != SUID_DUMPABLE_ENABLED && cred->uid) {
-+ struct user_struct *user;
-+
-+ uid = cred->uid;
-+
-+ /* this is put upon execution past expiration */
-+ user = find_user(uid);
-+ if (user == NULL)
-+ goto unlock;
-+ user->suid_banned = 1;
-+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
-+ if (user->suid_ban_expires == ~0UL)
-+ user->suid_ban_expires--;
-+
-+ /* only kill other threads of the same binary, from the same user */
-+ do_each_thread(tsk2, tsk) {
-+ cred2 = __task_cred(tsk);
-+ if (tsk != p && cred2->uid == uid && gr_is_same_file(tsk->exec_file, p->exec_file))
-+ gr_fake_force_sig(SIGKILL, tsk);
-+ } while_each_thread(tsk2, tsk);
-+ }
-+ }
-+unlock:
-+ read_unlock(&grsec_exec_file_lock);
-+ read_unlock(&tasklist_lock);
-+ rcu_read_unlock();
-+
-+ if (uid)
-+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, uid, GR_USER_BAN_TIME / 60);
-+ else if (daemon)
-+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
-+
-+#endif
-+ return;
-+}
-+
-+void gr_handle_brute_check(void)
-+{
-+#ifdef CONFIG_GRKERNSEC_BRUTE
-+ struct task_struct *p = current;
-+
-+ if (unlikely(p->brute)) {
-+ if (!grsec_enable_brute)
-+ p->brute = 0;
-+ else if (time_before(get_seconds(), p->brute_expires))
-+ msleep(30 * 1000);
-+ }
-+#endif
-+ return;
-+}
-+
-+void gr_handle_kernel_exploit(void)
-+{
-+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
-+ const struct cred *cred;
-+ struct task_struct *tsk, *tsk2;
-+ struct user_struct *user;
-+ uid_t uid;
-+
-+ if (in_irq() || in_serving_softirq() || in_nmi())
-+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
-+
-+ uid = current_uid();
-+
-+ if (uid == 0)
-+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
-+ else {
-+ /* kill all the processes of this user, hold a reference
-+ to their creds struct, and prevent them from creating
-+ another process until system reset
-+ */
-+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
-+ /* we intentionally leak this ref */
-+ user = get_uid(current->cred->user);
-+ if (user)
-+ user->kernel_banned = 1;
-+
-+ /* kill all processes of this user */
-+ read_lock(&tasklist_lock);
-+ do_each_thread(tsk2, tsk) {
-+ cred = __task_cred(tsk);
-+ if (cred->uid == uid)
-+ gr_fake_force_sig(SIGKILL, tsk);
-+ } while_each_thread(tsk2, tsk);
-+ read_unlock(&tasklist_lock);
-+ }
-+#endif
-+}
-+
-+#ifdef CONFIG_GRKERNSEC_BRUTE
-+static bool suid_ban_expired(struct user_struct *user)
-+{
-+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
-+ user->suid_banned = 0;
-+ user->suid_ban_expires = 0;
-+ free_uid(user);
-+ return true;
-+ }
-+
-+ return false;
-+}
-+#endif
-+
-+int gr_process_kernel_exec_ban(void)
-+{
-+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
-+ if (unlikely(current->cred->user->kernel_banned))
-+ return -EPERM;
-+#endif
-+ return 0;
-+}
-+
-+int gr_process_kernel_setuid_ban(struct user_struct *user)
-+{
-+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
-+ if (unlikely(user->kernel_banned))
-+ gr_fake_force_sig(SIGKILL, current);
-+#endif
-+ return 0;
-+}
-+
-+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
-+{
-+#ifdef CONFIG_GRKERNSEC_BRUTE
-+ struct user_struct *user = current->cred->user;
-+ if (unlikely(user->suid_banned)) {
-+ if (suid_ban_expired(user))
-+ return 0;
-+ /* disallow execution of suid binaries only */
-+ else if (bprm->cred->euid != current->cred->uid)
-+ return -EPERM;
-+ }
-+#endif
-+ return 0;
-+}
-diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
-new file mode 100644
-index 0000000..a523bd2
---- /dev/null
-+++ b/grsecurity/grsec_sock.c
-@@ -0,0 +1,244 @@
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/file.h>
-+#include <linux/net.h>
-+#include <linux/in.h>
-+#include <linux/ip.h>
-+#include <net/sock.h>
-+#include <net/inet_sock.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+#include <linux/gracl.h>
-+
-+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
-+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
-+
-+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
-+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
-+
-+#ifdef CONFIG_UNIX_MODULE
-+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
-+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
-+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
-+EXPORT_SYMBOL_GPL(gr_handle_create);
-+#endif
-+
-+#ifdef CONFIG_GRKERNSEC
-+#define gr_conn_table_size 32749
-+struct conn_table_entry {
-+ struct conn_table_entry *next;
-+ struct signal_struct *sig;
-+};
-+
-+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
-+DEFINE_SPINLOCK(gr_conn_table_lock);
-+
-+extern const char * gr_socktype_to_name(unsigned char type);
-+extern const char * gr_proto_to_name(unsigned char proto);
-+extern const char * gr_sockfamily_to_name(unsigned char family);
-+
-+static int
-+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
-+{
-+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
-+}
-+
-+static int
-+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
-+ __u16 sport, __u16 dport)
-+{
-+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
-+ sig->gr_sport == sport && sig->gr_dport == dport))
-+ return 1;
-+ else
-+ return 0;
-+}
-+
-+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
-+{
-+ struct conn_table_entry **match;
-+ unsigned int index;
-+
-+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
-+ sig->gr_sport, sig->gr_dport,
-+ gr_conn_table_size);
-+
-+ newent->sig = sig;
-+
-+ match = &gr_conn_table[index];
-+ newent->next = *match;
-+ *match = newent;
-+
-+ return;
-+}
-+
-+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
-+{
-+ struct conn_table_entry *match, *last = NULL;
-+ unsigned int index;
-+
-+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
-+ sig->gr_sport, sig->gr_dport,
-+ gr_conn_table_size);
-+
-+ match = gr_conn_table[index];
-+ while (match && !conn_match(match->sig,
-+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
-+ sig->gr_dport)) {
-+ last = match;
-+ match = match->next;
-+ }
-+
-+ if (match) {
-+ if (last)
-+ last->next = match->next;
-+ else
-+ gr_conn_table[index] = NULL;
-+ kfree(match);
-+ }
-+
-+ return;
-+}
-+
-+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
-+ __u16 sport, __u16 dport)
-+{
-+ struct conn_table_entry *match;
-+ unsigned int index;
-+
-+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
-+
-+ match = gr_conn_table[index];
-+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
-+ match = match->next;
-+
-+ if (match)
-+ return match->sig;
-+ else
-+ return NULL;
-+}
-+
-+#endif
-+
-+void gr_update_task_in_ip_table(const struct inet_sock *inet)
-+{
-+#ifdef CONFIG_GRKERNSEC
-+ struct signal_struct *sig = current->signal;
-+ struct conn_table_entry *newent;
-+
-+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
-+ if (newent == NULL)
-+ return;
-+ /* no bh lock needed since we are called with bh disabled */
-+ spin_lock(&gr_conn_table_lock);
-+ gr_del_task_from_ip_table_nolock(sig);
-+ sig->gr_saddr = inet->inet_rcv_saddr;
-+ sig->gr_daddr = inet->inet_daddr;
-+ sig->gr_sport = inet->inet_sport;
-+ sig->gr_dport = inet->inet_dport;
-+ gr_add_to_task_ip_table_nolock(sig, newent);
-+ spin_unlock(&gr_conn_table_lock);
-+#endif
-+ return;
-+}
-+
-+void gr_del_task_from_ip_table(struct task_struct *task)
-+{
-+#ifdef CONFIG_GRKERNSEC
-+ spin_lock_bh(&gr_conn_table_lock);
-+ gr_del_task_from_ip_table_nolock(task->signal);
-+ spin_unlock_bh(&gr_conn_table_lock);
-+#endif
-+ return;
-+}
-+
-+void
-+gr_attach_curr_ip(const struct sock *sk)
-+{
-+#ifdef CONFIG_GRKERNSEC
-+ struct signal_struct *p, *set;
-+ const struct inet_sock *inet = inet_sk(sk);
-+
-+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
-+ return;
-+
-+ set = current->signal;
-+
-+ spin_lock_bh(&gr_conn_table_lock);
-+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
-+ inet->inet_dport, inet->inet_sport);
-+ if (unlikely(p != NULL)) {
-+ set->curr_ip = p->curr_ip;
-+ set->used_accept = 1;
-+ gr_del_task_from_ip_table_nolock(p);
-+ spin_unlock_bh(&gr_conn_table_lock);
-+ return;
-+ }
-+ spin_unlock_bh(&gr_conn_table_lock);
-+
-+ set->curr_ip = inet->inet_daddr;
-+ set->used_accept = 1;
-+#endif
-+ return;
-+}
-+
-+int
-+gr_handle_sock_all(const int family, const int type, const int protocol)
-+{
-+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
-+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
-+ (family != AF_UNIX)) {
-+ if (family == AF_INET)
-+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
-+ else
-+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
-+ return -EACCES;
-+ }
-+#endif
-+ return 0;
-+}
-+
-+int
-+gr_handle_sock_server(const struct sockaddr *sck)
-+{
-+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
-+ if (grsec_enable_socket_server &&
-+ in_group_p(grsec_socket_server_gid) &&
-+ sck && (sck->sa_family != AF_UNIX) &&
-+ (sck->sa_family != AF_LOCAL)) {
-+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
-+ return -EACCES;
-+ }
-+#endif
-+ return 0;
-+}
-+
-+int
-+gr_handle_sock_server_other(const struct sock *sck)
-+{
-+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
-+ if (grsec_enable_socket_server &&
-+ in_group_p(grsec_socket_server_gid) &&
-+ sck && (sck->sk_family != AF_UNIX) &&
-+ (sck->sk_family != AF_LOCAL)) {
-+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
-+ return -EACCES;
-+ }
-+#endif
-+ return 0;
-+}
-+
-+int
-+gr_handle_sock_client(const struct sockaddr *sck)
-+{
-+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
-+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
-+ sck && (sck->sa_family != AF_UNIX) &&
-+ (sck->sa_family != AF_LOCAL)) {
-+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
-+ return -EACCES;
-+ }
-+#endif
-+ return 0;
-+}
-diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
-new file mode 100644
-index 0000000..a3b8942
---- /dev/null
-+++ b/grsecurity/grsec_sysctl.c
-@@ -0,0 +1,486 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/sysctl.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+int
-+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
-+{
-+#ifdef CONFIG_GRKERNSEC_SYSCTL
-+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
-+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
-+ return -EACCES;
-+ }
-+#endif
-+ return 0;
-+}
-+
-+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
-+static int __maybe_unused __read_only one = 1;
-+#endif
-+
-+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
-+ defined(CONFIG_GRKERNSEC_DENYUSB)
-+struct ctl_table grsecurity_table[] = {
-+#ifdef CONFIG_GRKERNSEC_SYSCTL
-+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
-+#ifdef CONFIG_GRKERNSEC_IO
-+ {
-+ .procname = "disable_priv_io",
-+ .data = &grsec_disable_privio,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#endif
-+#ifdef CONFIG_GRKERNSEC_LINK
-+ {
-+ .procname = "linking_restrictions",
-+ .data = &grsec_enable_link,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
-+ {
-+ .procname = "enforce_symlinksifowner",
-+ .data = &grsec_enable_symlinkown,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+ {
-+ .procname = "symlinkown_gid",
-+ .data = &grsec_symlinkown_gid,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_BRUTE
-+ {
-+ .procname = "deter_bruteforce",
-+ .data = &grsec_enable_brute,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_FIFO
-+ {
-+ .procname = "fifo_restrictions",
-+ .data = &grsec_enable_fifo,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
-+ {
-+ .procname = "ptrace_readexec",
-+ .data = &grsec_enable_ptrace_readexec,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ {
-+ .procname = "consistent_setxid",
-+ .data = &grsec_enable_setxid,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+ {
-+ .procname = "ip_blackhole",
-+ .data = &grsec_enable_blackhole,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+ {
-+ .procname = "lastack_retries",
-+ .data = &grsec_lastack_retries,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_EXECLOG
-+ {
-+ .procname = "exec_logging",
-+ .data = &grsec_enable_execlog,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
-+ {
-+ .procname = "rwxmap_logging",
-+ .data = &grsec_enable_log_rwxmaps,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SIGNAL
-+ {
-+ .procname = "signal_logging",
-+ .data = &grsec_enable_signal,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_FORKFAIL
-+ {
-+ .procname = "forkfail_logging",
-+ .data = &grsec_enable_forkfail,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_TIME
-+ {
-+ .procname = "timechange_logging",
-+ .data = &grsec_enable_time,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
-+ {
-+ .procname = "chroot_deny_shmat",
-+ .data = &grsec_enable_chroot_shmat,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
-+ {
-+ .procname = "chroot_deny_unix",
-+ .data = &grsec_enable_chroot_unix,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
-+ {
-+ .procname = "chroot_deny_mount",
-+ .data = &grsec_enable_chroot_mount,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
-+ {
-+ .procname = "chroot_deny_fchdir",
-+ .data = &grsec_enable_chroot_fchdir,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
-+ {
-+ .procname = "chroot_deny_chroot",
-+ .data = &grsec_enable_chroot_double,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
-+ {
-+ .procname = "chroot_deny_pivot",
-+ .data = &grsec_enable_chroot_pivot,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
-+ {
-+ .procname = "chroot_enforce_chdir",
-+ .data = &grsec_enable_chroot_chdir,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
-+ {
-+ .procname = "chroot_deny_chmod",
-+ .data = &grsec_enable_chroot_chmod,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
-+ {
-+ .procname = "chroot_deny_mknod",
-+ .data = &grsec_enable_chroot_mknod,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
-+ {
-+ .procname = "chroot_restrict_nice",
-+ .data = &grsec_enable_chroot_nice,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
-+ {
-+ .procname = "chroot_execlog",
-+ .data = &grsec_enable_chroot_execlog,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
-+ {
-+ .procname = "chroot_caps",
-+ .data = &grsec_enable_chroot_caps,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
-+ {
-+ .procname = "chroot_deny_bad_rename",
-+ .data = &grsec_enable_chroot_rename,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
-+ {
-+ .procname = "chroot_deny_sysctl",
-+ .data = &grsec_enable_chroot_sysctl,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_TPE
-+ {
-+ .procname = "tpe",
-+ .data = &grsec_enable_tpe,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+ {
-+ .procname = "tpe_gid",
-+ .data = &grsec_tpe_gid,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
-+ {
-+ .procname = "tpe_invert",
-+ .data = &grsec_enable_tpe_invert,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_TPE_ALL
-+ {
-+ .procname = "tpe_restrict_all",
-+ .data = &grsec_enable_tpe_all,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
-+ {
-+ .procname = "socket_all",
-+ .data = &grsec_enable_socket_all,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+ {
-+ .procname = "socket_all_gid",
-+ .data = &grsec_socket_all_gid,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
-+ {
-+ .procname = "socket_client",
-+ .data = &grsec_enable_socket_client,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+ {
-+ .procname = "socket_client_gid",
-+ .data = &grsec_socket_client_gid,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
-+ {
-+ .procname = "socket_server",
-+ .data = &grsec_enable_socket_server,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+ {
-+ .procname = "socket_server_gid",
-+ .data = &grsec_socket_server_gid,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
-+ {
-+ .procname = "audit_group",
-+ .data = &grsec_enable_group,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+ {
-+ .procname = "audit_gid",
-+ .data = &grsec_audit_gid,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
-+ {
-+ .procname = "audit_chdir",
-+ .data = &grsec_enable_chdir,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
-+ {
-+ .procname = "audit_mount",
-+ .data = &grsec_enable_mount,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_DMESG
-+ {
-+ .procname = "dmesg",
-+ .data = &grsec_enable_dmesg,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
-+ {
-+ .procname = "chroot_findtask",
-+ .data = &grsec_enable_chroot_findtask,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_RESLOG
-+ {
-+ .procname = "resource_logging",
-+ .data = &grsec_resource_logging,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
-+ {
-+ .procname = "audit_ptrace",
-+ .data = &grsec_enable_audit_ptrace,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
-+ {
-+ .procname = "harden_ptrace",
-+ .data = &grsec_enable_harden_ptrace,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
-+ {
-+ .procname = "harden_ipc",
-+ .data = &grsec_enable_harden_ipc,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+ {
-+ .procname = "grsec_lock",
-+ .data = &grsec_lock,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+#ifdef CONFIG_GRKERNSEC_ROFS
-+ {
-+ .procname = "romount_protect",
-+ .data = &grsec_enable_rofs,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_minmax_secure,
-+ .extra1 = &one,
-+ .extra2 = &one,
-+ },
-+#endif
-+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
-+ {
-+ .procname = "deny_new_usb",
-+ .data = &grsec_deny_new_usb,
-+ .maxlen = sizeof(int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_secure,
-+ },
-+#endif
-+ { }
-+};
-+#endif
-diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
-new file mode 100644
-index 0000000..61b514e
---- /dev/null
-+++ b/grsecurity/grsec_time.c
-@@ -0,0 +1,16 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/grinternal.h>
-+#include <linux/module.h>
-+
-+void
-+gr_log_timechange(void)
-+{
-+#ifdef CONFIG_GRKERNSEC_TIME
-+ if (grsec_enable_time)
-+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
-+#endif
-+ return;
-+}
-+
-+EXPORT_SYMBOL_GPL(gr_log_timechange);
-diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
-new file mode 100644
-index 0000000..1b915bb
---- /dev/null
-+++ b/grsecurity/grsec_tpe.c
-@@ -0,0 +1,78 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/grinternal.h>
-+
-+extern int gr_acl_tpe_check(void);
-+
-+int
-+gr_tpe_allow(const struct file *file)
-+{
-+#ifdef CONFIG_GRKERNSEC
-+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
-+ struct inode *file_inode = file->f_path.dentry->d_inode;
-+ const struct cred *cred = current_cred();
-+ char *msg = NULL;
-+ char *msg2 = NULL;
-+
-+ // never restrict root
-+ if (!cred->uid)
-+ return 1;
-+
-+ if (grsec_enable_tpe) {
-+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
-+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
-+ msg = "not being in trusted group";
-+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
-+ msg = "being in untrusted group";
-+#else
-+ if (in_group_p(grsec_tpe_gid))
-+ msg = "being in untrusted group";
-+#endif
-+ }
-+ if (!msg && gr_acl_tpe_check())
-+ msg = "being in untrusted role";
-+
-+ // not in any affected group/role
-+ if (!msg)
-+ goto next_check;
-+
-+ if (inode->i_uid)
-+ msg2 = "file in non-root-owned directory";
-+ else if (inode->i_mode & S_IWOTH)
-+ msg2 = "file in world-writable directory";
-+ else if (inode->i_mode & S_IWGRP)
-+ msg2 = "file in group-writable directory";
-+ else if (file_inode->i_mode & S_IWOTH)
-+ msg2 = "file is world-writable";
-+
-+ if (msg && msg2) {
-+ char fullmsg[70] = {0};
-+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
-+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
-+ return 0;
-+ }
-+ msg = NULL;
-+next_check:
-+#ifdef CONFIG_GRKERNSEC_TPE_ALL
-+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
-+ return 1;
-+
-+ if (inode->i_uid && (inode->i_uid != cred->uid))
-+ msg = "directory not owned by user";
-+ else if (inode->i_mode & S_IWOTH)
-+ msg = "file in world-writable directory";
-+ else if (inode->i_mode & S_IWGRP)
-+ msg = "file in group-writable directory";
-+ else if (file_inode->i_mode & S_IWOTH)
-+ msg = "file is world-writable";
-+
-+ if (msg) {
-+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
-+ return 0;
-+ }
-+#endif
-+#endif
-+ return 1;
-+}
-diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
-new file mode 100644
-index 0000000..ae02d8e
---- /dev/null
-+++ b/grsecurity/grsec_usb.c
-@@ -0,0 +1,15 @@
-+#include <linux/kernel.h>
-+#include <linux/grinternal.h>
-+#include <linux/module.h>
-+
-+int gr_handle_new_usb(void)
-+{
-+#ifdef CONFIG_GRKERNSEC_DENYUSB
-+ if (grsec_deny_new_usb) {
-+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
-+ return 1;
-+ }
-+#endif
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
-diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
-new file mode 100644
-index 0000000..158b330
---- /dev/null
-+++ b/grsecurity/grsum.c
-@@ -0,0 +1,64 @@
-+#include <linux/err.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/scatterlist.h>
-+#include <linux/crypto.h>
-+#include <linux/gracl.h>
-+
-+
-+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
-+#error "crypto and sha256 must be built into the kernel"
-+#endif
-+
-+int
-+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
-+{
-+ struct crypto_hash *tfm;
-+ struct hash_desc desc;
-+ struct scatterlist sg[2];
-+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
-+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
-+ unsigned long *sumptr = (unsigned long *)sum;
-+ int cryptres;
-+ int retval = 1;
-+ volatile int mismatched = 0;
-+ volatile int dummy = 0;
-+ unsigned int i;
-+
-+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
-+ if (IS_ERR(tfm)) {
-+ /* should never happen, since sha256 should be built in */
-+ memset(entry->pw, 0, GR_PW_LEN);
-+ return 1;
-+ }
-+
-+ sg_init_table(sg, 2);
-+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
-+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
-+
-+ desc.tfm = tfm;
-+ desc.flags = 0;
-+
-+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
-+ temp_sum);
-+
-+ memset(entry->pw, 0, GR_PW_LEN);
-+
-+ if (cryptres)
-+ goto out;
-+
-+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
-+ if (sumptr[i] != tmpsumptr[i])
-+ mismatched = 1;
-+ else
-+ dummy = 1; // waste a cycle
-+
-+ if (!mismatched)
-+ retval = dummy - 1;
-+
-+out:
-+ crypto_free_hash(tfm);
-+
-+ return retval;
-+}
-diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
-index 77ff547..181834f 100644
---- a/include/asm-generic/4level-fixup.h
-+++ b/include/asm-generic/4level-fixup.h
-@@ -13,8 +13,10 @@
- #define pmd_alloc(mm, pud, address) \
- ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
- NULL: pmd_offset(pud, address))
-+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
-
- #define pud_alloc(mm, pgd, address) (pgd)
-+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
- #define pud_offset(pgd, start) (pgd)
- #define pud_none(pud) 0
- #define pud_bad(pud) 0
-diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
-index b7babf0..1e4b4f1 100644
---- a/include/asm-generic/atomic-long.h
-+++ b/include/asm-generic/atomic-long.h
-@@ -22,6 +22,12 @@
-
- typedef atomic64_t atomic_long_t;
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+typedef atomic64_unchecked_t atomic_long_unchecked_t;
-+#else
-+typedef atomic64_t atomic_long_unchecked_t;
-+#endif
-+
- #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
-
- static inline long atomic_long_read(atomic_long_t *l)
-@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
- return (long)atomic64_read(v);
- }
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
-+{
-+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
-+
-+ return (long)atomic64_read_unchecked(v);
-+}
-+#endif
-+
- static inline void atomic_long_set(atomic_long_t *l, long i)
- {
- atomic64_t *v = (atomic64_t *)l;
-@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
- atomic64_set(v, i);
- }
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
-+{
-+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
-+
-+ atomic64_set_unchecked(v, i);
-+}
-+#endif
-+
- static inline void atomic_long_inc(atomic_long_t *l)
- {
- atomic64_t *v = (atomic64_t *)l;
-@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
- atomic64_inc(v);
- }
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
-+{
-+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
-+
-+ atomic64_inc_unchecked(v);
-+}
-+#endif
-+
- static inline void atomic_long_dec(atomic_long_t *l)
- {
- atomic64_t *v = (atomic64_t *)l;
-@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
- atomic64_dec(v);
- }
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
-+{
-+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
-+
-+ atomic64_dec_unchecked(v);
-+}
-+#endif
-+
- static inline void atomic_long_add(long i, atomic_long_t *l)
- {
- atomic64_t *v = (atomic64_t *)l;
-@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
- atomic64_add(i, v);
- }
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
-+{
-+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
-+
-+ atomic64_add_unchecked(i, v);
-+}
-+#endif
-+
- static inline void atomic_long_sub(long i, atomic_long_t *l)
- {
- atomic64_t *v = (atomic64_t *)l;
-@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
- atomic64_sub(i, v);
- }
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
-+{
-+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
-+
-+ atomic64_sub_unchecked(i, v);
-+}
-+#endif
-+
- static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
- {
- atomic64_t *v = (atomic64_t *)l;
-@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
- return atomic64_add_negative(i, v);
- }
-
--static inline long atomic_long_add_return(long i, atomic_long_t *l)
-+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
- {
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_add_return(i, v);
- }
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
-+{
-+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
-+
-+ return (long)atomic64_add_return_unchecked(i, v);
-+}
-+#endif
-+
- static inline long atomic_long_sub_return(long i, atomic_long_t *l)
- {
- atomic64_t *v = (atomic64_t *)l;
-@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
- return (long)atomic64_inc_return(v);
- }
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
-+{
-+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
-+
-+ return (long)atomic64_inc_return_unchecked(v);
-+}
-+#endif
-+
- static inline long atomic_long_dec_return(atomic_long_t *l)
- {
- atomic64_t *v = (atomic64_t *)l;
-@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
-
- typedef atomic_t atomic_long_t;
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+typedef atomic_unchecked_t atomic_long_unchecked_t;
-+#else
-+typedef atomic_t atomic_long_unchecked_t;
-+#endif
-+
- #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
- static inline long atomic_long_read(atomic_long_t *l)
- {
-@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
- return (long)atomic_read(v);
- }
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
-+{
-+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
-+
-+ return (long)atomic_read_unchecked(v);
-+}
-+#endif
-+
- static inline void atomic_long_set(atomic_long_t *l, long i)
- {
- atomic_t *v = (atomic_t *)l;
-@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
- atomic_set(v, i);
- }
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
-+{
-+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
-+
-+ atomic_set_unchecked(v, i);
-+}
-+#endif
-+
- static inline void atomic_long_inc(atomic_long_t *l)
- {
- atomic_t *v = (atomic_t *)l;
-@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
- atomic_inc(v);
- }
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
-+{
-+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
-+
-+ atomic_inc_unchecked(v);
-+}
-+#endif
-+
- static inline void atomic_long_dec(atomic_long_t *l)
- {
- atomic_t *v = (atomic_t *)l;
-@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
- atomic_dec(v);
- }
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
-+{
-+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
-+
-+ atomic_dec_unchecked(v);
-+}
-+#endif
-+
- static inline void atomic_long_add(long i, atomic_long_t *l)
- {
- atomic_t *v = (atomic_t *)l;
-@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
- atomic_add(i, v);
- }
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
-+{
-+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
-+
-+ atomic_add_unchecked(i, v);
-+}
-+#endif
-+
- static inline void atomic_long_sub(long i, atomic_long_t *l)
- {
- atomic_t *v = (atomic_t *)l;
-@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
- atomic_sub(i, v);
- }
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
-+{
-+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
-+
-+ atomic_sub_unchecked(i, v);
-+}
-+#endif
-+
- static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
- {
- atomic_t *v = (atomic_t *)l;
-@@ -211,13 +349,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
- return atomic_add_negative(i, v);
- }
-
--static inline long atomic_long_add_return(long i, atomic_long_t *l)
-+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
- {
- atomic_t *v = (atomic_t *)l;
-
- return (long)atomic_add_return(i, v);
- }
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
-+{
-+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
-+
-+ return (long)atomic_add_return_unchecked(i, v);
-+}
-+
-+#endif
-+
- static inline long atomic_long_sub_return(long i, atomic_long_t *l)
- {
- atomic_t *v = (atomic_t *)l;
-@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
- return (long)atomic_inc_return(v);
- }
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
-+{
-+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
-+
-+ return (long)atomic_inc_return_unchecked(v);
-+}
-+#endif
-+
- static inline long atomic_long_dec_return(atomic_long_t *l)
- {
- atomic_t *v = (atomic_t *)l;
-@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
-
- #endif /* BITS_PER_LONG == 64 */
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+static inline void pax_refcount_needs_these_functions(void)
-+{
-+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
-+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
-+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
-+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
-+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
-+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
-+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
-+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
-+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
-+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
-+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
-+#ifdef CONFIG_X86
-+ atomic_clear_mask_unchecked(0, NULL);
-+ atomic_set_mask_unchecked(0, NULL);
-+#endif
-+
-+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
-+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
-+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
-+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
-+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
-+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
-+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
-+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
-+}
-+#else
-+#define atomic_read_unchecked(v) atomic_read(v)
-+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
-+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
-+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
-+#define atomic_inc_unchecked(v) atomic_inc(v)
-+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
-+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
-+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
-+#define atomic_dec_unchecked(v) atomic_dec(v)
-+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
-+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
-+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
-+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
-+
-+#define atomic_long_read_unchecked(v) atomic_long_read(v)
-+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
-+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
-+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
-+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
-+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
-+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
-+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
-+#endif
-+
- #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
-diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
-index e37963c1..6f5b60b 100644
---- a/include/asm-generic/atomic.h
-+++ b/include/asm-generic/atomic.h
-@@ -158,7 +158,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
- * Atomically clears the bits set in @mask from @v
- */
- #ifndef atomic_clear_mask
--static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
-+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
- {
- unsigned long flags;
-
-diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
-index b18ce4f..2ee2843 100644
---- a/include/asm-generic/atomic64.h
-+++ b/include/asm-generic/atomic64.h
-@@ -16,6 +16,8 @@ typedef struct {
- long long counter;
- } atomic64_t;
-
-+typedef atomic64_t atomic64_unchecked_t;
-+
- #define ATOMIC64_INIT(i) { (i) }
-
- extern long long atomic64_read(const atomic64_t *v);
-@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
- #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
- #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
-
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
-+
- #endif /* _ASM_GENERIC_ATOMIC64_H */
-diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
-index a60a7cc..0fe12f2 100644
---- a/include/asm-generic/bitops/__fls.h
-+++ b/include/asm-generic/bitops/__fls.h
-@@ -9,7 +9,7 @@
- *
- * Undefined if no set bit exists, so code should check against 0 first.
- */
--static __always_inline unsigned long __fls(unsigned long word)
-+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
- {
- int num = BITS_PER_LONG - 1;
-
-diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
-index 0576d1f..dad6c71 100644
---- a/include/asm-generic/bitops/fls.h
-+++ b/include/asm-generic/bitops/fls.h
-@@ -9,7 +9,7 @@
- * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
- */
-
--static __always_inline int fls(int x)
-+static __always_inline int __intentional_overflow(-1) fls(int x)
- {
- int r = 32;
-
-diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
-index b097cf8..3d40e14 100644
---- a/include/asm-generic/bitops/fls64.h
-+++ b/include/asm-generic/bitops/fls64.h
-@@ -15,7 +15,7 @@
- * at position 64.
- */
- #if BITS_PER_LONG == 32
--static __always_inline int fls64(__u64 x)
-+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
- {
- __u32 h = x >> 32;
- if (h)
-@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
- return fls(x);
- }
- #elif BITS_PER_LONG == 64
--static __always_inline int fls64(__u64 x)
-+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
- {
- if (x == 0)
- return 0;
-diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
-index 1bfcfe5..e04c5c9 100644
---- a/include/asm-generic/cache.h
-+++ b/include/asm-generic/cache.h
-@@ -6,7 +6,7 @@
- * cache lines need to provide their own cache.h.
- */
-
--#define L1_CACHE_SHIFT 5
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_SHIFT 5UL
-+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
-
- #endif /* __ASM_GENERIC_CACHE_H */
-diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
-index 0d68a1e..b74a761 100644
---- a/include/asm-generic/emergency-restart.h
-+++ b/include/asm-generic/emergency-restart.h
-@@ -1,7 +1,7 @@
- #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
- #define _ASM_GENERIC_EMERGENCY_RESTART_H
-
--static inline void machine_emergency_restart(void)
-+static inline __noreturn void machine_emergency_restart(void)
- {
- machine_restart(NULL);
- }
-diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
-index 0232ccb..13d9165 100644
---- a/include/asm-generic/kmap_types.h
-+++ b/include/asm-generic/kmap_types.h
-@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
- KMAP_D(17) KM_NMI,
- KMAP_D(18) KM_NMI_PTE,
- KMAP_D(19) KM_KDB,
-+KMAP_D(20) KM_CLEARPAGE,
- /*
- * Remember to update debug_kmap_atomic() when adding new kmap types!
- */
--KMAP_D(20) KM_TYPE_NR
-+KMAP_D(21) KM_TYPE_NR
- };
-
- #undef KMAP_D
-diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
-index 9ceb03b..62b0b8f 100644
---- a/include/asm-generic/local.h
-+++ b/include/asm-generic/local.h
-@@ -23,24 +23,37 @@ typedef struct
- atomic_long_t a;
- } local_t;
-
-+typedef struct {
-+ atomic_long_unchecked_t a;
-+} local_unchecked_t;
-+
- #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
-
- #define local_read(l) atomic_long_read(&(l)->a)
-+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
- #define local_set(l,i) atomic_long_set((&(l)->a),(i))
-+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
- #define local_inc(l) atomic_long_inc(&(l)->a)
-+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
- #define local_dec(l) atomic_long_dec(&(l)->a)
-+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
- #define local_add(i,l) atomic_long_add((i),(&(l)->a))
-+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
- #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
-+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
-
- #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
- #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
- #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
- #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
- #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
-+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
- #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
- #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
-+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
-
- #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
-+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
- #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
- #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
- #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
-diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
-index 725612b..9cc513a 100644
---- a/include/asm-generic/pgtable-nopmd.h
-+++ b/include/asm-generic/pgtable-nopmd.h
-@@ -1,14 +1,19 @@
- #ifndef _PGTABLE_NOPMD_H
- #define _PGTABLE_NOPMD_H
-
--#ifndef __ASSEMBLY__
--
- #include <asm-generic/pgtable-nopud.h>
-
--struct mm_struct;
--
- #define __PAGETABLE_PMD_FOLDED
-
-+#define PMD_SHIFT PUD_SHIFT
-+#define PTRS_PER_PMD 1
-+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
-+#define PMD_MASK (~(PMD_SIZE-1))
-+
-+#ifndef __ASSEMBLY__
-+
-+struct mm_struct;
-+
- /*
- * Having the pmd type consist of a pud gets the size right, and allows
- * us to conceptually access the pud entry that this pmd is folded into
-@@ -16,11 +21,6 @@ struct mm_struct;
- */
- typedef struct { pud_t pud; } pmd_t;
-
--#define PMD_SHIFT PUD_SHIFT
--#define PTRS_PER_PMD 1
--#define PMD_SIZE (1UL << PMD_SHIFT)
--#define PMD_MASK (~(PMD_SIZE-1))
--
- /*
- * The "pud_xxx()" functions here are trivial for a folded two-level
- * setup: the pmd is never bad, and a pmd always exists (as it's folded
-diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
-index 810431d..0ec4804f 100644
---- a/include/asm-generic/pgtable-nopud.h
-+++ b/include/asm-generic/pgtable-nopud.h
-@@ -1,10 +1,15 @@
- #ifndef _PGTABLE_NOPUD_H
- #define _PGTABLE_NOPUD_H
-
--#ifndef __ASSEMBLY__
--
- #define __PAGETABLE_PUD_FOLDED
-
-+#define PUD_SHIFT PGDIR_SHIFT
-+#define PTRS_PER_PUD 1
-+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
-+#define PUD_MASK (~(PUD_SIZE-1))
-+
-+#ifndef __ASSEMBLY__
-+
- /*
- * Having the pud type consist of a pgd gets the size right, and allows
- * us to conceptually access the pgd entry that this pud is folded into
-@@ -12,11 +17,6 @@
- */
- typedef struct { pgd_t pgd; } pud_t;
-
--#define PUD_SHIFT PGDIR_SHIFT
--#define PTRS_PER_PUD 1
--#define PUD_SIZE (1UL << PUD_SHIFT)
--#define PUD_MASK (~(PUD_SIZE-1))
--
- /*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pud is never bad, and a pud always exists (as it's folded
-@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
- #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
-
- #define pgd_populate(mm, pgd, pud) do { } while (0)
-+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
- /*
- * (puds are folded into pgds so this doesn't get actually called,
- * but the define is needed for a generic inline function.)
-diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
-index bc00876..9aa9b1f 100644
---- a/include/asm-generic/pgtable.h
-+++ b/include/asm-generic/pgtable.h
-@@ -530,6 +530,22 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
- #endif
- }
-
-+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
-+#ifdef CONFIG_PAX_KERNEXEC
-+#error KERNEXEC requires pax_open_kernel
-+#else
-+static inline unsigned long pax_open_kernel(void) { return 0; }
-+#endif
-+#endif
-+
-+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
-+#ifdef CONFIG_PAX_KERNEXEC
-+#error KERNEXEC requires pax_close_kernel
-+#else
-+static inline unsigned long pax_close_kernel(void) { return 0; }
-+#endif
-+#endif
-+
- #endif /* CONFIG_MMU */
-
- #endif /* !__ASSEMBLY__ */
-diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
-index 0dd4e87..af5d035 100644
---- a/include/asm-generic/siginfo.h
-+++ b/include/asm-generic/siginfo.h
-@@ -90,9 +90,18 @@ typedef struct siginfo {
- __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
-+
-+ /* SIGSYS */
-+ struct {
-+ void __user *_call_addr; /* calling user insn */
-+ int _syscall; /* triggering system call number */
-+ unsigned int _arch; /* AUDIT_ARCH_* of syscall */
-+ } _sigsys;
- } _sifields;
- } siginfo_t;
-
-+/* If the arch shares siginfo, then it has SIGSYS. */
-+#define __ARCH_SIGSYS
- #endif
-
- /*
-@@ -116,6 +125,11 @@ typedef struct siginfo {
- #define si_addr_lsb _sifields._sigfault._addr_lsb
- #define si_band _sifields._sigpoll._band
- #define si_fd _sifields._sigpoll._fd
-+#ifdef __ARCH_SIGSYS
-+#define si_call_addr _sifields._sigsys._call_addr
-+#define si_syscall _sifields._sigsys._syscall
-+#define si_arch _sifields._sigsys._arch
-+#endif
-
- #ifdef __KERNEL__
- #define __SI_MASK 0xffff0000u
-@@ -126,6 +140,7 @@ typedef struct siginfo {
- #define __SI_CHLD (4 << 16)
- #define __SI_RT (5 << 16)
- #define __SI_MESGQ (6 << 16)
-+#define __SI_SYS (7 << 16)
- #define __SI_CODE(T,N) ((T) | ((N) & 0xffff))
- #else
- #define __SI_KILL 0
-@@ -135,6 +150,7 @@ typedef struct siginfo {
- #define __SI_CHLD 0
- #define __SI_RT 0
- #define __SI_MESGQ 0
-+#define __SI_SYS 0
- #define __SI_CODE(T,N) (N)
- #endif
-
-@@ -232,6 +248,12 @@ typedef struct siginfo {
- #define NSIGPOLL 6
-
- /*
-+ * SIGSYS si_codes
-+ */
-+#define SYS_SECCOMP (__SI_SYS|1) /* seccomp triggered */
-+#define NSIGSYS 1
-+
-+/*
- * sigevent definitions
- *
- * It seems likely that SIGEV_THREAD will have to be handled from
-diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
-index 5c122ae..a2c13dc 100644
---- a/include/asm-generic/syscall.h
-+++ b/include/asm-generic/syscall.h
-@@ -142,4 +142,18 @@ void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
- unsigned int i, unsigned int n,
- const unsigned long *args);
-
-+/**
-+ * syscall_get_arch - return the AUDIT_ARCH for the current system call
-+ * @task: task of interest, must be in system call entry tracing
-+ * @regs: task_pt_regs() of @task
-+ *
-+ * Returns the AUDIT_ARCH_* based on the system call convention in use.
-+ *
-+ * It's only valid to call this when @task is stopped on entry to a system
-+ * call, due to %TIF_SYSCALL_TRACE, %TIF_SYSCALL_AUDIT, or %TIF_SECCOMP.
-+ *
-+ * Note, at present this function is only required with
-+ * CONFIG_HAVE_ARCH_SECCOMP_FILTER.
-+ */
-+int syscall_get_arch(struct task_struct *task, struct pt_regs *regs);
- #endif /* _ASM_SYSCALL_H */
-diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
-index b5e2e4c..6a5373e 100644
---- a/include/asm-generic/vmlinux.lds.h
-+++ b/include/asm-generic/vmlinux.lds.h
-@@ -217,6 +217,7 @@
- .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
- VMLINUX_SYMBOL(__start_rodata) = .; \
- *(.rodata) *(.rodata.*) \
-+ *(.data..read_only) \
- *(__vermagic) /* Kernel version magic */ \
- . = ALIGN(8); \
- VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
-@@ -722,17 +723,18 @@
- * section in the linker script will go there too. @phdr should have
- * a leading colon.
- *
-- * Note that this macros defines __per_cpu_load as an absolute symbol.
-+ * Note that this macros defines per_cpu_load as an absolute symbol.
- * If there is no need to put the percpu section at a predetermined
- * address, use PERCPU_SECTION.
- */
- #define PERCPU_VADDR(cacheline, vaddr, phdr) \
-- VMLINUX_SYMBOL(__per_cpu_load) = .; \
-- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
-+ per_cpu_load = .; \
-+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
- - LOAD_OFFSET) { \
-+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
- PERCPU_INPUT(cacheline) \
- } phdr \
-- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
-+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
-
- /**
- * PERCPU_SECTION - define output section for percpu area, simple version
-diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
-index ecc721d..029cf5a 100644
---- a/include/crypto/algapi.h
-+++ b/include/crypto/algapi.h
-@@ -34,7 +34,7 @@ struct crypto_type {
- unsigned int maskclear;
- unsigned int maskset;
- unsigned int tfmsize;
--};
-+} __do_const;
-
- struct crypto_instance {
- struct crypto_alg alg;
-diff --git a/include/drm/drmP.h b/include/drm/drmP.h
-index bf4b2dc..2df6e61 100644
---- a/include/drm/drmP.h
-+++ b/include/drm/drmP.h
-@@ -72,6 +72,7 @@
- #include <linux/workqueue.h>
- #include <linux/poll.h>
- #include <asm/pgalloc.h>
-+#include <asm/local.h>
- #include "drm.h"
-
- #include <linux/idr.h>
-@@ -284,10 +285,12 @@ do { \
- * \param cmd command.
- * \param arg argument.
- */
--typedef int drm_ioctl_t(struct drm_device *dev, void *data,
-+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
--typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
-+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
- unsigned long arg);
-
- #define DRM_IOCTL_NR(n) _IOC_NR(n)
-@@ -302,9 +305,9 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
- struct drm_ioctl_desc {
- unsigned int cmd;
- int flags;
-- drm_ioctl_t *func;
-+ drm_ioctl_t func;
- unsigned int cmd_drv;
--};
-+} __do_const;
-
- /**
- * Creates a driver or general drm_ioctl_desc array entry for the given
-@@ -965,7 +968,7 @@ struct drm_info_list {
- int (*show)(struct seq_file*, void*); /** show callback */
- u32 driver_features; /**< Required driver features for this entry */
- void *data;
--};
-+} __do_const;
-
- /**
- * debugfs node structure. This structure represents a debugfs file.
-@@ -1038,7 +1041,7 @@ struct drm_device {
-
- /** \name Usage Counters */
- /*@{ */
-- int open_count; /**< Outstanding files open */
-+ local_t open_count; /**< Outstanding files open */
- atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
- atomic_t vma_count; /**< Outstanding vma areas open */
- int buf_use; /**< Buffers in use -- cannot alloc */
-@@ -1049,7 +1052,7 @@ struct drm_device {
- /*@{ */
- unsigned long counters;
- enum drm_stat_type types[15];
-- atomic_t counts[15];
-+ atomic_unchecked_t counts[15];
- /*@} */
-
- struct list_head filelist;
-diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
-index 73b0712..2e581af 100644
---- a/include/drm/drm_crtc_helper.h
-+++ b/include/drm/drm_crtc_helper.h
-@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
- struct drm_connector *connector);
- /* disable encoder when not in use - more explicit than dpms off */
- void (*disable)(struct drm_encoder *encoder);
--};
-+} __no_const;
-
- struct drm_connector_helper_funcs {
- int (*get_modes)(struct drm_connector *connector);
-diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
-index 26c1f78..6722682 100644
---- a/include/drm/ttm/ttm_memory.h
-+++ b/include/drm/ttm/ttm_memory.h
-@@ -47,7 +47,7 @@
-
- struct ttm_mem_shrink {
- int (*do_shrink) (struct ttm_mem_shrink *);
--};
-+} __no_const;
-
- /**
- * struct ttm_mem_global - Global memory accounting structure.
-diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
-index 129de12..d73359c 100644
---- a/include/drm/ttm/ttm_page_alloc.h
-+++ b/include/drm/ttm/ttm_page_alloc.h
-@@ -54,7 +54,7 @@ int ttm_get_pages(struct list_head *pages,
- * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
- */
- void ttm_put_pages(struct list_head *pages,
-- unsigned page_count,
-+ unsigned long page_count,
- int flags,
- enum ttm_caching_state cstate,
- dma_addr_t *dma_address);
-diff --git a/include/linux/Kbuild b/include/linux/Kbuild
-index a3ce901..fd50c75 100644
---- a/include/linux/Kbuild
-+++ b/include/linux/Kbuild
-@@ -329,6 +329,7 @@ header-y += scc.h
- header-y += sched.h
- header-y += screen_info.h
- header-y += sdla.h
-+header-y += seccomp.h
- header-y += securebits.h
- header-y += selinux_netlink.h
- header-y += sem.h
-diff --git a/include/linux/a.out.h b/include/linux/a.out.h
-index e86dfca..40cc55f 100644
---- a/include/linux/a.out.h
-+++ b/include/linux/a.out.h
-@@ -39,6 +39,14 @@ enum machine_type {
- M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
- };
-
-+/* Constants for the N_FLAGS field */
-+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
-+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
-+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
-+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
-+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
-+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
-+
- #if !defined (N_MAGIC)
- #define N_MAGIC(exec) ((exec).a_info & 0xffff)
- #endif
-diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
-index 49a83ca..d0a847e 100644
---- a/include/linux/atmdev.h
-+++ b/include/linux/atmdev.h
-@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
- #endif
-
- struct k_atm_aal_stats {
--#define __HANDLE_ITEM(i) atomic_t i
-+#define __HANDLE_ITEM(i) atomic_unchecked_t i
- __AAL_STAT_ITEMS
- #undef __HANDLE_ITEM
- };
-@@ -406,7 +406,7 @@ struct atmdev_ops { /* only send is required */
- int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
- int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
- struct module *owner;
--};
-+} __do_const ;
-
- struct atmphy_ops {
- int (*start)(struct atm_dev *dev);
-diff --git a/include/linux/audit.h b/include/linux/audit.h
-index 2f81c6f..225b4e4 100644
---- a/include/linux/audit.h
-+++ b/include/linux/audit.h
-@@ -430,6 +430,7 @@ extern void audit_putname(const char *name);
- extern void __audit_inode(const char *name, const struct dentry *dentry);
- extern void __audit_inode_child(const struct dentry *dentry,
- const struct inode *parent);
-+extern void __audit_seccomp(unsigned long syscall, long signr, int code);
- extern void __audit_ptrace(struct task_struct *t);
-
- static inline int audit_dummy_context(void)
-@@ -453,6 +454,12 @@ static inline void audit_inode_child(const struct dentry *dentry,
- }
- void audit_core_dumps(long signr);
-
-+static inline void audit_seccomp(unsigned long syscall, long signr, int code)
-+{
-+ if (unlikely(!audit_dummy_context()))
-+ __audit_seccomp(syscall, signr, code);
-+}
-+
- static inline void audit_ptrace(struct task_struct *t)
- {
- if (unlikely(!audit_dummy_context()))
-@@ -558,6 +565,8 @@ extern int audit_signals;
- #define audit_inode(n,d) do { (void)(d); } while (0)
- #define audit_inode_child(i,p) do { ; } while (0)
- #define audit_core_dumps(i) do { ; } while (0)
-+#define audit_seccomp(i,s,c) do { ; } while (0)
-+#define __audit_seccomp(i,s,c) do { ; } while (0)
- #define auditsc_get_stamp(c,t,s) (0)
- #define audit_get_loginuid(t) (-1)
- #define audit_get_sessionid(t) (-1)
-diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
-index d337419..1d6a512f 100644
---- a/include/linux/binfmts.h
-+++ b/include/linux/binfmts.h
-@@ -18,7 +18,7 @@ struct pt_regs;
- #define BINPRM_BUF_SIZE 128
-
- #ifdef __KERNEL__
--#include <linux/list.h>
-+#include <linux/sched.h>
-
- #define CORENAME_MAX_SIZE 128
-
-@@ -58,7 +58,8 @@ struct linux_binprm {
- unsigned interp_flags;
- unsigned interp_data;
- unsigned long loader, exec;
--};
-+ char tcomm[TASK_COMM_LEN];
-+} __randomize_layout;
-
- #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
- #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
-@@ -86,8 +87,10 @@ struct linux_binfmt {
- int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
- int (*load_shlib)(struct file *);
- int (*core_dump)(struct coredump_params *cprm);
-+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
-+ void (*handle_mmap)(struct file *);
- unsigned long min_coredump; /* minimal dump size */
--};
-+} __do_const __randomize_layout;
-
- extern int __register_binfmt(struct linux_binfmt *fmt, int insert);
-
-diff --git a/include/linux/bitops.h b/include/linux/bitops.h
-index 87a375f..94c85dd 100644
---- a/include/linux/bitops.h
-+++ b/include/linux/bitops.h
-@@ -74,7 +74,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
- * @word: value to rotate
- * @shift: bits to roll
- */
--static inline __u32 rol32(__u32 word, unsigned int shift)
-+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
- {
- return (word << shift) | (word >> (32 - shift));
- }
-@@ -84,7 +84,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
- * @word: value to rotate
- * @shift: bits to roll
- */
--static inline __u32 ror32(__u32 word, unsigned int shift)
-+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
- {
- return (word >> shift) | (word << (32 - shift));
- }
-@@ -140,7 +140,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
- return (__s32)(value << shift) >> shift;
- }
-
--static inline unsigned fls_long(unsigned long l)
-+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
- {
- if (sizeof(l) == 4)
- return fls(l);
-diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
-index c7e834b..dec8d67 100644
---- a/include/linux/blkdev.h
-+++ b/include/linux/blkdev.h
-@@ -1315,7 +1315,7 @@ struct block_device_operations {
- /* this callback is with swap_lock and sometimes page table lock held */
- void (*swap_slot_free_notify) (struct block_device *, unsigned long);
- struct module *owner;
--};
-+} __do_const;
-
- extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
- unsigned long);
-diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
-index 4d1a074..88f929a 100644
---- a/include/linux/blktrace_api.h
-+++ b/include/linux/blktrace_api.h
-@@ -162,7 +162,7 @@ struct blk_trace {
- struct dentry *dir;
- struct dentry *dropped_file;
- struct dentry *msg_file;
-- atomic_t dropped;
-+ atomic_unchecked_t dropped;
- };
-
- extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
-diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
-index 83195fb..46fe38f 100644
---- a/include/linux/byteorder/little_endian.h
-+++ b/include/linux/byteorder/little_endian.h
-@@ -42,51 +42,51 @@
-
- static inline __le64 __cpu_to_le64p(const __u64 *p)
- {
-- return (__force __le64)*p;
-+ return (__force const __le64)*p;
- }
--static inline __u64 __le64_to_cpup(const __le64 *p)
-+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
- {
-- return (__force __u64)*p;
-+ return (__force const __u64)*p;
- }
- static inline __le32 __cpu_to_le32p(const __u32 *p)
- {
-- return (__force __le32)*p;
-+ return (__force const __le32)*p;
- }
- static inline __u32 __le32_to_cpup(const __le32 *p)
- {
-- return (__force __u32)*p;
-+ return (__force const __u32)*p;
- }
- static inline __le16 __cpu_to_le16p(const __u16 *p)
- {
-- return (__force __le16)*p;
-+ return (__force const __le16)*p;
- }
- static inline __u16 __le16_to_cpup(const __le16 *p)
- {
-- return (__force __u16)*p;
-+ return (__force const __u16)*p;
- }
- static inline __be64 __cpu_to_be64p(const __u64 *p)
- {
-- return (__force __be64)__swab64p(p);
-+ return (__force const __be64)__swab64p(p);
- }
- static inline __u64 __be64_to_cpup(const __be64 *p)
- {
-- return __swab64p((__u64 *)p);
-+ return __swab64p((const __u64 *)p);
- }
- static inline __be32 __cpu_to_be32p(const __u32 *p)
- {
-- return (__force __be32)__swab32p(p);
-+ return (__force const __be32)__swab32p(p);
- }
--static inline __u32 __be32_to_cpup(const __be32 *p)
-+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
- {
-- return __swab32p((__u32 *)p);
-+ return __swab32p((const __u32 *)p);
- }
- static inline __be16 __cpu_to_be16p(const __u16 *p)
- {
-- return (__force __be16)__swab16p(p);
-+ return (__force const __be16)__swab16p(p);
- }
- static inline __u16 __be16_to_cpup(const __be16 *p)
- {
-- return __swab16p((__u16 *)p);
-+ return __swab16p((const __u16 *)p);
- }
- #define __cpu_to_le64s(x) do { (void)(x); } while (0)
- #define __le64_to_cpus(x) do { (void)(x); } while (0)
-diff --git a/include/linux/cache.h b/include/linux/cache.h
-index 4c57065..40346da 100644
---- a/include/linux/cache.h
-+++ b/include/linux/cache.h
-@@ -16,6 +16,14 @@
- #define __read_mostly
- #endif
-
-+#ifndef __read_only
-+#ifdef CONFIG_PAX_KERNEXEC
-+#error KERNEXEC requires __read_only
-+#else
-+#define __read_only __read_mostly
-+#endif
-+#endif
-+
- #ifndef ____cacheline_aligned
- #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
- #endif
-diff --git a/include/linux/capability.h b/include/linux/capability.h
-index a63d13d..f15d415 100644
---- a/include/linux/capability.h
-+++ b/include/linux/capability.h
-@@ -548,10 +548,16 @@ extern bool capable(int cap);
- extern bool ns_capable(struct user_namespace *ns, int cap);
- extern bool task_ns_capable(struct task_struct *t, int cap);
- extern bool nsown_capable(int cap);
-+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
-+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
-+extern bool capable_nolog(int cap);
-
- /* audit system wants to get cap info from files as well */
- extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
-
-+extern int is_privileged_binary(const struct dentry *dentry);
-+extern int is_root_privileged_binary(const struct dentry *dentry);
-+
- #endif /* __KERNEL__ */
-
- #endif /* !_LINUX_CAPABILITY_H */
-diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
-index 35eae4b..5673e99 100644
---- a/include/linux/cdrom.h
-+++ b/include/linux/cdrom.h
-@@ -985,7 +985,6 @@ struct cdrom_device_ops {
-
- /* driver specifications */
- const int capability; /* capability flags */
-- int n_minors; /* number of active minor devices */
- /* handle uniform packets for scsi type devices (scsi,atapi) */
- int (*generic_packet) (struct cdrom_device_info *,
- struct packet_command *);
-diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
-index 04ffb2e..6799180 100644
---- a/include/linux/cleancache.h
-+++ b/include/linux/cleancache.h
-@@ -31,7 +31,7 @@ struct cleancache_ops {
- void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
- void (*flush_inode)(int, struct cleancache_filekey);
- void (*flush_fs)(int);
--};
-+} __no_const;
-
- extern struct cleancache_ops
- cleancache_register_ops(struct cleancache_ops *ops);
-diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
-index d9a4fd0..13edc9f 100644
---- a/include/linux/clkdev.h
-+++ b/include/linux/clkdev.h
-@@ -32,7 +32,7 @@ struct clk_lookup {
- }
-
- struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
-- const char *dev_fmt, ...);
-+ const char *dev_fmt, ...) __printf(3, 4);
-
- void clkdev_add(struct clk_lookup *cl);
- void clkdev_drop(struct clk_lookup *cl);
-diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
-index 081147d..da89543 100644
---- a/include/linux/clocksource.h
-+++ b/include/linux/clocksource.h
-@@ -284,7 +284,7 @@ extern struct clocksource* clocksource_get_next(void);
- extern void clocksource_change_rating(struct clocksource *cs, int rating);
- extern void clocksource_suspend(void);
- extern void clocksource_resume(void);
--extern struct clocksource * __init __weak clocksource_default_clock(void);
-+extern struct clocksource * __init clocksource_default_clock(void);
- extern void clocksource_mark_unstable(struct clocksource *cs);
-
- extern void
-diff --git a/include/linux/compat.h b/include/linux/compat.h
-index d42bd48..f651bd9 100644
---- a/include/linux/compat.h
-+++ b/include/linux/compat.h
-@@ -240,10 +240,10 @@ long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
- int version, void __user *uptr);
- long compat_sys_msgctl(int first, int second, void __user *uptr);
- long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
-- void __user *uptr);
-+ void __user *uptr) __intentional_overflow(0);
- long compat_sys_shmctl(int first, int second, void __user *uptr);
- long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
-- unsigned nsems, const struct compat_timespec __user *timeout);
-+ compat_long_t nsems, const struct compat_timespec __user *timeout);
- asmlinkage long compat_sys_keyctl(u32 option,
- u32 arg2, u32 arg3, u32 arg4, u32 arg5);
- asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
-@@ -320,7 +320,7 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
-
- asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
-
--extern int compat_printk(const char *fmt, ...);
-+extern __printf(1, 2) int compat_printk(const char *fmt, ...);
- extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat);
-
- asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
-@@ -334,7 +334,7 @@ extern int compat_ptrace_request(struct task_struct *child,
- extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
- compat_ulong_t addr, compat_ulong_t data);
- asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
-- compat_long_t addr, compat_long_t data);
-+ compat_ulong_t addr, compat_ulong_t data);
-
- /*
- * epoll (fs/eventpoll.c) compat bits follow ...
-diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
-index 59a7e4c..8feb590 100644
---- a/include/linux/compiler-gcc4.h
-+++ b/include/linux/compiler-gcc4.h
-@@ -44,6 +44,26 @@
- #define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-
- #if __GNUC_MINOR__ >= 5
-+
-+#ifdef RANDSTRUCT_PLUGIN
-+#define __randomize_layout __attribute__((randomize_layout))
-+#define __no_randomize_layout __attribute__((no_randomize_layout))
-+#endif
-+
-+#ifdef CONSTIFY_PLUGIN
-+#define __no_const __attribute__((no_const))
-+#define __do_const __attribute__((do_const))
-+#endif
-+
-+#ifdef SIZE_OVERFLOW_PLUGIN
-+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
-+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
-+#endif
-+
-+#ifdef LATENT_ENTROPY_PLUGIN
-+#define __latent_entropy __attribute__((latent_entropy))
-+#endif
-+
- /*
- * Mark a position in code as unreachable. This can be used to
- * suppress control flow warnings after asm blocks that transfer
-@@ -59,6 +79,11 @@
- #define __noclone __attribute__((__noclone__))
-
- #endif
-+
-+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
-+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
-+#define __bos0(ptr) __bos((ptr), 0)
-+#define __bos1(ptr) __bos((ptr), 1)
- #endif
-
- #if __GNUC_MINOR__ > 0
-diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
-index cdd1cc2..d062745 100644
---- a/include/linux/compiler-gcc5.h
-+++ b/include/linux/compiler-gcc5.h
-@@ -28,6 +28,30 @@
- # define __compiletime_error(message) __attribute__((error(message)))
- #endif /* __CHECKER__ */
-
-+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
-+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
-+#define __bos0(ptr) __bos((ptr), 0)
-+#define __bos1(ptr) __bos((ptr), 1)
-+
-+#ifdef RANDSTRUCT_PLUGIN
-+#define __randomize_layout __attribute__((randomize_layout))
-+#define __no_randomize_layout __attribute__((no_randomize_layout))
-+#endif
-+
-+#ifdef CONSTIFY_PLUGIN
-+#define __no_const __attribute__((no_const))
-+#define __do_const __attribute__((do_const))
-+#endif
-+
-+#ifdef SIZE_OVERFLOW_PLUGIN
-+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
-+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
-+#endif
-+
-+#ifdef LATENT_ENTROPY_PLUGIN
-+#define __latent_entropy __attribute__((latent_entropy))
-+#endif
-+
- /*
- * Mark a position in code as unreachable. This can be used to
- * suppress control flow warnings after asm blocks that transfer
-@@ -53,7 +77,6 @@
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- *
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
-- * Fixed in GCC 4.8.2 and later versions.
- *
- * (asm goto is automatically volatile - the naming reflects this.)
- */
-diff --git a/include/linux/compiler.h b/include/linux/compiler.h
-index 65c8b78..5d8ba14 100644
---- a/include/linux/compiler.h
-+++ b/include/linux/compiler.h
-@@ -5,31 +5,51 @@
-
- #ifdef __CHECKER__
- # define __user __attribute__((noderef, address_space(1)))
-+# define __force_user __force __user
- # define __kernel __attribute__((address_space(0)))
-+# define __force_kernel __force __kernel
- # define __safe __attribute__((safe))
- # define __force __attribute__((force))
- # define __nocast __attribute__((nocast))
- # define __iomem __attribute__((noderef, address_space(2)))
-+# define __force_iomem __force __iomem
- # define __acquires(x) __attribute__((context(x,0,1)))
- # define __releases(x) __attribute__((context(x,1,0)))
- # define __acquire(x) __context__(x,1)
- # define __release(x) __context__(x,-1)
- # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
- # define __percpu __attribute__((noderef, address_space(3)))
-+# define __force_percpu __force __percpu
- #ifdef CONFIG_SPARSE_RCU_POINTER
- # define __rcu __attribute__((noderef, address_space(4)))
-+# define __force_rcu __force __rcu
- #else
- # define __rcu
-+# define __force_rcu
- #endif
- extern void __chk_user_ptr(const volatile void __user *);
- extern void __chk_io_ptr(const volatile void __iomem *);
- #else
--# define __user
--# define __kernel
-+# ifdef CHECKER_PLUGIN
-+//# define __user
-+//# define __force_user
-+//# define __kernel
-+//# define __force_kernel
-+# else
-+# ifdef STRUCTLEAK_PLUGIN
-+# define __user __attribute__((user))
-+# else
-+# define __user
-+# endif
-+# define __force_user
-+# define __kernel
-+# define __force_kernel
-+# endif
- # define __safe
- # define __force
- # define __nocast
- # define __iomem
-+# define __force_iomem
- # define __chk_user_ptr(x) (void)0
- # define __chk_io_ptr(x) (void)0
- # define __builtin_warning(x, y...) (1)
-@@ -39,7 +59,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
- # define __release(x) (void)0
- # define __cond_lock(x,c) (c)
- # define __percpu
-+# define __force_percpu
- # define __rcu
-+# define __force_rcu
- #endif
-
- #ifdef __KERNEL__
-@@ -268,6 +290,30 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
- # define __attribute_const__ /* unimplemented */
- #endif
-
-+#ifndef __randomize_layout
-+# define __randomize_layout
-+#endif
-+
-+#ifndef __no_randomize_layout
-+# define __no_randomize_layout
-+#endif
-+
-+#ifndef __no_const
-+# define __no_const
-+#endif
-+
-+#ifndef __do_const
-+# define __do_const
-+#endif
-+
-+#ifndef __size_overflow
-+# define __size_overflow(...)
-+#endif
-+
-+#ifndef __latent_entropy
-+# define __latent_entropy
-+#endif
-+
- /*
- * Tell gcc if a function is cold. The compiler will assume any path
- * directly leading to the call is unlikely.
-@@ -277,6 +323,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
- #define __cold
- #endif
-
-+#ifndef __alloc_size
-+#define __alloc_size(...)
-+#endif
-+
-+#ifndef __bos
-+#define __bos(ptr, arg)
-+#endif
-+
-+#ifndef __bos0
-+#define __bos0(ptr)
-+#endif
-+
-+#ifndef __bos1
-+#define __bos1(ptr)
-+#endif
-+
- /* Simple shorthand for a section definition */
- #ifndef __section
- # define __section(S) __attribute__ ((__section__(#S)))
-@@ -287,6 +349,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
- # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
- #endif
-
-+#define __type_is_unsigned(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
-+
- /* Compile time object size, -1 for unknown */
- #ifndef __compiletime_object_size
- # define __compiletime_object_size(obj) -1
-@@ -296,10 +360,24 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
- #endif
- #ifndef __compiletime_error
- # define __compiletime_error(message)
-+# define __compiletime_error_fallback(condition) \
-+ do { ((void)sizeof(char[1 - 2*!!(condition)])); } while (0)
-+#else
-+# define __compiletime_error_fallback(condition) do { } while (0)
- #endif
-+
- #ifndef __linktime_error
- # define __linktime_error(message)
- #endif
-+
-+#ifndef __size_overflow
-+# define __size_overflow(...)
-+#endif
-+
-+#ifndef __intentional_overflow
-+# define __intentional_overflow(...)
-+#endif
-+
- /*
- * Prevent the compiler from merging or refetching accesses. The compiler
- * is also forbidden from reordering successive instances of ACCESS_ONCE(),
-@@ -312,6 +390,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
- * use is to mediate communication between process-level code and irq/NMI
- * handlers, all running on the same CPU.
- */
--#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
-+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
-+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
-
- #endif /* __LINUX_COMPILER_H */
-diff --git a/include/linux/completion.h b/include/linux/completion.h
-index 51494e6..340575ab 100644
---- a/include/linux/completion.h
-+++ b/include/linux/completion.h
-@@ -77,14 +77,14 @@ static inline void init_completion(struct completion *x)
- }
-
- extern void wait_for_completion(struct completion *);
--extern int wait_for_completion_interruptible(struct completion *x);
--extern int wait_for_completion_killable(struct completion *x);
-+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
-+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
- extern unsigned long wait_for_completion_timeout(struct completion *x,
-- unsigned long timeout);
-+ unsigned long timeout) __intentional_overflow(-1);
- extern long wait_for_completion_interruptible_timeout(
-- struct completion *x, unsigned long timeout);
-+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
- extern long wait_for_completion_killable_timeout(
-- struct completion *x, unsigned long timeout);
-+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
- extern bool try_wait_for_completion(struct completion *x);
- extern bool completion_done(struct completion *x);
-
-diff --git a/include/linux/configfs.h b/include/linux/configfs.h
-index 3081c58..80789a0 100644
---- a/include/linux/configfs.h
-+++ b/include/linux/configfs.h
-@@ -64,7 +64,8 @@ struct config_item {
- struct dentry *ci_dentry;
- };
-
--extern int config_item_set_name(struct config_item *, const char *, ...);
-+extern __printf(2, 3)
-+int config_item_set_name(struct config_item *, const char *, ...);
-
- static inline char *config_item_name(struct config_item * item)
- {
-@@ -125,7 +126,7 @@ struct configfs_attribute {
- const char *ca_name;
- struct module *ca_owner;
- mode_t ca_mode;
--};
-+} __do_const;
-
- /*
- * Users often need to create attribute structures for their configurable
-diff --git a/include/linux/cpu.h b/include/linux/cpu.h
-index 9c3e071..8a8ebea 100644
---- a/include/linux/cpu.h
-+++ b/include/linux/cpu.h
-@@ -108,7 +108,7 @@ enum {
- /* Need to know about CPUs going up/down? */
- #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
- #define cpu_notifier(fn, pri) { \
-- static struct notifier_block fn##_nb __cpuinitdata = \
-+ static struct notifier_block fn##_nb = \
- { .notifier_call = fn, .priority = pri }; \
- register_cpu_notifier(&fn##_nb); \
- }
-diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
-index 6216115..68bf1d0 100644
---- a/include/linux/cpufreq.h
-+++ b/include/linux/cpufreq.h
-@@ -236,7 +236,7 @@ struct cpufreq_driver {
- int (*suspend) (struct cpufreq_policy *policy);
- int (*resume) (struct cpufreq_policy *policy);
- struct freq_attr **attr;
--};
-+} __do_const;
-
- /* flags */
-
-@@ -295,6 +295,7 @@ struct global_attr {
- ssize_t (*store)(struct kobject *a, struct attribute *b,
- const char *c, size_t count);
- };
-+typedef struct global_attr __no_const global_attr_no_const;
-
- #define define_one_global_ro(_name) \
- static struct global_attr _name = \
-diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
-index 7408af8..8d6f9dd 100644
---- a/include/linux/cpuidle.h
-+++ b/include/linux/cpuidle.h
-@@ -49,7 +49,8 @@ struct cpuidle_state {
- int (*enter) (struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index);
--};
-+} __do_const;
-+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
-
- /* Idle State Flags */
- #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
-@@ -181,7 +182,7 @@ struct cpuidle_governor {
- void (*reflect) (struct cpuidle_device *dev, int index);
-
- struct module *owner;
--};
-+} __do_const;
-
- #ifdef CONFIG_CPU_IDLE
-
-diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
-index 4f7a6323..b9e6f95 100644
---- a/include/linux/cpumask.h
-+++ b/include/linux/cpumask.h
-@@ -117,17 +117,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
- }
-
- /* Valid inputs for n are -1 and 0. */
--static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
-+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
- {
- return n+1;
- }
-
--static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
-+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
- {
- return n+1;
- }
-
--static inline unsigned int cpumask_next_and(int n,
-+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
- const struct cpumask *srcp,
- const struct cpumask *andp)
- {
-@@ -166,7 +166,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
- *
- * Returns >= nr_cpu_ids if no further cpus set.
- */
--static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
-+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
- {
- /* -1 is a legal arg here. */
- if (n != -1)
-@@ -181,7 +181,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
- *
- * Returns >= nr_cpu_ids if no further cpus unset.
- */
--static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
-+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
- {
- /* -1 is a legal arg here. */
- if (n != -1)
-@@ -189,7 +189,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
- return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
- }
-
--int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
-+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
- int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
-
- /**
-diff --git a/include/linux/cred.h b/include/linux/cred.h
-index 4030896..4d2c309 100644
---- a/include/linux/cred.h
-+++ b/include/linux/cred.h
-@@ -34,7 +34,7 @@ struct group_info {
- int nblocks;
- gid_t small_block[NGROUPS_SMALL];
- gid_t *blocks[0];
--};
-+} __randomize_layout;
-
- /**
- * get_group_info - Get a reference to a group info structure
-@@ -149,7 +149,7 @@ struct cred {
- struct user_namespace *user_ns; /* cached user->user_ns */
- struct group_info *group_info; /* supplementary groups for euid/fsgid */
- struct rcu_head rcu; /* RCU deletion hook */
--};
-+} __randomize_layout;
-
- extern void __put_cred(struct cred *);
- extern void exit_creds(struct task_struct *);
-@@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
- static inline void validate_process_creds(void)
- {
- }
-+static inline void validate_task_creds(struct task_struct *task)
-+{
-+}
- #endif
-
- /**
-@@ -339,6 +342,7 @@ static inline void put_cred(const struct cred *_cred)
-
- #define task_uid(task) (task_cred_xxx((task), uid))
- #define task_euid(task) (task_cred_xxx((task), euid))
-+#define task_securebits(task) (task_cred_xxx((task), securebits))
-
- #define current_cred_xxx(xxx) \
- ({ \
-diff --git a/include/linux/crypto.h b/include/linux/crypto.h
-index ca01ea8..daaa939 100644
---- a/include/linux/crypto.h
-+++ b/include/linux/crypto.h
-@@ -378,7 +378,7 @@ struct cipher_tfm {
- const u8 *key, unsigned int keylen);
- void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
- void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
--};
-+} __no_const;
-
- struct hash_tfm {
- int (*init)(struct hash_desc *desc);
-@@ -399,13 +399,13 @@ struct compress_tfm {
- int (*cot_decompress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
--};
-+} __no_const;
-
- struct rng_tfm {
- int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
- unsigned int dlen);
- int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
--};
-+} __no_const;
-
- #define crt_ablkcipher crt_u.ablkcipher
- #define crt_aead crt_u.aead
-diff --git a/include/linux/ctype.h b/include/linux/ctype.h
-index 8acfe31..6ffccd63 100644
---- a/include/linux/ctype.h
-+++ b/include/linux/ctype.h
-@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
- * Fast implementation of tolower() for internal usage. Do not use in your
- * code.
- */
--static inline char _tolower(const char c)
-+static inline unsigned char _tolower(const unsigned char c)
- {
- return c | 0x20;
- }
-diff --git a/include/linux/dcache.h b/include/linux/dcache.h
-index 99374de..01feff6 100644
---- a/include/linux/dcache.h
-+++ b/include/linux/dcache.h
-@@ -132,6 +132,9 @@ struct dentry {
- unsigned long d_time; /* used by d_revalidate */
- void *d_fsdata; /* fs-specific data */
-
-+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
-+ atomic_t chroot_refcnt; /* tracks use of directory in chroot */
-+#endif
- struct list_head d_lru; /* LRU list */
- struct list_head d_child; /* child of parent list */
- struct list_head d_subdirs; /* our children */
-@@ -142,7 +145,7 @@ struct dentry {
- struct list_head d_alias; /* inode alias list */
- struct rcu_head d_rcu;
- } d_u;
--};
-+} __randomize_layout;
-
- /*
- * dentry->d_lock spinlock nesting subclasses:
-@@ -340,7 +343,8 @@ extern int d_validate(struct dentry *, struct dentry *);
- /*
- * helper function for dentry_operations.d_dname() members
- */
--extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
-+extern __printf(4, 5)
-+char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
-
- extern char *__d_path(const struct path *, const struct path *, char *, int);
- extern char *d_absolute_path(const struct path *, char *, int);
-diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
-index 7925bf0..d5143d2 100644
---- a/include/linux/decompress/mm.h
-+++ b/include/linux/decompress/mm.h
-@@ -77,7 +77,7 @@ static void free(void *where)
- * warnings when not needed (indeed large_malloc / large_free are not
- * needed by inflate */
-
--#define malloc(a) kmalloc(a, GFP_KERNEL)
-+#define malloc(a) kmalloc((a), GFP_KERNEL)
- #define free(a) kfree(a)
-
- #define large_malloc(a) vmalloc(a)
-diff --git a/include/linux/device.h b/include/linux/device.h
-index a31c5d0..e9e8aac 100644
---- a/include/linux/device.h
-+++ b/include/linux/device.h
-@@ -427,7 +427,7 @@ struct device_type {
- void (*release)(struct device *dev);
-
- const struct dev_pm_ops *pm;
--};
-+} __do_const;
-
- /* interface for exporting device attributes */
- struct device_attribute {
-@@ -437,6 +437,7 @@ struct device_attribute {
- ssize_t (*store)(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count);
- };
-+typedef struct device_attribute __no_const device_attribute_no_const;
-
- #define DEVICE_ATTR(_name, _mode, _show, _store) \
- struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
-@@ -757,12 +758,10 @@ extern int __must_check device_reprobe(struct device *dev);
- /*
- * Easy functions for dynamically creating devices on the fly
- */
--extern struct device *device_create_vargs(struct class *cls,
-- struct device *parent,
-- dev_t devt,
-- void *drvdata,
-- const char *fmt,
-- va_list vargs);
-+extern __printf(5, 0)
-+struct device *device_create_vargs(struct class *cls, struct device *parent,
-+ dev_t devt, void *drvdata,
-+ const char *fmt, va_list vargs);
- extern __printf(5, 6)
- struct device *device_create(struct class *cls, struct device *parent,
- dev_t devt, void *drvdata,
-diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
-index e13117c..e9fc938 100644
---- a/include/linux/dma-mapping.h
-+++ b/include/linux/dma-mapping.h
-@@ -46,7 +46,7 @@ struct dma_map_ops {
- u64 (*get_required_mask)(struct device *dev);
- #endif
- int is_phys;
--};
-+} __do_const;
-
- #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
-
-diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
-index 75f53f8..5c7972d 100644
---- a/include/linux/dmaengine.h
-+++ b/include/linux/dmaengine.h
-@@ -881,9 +881,9 @@ struct dma_pinned_list {
- struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
- void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
-
--dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
-+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
- struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
--dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
-+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
- struct dma_pinned_list *pinned_list, struct page *page,
- unsigned int offset, size_t len);
-
-diff --git a/include/linux/efi.h b/include/linux/efi.h
-index 88c953d..48685de 100644
---- a/include/linux/efi.h
-+++ b/include/linux/efi.h
-@@ -486,6 +486,7 @@ struct efivar_operations {
- efi_set_variable_t *set_variable;
- efi_query_variable_store_t *query_variable_store;
- };
-+typedef struct efivar_operations __no_const efivar_operations_no_const;
-
- struct efivars {
- /*
-diff --git a/include/linux/elf.h b/include/linux/elf.h
-index 31f0508..5421c01 100644
---- a/include/linux/elf.h
-+++ b/include/linux/elf.h
-@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
- #define PT_GNU_EH_FRAME 0x6474e550
-
- #define PT_GNU_STACK (PT_LOOS + 0x474e551)
-+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
-+
-+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
-+
-+/* Constants for the e_flags field */
-+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
-+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
-+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
-+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
-+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
-+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
-
- /*
- * Extended Numbering
-@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
- #define DT_DEBUG 21
- #define DT_TEXTREL 22
- #define DT_JMPREL 23
-+#define DT_FLAGS 30
-+ #define DF_TEXTREL 0x00000004
- #define DT_ENCODING 32
- #define OLD_DT_LOOS 0x60000000
- #define DT_LOOS 0x6000000d
-@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
- #define PF_W 0x2
- #define PF_X 0x1
-
-+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
-+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
-+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
-+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
-+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
-+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
-+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
-+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
-+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
-+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
-+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
-+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
-+
- typedef struct elf32_phdr{
- Elf32_Word p_type;
- Elf32_Off p_offset;
-@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
- #define EI_OSABI 7
- #define EI_PAD 8
-
-+#define EI_PAX 14
-+
- #define ELFMAG0 0x7f /* EI_MAG */
- #define ELFMAG1 'E'
- #define ELFMAG2 'L'
-@@ -423,6 +451,7 @@ extern Elf32_Dyn _DYNAMIC [];
- #define elf_note elf32_note
- #define elf_addr_t Elf32_Off
- #define Elf_Half Elf32_Half
-+#define elf_dyn Elf32_Dyn
-
- #else
-
-@@ -433,6 +462,7 @@ extern Elf64_Dyn _DYNAMIC [];
- #define elf_note elf64_note
- #define elf_addr_t Elf64_Off
- #define Elf_Half Elf64_Half
-+#define elf_dyn Elf64_Dyn
-
- #endif
-
-diff --git a/include/linux/err.h b/include/linux/err.h
-index f2edce2..cc2082c 100644
---- a/include/linux/err.h
-+++ b/include/linux/err.h
-@@ -19,12 +19,12 @@
-
- #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
-
--static inline void * __must_check ERR_PTR(long error)
-+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
- {
- return (void *) error;
- }
-
--static inline long __must_check PTR_ERR(const void *ptr)
-+static inline long __must_check __intentional_overflow(-1) PTR_ERR(const void *ptr)
- {
- return (long) ptr;
- }
-diff --git a/include/linux/fb.h b/include/linux/fb.h
-index 73845ce..e5678a7 100644
---- a/include/linux/fb.h
-+++ b/include/linux/fb.h
-@@ -691,7 +691,7 @@ struct fb_ops {
- /* called at KDB enter and leave time to prepare the console */
- int (*fb_debug_enter)(struct fb_info *info);
- int (*fb_debug_leave)(struct fb_info *info);
--};
-+} __do_const;
-
- #ifdef CONFIG_FB_TILEBLITTING
- #define FB_TILE_CURSOR_NONE 0
-diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
-index 82163c4..bd0f588 100644
---- a/include/linux/fdtable.h
-+++ b/include/linux/fdtable.h
-@@ -71,7 +71,7 @@ struct file_operations;
- struct vfsmount;
- struct dentry;
-
--extern int expand_files(struct files_struct *, int nr);
-+extern int expand_files(struct files_struct *, unsigned int nr);
- extern void free_fdtable_rcu(struct rcu_head *rcu);
- extern void __init files_defer_init(void);
-
-@@ -101,7 +101,7 @@ struct files_struct *get_files_struct(struct task_struct *);
- void put_files_struct(struct files_struct *fs);
- void reset_files_struct(struct files_struct *);
- int unshare_files(struct files_struct **);
--struct files_struct *dup_fd(struct files_struct *, int *);
-+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
-
- extern struct kmem_cache *files_cachep;
-
-diff --git a/include/linux/filter.h b/include/linux/filter.h
-index 8eeb205..14c2a1c 100644
---- a/include/linux/filter.h
-+++ b/include/linux/filter.h
-@@ -10,6 +10,7 @@
-
- #ifdef __KERNEL__
- #include <linux/atomic.h>
-+#include <linux/compat.h>
- #endif
-
- /*
-@@ -132,8 +133,19 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
-
- #ifdef __KERNEL__
-
-+#ifdef CONFIG_COMPAT
-+/*
-+ * A struct sock_filter is architecture independent.
-+ */
-+struct compat_sock_fprog {
-+ u16 len;
-+ compat_uptr_t filter; /* struct sock_filter * */
-+};
-+#endif
-+
- struct sk_buff;
- struct sock;
-+struct bpf_jit_work;
-
- struct sk_filter
- {
-@@ -141,9 +153,12 @@ struct sk_filter
- unsigned int len; /* Number of filter blocks */
- unsigned int (*bpf_func)(const struct sk_buff *skb,
- const struct sock_filter *filter);
-+#ifdef CONFIG_BPF_JIT
-+ struct bpf_jit_work *work;
-+#endif
- struct rcu_head rcu;
- struct sock_filter insns[0];
--};
-+} __randomize_layout;
-
- static inline unsigned int sk_filter_len(const struct sk_filter *fp)
- {
-@@ -228,6 +243,7 @@ enum {
- BPF_S_ANC_HATYPE,
- BPF_S_ANC_RXHASH,
- BPF_S_ANC_CPU,
-+ BPF_S_ANC_SECCOMP_LD_W,
- };
-
- #endif /* __KERNEL__ */
-diff --git a/include/linux/fs.h b/include/linux/fs.h
-index dd74385..1388ed4 100644
---- a/include/linux/fs.h
-+++ b/include/linux/fs.h
-@@ -658,7 +658,7 @@ struct address_space {
- spinlock_t private_lock; /* for use by the address_space */
- struct list_head private_list; /* ditto */
- struct address_space *assoc_mapping; /* ditto */
--} __attribute__((aligned(sizeof(long))));
-+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
- /*
- * On most architectures that alignment is already the case; but
- * must be enforced here for CRIS, to let the least significant bit
-@@ -699,7 +699,7 @@ struct block_device {
- int bd_fsfreeze_count;
- /* Mutex for freeze */
- struct mutex bd_fsfreeze_mutex;
--};
-+} __randomize_layout;
-
- /*
- * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
-@@ -843,7 +843,7 @@ struct inode {
- atomic_t i_readcount; /* struct files open RO */
- #endif
- void *i_private; /* fs or device private pointer */
--};
-+} __randomize_layout;
-
- static inline int inode_unhashed(struct inode *inode)
- {
-@@ -1017,7 +1017,7 @@ struct file {
- #ifdef CONFIG_DEBUG_WRITECOUNT
- unsigned long f_mnt_write_state;
- #endif
--};
-+} __randomize_layout;
-
- struct file_handle {
- __u32 handle_bytes;
-@@ -1162,7 +1162,7 @@ struct file_lock {
- int state; /* state of grant or error if -ve */
- } afs;
- } fl_u;
--};
-+} __randomize_layout;
-
- /* The following constant reflects the upper bound of the file/locking space */
- #ifndef OFFSET_MAX
-@@ -1497,7 +1497,7 @@ struct super_block {
- int cleancache_poolid;
-
- struct shrinker s_shrink; /* per-sb shrinker handle */
--};
-+} __randomize_layout;
-
- /* superblock cache pruning functions */
- extern void prune_icache_sb(struct super_block *sb, int nr_to_scan);
-@@ -1624,7 +1624,8 @@ struct file_operations {
- int (*setlease)(struct file *, long, struct file_lock **);
- long (*fallocate)(struct file *file, int mode, loff_t offset,
- loff_t len);
--};
-+} __do_const __randomize_layout;
-+typedef struct file_operations __no_const file_operations_no_const;
-
- struct inode_operations {
- struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
-@@ -1891,6 +1892,8 @@ struct file_system_type {
- struct lock_class_key i_mutex_dir_key;
- };
-
-+#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
-+
- extern struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
- void *data, int (*fill_super)(struct super_block *, void *, int));
- extern struct dentry *mount_bdev(struct file_system_type *fs_type,
-@@ -2722,5 +2725,15 @@ static inline void inode_has_no_xattr(struct inode *inode)
- inode->i_flags |= S_NOSEC;
- }
-
-+static inline bool is_sidechannel_device(const struct inode *inode)
-+{
-+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
-+ umode_t mode = inode->i_mode;
-+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
-+#else
-+ return false;
-+#endif
-+}
-+
- #endif /* __KERNEL__ */
- #endif /* _LINUX_FS_H */
-diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
-index 003dc0f..82513b5 100644
---- a/include/linux/fs_struct.h
-+++ b/include/linux/fs_struct.h
-@@ -6,13 +6,13 @@
- #include <linux/seqlock.h>
-
- struct fs_struct {
-- int users;
-+ atomic_t users;
- spinlock_t lock;
- seqcount_t seq;
- int umask;
- int in_exec;
- struct path root, pwd;
--};
-+} __randomize_layout;
-
- extern struct kmem_cache *fs_cachep;
-
-diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
-index ce31408..b1ad003 100644
---- a/include/linux/fscache-cache.h
-+++ b/include/linux/fscache-cache.h
-@@ -102,7 +102,7 @@ struct fscache_operation {
- fscache_operation_release_t release;
- };
-
--extern atomic_t fscache_op_debug_id;
-+extern atomic_unchecked_t fscache_op_debug_id;
- extern void fscache_op_work_func(struct work_struct *work);
-
- extern void fscache_enqueue_operation(struct fscache_operation *);
-@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
- {
- INIT_WORK(&op->work, fscache_op_work_func);
- atomic_set(&op->usage, 1);
-- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
-+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
- op->processor = processor;
- op->release = release;
- INIT_LIST_HEAD(&op->pend_link);
-diff --git a/include/linux/fscache.h b/include/linux/fscache.h
-index 9ec20de..8007b8a 100644
---- a/include/linux/fscache.h
-+++ b/include/linux/fscache.h
-@@ -152,7 +152,7 @@ struct fscache_cookie_def {
- * - this is mandatory for any object that may have data
- */
- void (*now_uncached)(void *cookie_netfs_data);
--};
-+} __do_const;
-
- /*
- * fscache cached network filesystem type
-diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
-index dad4f61..ce486de 100644
---- a/include/linux/fsnotify.h
-+++ b/include/linux/fsnotify.h
-@@ -196,6 +196,9 @@ static inline void fsnotify_access(struct file *file)
- struct inode *inode = path->dentry->d_inode;
- __u32 mask = FS_ACCESS;
-
-+ if (is_sidechannel_device(inode))
-+ return;
-+
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
-@@ -214,6 +217,9 @@ static inline void fsnotify_modify(struct file *file)
- struct inode *inode = path->dentry->d_inode;
- __u32 mask = FS_MODIFY;
-
-+ if (is_sidechannel_device(inode))
-+ return;
-+
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
-@@ -316,7 +322,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
- */
- static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
- {
-- return kstrdup(name, GFP_KERNEL);
-+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
- }
-
- /*
-diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
-index 82924bf..1aa58e7 100644
---- a/include/linux/ftrace_event.h
-+++ b/include/linux/ftrace_event.h
-@@ -256,7 +256,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
- extern int trace_add_event_call(struct ftrace_event_call *call);
- extern void trace_remove_event_call(struct ftrace_event_call *call);
-
--#define is_signed_type(type) (((type)(-1)) < 0)
-+#define is_signed_type(type) (((type)(-1)) < (type)1)
-
- int trace_set_clr_event(const char *system, const char *event, int set);
-
-diff --git a/include/linux/genhd.h b/include/linux/genhd.h
-index 4eec461..4ff5db5 100644
---- a/include/linux/genhd.h
-+++ b/include/linux/genhd.h
-@@ -185,7 +185,7 @@ struct gendisk {
- struct kobject *slave_dir;
-
- struct timer_rand_state *random;
-- atomic_t sync_io; /* RAID */
-+ atomic_unchecked_t sync_io; /* RAID */
- struct disk_events *ev;
- #ifdef CONFIG_BLK_DEV_INTEGRITY
- struct blk_integrity *integrity;
-@@ -420,7 +420,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
- extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
-
- /* drivers/char/random.c */
--extern void add_disk_randomness(struct gendisk *disk);
-+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
- extern void rand_initialize_disk(struct gendisk *disk);
-
- static inline sector_t get_start_sect(struct block_device *bdev)
-diff --git a/include/linux/gfp.h b/include/linux/gfp.h
-index 3a76faf..c0592c7 100644
---- a/include/linux/gfp.h
-+++ b/include/linux/gfp.h
-@@ -37,6 +37,12 @@ struct vm_area_struct;
- #define ___GFP_NO_KSWAPD 0x400000u
- #define ___GFP_OTHER_NODE 0x800000u
-
-+#ifdef CONFIG_PAX_USERCOPY_SLABS
-+#define ___GFP_USERCOPY 0x1000000u
-+#else
-+#define ___GFP_USERCOPY 0
-+#endif
-+
- /*
- * GFP bitmasks..
- *
-@@ -85,6 +91,7 @@ struct vm_area_struct;
-
- #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
- #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
-+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
-
- /*
- * This may seem redundant, but it's a way of annotating false positives vs.
-@@ -92,7 +99,7 @@ struct vm_area_struct;
- */
- #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
-
--#define __GFP_BITS_SHIFT 24 /* Room for N __GFP_FOO bits */
-+#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
- #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
-
- /* This equals 0, but use constants in case they ever change */
-@@ -146,6 +153,8 @@ struct vm_area_struct;
- /* 4GB DMA on some platforms */
- #define GFP_DMA32 __GFP_DMA32
-
-+#define GFP_USERCOPY __GFP_USERCOPY
-+
- /* Convert GFP flags to their corresponding migrate type */
- static inline int allocflags_to_migratetype(gfp_t gfp_flags)
- {
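
The gfp.h hunk above claims a new flag bit, ___GFP_USERCOPY (bit 24), and bumps __GFP_BITS_SHIFT from 24 to 25 so that __GFP_BITS_MASK still covers the highest flag. A tiny standalone sketch of that shift/mask relationship (the flag names and values below are illustrative, not the kernel's full set):

#include <stdio.h>

#define FLAG_NO_KSWAPD  0x400000u
#define FLAG_OTHER_NODE 0x800000u
#define FLAG_USERCOPY   0x1000000u	/* newly added, highest bit (bit 24) */

/* The mask (1 << SHIFT) - 1 must include the highest flag bit; adding bit 24
 * is exactly why the patch raises the shift from 24 to 25. */
#define BITS_SHIFT 25
#define BITS_MASK  ((1u << BITS_SHIFT) - 1)

int main(void)
{
	unsigned int flags = FLAG_OTHER_NODE | FLAG_USERCOPY;

	printf("mask = 0x%x\n", BITS_MASK);
	printf("usercopy survives masking? %s\n",
	       (flags & BITS_MASK & FLAG_USERCOPY) ? "yes" : "no");
	return 0;
}
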
-diff --git a/include/linux/gracl.h b/include/linux/gracl.h
-new file mode 100644
-index 0000000..91858e4
---- /dev/null
-+++ b/include/linux/gracl.h
-@@ -0,0 +1,342 @@
-+#ifndef GR_ACL_H
-+#define GR_ACL_H
-+
-+#include <linux/grdefs.h>
-+#include <linux/resource.h>
-+#include <linux/capability.h>
-+#include <linux/dcache.h>
-+#include <asm/resource.h>
-+
-+/* Major status information */
-+
-+#define GR_VERSION "grsecurity 3.1"
-+#define GRSECURITY_VERSION 0x3100
-+
-+enum {
-+ GR_SHUTDOWN = 0,
-+ GR_ENABLE = 1,
-+ GR_SPROLE = 2,
-+ GR_OLDRELOAD = 3,
-+ GR_SEGVMOD = 4,
-+ GR_STATUS = 5,
-+ GR_UNSPROLE = 6,
-+ GR_PASSSET = 7,
-+ GR_SPROLEPAM = 8,
-+ GR_RELOAD = 9,
-+};
-+
-+/* Password setup definitions
-+ * kernel/grhash.c */
-+enum {
-+ GR_PW_LEN = 128,
-+ GR_SALT_LEN = 16,
-+ GR_SHA_LEN = 32,
-+};
-+
-+enum {
-+ GR_SPROLE_LEN = 64,
-+};
-+
-+enum {
-+ GR_NO_GLOB = 0,
-+ GR_REG_GLOB,
-+ GR_CREATE_GLOB
-+};
-+
-+#define GR_NLIMITS 32
-+
-+/* Begin Data Structures */
-+
-+struct sprole_pw {
-+ unsigned char *rolename;
-+ unsigned char salt[GR_SALT_LEN];
-+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
-+};
-+
-+struct name_entry {
-+ __u32 key;
-+ u64 inode;
-+ dev_t device;
-+ char *name;
-+ __u16 len;
-+ __u8 deleted;
-+ struct name_entry *prev;
-+ struct name_entry *next;
-+};
-+
-+struct inodev_entry {
-+ struct name_entry *nentry;
-+ struct inodev_entry *prev;
-+ struct inodev_entry *next;
-+};
-+
-+struct acl_role_db {
-+ struct acl_role_label **r_hash;
-+ __u32 r_size;
-+};
-+
-+struct inodev_db {
-+ struct inodev_entry **i_hash;
-+ __u32 i_size;
-+};
-+
-+struct name_db {
-+ struct name_entry **n_hash;
-+ __u32 n_size;
-+};
-+
-+struct crash_uid {
-+ uid_t uid;
-+ unsigned long expires;
-+};
-+
-+struct gr_hash_struct {
-+ void **table;
-+ void **nametable;
-+ void *first;
-+ __u32 table_size;
-+ __u32 used_size;
-+ int type;
-+};
-+
-+/* Userspace Grsecurity ACL data structures */
-+
-+struct acl_subject_label {
-+ char *filename;
-+ u64 inode;
-+ dev_t device;
-+ __u32 mode;
-+ kernel_cap_t cap_mask;
-+ kernel_cap_t cap_lower;
-+ kernel_cap_t cap_invert_audit;
-+
-+ struct rlimit res[GR_NLIMITS];
-+ __u32 resmask;
-+
-+ __u8 user_trans_type;
-+ __u8 group_trans_type;
-+ uid_t *user_transitions;
-+ gid_t *group_transitions;
-+ __u16 user_trans_num;
-+ __u16 group_trans_num;
-+
-+ __u32 sock_families[2];
-+ __u32 ip_proto[8];
-+ __u32 ip_type;
-+ struct acl_ip_label **ips;
-+ __u32 ip_num;
-+ __u32 inaddr_any_override;
-+
-+ __u32 crashes;
-+ unsigned long expires;
-+
-+ struct acl_subject_label *parent_subject;
-+ struct gr_hash_struct *hash;
-+ struct acl_subject_label *prev;
-+ struct acl_subject_label *next;
-+
-+ struct acl_object_label **obj_hash;
-+ __u32 obj_hash_size;
-+ __u16 pax_flags;
-+};
-+
-+struct role_allowed_ip {
-+ __u32 addr;
-+ __u32 netmask;
-+
-+ struct role_allowed_ip *prev;
-+ struct role_allowed_ip *next;
-+};
-+
-+struct role_transition {
-+ char *rolename;
-+
-+ struct role_transition *prev;
-+ struct role_transition *next;
-+};
-+
-+struct acl_role_label {
-+ char *rolename;
-+ uid_t uidgid;
-+ __u16 roletype;
-+
-+ __u16 auth_attempts;
-+ unsigned long expires;
-+
-+ struct acl_subject_label *root_label;
-+ struct gr_hash_struct *hash;
-+
-+ struct acl_role_label *prev;
-+ struct acl_role_label *next;
-+
-+ struct role_transition *transitions;
-+ struct role_allowed_ip *allowed_ips;
-+ uid_t *domain_children;
-+ __u16 domain_child_num;
-+
-+ umode_t umask;
-+
-+ struct acl_subject_label **subj_hash;
-+ __u32 subj_hash_size;
-+};
-+
-+struct user_acl_role_db {
-+ struct acl_role_label **r_table;
-+ __u32 num_pointers; /* Number of allocations to track */
-+ __u32 num_roles; /* Number of roles */
-+ __u32 num_domain_children; /* Number of domain children */
-+ __u32 num_subjects; /* Number of subjects */
-+ __u32 num_objects; /* Number of objects */
-+};
-+
-+struct acl_object_label {
-+ char *filename;
-+ u64 inode;
-+ dev_t device;
-+ __u32 mode;
-+
-+ struct acl_subject_label *nested;
-+ struct acl_object_label *globbed;
-+
-+ /* next two structures not used */
-+
-+ struct acl_object_label *prev;
-+ struct acl_object_label *next;
-+};
-+
-+struct acl_ip_label {
-+ char *iface;
-+ __u32 addr;
-+ __u32 netmask;
-+ __u16 low, high;
-+ __u8 mode;
-+ __u32 type;
-+ __u32 proto[8];
-+
-+ /* next two structures not used */
-+
-+ struct acl_ip_label *prev;
-+ struct acl_ip_label *next;
-+};
-+
-+struct gr_arg {
-+ struct user_acl_role_db role_db;
-+ unsigned char pw[GR_PW_LEN];
-+ unsigned char salt[GR_SALT_LEN];
-+ unsigned char sum[GR_SHA_LEN];
-+ unsigned char sp_role[GR_SPROLE_LEN];
-+ struct sprole_pw *sprole_pws;
-+ dev_t segv_device;
-+ u64 segv_inode;
-+ uid_t segv_uid;
-+ __u16 num_sprole_pws;
-+ __u16 mode;
-+};
-+
-+struct gr_arg_wrapper {
-+ struct gr_arg *arg;
-+ __u32 version;
-+ __u32 size;
-+};
-+
-+struct subject_map {
-+ struct acl_subject_label *user;
-+ struct acl_subject_label *kernel;
-+ struct subject_map *prev;
-+ struct subject_map *next;
-+};
-+
-+struct acl_subj_map_db {
-+ struct subject_map **s_hash;
-+ __u32 s_size;
-+};
-+
-+struct gr_policy_state {
-+ struct sprole_pw **acl_special_roles;
-+ __u16 num_sprole_pws;
-+ struct acl_role_label *kernel_role;
-+ struct acl_role_label *role_list;
-+ struct acl_role_label *default_role;
-+ struct acl_role_db acl_role_set;
-+ struct acl_subj_map_db subj_map_set;
-+ struct name_db name_set;
-+ struct inodev_db inodev_set;
-+};
-+
-+struct gr_alloc_state {
-+ unsigned long alloc_stack_next;
-+ unsigned long alloc_stack_size;
-+ void **alloc_stack;
-+};
-+
-+struct gr_reload_state {
-+ struct gr_policy_state oldpolicy;
-+ struct gr_alloc_state oldalloc;
-+ struct gr_policy_state newpolicy;
-+ struct gr_alloc_state newalloc;
-+ struct gr_policy_state *oldpolicy_ptr;
-+ struct gr_alloc_state *oldalloc_ptr;
-+ unsigned char oldmode;
-+};
-+
-+/* End Data Structures Section */
-+
-+/* Hash functions generated through empirical testing by Brad Spengler.
-+   They make good use of the low bits of the inode: generally 0-1 loop
-+   iterations for a successful match, 0-3 for an unsuccessful one.
-+   Shift/add algorithm with modulus of the table size and an XOR. */
-+
-+static __inline__ unsigned int
-+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
-+{
-+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
-+}
-+
-+static __inline__ unsigned int
-+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
-+{
-+ return ((const unsigned long)userp % sz);
-+}
-+
-+static __inline__ unsigned int
-+gr_fhash(const u64 ino, const dev_t dev, const unsigned int sz)
-+{
-+ unsigned int rem;
-+ div_u64_rem((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9)), sz, &rem);
-+ return rem;
-+}
-+
-+static __inline__ unsigned int
-+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
-+{
-+ return full_name_hash((const unsigned char *)name, len) % sz;
-+}
-+
-+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
-+ subj = NULL; \
-+ iter = 0; \
-+ while (iter < role->subj_hash_size) { \
-+ if (subj == NULL) \
-+ subj = role->subj_hash[iter]; \
-+ if (subj == NULL) { \
-+ iter++; \
-+ continue; \
-+ }
-+
-+#define FOR_EACH_SUBJECT_END(subj,iter) \
-+ subj = subj->next; \
-+ if (subj == NULL) \
-+ iter++; \
-+ }
-+
-+
-+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
-+ subj = role->hash->first; \
-+ while (subj != NULL) {
-+
-+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
-+ subj = subj->next; \
-+ }
-+
-+#endif
-+
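
The comment in gracl.h above describes the shift/add/XOR hashing scheme used to place inode/device pairs into the ACL lookup tables. As a standalone illustration, here is a userspace re-creation of the gr_fhash() mixing step (div_u64_rem() is replaced by a plain modulo and the harness is purely illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Mix inode and device numbers, then reduce modulo the table size. */
static unsigned int fhash(uint64_t ino, uint32_t dev, unsigned int sz)
{
	uint64_t mixed = (ino + dev) ^
			 ((ino << 13) + (ino << 23) + ((uint64_t)dev << 9));

	return (unsigned int)(mixed % sz);
}

int main(void)
{
	/* Consecutive inodes on the same device still spread across buckets. */
	for (uint64_t ino = 0x1000; ino < 0x1008; ino++)
		printf("ino %llu -> bucket %u\n",
		       (unsigned long long)ino, fhash(ino, 8, 256));
	return 0;
}
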
-diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
-new file mode 100644
-index 0000000..af64092
---- /dev/null
-+++ b/include/linux/gracl_compat.h
-@@ -0,0 +1,156 @@
-+#ifndef GR_ACL_COMPAT_H
-+#define GR_ACL_COMPAT_H
-+
-+#include <linux/resource.h>
-+#include <asm/resource.h>
-+
-+struct sprole_pw_compat {
-+ compat_uptr_t rolename;
-+ unsigned char salt[GR_SALT_LEN];
-+ unsigned char sum[GR_SHA_LEN];
-+};
-+
-+struct gr_hash_struct_compat {
-+ compat_uptr_t table;
-+ compat_uptr_t nametable;
-+ compat_uptr_t first;
-+ __u32 table_size;
-+ __u32 used_size;
-+ int type;
-+};
-+
-+struct acl_subject_label_compat {
-+ compat_uptr_t filename;
-+ compat_u64 inode;
-+ __u32 device;
-+ __u32 mode;
-+ kernel_cap_t cap_mask;
-+ kernel_cap_t cap_lower;
-+ kernel_cap_t cap_invert_audit;
-+
-+ struct compat_rlimit res[GR_NLIMITS];
-+ __u32 resmask;
-+
-+ __u8 user_trans_type;
-+ __u8 group_trans_type;
-+ compat_uptr_t user_transitions;
-+ compat_uptr_t group_transitions;
-+ __u16 user_trans_num;
-+ __u16 group_trans_num;
-+
-+ __u32 sock_families[2];
-+ __u32 ip_proto[8];
-+ __u32 ip_type;
-+ compat_uptr_t ips;
-+ __u32 ip_num;
-+ __u32 inaddr_any_override;
-+
-+ __u32 crashes;
-+ compat_ulong_t expires;
-+
-+ compat_uptr_t parent_subject;
-+ compat_uptr_t hash;
-+ compat_uptr_t prev;
-+ compat_uptr_t next;
-+
-+ compat_uptr_t obj_hash;
-+ __u32 obj_hash_size;
-+ __u16 pax_flags;
-+};
-+
-+struct role_allowed_ip_compat {
-+ __u32 addr;
-+ __u32 netmask;
-+
-+ compat_uptr_t prev;
-+ compat_uptr_t next;
-+};
-+
-+struct role_transition_compat {
-+ compat_uptr_t rolename;
-+
-+ compat_uptr_t prev;
-+ compat_uptr_t next;
-+};
-+
-+struct acl_role_label_compat {
-+ compat_uptr_t rolename;
-+ uid_t uidgid;
-+ __u16 roletype;
-+
-+ __u16 auth_attempts;
-+ compat_ulong_t expires;
-+
-+ compat_uptr_t root_label;
-+ compat_uptr_t hash;
-+
-+ compat_uptr_t prev;
-+ compat_uptr_t next;
-+
-+ compat_uptr_t transitions;
-+ compat_uptr_t allowed_ips;
-+ compat_uptr_t domain_children;
-+ __u16 domain_child_num;
-+
-+ umode_t umask;
-+
-+ compat_uptr_t subj_hash;
-+ __u32 subj_hash_size;
-+};
-+
-+struct user_acl_role_db_compat {
-+ compat_uptr_t r_table;
-+ __u32 num_pointers;
-+ __u32 num_roles;
-+ __u32 num_domain_children;
-+ __u32 num_subjects;
-+ __u32 num_objects;
-+};
-+
-+struct acl_object_label_compat {
-+ compat_uptr_t filename;
-+ compat_u64 inode;
-+ __u32 device;
-+ __u32 mode;
-+
-+ compat_uptr_t nested;
-+ compat_uptr_t globbed;
-+
-+ compat_uptr_t prev;
-+ compat_uptr_t next;
-+};
-+
-+struct acl_ip_label_compat {
-+ compat_uptr_t iface;
-+ __u32 addr;
-+ __u32 netmask;
-+ __u16 low, high;
-+ __u8 mode;
-+ __u32 type;
-+ __u32 proto[8];
-+
-+ compat_uptr_t prev;
-+ compat_uptr_t next;
-+};
-+
-+struct gr_arg_compat {
-+ struct user_acl_role_db_compat role_db;
-+ unsigned char pw[GR_PW_LEN];
-+ unsigned char salt[GR_SALT_LEN];
-+ unsigned char sum[GR_SHA_LEN];
-+ unsigned char sp_role[GR_SPROLE_LEN];
-+ compat_uptr_t sprole_pws;
-+ __u32 segv_device;
-+ compat_u64 segv_inode;
-+ uid_t segv_uid;
-+ __u16 num_sprole_pws;
-+ __u16 mode;
-+};
-+
-+struct gr_arg_wrapper_compat {
-+ compat_uptr_t arg;
-+ __u32 version;
-+ __u32 size;
-+};
-+
-+#endif
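
gracl_compat.h mirrors the native structures but stores pointers as compat_uptr_t (in effect a 32-bit pointer handle) and longs as compat_ulong_t, so a 64-bit kernel can parse a policy uploaded by 32-bit userspace with the layout that userspace actually produced. A standalone sketch of why the two layouts differ in size (the typedef below is a stand-in for the kernel's compat type):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t compat_uptr_t;	/* assumption: 32-bit pointer handle */

struct role_transition {		/* native layout on a 64-bit build */
	char *rolename;
	struct role_transition *prev;
	struct role_transition *next;
};

struct role_transition_compat {	/* layout produced by 32-bit userspace */
	compat_uptr_t rolename;
	compat_uptr_t prev;
	compat_uptr_t next;
};

int main(void)
{
	printf("native: %zu bytes, compat: %zu bytes\n",
	       sizeof(struct role_transition),
	       sizeof(struct role_transition_compat));
	return 0;
}
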
-diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
-new file mode 100644
-index 0000000..323ecf2
---- /dev/null
-+++ b/include/linux/gralloc.h
-@@ -0,0 +1,9 @@
-+#ifndef __GRALLOC_H
-+#define __GRALLOC_H
-+
-+void acl_free_all(void);
-+int acl_alloc_stack_init(unsigned long size);
-+void *acl_alloc(unsigned long len);
-+void *acl_alloc_num(unsigned long num, unsigned long len);
-+
-+#endif
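
gralloc.h only declares part of the policy allocator interface; its implementation lives elsewhere in the patch and is not shown here. As a rough sketch of the pattern the API implies -- a grow-only stack of allocations that acl_free_all() releases in one pass -- here is a hypothetical userspace version; it is an assumption about the shape of the real code, not a copy of it:

#include <stdlib.h>
#include <string.h>

static void **alloc_stack;
static unsigned long alloc_stack_next, alloc_stack_size;

/* Reserve room to remember up to 'size' allocations. */
int acl_alloc_stack_init(unsigned long size)
{
	alloc_stack = calloc(size, sizeof(void *));
	if (!alloc_stack)
		return 0;
	alloc_stack_size = size;
	alloc_stack_next = 0;
	return 1;
}

/* Allocate and record the pointer so it can be freed wholesale later. */
void *acl_alloc(unsigned long len)
{
	void *p;

	if (alloc_stack_next >= alloc_stack_size)
		return NULL;
	p = calloc(1, len);
	if (p)
		alloc_stack[alloc_stack_next++] = p;
	return p;
}

/* Tear down the whole policy in one pass. */
void acl_free_all(void)
{
	while (alloc_stack_next)
		free(alloc_stack[--alloc_stack_next]);
	free(alloc_stack);
	alloc_stack = NULL;
	alloc_stack_size = 0;
}

int main(void)
{
	char *role;

	if (!acl_alloc_stack_init(16))
		return 1;
	role = acl_alloc(64);
	if (role)
		strcpy(role, "admin");
	acl_free_all();
	return 0;
}
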
-diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
-new file mode 100644
-index 0000000..be66033
---- /dev/null
-+++ b/include/linux/grdefs.h
-@@ -0,0 +1,140 @@
-+#ifndef GRDEFS_H
-+#define GRDEFS_H
-+
-+/* Begin grsecurity status declarations */
-+
-+enum {
-+ GR_READY = 0x01,
-+ GR_STATUS_INIT = 0x00 // disabled state
-+};
-+
-+/* Begin ACL declarations */
-+
-+/* Role flags */
-+
-+enum {
-+ GR_ROLE_USER = 0x0001,
-+ GR_ROLE_GROUP = 0x0002,
-+ GR_ROLE_DEFAULT = 0x0004,
-+ GR_ROLE_SPECIAL = 0x0008,
-+ GR_ROLE_AUTH = 0x0010,
-+ GR_ROLE_NOPW = 0x0020,
-+ GR_ROLE_GOD = 0x0040,
-+ GR_ROLE_LEARN = 0x0080,
-+ GR_ROLE_TPE = 0x0100,
-+ GR_ROLE_DOMAIN = 0x0200,
-+ GR_ROLE_PAM = 0x0400,
-+ GR_ROLE_PERSIST = 0x0800
-+};
-+
-+/* ACL Subject and Object mode flags */
-+enum {
-+ GR_DELETED = 0x80000000
-+};
-+
-+/* ACL Object-only mode flags */
-+enum {
-+ GR_READ = 0x00000001,
-+ GR_APPEND = 0x00000002,
-+ GR_WRITE = 0x00000004,
-+ GR_EXEC = 0x00000008,
-+ GR_FIND = 0x00000010,
-+ GR_INHERIT = 0x00000020,
-+ GR_SETID = 0x00000040,
-+ GR_CREATE = 0x00000080,
-+ GR_DELETE = 0x00000100,
-+ GR_LINK = 0x00000200,
-+ GR_AUDIT_READ = 0x00000400,
-+ GR_AUDIT_APPEND = 0x00000800,
-+ GR_AUDIT_WRITE = 0x00001000,
-+ GR_AUDIT_EXEC = 0x00002000,
-+ GR_AUDIT_FIND = 0x00004000,
-+ GR_AUDIT_INHERIT= 0x00008000,
-+ GR_AUDIT_SETID = 0x00010000,
-+ GR_AUDIT_CREATE = 0x00020000,
-+ GR_AUDIT_DELETE = 0x00040000,
-+ GR_AUDIT_LINK = 0x00080000,
-+ GR_PTRACERD = 0x00100000,
-+ GR_NOPTRACE = 0x00200000,
-+ GR_SUPPRESS = 0x00400000,
-+ GR_NOLEARN = 0x00800000,
-+ GR_INIT_TRANSFER= 0x01000000
-+};
-+
-+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
-+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
-+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
-+
-+/* ACL subject-only mode flags */
-+enum {
-+ GR_KILL = 0x00000001,
-+ GR_VIEW = 0x00000002,
-+ GR_PROTECTED = 0x00000004,
-+ GR_LEARN = 0x00000008,
-+ GR_OVERRIDE = 0x00000010,
-+ /* just a placeholder, this mode is only used in userspace */
-+ GR_DUMMY = 0x00000020,
-+ GR_PROTSHM = 0x00000040,
-+ GR_KILLPROC = 0x00000080,
-+ GR_KILLIPPROC = 0x00000100,
-+ /* just a placeholder, this mode is only used in userspace */
-+ GR_NOTROJAN = 0x00000200,
-+ GR_PROTPROCFD = 0x00000400,
-+ GR_PROCACCT = 0x00000800,
-+ GR_RELAXPTRACE = 0x00001000,
-+ //GR_NESTED = 0x00002000,
-+ GR_INHERITLEARN = 0x00004000,
-+ GR_PROCFIND = 0x00008000,
-+ GR_POVERRIDE = 0x00010000,
-+ GR_KERNELAUTH = 0x00020000,
-+ GR_ATSECURE = 0x00040000,
-+ GR_SHMEXEC = 0x00080000
-+};
-+
-+enum {
-+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
-+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
-+ GR_PAX_ENABLE_MPROTECT = 0x0004,
-+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
-+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
-+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
-+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
-+ GR_PAX_DISABLE_MPROTECT = 0x0400,
-+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
-+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
-+};
-+
-+enum {
-+ GR_ID_USER = 0x01,
-+ GR_ID_GROUP = 0x02,
-+};
-+
-+enum {
-+ GR_ID_ALLOW = 0x01,
-+ GR_ID_DENY = 0x02,
-+};
-+
-+#define GR_CRASH_RES 31
-+#define GR_UIDTABLE_MAX 500
-+
-+/* begin resource learning section */
-+enum {
-+ GR_RLIM_CPU_BUMP = 60,
-+ GR_RLIM_FSIZE_BUMP = 50000,
-+ GR_RLIM_DATA_BUMP = 10000,
-+ GR_RLIM_STACK_BUMP = 1000,
-+ GR_RLIM_CORE_BUMP = 10000,
-+ GR_RLIM_RSS_BUMP = 500000,
-+ GR_RLIM_NPROC_BUMP = 1,
-+ GR_RLIM_NOFILE_BUMP = 5,
-+ GR_RLIM_MEMLOCK_BUMP = 50000,
-+ GR_RLIM_AS_BUMP = 500000,
-+ GR_RLIM_LOCKS_BUMP = 2,
-+ GR_RLIM_SIGPENDING_BUMP = 5,
-+ GR_RLIM_MSGQUEUE_BUMP = 10000,
-+ GR_RLIM_NICE_BUMP = 1,
-+ GR_RLIM_RTPRIO_BUMP = 1,
-+ GR_RLIM_RTTIME_BUMP = 1000000
-+};
-+
-+#endif
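
grdefs.h encodes every permission and audit flag as a single bit so ACL modes can be combined and tested with plain bitwise arithmetic; GR_AUDITS, for example, is just the OR of the audit bits. A minimal standalone check using a few of the values defined above (the mode_allows() helper is invented for the example):

#include <stdio.h>

enum {
	GR_READ        = 0x00000001,
	GR_WRITE       = 0x00000004,
	GR_FIND        = 0x00000010,
	GR_AUDIT_READ  = 0x00000400,
	GR_AUDIT_WRITE = 0x00001000,
};

/* Does the granted mask cover every requested bit? */
static int mode_allows(unsigned int granted, unsigned int requested)
{
	return (granted & requested) == requested;
}

int main(void)
{
	unsigned int granted = GR_READ | GR_FIND | GR_AUDIT_READ;

	printf("read allowed:  %d\n", mode_allows(granted, GR_READ));   /* 1 */
	printf("write allowed: %d\n", mode_allows(granted, GR_WRITE));  /* 0 */
	printf("read audited:  %d\n",
	       !!(granted & (GR_AUDIT_READ | GR_AUDIT_WRITE)));         /* 1 */
	return 0;
}
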
-diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
-new file mode 100644
-index 0000000..248956b
---- /dev/null
-+++ b/include/linux/grinternal.h
-@@ -0,0 +1,238 @@
-+#ifndef __GRINTERNAL_H
-+#define __GRINTERNAL_H
-+
-+#ifdef CONFIG_GRKERNSEC
-+
-+#include <linux/fs.h>
-+#include <linux/mnt_namespace.h>
-+#include <linux/nsproxy.h>
-+#include <linux/gracl.h>
-+#include <linux/grdefs.h>
-+#include <linux/grmsg.h>
-+
-+void gr_add_learn_entry(const char *fmt, ...)
-+ __attribute__ ((format (printf, 1, 2)));
-+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
-+ const struct vfsmount *mnt);
-+__u32 gr_check_create(const struct dentry *new_dentry,
-+ const struct dentry *parent,
-+ const struct vfsmount *mnt, const __u32 mode);
-+int gr_check_protected_task(const struct task_struct *task);
-+__u32 to_gr_audit(const __u32 reqmode);
-+int gr_set_acls(const int type);
-+int gr_acl_is_enabled(void);
-+char gr_roletype_to_char(void);
-+
-+void gr_handle_alertkill(struct task_struct *task);
-+char *gr_to_filename(const struct dentry *dentry,
-+ const struct vfsmount *mnt);
-+char *gr_to_filename1(const struct dentry *dentry,
-+ const struct vfsmount *mnt);
-+char *gr_to_filename2(const struct dentry *dentry,
-+ const struct vfsmount *mnt);
-+char *gr_to_filename3(const struct dentry *dentry,
-+ const struct vfsmount *mnt);
-+
-+extern int grsec_enable_ptrace_readexec;
-+extern int grsec_enable_harden_ptrace;
-+extern int grsec_enable_link;
-+extern int grsec_enable_fifo;
-+extern int grsec_enable_execve;
-+extern int grsec_enable_shm;
-+extern int grsec_enable_execlog;
-+extern int grsec_enable_signal;
-+extern int grsec_enable_audit_ptrace;
-+extern int grsec_enable_forkfail;
-+extern int grsec_enable_time;
-+extern int grsec_enable_rofs;
-+extern int grsec_deny_new_usb;
-+extern int grsec_enable_chroot_shmat;
-+extern int grsec_enable_chroot_mount;
-+extern int grsec_enable_chroot_double;
-+extern int grsec_enable_chroot_pivot;
-+extern int grsec_enable_chroot_chdir;
-+extern int grsec_enable_chroot_chmod;
-+extern int grsec_enable_chroot_mknod;
-+extern int grsec_enable_chroot_fchdir;
-+extern int grsec_enable_chroot_nice;
-+extern int grsec_enable_chroot_execlog;
-+extern int grsec_enable_chroot_caps;
-+extern int grsec_enable_chroot_rename;
-+extern int grsec_enable_chroot_sysctl;
-+extern int grsec_enable_chroot_unix;
-+extern int grsec_enable_symlinkown;
-+extern int grsec_symlinkown_gid;
-+extern int grsec_enable_tpe;
-+extern int grsec_tpe_gid;
-+extern int grsec_enable_tpe_all;
-+extern int grsec_enable_tpe_invert;
-+extern int grsec_enable_socket_all;
-+extern int grsec_socket_all_gid;
-+extern int grsec_enable_socket_client;
-+extern int grsec_socket_client_gid;
-+extern int grsec_enable_socket_server;
-+extern int grsec_socket_server_gid;
-+extern int grsec_audit_gid;
-+extern int grsec_enable_group;
-+extern int grsec_enable_log_rwxmaps;
-+extern int grsec_enable_mount;
-+extern int grsec_enable_chdir;
-+extern int grsec_resource_logging;
-+extern int grsec_enable_blackhole;
-+extern int grsec_lastack_retries;
-+extern int grsec_enable_brute;
-+extern int grsec_enable_harden_ipc;
-+extern int grsec_lock;
-+
-+extern spinlock_t grsec_alert_lock;
-+extern unsigned long grsec_alert_wtime;
-+extern unsigned long grsec_alert_fyet;
-+
-+extern spinlock_t grsec_audit_lock;
-+
-+extern rwlock_t grsec_exec_file_lock;
-+
-+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
-+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
-+ (tsk)->exec_file->f_vfsmnt) : "/")
-+
-+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
-+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
-+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
-+
-+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
-+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
-+ (tsk)->exec_file->f_vfsmnt) : "/")
-+
-+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
-+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
-+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
-+
-+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
-+
-+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
-+
-+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
-+ (task)->pid, (cred)->uid, \
-+ (cred)->euid, (cred)->gid, (cred)->egid, \
-+ gr_parent_task_fullpath(task), \
-+ (task)->real_parent->comm, (task)->real_parent->pid, \
-+ (pcred)->uid, (pcred)->euid, \
-+ (pcred)->gid, (pcred)->egid
-+
-+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
-+{
-+ if (file1 && file2) {
-+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
-+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
-+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
-+ return true;
-+ }
-+
-+ return false;
-+}
-+
-+#define GR_CHROOT_CAPS {{ \
-+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
-+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
-+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
-+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
-+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
-+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
-+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
-+
-+#define security_learn(normal_msg,args...) \
-+({ \
-+ read_lock(&grsec_exec_file_lock); \
-+ gr_add_learn_entry(normal_msg "\n", ## args); \
-+ read_unlock(&grsec_exec_file_lock); \
-+})
-+
-+enum {
-+ GR_DO_AUDIT,
-+ GR_DONT_AUDIT,
-+ /* used for non-audit messages that we shouldn't kill the task on */
-+ GR_DONT_AUDIT_GOOD
-+};
-+
-+enum {
-+ GR_TTYSNIFF,
-+ GR_RBAC,
-+ GR_RBAC_STR,
-+ GR_STR_RBAC,
-+ GR_RBAC_MODE2,
-+ GR_RBAC_MODE3,
-+ GR_FILENAME,
-+ GR_SYSCTL_HIDDEN,
-+ GR_NOARGS,
-+ GR_ONE_INT,
-+ GR_ONE_INT_TWO_STR,
-+ GR_ONE_STR,
-+ GR_STR_INT,
-+ GR_TWO_STR_INT,
-+ GR_TWO_INT,
-+ GR_TWO_U64,
-+ GR_THREE_INT,
-+ GR_FIVE_INT_TWO_STR,
-+ GR_TWO_STR,
-+ GR_THREE_STR,
-+ GR_FOUR_STR,
-+ GR_STR_FILENAME,
-+ GR_FILENAME_STR,
-+ GR_FILENAME_TWO_INT,
-+ GR_FILENAME_TWO_INT_STR,
-+ GR_TEXTREL,
-+ GR_PTRACE,
-+ GR_RESOURCE,
-+ GR_CAP,
-+ GR_SIG,
-+ GR_SIG2,
-+ GR_CRASH1,
-+ GR_CRASH2,
-+ GR_PSACCT,
-+ GR_RWXMAP,
-+ GR_RWXMAPVMA
-+};
-+
-+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
-+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
-+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
-+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
-+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
-+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
-+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
-+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
-+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
-+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
-+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
-+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
-+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
-+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
-+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
-+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
-+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
-+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
-+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
-+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
-+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
-+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
-+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
-+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
-+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
-+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
-+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
-+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
-+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
-+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
-+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
-+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
-+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
-+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
-+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
-+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
-+
-+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
-+
-+#endif
-+
-+#endif
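
grinternal.h funnels every log type through a single varargs function, gr_log_varargs(), with the argtypes enum telling it how to consume the variadic arguments; the per-type gr_log_* macros only select the discriminator. A much-reduced standalone sketch of that dispatch pattern (the enum values, function and macros below are illustrative, not the patch's real logger):

#include <stdarg.h>
#include <stdio.h>

enum { LOG_NOARGS, LOG_ONE_INT, LOG_TWO_STR };

/* Minimal analogue of gr_log_varargs(): the discriminator decides how the
 * variadic arguments are read before the message is emitted. */
static void log_varargs(const char *msg, int argtypes, ...)
{
	va_list ap;

	va_start(ap, argtypes);
	switch (argtypes) {
	case LOG_NOARGS:
		printf("grsec: %s\n", msg);
		break;
	case LOG_ONE_INT:
		printf("grsec: %s %d\n", msg, va_arg(ap, int));
		break;
	case LOG_TWO_STR: {
		const char *a = va_arg(ap, const char *);
		const char *b = va_arg(ap, const char *);

		printf("grsec: %s %s -> %s\n", msg, a, b);
		break;
	}
	}
	va_end(ap);
}

#define log_noargs(msg)        log_varargs(msg, LOG_NOARGS)
#define log_int(msg, n)        log_varargs(msg, LOG_ONE_INT, n)
#define log_str_str(msg, a, b) log_varargs(msg, LOG_TWO_STR, a, b)

int main(void)
{
	log_noargs("time set by");
	log_int("failed fork with errno", 12);
	log_str_str("mount of", "/dev/sda1", "/mnt");
	return 0;
}
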
-diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
-new file mode 100644
-index 0000000..26ef560
---- /dev/null
-+++ b/include/linux/grmsg.h
-@@ -0,0 +1,118 @@
-+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
-+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
-+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
-+#define GR_STOPMOD_MSG "denied modification of module state by "
-+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
-+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
-+#define GR_IOPERM_MSG "denied use of ioperm() by "
-+#define GR_IOPL_MSG "denied use of iopl() by "
-+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
-+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
-+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
-+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
-+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
-+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
-+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
-+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
-+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
-+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
-+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
-+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
-+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
-+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
-+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
-+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
-+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
-+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
-+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
-+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
-+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
-+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
-+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
-+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
-+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
-+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
-+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
-+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
-+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
-+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
-+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
-+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
-+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
-+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
-+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
-+#define GR_CHROOT_RENAME_MSG "denied bad rename of %.950s out of a chroot by "
-+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
-+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
-+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
-+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
-+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
-+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
-+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
-+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
-+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
-+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
-+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
-+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
-+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
-+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
-+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
-+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
-+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
-+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
-+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
-+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
-+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
-+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
-+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
-+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
-+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
-+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
-+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
-+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
-+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
-+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
-+#define GR_FAILFORK_MSG "failed fork with errno %s by "
-+#define GR_NICE_CHROOT_MSG "denied priority change by "
-+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
-+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
-+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
-+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
-+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
-+#define GR_TIME_MSG "time set by "
-+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
-+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
-+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
-+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
-+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
-+#define GR_BIND_MSG "denied bind() by "
-+#define GR_CONNECT_MSG "denied connect() by "
-+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
-+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
-+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
-+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
-+#define GR_CAP_ACL_MSG "use of %s denied for "
-+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
-+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
-+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
-+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
-+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
-+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
-+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
-+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
-+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
-+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
-+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
-+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
-+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
-+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
-+#define GR_VM86_MSG "denied use of vm86 by "
-+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
-+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
-+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
-+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
-+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
-+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
-+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
-+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
-+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
-diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
-new file mode 100644
-index 0000000..a9066b5
---- /dev/null
-+++ b/include/linux/grsecurity.h
-@@ -0,0 +1,239 @@
-+#ifndef GR_SECURITY_H
-+#define GR_SECURITY_H
-+#include <linux/fs.h>
-+#include <linux/fs_struct.h>
-+#include <linux/binfmts.h>
-+#include <linux/gracl.h>
-+
-+/* notify of brain-dead configs */
-+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
-+#endif
-+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
-+#endif
-+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
-+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
-+#endif
-+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
-+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
-+#endif
-+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
-+#error "CONFIG_PAX enabled, but no PaX options are enabled."
-+#endif
-+
-+int gr_handle_new_usb(void);
-+
-+void gr_handle_brute_attach(unsigned long mm_flags);
-+void gr_handle_brute_check(void);
-+void gr_handle_kernel_exploit(void);
-+
-+char gr_roletype_to_char(void);
-+
-+int gr_acl_enable_at_secure(void);
-+
-+int gr_check_user_change(int real, int effective, int fs);
-+int gr_check_group_change(int real, int effective, int fs);
-+
-+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
-+
-+void gr_del_task_from_ip_table(struct task_struct *p);
-+
-+int gr_pid_is_chrooted(struct task_struct *p);
-+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
-+int gr_handle_chroot_nice(void);
-+int gr_handle_chroot_sysctl(const int op);
-+int gr_handle_chroot_setpriority(struct task_struct *p,
-+ const int niceval);
-+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
-+int gr_chroot_fhandle(void);
-+int gr_handle_chroot_chroot(const struct dentry *dentry,
-+ const struct vfsmount *mnt);
-+void gr_handle_chroot_chdir(struct path *path);
-+int gr_handle_chroot_chmod(const struct dentry *dentry,
-+ const struct vfsmount *mnt, const int mode);
-+int gr_handle_chroot_mknod(const struct dentry *dentry,
-+ const struct vfsmount *mnt, const int mode);
-+int gr_handle_chroot_mount(const struct dentry *dentry,
-+ const struct vfsmount *mnt,
-+ const char *dev_name);
-+int gr_handle_chroot_pivot(void);
-+int gr_handle_chroot_unix(const pid_t pid);
-+
-+int gr_handle_rawio(const struct inode *inode);
-+
-+void gr_handle_ioperm(void);
-+void gr_handle_iopl(void);
-+void gr_handle_msr_write(void);
-+
-+umode_t gr_acl_umask(void);
-+
-+int gr_tpe_allow(const struct file *file);
-+
-+int gr_proc_is_restricted(void);
-+
-+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
-+void gr_clear_chroot_entries(struct task_struct *task);
-+
-+void gr_log_forkfail(const int retval);
-+void gr_log_timechange(void);
-+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
-+void gr_log_chdir(const struct dentry *dentry,
-+ const struct vfsmount *mnt);
-+void gr_log_chroot_exec(const struct dentry *dentry,
-+ const struct vfsmount *mnt);
-+void gr_log_remount(const char *devname, const int retval);
-+void gr_log_unmount(const char *devname, const int retval);
-+void gr_log_mount(const char *from, const char *to, const int retval);
-+void gr_log_textrel(struct vm_area_struct *vma);
-+void gr_log_ptgnustack(struct file *file);
-+void gr_log_rwxmmap(struct file *file);
-+void gr_log_rwxmprotect(struct vm_area_struct *vma);
-+
-+int gr_handle_follow_link(const struct inode *parent,
-+ const struct inode *inode,
-+ const struct dentry *dentry,
-+ const struct vfsmount *mnt);
-+int gr_handle_fifo(const struct dentry *dentry,
-+ const struct vfsmount *mnt,
-+ const struct dentry *dir, const int flag,
-+ const int acc_mode);
-+int gr_handle_hardlink(const struct dentry *dentry,
-+ const struct vfsmount *mnt,
-+ struct inode *inode,
-+ const int mode, const char *to);
-+
-+int gr_is_capable(const int cap);
-+int gr_is_capable_nolog(const int cap);
-+void gr_learn_resource(const struct task_struct *task, const int limit,
-+ const unsigned long wanted, const int gt);
-+void gr_copy_label(struct task_struct *tsk);
-+void gr_handle_crash(struct task_struct *task, const int sig);
-+int gr_handle_signal(const struct task_struct *p, const int sig);
-+int gr_check_crash_uid(const uid_t uid);
-+int gr_check_protected_task(const struct task_struct *task);
-+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
-+int gr_acl_handle_mmap(const struct file *file,
-+ const unsigned long prot);
-+int gr_acl_handle_mprotect(const struct file *file,
-+ const unsigned long prot);
-+int gr_check_hidden_task(const struct task_struct *tsk);
-+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
-+ const struct vfsmount *mnt);
-+__u32 gr_acl_handle_utime(const struct dentry *dentry,
-+ const struct vfsmount *mnt);
-+__u32 gr_acl_handle_access(const struct dentry *dentry,
-+ const struct vfsmount *mnt, const int fmode);
-+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
-+ const struct vfsmount *mnt, umode_t *mode);
-+__u32 gr_acl_handle_chown(const struct dentry *dentry,
-+ const struct vfsmount *mnt);
-+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
-+ const struct vfsmount *mnt);
-+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
-+ const struct vfsmount *mnt);
-+int gr_handle_ptrace(struct task_struct *task, const long request);
-+int gr_handle_proc_ptrace(struct task_struct *task);
-+__u32 gr_acl_handle_execve(const struct dentry *dentry,
-+ const struct vfsmount *mnt);
-+int gr_check_crash_exec(const struct file *filp);
-+int gr_acl_is_enabled(void);
-+void gr_set_role_label(struct task_struct *task, const uid_t uid,
-+ const gid_t gid);
-+int gr_set_proc_label(const struct dentry *dentry,
-+ const struct vfsmount *mnt,
-+ const int unsafe_flags);
-+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
-+ const struct vfsmount *mnt);
-+__u32 gr_acl_handle_open(const struct dentry *dentry,
-+ const struct vfsmount *mnt, int acc_mode);
-+__u32 gr_acl_handle_creat(const struct dentry *dentry,
-+ const struct dentry *p_dentry,
-+ const struct vfsmount *p_mnt,
-+ int open_flags, int acc_mode, const int imode);
-+void gr_handle_create(const struct dentry *dentry,
-+ const struct vfsmount *mnt);
-+void gr_handle_proc_create(const struct dentry *dentry,
-+ const struct inode *inode);
-+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
-+ const struct dentry *parent_dentry,
-+ const struct vfsmount *parent_mnt,
-+ const int mode);
-+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
-+ const struct dentry *parent_dentry,
-+ const struct vfsmount *parent_mnt);
-+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
-+ const struct vfsmount *mnt);
-+void gr_handle_delete(const u64 ino, const dev_t dev);
-+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
-+ const struct vfsmount *mnt);
-+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
-+ const struct dentry *parent_dentry,
-+ const struct vfsmount *parent_mnt,
-+ const char *from);
-+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
-+ const struct dentry *parent_dentry,
-+ const struct vfsmount *parent_mnt,
-+ const struct dentry *old_dentry,
-+ const struct vfsmount *old_mnt, const char *to);
-+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
-+int gr_acl_handle_rename(struct dentry *new_dentry,
-+ struct dentry *parent_dentry,
-+ const struct vfsmount *parent_mnt,
-+ struct dentry *old_dentry,
-+ struct inode *old_parent_inode,
-+ struct vfsmount *old_mnt, const char *newname);
-+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
-+ struct dentry *old_dentry,
-+ struct dentry *new_dentry,
-+ struct vfsmount *mnt, const __u8 replace);
-+__u32 gr_check_link(const struct dentry *new_dentry,
-+ const struct dentry *parent_dentry,
-+ const struct vfsmount *parent_mnt,
-+ const struct dentry *old_dentry,
-+ const struct vfsmount *old_mnt);
-+int gr_acl_handle_filldir(const struct file *file, const char *name,
-+ const unsigned int namelen, const u64 ino);
-+
-+__u32 gr_acl_handle_unix(const struct dentry *dentry,
-+ const struct vfsmount *mnt);
-+void gr_acl_handle_exit(void);
-+void gr_acl_handle_psacct(struct task_struct *task, const long code);
-+int gr_acl_handle_procpidmem(const struct task_struct *task);
-+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
-+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
-+void gr_audit_ptrace(struct task_struct *task);
-+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
-+u64 gr_get_ino_from_dentry(struct dentry *dentry);
-+void gr_put_exec_file(struct task_struct *task);
-+
-+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
-+
-+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
-+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
-+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
-+ struct dentry *newdentry, struct vfsmount *newmnt);
-+
-+#ifdef CONFIG_GRKERNSEC
-+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
-+void gr_handle_vm86(void);
-+void gr_handle_mem_readwrite(u64 from, u64 to);
-+
-+void gr_log_badprocpid(const char *entry);
-+
-+extern int grsec_enable_dmesg;
-+extern int grsec_disable_privio;
-+
-+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
-+extern int grsec_proc_gid;
-+#endif
-+
-+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
-+extern int grsec_enable_chroot_findtask;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+extern int grsec_enable_setxid;
-+#endif
-+#endif
-+
-+#endif
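
The "notify of brain-dead configs" block at the top of grsecurity.h rejects inconsistent Kconfig combinations at build time with #error, so a broken configuration never produces a kernel. The same preprocessor pattern in a standalone file (the CONFIG_FEATURE_X names are invented for the example):

#include <stdio.h>

#define CONFIG_FEATURE_X 1
#define CONFIG_FEATURE_X_STRICT 1
/* #define CONFIG_FEATURE_X_RELAXED 1 */

/* Mutually exclusive options must not both be enabled. */
#if defined(CONFIG_FEATURE_X_STRICT) && defined(CONFIG_FEATURE_X_RELAXED)
#error "CONFIG_FEATURE_X_STRICT and CONFIG_FEATURE_X_RELAXED cannot both be enabled."
#endif

/* The umbrella option needs at least one concrete mode selected. */
#if defined(CONFIG_FEATURE_X) && !defined(CONFIG_FEATURE_X_STRICT) && !defined(CONFIG_FEATURE_X_RELAXED)
#error "CONFIG_FEATURE_X enabled, but no mode for it is selected."
#endif

int main(void)
{
	puts("configuration accepted at compile time");
	return 0;
}
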
-diff --git a/include/linux/grsock.h b/include/linux/grsock.h
-new file mode 100644
-index 0000000..e7ffaaf
---- /dev/null
-+++ b/include/linux/grsock.h
-@@ -0,0 +1,19 @@
-+#ifndef __GRSOCK_H
-+#define __GRSOCK_H
-+
-+extern void gr_attach_curr_ip(const struct sock *sk);
-+extern int gr_handle_sock_all(const int family, const int type,
-+ const int protocol);
-+extern int gr_handle_sock_server(const struct sockaddr *sck);
-+extern int gr_handle_sock_server_other(const struct sock *sck);
-+extern int gr_handle_sock_client(const struct sockaddr *sck);
-+extern int gr_search_connect(struct socket * sock,
-+ struct sockaddr_in * addr);
-+extern int gr_search_bind(struct socket * sock,
-+ struct sockaddr_in * addr);
-+extern int gr_search_listen(struct socket * sock);
-+extern int gr_search_accept(struct socket * sock);
-+extern int gr_search_socket(const int domain, const int type,
-+ const int protocol);
-+
-+#endif
-diff --git a/include/linux/highmem.h b/include/linux/highmem.h
-index 52e9620..26c34b1 100644
---- a/include/linux/highmem.h
-+++ b/include/linux/highmem.h
-@@ -192,6 +192,18 @@ static inline void clear_highpage(struct page *page)
- kunmap_atomic(kaddr, KM_USER0);
- }
-
-+static inline void sanitize_highpage(struct page *page)
-+{
-+ void *kaddr;
-+ unsigned long flags;
-+
-+ local_irq_save(flags);
-+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
-+ clear_page(kaddr);
-+ kunmap_atomic(kaddr, KM_CLEARPAGE);
-+ local_irq_restore(flags);
-+}
-+
- static inline void zero_user_segments(struct page *page,
- unsigned start1, unsigned end1,
- unsigned start2, unsigned end2)
-diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
-index a90c09d..15f7933 100644
---- a/include/linux/hwmon-sysfs.h
-+++ b/include/linux/hwmon-sysfs.h
-@@ -23,7 +23,8 @@
- struct sensor_device_attribute{
- struct device_attribute dev_attr;
- int index;
--};
-+} __do_const;
-+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
- #define to_sensor_dev_attr(_dev_attr) \
- container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
-
-@@ -39,7 +40,7 @@ struct sensor_device_attribute_2 {
- struct device_attribute dev_attr;
- u8 index;
- u8 nr;
--};
-+} __do_const;
- #define to_sensor_dev_attr_2(_dev_attr) \
- container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
-
-diff --git a/include/linux/i2c.h b/include/linux/i2c.h
-index 07d103a..04ec65b 100644
---- a/include/linux/i2c.h
-+++ b/include/linux/i2c.h
-@@ -364,6 +364,7 @@ struct i2c_algorithm {
- /* To determine what the adapter supports */
- u32 (*functionality) (struct i2c_adapter *);
- };
-+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
-
- /*
- * i2c_adapter is the structure used to identify a physical i2c bus along
-diff --git a/include/linux/i2o.h b/include/linux/i2o.h
-index a6deef4..c56a7f2 100644
---- a/include/linux/i2o.h
-+++ b/include/linux/i2o.h
-@@ -564,7 +564,7 @@ struct i2o_controller {
- struct i2o_device *exec; /* Executive */
- #if BITS_PER_LONG == 64
- spinlock_t context_list_lock; /* lock for context_list */
-- atomic_t context_list_counter; /* needed for unique contexts */
-+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
- struct list_head context_list; /* list of context id's
- and pointers */
- #endif
-diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
-index 732c962..61c3f70 100644
---- a/include/linux/if_pppox.h
-+++ b/include/linux/if_pppox.h
-@@ -203,7 +203,7 @@ struct pppox_proto {
- int (*ioctl)(struct socket *sock, unsigned int cmd,
- unsigned long arg);
- struct module *owner;
--};
-+} __do_const;
-
- extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
- extern void unregister_pppox_proto(int proto_num);
-diff --git a/include/linux/init.h b/include/linux/init.h
-index 9146f39..536519a 100644
---- a/include/linux/init.h
-+++ b/include/linux/init.h
-@@ -38,9 +38,29 @@
- * Also note, that this data cannot be "const".
- */
-
-+#define add_init_latent_entropy __latent_entropy
-+
-+#ifdef CONFIG_HOTPLUG
-+#define add_devinit_latent_entropy
-+#else
-+#define add_devinit_latent_entropy __latent_entropy
-+#endif
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+#define add_cpuinit_latent_entropy
-+#else
-+#define add_cpuinit_latent_entropy __latent_entropy
-+#endif
-+
-+#ifdef CONFIG_MEMORY_HOTPLUG
-+#define add_meminit_latent_entropy
-+#else
-+#define add_meminit_latent_entropy __latent_entropy
-+#endif
-+
- /* These are for everybody (although not all archs will actually
- discard it in modules) */
--#define __init __section(.init.text) __cold notrace
-+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
- #define __initdata __section(.init.data)
- #define __initconst __section(.init.rodata)
- #define __exitdata __section(.exit.data)
-@@ -82,7 +102,7 @@
- #define __exit __section(.exit.text) __exitused __cold notrace
-
- /* Used for HOTPLUG */
--#define __devinit __section(.devinit.text) __cold notrace
-+#define __devinit __section(.devinit.text) __cold notrace add_devinit_latent_entropy
- #define __devinitdata __section(.devinit.data)
- #define __devinitconst __section(.devinit.rodata)
- #define __devexit __section(.devexit.text) __exitused __cold notrace
-@@ -90,7 +110,7 @@
- #define __devexitconst __section(.devexit.rodata)
-
- /* Used for HOTPLUG_CPU */
--#define __cpuinit __section(.cpuinit.text) __cold notrace
-+#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
- #define __cpuinitdata __section(.cpuinit.data)
- #define __cpuinitconst __section(.cpuinit.rodata)
- #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
-@@ -98,7 +118,7 @@
- #define __cpuexitconst __section(.cpuexit.rodata)
-
- /* Used for MEMORY_HOTPLUG */
--#define __meminit __section(.meminit.text) __cold notrace
-+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
- #define __meminitdata __section(.meminit.data)
- #define __meminitconst __section(.meminit.rodata)
- #define __memexit __section(.memexit.text) __exitused __cold notrace
-diff --git a/include/linux/init_task.h b/include/linux/init_task.h
-index cdde2b37..d782954 100644
---- a/include/linux/init_task.h
-+++ b/include/linux/init_task.h
-@@ -144,6 +144,12 @@ extern struct task_group root_task_group;
-
- #define INIT_TASK_COMM "swapper"
-
-+#ifdef CONFIG_X86
-+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
-+#else
-+#define INIT_TASK_THREAD_INFO
-+#endif
-+
- /*
- * INIT_TASK is used to set up the first task table, touch at
- * your own risk!. Base=0, limit=0x1fffff (=2MB)
-@@ -183,6 +189,7 @@ extern struct task_group root_task_group;
- RCU_INIT_POINTER(.cred, &init_cred), \
- .comm = INIT_TASK_COMM, \
- .thread = INIT_THREAD, \
-+ INIT_TASK_THREAD_INFO \
- .fs = &init_fs, \
- .files = &init_files, \
- .signal = &init_signals, \
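
The init_task.h hunk defines INIT_TASK_THREAD_INFO to expand either to a designated initializer (on x86) or to nothing, so the INIT_TASK initializer list stays valid whether or not the extra field exists. A tiny standalone version of that conditional-initializer pattern (all names below are invented):

#include <stdio.h>

#define HAVE_EXTRA_FIELD 1

struct widget {
	int id;
#if HAVE_EXTRA_FIELD
	int extra;
#endif
	const char *name;
};

/* Expands to a trailing-comma initializer when the field exists, and to
 * nothing otherwise -- the same trick INIT_TASK_THREAD_INFO relies on. */
#if HAVE_EXTRA_FIELD
#define INIT_WIDGET_EXTRA .extra = 42,
#else
#define INIT_WIDGET_EXTRA
#endif

#define INIT_WIDGET(n)		\
{				\
	.id = 1,		\
	INIT_WIDGET_EXTRA	\
	.name = (n),		\
}

int main(void)
{
	struct widget w = INIT_WIDGET("demo");

	printf("%s id=%d\n", w.name, w.id);
	return 0;
}
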
-diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
-index a64b00e..2ef3855f 100644
---- a/include/linux/interrupt.h
-+++ b/include/linux/interrupt.h
-@@ -441,7 +441,7 @@ enum
- /* map softirq index to softirq name. update 'softirq_to_name' in
- * kernel/softirq.c when adding a new softirq.
- */
--extern char *softirq_to_name[NR_SOFTIRQS];
-+extern const char * const softirq_to_name[NR_SOFTIRQS];
-
- /* softirq mask and active fields moved to irq_cpustat_t in
- * asm/hardirq.h to get better cache usage. KAO
-@@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
-
- struct softirq_action
- {
-- void (*action)(struct softirq_action *);
--};
-+ void (*action)(void);
-+} __no_const;
-
- asmlinkage void do_softirq(void);
- asmlinkage void __do_softirq(void);
--extern void open_softirq(int nr, void (*action)(struct softirq_action *));
-+extern void open_softirq(int nr, void (*action)(void));
- extern void softirq_init(void);
- static inline void __raise_softirq_irqoff(unsigned int nr)
- {
-diff --git a/include/linux/ioport.h b/include/linux/ioport.h
-index 9d57a71..8d0f701 100644
---- a/include/linux/ioport.h
-+++ b/include/linux/ioport.h
-@@ -166,7 +166,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
- int adjust_resource(struct resource *res, resource_size_t start,
- resource_size_t size);
- resource_size_t resource_alignment(struct resource *res);
--static inline resource_size_t resource_size(const struct resource *res)
-+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
- {
- return res->end - res->start + 1;
- }
-diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
-index 497c6cc..0c785f8 100644
---- a/include/linux/ipc_namespace.h
-+++ b/include/linux/ipc_namespace.h
-@@ -65,7 +65,7 @@ struct ipc_namespace {
-
- /* user_ns which owns the ipc ns */
- struct user_namespace *user_ns;
--};
-+} __randomize_layout;
-
- extern struct ipc_namespace init_ipc_ns;
- extern atomic_t nr_ipc_ns;
-diff --git a/include/linux/irq.h b/include/linux/irq.h
-index bff29c5..7437762 100644
---- a/include/linux/irq.h
-+++ b/include/linux/irq.h
-@@ -328,7 +328,7 @@ struct irq_chip {
- #ifdef CONFIG_IRQ_RELEASE_METHOD
- void (*release)(unsigned int irq, void *dev_id);
- #endif
--};
-+} __do_const;
-
- /*
- * irq_chip specific flags
-diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
-index e2e1ab5..eef4751 100644
---- a/include/linux/irqdesc.h
-+++ b/include/linux/irqdesc.h
-@@ -41,7 +41,6 @@ struct module;
- */
- struct irq_desc {
- struct irq_data irq_data;
-- struct timer_rand_state *timer_rand_state;
- unsigned int __percpu *kstat_irqs;
- irq_flow_handler_t handle_irq;
- #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
-@@ -55,7 +54,7 @@ struct irq_desc {
- unsigned int irq_count; /* For detecting broken IRQs */
- unsigned long last_unhandled; /* Aging timer for unhandled count */
- unsigned int irqs_unhandled;
-- atomic_t threads_handled;
-+ atomic_unchecked_t threads_handled;
- int threads_handled_last;
- raw_spinlock_t lock;
- struct cpumask *percpu_enabled;
-diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
-index f4e8578..cbfc9fc 100644
---- a/include/linux/jiffies.h
-+++ b/include/linux/jiffies.h
-@@ -283,9 +283,9 @@ extern unsigned long preset_lpj;
- */
- extern unsigned int jiffies_to_msecs(const unsigned long j);
- extern unsigned int jiffies_to_usecs(const unsigned long j);
--extern unsigned long msecs_to_jiffies(const unsigned int m);
--extern unsigned long usecs_to_jiffies(const unsigned int u);
--extern unsigned long timespec_to_jiffies(const struct timespec *value);
-+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
-+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
-+extern unsigned long timespec_to_jiffies(const struct timespec *value) __intentional_overflow(-1);
- extern void jiffies_to_timespec(const unsigned long jiffies,
- struct timespec *value);
- extern unsigned long timeval_to_jiffies(const struct timeval *value);
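
The jiffies.h hunk annotates the time converters with __intentional_overflow(-1), presumably to tell the overflow-checking plugin that wrap-around in the scaling arithmetic is expected rather than a bug. The conversion itself is just a scaling by the timer tick rate HZ; a simplified userspace model follows (the HZ value and the exact rounding are illustrative, not the kernel's precise implementation):

#include <stdio.h>

#define HZ 250u		/* illustrative tick rate; common values are 100/250/1000 */

/* Round up so a nonzero delay never becomes zero jiffies. */
static unsigned long msecs_to_jiffies(unsigned int m)
{
	return (m * HZ + 999u) / 1000u;
}

static unsigned int jiffies_to_msecs(unsigned long j)
{
	return (unsigned int)(j * 1000u / HZ);
}

int main(void)
{
	printf("10 ms   -> %lu jiffies\n", msecs_to_jiffies(10));	/* 3 at HZ=250 */
	printf("1 jiffy -> %u ms\n", jiffies_to_msecs(1));		/* 4 at HZ=250 */
	return 0;
}
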
-diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
-index 3875719..4663bc3 100644
---- a/include/linux/kallsyms.h
-+++ b/include/linux/kallsyms.h
-@@ -15,7 +15,8 @@
-
- struct module;
-
--#ifdef CONFIG_KALLSYMS
-+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
-+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
- /* Lookup the address for a symbol. Returns 0 if not found. */
- unsigned long kallsyms_lookup_name(const char *name);
-
-@@ -99,6 +100,20 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
- /* Stupid that this does nothing, but I didn't create this mess. */
- #define __print_symbol(fmt, addr)
- #endif /*CONFIG_KALLSYMS*/
-+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
-+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
-+extern unsigned long kallsyms_lookup_name(const char *name);
-+extern void __print_symbol(const char *fmt, unsigned long address);
-+extern int sprint_backtrace(char *buffer, unsigned long address);
-+extern int sprint_symbol(char *buffer, unsigned long address);
-+const char *kallsyms_lookup(unsigned long addr,
-+ unsigned long *symbolsize,
-+ unsigned long *offset,
-+ char **modname, char *namebuf);
-+extern int kallsyms_lookup_size_offset(unsigned long addr,
-+ unsigned long *symbolsize,
-+ unsigned long *offset);
-+#endif
-
- /* This macro allows us to keep printk typechecking */
- static __printf(1, 2)
-diff --git a/include/linux/kernel.h b/include/linux/kernel.h
-index dcf6a8b..a182533 100644
---- a/include/linux/kernel.h
-+++ b/include/linux/kernel.h
-@@ -326,7 +326,8 @@ extern __printf(3, 0)
- int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
- extern __printf(2, 3)
- char *kasprintf(gfp_t gfp, const char *fmt, ...);
--extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
-+extern __printf(2, 0)
-+char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
-
- extern int sscanf(const char *, const char *, ...)
- __attribute__ ((format (scanf, 2, 3)));
-@@ -514,10 +515,10 @@ do { \
- __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
- } while (0)
-
--extern int
-+extern __printf(2, 0) int
- __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
-
--extern int
-+extern __printf(2, 0) int
- __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
-
- extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
-@@ -534,7 +535,7 @@ trace_printk(const char *fmt, ...)
- {
- return 0;
- }
--static inline int
-+static __printf(1, 0) inline int
- ftrace_vprintk(const char *fmt, va_list ap)
- {
- return 0;
-@@ -698,24 +699,30 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
- * @condition: the condition which the compiler should know is false.
- *
- * If you have some code which relies on certain constants being equal, or
-- * other compile-time-evaluated condition, you should use BUILD_BUG_ON to
-+ * some other compile-time-evaluated condition, you should use BUILD_BUG_ON to
- * detect if someone changes it.
- *
-- * The implementation uses gcc's reluctance to create a negative array, but
-- * gcc (as of 4.4) only emits that error for obvious cases (eg. not arguments
-- * to inline functions). So as a fallback we use the optimizer; if it can't
-- * prove the condition is false, it will cause a link error on the undefined
-- * "__build_bug_on_failed". This error message can be harder to track down
-- * though, hence the two different methods.
-+ * The implementation uses gcc's reluctance to create a negative array, but gcc
-+ * (as of 4.4) only emits that error for obvious cases (e.g. not arguments to
-+ * inline functions). Luckily, in 4.3 they added the "error" function
-+ * attribute just for this type of case. Thus, we use a negative sized array
-+ * (should always create an error on gcc versions older than 4.4) and then call
-+ * an undefined function with the error attribute (should always create an
-+ * error on gcc 4.3 and later). If for some reason, neither creates a
-+ * compile-time error, we'll still have a link-time error, which is harder to
-+ * track down.
- */
- #ifndef __OPTIMIZE__
- #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
- #else
--extern int __build_bug_on_failed;
--#define BUILD_BUG_ON(condition) \
-- do { \
-- ((void)sizeof(char[1 - 2*!!(condition)])); \
-- if (condition) __build_bug_on_failed = 1; \
-+#define BUILD_BUG_ON(condition) \
-+ do { \
-+ bool __cond = !!(condition); \
-+ extern void __build_bug_on_failed(void) \
-+ __compiletime_error("BUILD_BUG_ON failed"); \
-+ if (__cond) \
-+ __build_bug_on_failed(); \
-+ __compiletime_error_fallback(__cond); \
- } while(0)
- #endif
-
-diff --git a/include/linux/key-type.h b/include/linux/key-type.h
-index 9efd081..19f989c 100644
---- a/include/linux/key-type.h
-+++ b/include/linux/key-type.h
-@@ -92,7 +92,7 @@ struct key_type {
-
- /* internal fields */
- struct list_head link; /* link in types list */
--};
-+} __do_const;
-
- extern struct key_type key_type_keyring;
-
-diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
-index c4d2fc1..ef36389 100644
---- a/include/linux/kgdb.h
-+++ b/include/linux/kgdb.h
-@@ -53,7 +53,7 @@ extern int kgdb_connected;
- extern int kgdb_io_module_registered;
-
- extern atomic_t kgdb_setting_breakpoint;
--extern atomic_t kgdb_cpu_doing_single_step;
-+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
-
- extern struct task_struct *kgdb_usethread;
- extern struct task_struct *kgdb_contthread;
-@@ -252,7 +252,7 @@ struct kgdb_arch {
- void (*disable_hw_break)(struct pt_regs *regs);
- void (*remove_all_hw_break)(void);
- void (*correct_hw_break)(void);
--};
-+} __do_const;
-
- /**
- * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
-@@ -277,11 +277,11 @@ struct kgdb_io {
- void (*pre_exception) (void);
- void (*post_exception) (void);
- int is_console;
--};
-+} __do_const;
-
- extern struct kgdb_arch arch_kgdb_ops;
-
--extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
-+extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
-
- extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
- extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
-diff --git a/include/linux/kmod.h b/include/linux/kmod.h
-index f8d4b27..8560882 100644
---- a/include/linux/kmod.h
-+++ b/include/linux/kmod.h
-@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
- * usually useless though. */
- extern __printf(2, 3)
- int __request_module(bool wait, const char *name, ...);
-+extern __printf(3, 4)
-+int ___request_module(bool wait, char *param_name, const char *name, ...);
- #define request_module(mod...) __request_module(true, mod)
- #define request_module_nowait(mod...) __request_module(false, mod)
- #define try_then_request_module(x, mod...) \
-@@ -60,6 +62,9 @@ struct subprocess_info {
- struct work_struct work;
- struct completion *complete;
- char *path;
-+#ifdef CONFIG_GRKERNSEC
-+ char *origpath;
-+#endif
- char **argv;
- char **envp;
- enum umh_wait wait;
-diff --git a/include/linux/kobject.h b/include/linux/kobject.h
-index 445f978..6b3fc2c 100644
---- a/include/linux/kobject.h
-+++ b/include/linux/kobject.h
-@@ -74,8 +74,9 @@ struct kobject {
-
- extern __printf(2, 3)
- int kobject_set_name(struct kobject *kobj, const char *name, ...);
--extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
-- va_list vargs);
-+extern __printf(2, 0)
-+int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
-+ va_list vargs);
-
- static inline const char *kobject_name(const struct kobject *kobj)
- {
-@@ -111,7 +112,7 @@ struct kobj_type {
- struct attribute **default_attrs;
- const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
- const void *(*namespace)(struct kobject *kobj);
--};
-+} __do_const;
-
- struct kobj_uevent_env {
- char *envp[UEVENT_NUM_ENVP];
-@@ -134,6 +135,7 @@ struct kobj_attribute {
- ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
- const char *buf, size_t count);
- };
-+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
-
- extern const struct sysfs_ops kobj_sysfs_ops;
-
-@@ -161,7 +163,7 @@ struct kset {
- spinlock_t list_lock;
- struct kobject kobj;
- const struct kset_uevent_ops *uevent_ops;
--};
-+} __randomize_layout;
-
- extern void kset_init(struct kset *kset);
- extern int __must_check kset_register(struct kset *kset);
-diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
-index f66b065..c2c29b4 100644
---- a/include/linux/kobject_ns.h
-+++ b/include/linux/kobject_ns.h
-@@ -43,7 +43,7 @@ struct kobj_ns_type_operations {
- const void *(*netlink_ns)(struct sock *sk);
- const void *(*initial_ns)(void);
- void (*drop_ns)(void *);
--};
-+} __do_const;
-
- int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
- int kobj_ns_type_registered(enum kobj_ns_type type);
-diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
-index f93d8c1..71244f6 100644
---- a/include/linux/kvm_host.h
-+++ b/include/linux/kvm_host.h
-@@ -307,7 +307,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
- void vcpu_load(struct kvm_vcpu *vcpu);
- void vcpu_put(struct kvm_vcpu *vcpu);
-
--int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
-+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
- struct module *module);
- void kvm_exit(void);
-
-@@ -453,7 +453,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
- struct kvm_guest_debug *dbg);
- int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
-
--int kvm_arch_init(void *opaque);
-+int kvm_arch_init(const void *opaque);
- void kvm_arch_exit(void);
-
- int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
-diff --git a/include/linux/libata.h b/include/linux/libata.h
-index 42ac6ad..703f223 100644
---- a/include/linux/libata.h
-+++ b/include/linux/libata.h
-@@ -915,7 +915,7 @@ struct ata_port_operations {
- * fields must be pointers.
- */
- const struct ata_port_operations *inherits;
--};
-+} __do_const;
-
- struct ata_port_info {
- unsigned long flags;
-diff --git a/include/linux/linkage.h b/include/linux/linkage.h
-index 3f46aed..dee75fe 100644
---- a/include/linux/linkage.h
-+++ b/include/linux/linkage.h
-@@ -15,6 +15,7 @@
- #endif
-
- #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
-+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
- #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
-
- /*
-diff --git a/include/linux/list.h b/include/linux/list.h
-index cc6d2aa..c10ee83 100644
---- a/include/linux/list.h
-+++ b/include/linux/list.h
-@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
- extern void list_del(struct list_head *entry);
- #endif
-
-+extern void __pax_list_add(struct list_head *new,
-+ struct list_head *prev,
-+ struct list_head *next);
-+static inline void pax_list_add(struct list_head *new, struct list_head *head)
-+{
-+ __pax_list_add(new, head, head->next);
-+}
-+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
-+{
-+ __pax_list_add(new, head->prev, head);
-+}
-+extern void pax_list_del(struct list_head *entry);
-+
- /**
- * list_replace - replace old entry by new one
- * @old : the element to be replaced
-@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
- INIT_LIST_HEAD(entry);
- }
-
-+extern void pax_list_del_init(struct list_head *entry);
-+
- /**
- * list_move - delete from one list and add as another's head
- * @list: the entry to move
-diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
-index 88e78de..c63979a 100644
---- a/include/linux/lsm_audit.h
-+++ b/include/linux/lsm_audit.h
-@@ -124,6 +124,10 @@ struct common_audit_data {
- u32 denied;
- uid_t ouid;
- } fs;
-+ struct {
-+ int type, protocol;
-+ struct sock *sk;
-+ } net;
- };
- } apparmor_audit_data;
- #endif
-diff --git a/include/linux/math64.h b/include/linux/math64.h
-index 2913b86..fa383945 100644
---- a/include/linux/math64.h
-+++ b/include/linux/math64.h
-@@ -15,7 +15,7 @@
- * This is commonly provided by 32bit archs to provide an optimized 64bit
- * divide.
- */
--static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
-+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
- {
- *remainder = dividend % divisor;
- return dividend / divisor;
-@@ -33,7 +33,7 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
- /**
- * div64_u64 - unsigned 64bit divide with 64bit divisor
- */
--static inline u64 div64_u64(u64 dividend, u64 divisor)
-+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
- {
- return dividend / divisor;
- }
-@@ -52,7 +52,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
- #define div64_ul(x, y) div_u64((x), (y))
-
- #ifndef div_u64_rem
--static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
-+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
- {
- *remainder = do_div(dividend, divisor);
- return dividend;
-@@ -64,7 +64,7 @@ extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
- #endif
-
- #ifndef div64_u64
--extern u64 div64_u64(u64 dividend, u64 divisor);
-+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
- #endif
-
- #ifndef div64_s64
-@@ -81,7 +81,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
- * divide.
- */
- #ifndef div_u64
--static inline u64 div_u64(u64 dividend, u32 divisor)
-+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
- {
- u32 remainder;
- return div_u64_rem(dividend, divisor, &remainder);
-diff --git a/include/linux/mca.h b/include/linux/mca.h
-index 3797270..7765ede 100644
---- a/include/linux/mca.h
-+++ b/include/linux/mca.h
-@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
- int region);
- void * (*mca_transform_memory)(struct mca_device *,
- void *memory);
--};
-+} __no_const;
-
- struct mca_bus {
- u64 default_dma_mask;
-diff --git a/include/linux/mm.h b/include/linux/mm.h
-index e5ee683..61d2731 100644
---- a/include/linux/mm.h
-+++ b/include/linux/mm.h
-@@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
-
- #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
- #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
-+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
-+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
-+#else
- #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
-+#endif
-+
- #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
- #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
-
-@@ -213,8 +220,8 @@ struct vm_operations_struct {
- /* called by access_process_vm when get_user_pages() fails, typically
- * for use by special VMAs that can switch between memory and hardware
- */
-- int (*access)(struct vm_area_struct *vma, unsigned long addr,
-- void *buf, int len, int write);
-+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
-+ void *buf, size_t len, int write);
- #ifdef CONFIG_NUMA
- /*
- * set_policy() op must add a reference to any non-NULL @new mempolicy
-@@ -241,6 +248,7 @@ struct vm_operations_struct {
- const nodemask_t *to, unsigned long flags);
- #endif
- };
-+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
-
- struct mmu_gather;
- struct inode;
-@@ -942,8 +950,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
- unsigned long *pfn);
- int follow_phys(struct vm_area_struct *vma, unsigned long address,
- unsigned int flags, unsigned long *prot, resource_size_t *phys);
--int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
-- void *buf, int len, int write);
-+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
-+ void *buf, size_t len, int write);
-
- static inline void unmap_shared_mapping_range(struct address_space *mapping,
- loff_t const holebegin, loff_t const holelen)
-@@ -986,10 +994,10 @@ static inline int fixup_user_fault(struct task_struct *tsk,
- }
- #endif
-
--extern int make_pages_present(unsigned long addr, unsigned long end);
--extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
--extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-- void *buf, int len, int write);
-+extern ssize_t make_pages_present(unsigned long addr, unsigned long end);
-+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
-+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
-+ void *buf, size_t len, int write);
-
- int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, unsigned int foll_flags,
-@@ -1015,34 +1023,6 @@ int set_page_dirty(struct page *page);
- int set_page_dirty_lock(struct page *page);
- int clear_page_dirty_for_io(struct page *page);
-
--/* Is the vma a continuation of the stack vma above it? */
--static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
--{
-- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
--}
--
--static inline int stack_guard_page_start(struct vm_area_struct *vma,
-- unsigned long addr)
--{
-- return (vma->vm_flags & VM_GROWSDOWN) &&
-- (vma->vm_start == addr) &&
-- !vma_growsdown(vma->vm_prev, addr);
--}
--
--/* Is the vma a continuation of the stack vma below it? */
--static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
--{
-- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
--}
--
--static inline int stack_guard_page_end(struct vm_area_struct *vma,
-- unsigned long addr)
--{
-- return (vma->vm_flags & VM_GROWSUP) &&
-- (vma->vm_end == addr) &&
-- !vma_growsup(vma->vm_next, addr);
--}
--
- extern unsigned long move_page_tables(struct vm_area_struct *vma,
- unsigned long old_addr, struct vm_area_struct *new_vma,
- unsigned long new_addr, unsigned long len);
-@@ -1137,6 +1117,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
- }
- #endif
-
-+#ifdef CONFIG_MMU
-+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
-+#else
-+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
-+{
-+ return __pgprot(0);
-+}
-+#endif
-+
- int vma_wants_writenotify(struct vm_area_struct *vma);
-
- extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
-@@ -1155,8 +1144,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
- {
- return 0;
- }
-+
-+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
-+ unsigned long address)
-+{
-+ return 0;
-+}
- #else
- int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
-+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
- #endif
-
- #ifdef __PAGETABLE_PMD_FOLDED
-@@ -1165,8 +1161,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
- {
- return 0;
- }
-+
-+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
-+ unsigned long address)
-+{
-+ return 0;
-+}
- #else
- int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
-+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
- #endif
-
- int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
-@@ -1184,11 +1187,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
- NULL: pud_offset(pgd, address);
- }
-
-+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
-+{
-+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
-+ NULL: pud_offset(pgd, address);
-+}
-+
- static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
- {
- return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
- NULL: pmd_offset(pud, address);
- }
-+
-+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
-+{
-+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
-+ NULL: pmd_offset(pud, address);
-+}
- #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
-
- #if USE_SPLIT_PTLOCKS
-@@ -1399,7 +1414,7 @@ extern int install_special_mapping(struct mm_struct *mm,
- unsigned long addr, unsigned long len,
- unsigned long flags, struct page **pages);
-
--extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
-+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
-
- extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
- unsigned long len, unsigned long prot,
-@@ -1422,6 +1437,7 @@ out:
- }
-
- extern int do_munmap(struct mm_struct *, unsigned long, size_t);
-+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
-
- extern unsigned long do_brk(unsigned long, unsigned long);
-
-@@ -1479,6 +1495,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
- extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
- struct vm_area_struct **pprev);
-
-+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
-+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
-+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
-+
- /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
- NULL if none. Assume start_addr < end_addr. */
- static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
-@@ -1495,15 +1515,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
- return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
- }
-
--#ifdef CONFIG_MMU
--pgprot_t vm_get_page_prot(unsigned long vm_flags);
--#else
--static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
--{
-- return __pgprot(0);
--}
--#endif
--
- struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
- int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
- unsigned long pfn, unsigned long size, pgprot_t);
-@@ -1539,6 +1550,12 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
- static inline void vm_stat_account(struct mm_struct *mm,
- unsigned long flags, struct file *file, long pages)
- {
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
-+#endif
-+
-+ mm->total_vm += pages;
- }
- #endif /* CONFIG_PROC_FS */
-
-@@ -1619,7 +1636,7 @@ extern int unpoison_memory(unsigned long pfn);
- extern int sysctl_memory_failure_early_kill;
- extern int sysctl_memory_failure_recovery;
- extern void shake_page(struct page *p, int access);
--extern atomic_long_t mce_bad_pages;
-+extern atomic_long_unchecked_t mce_bad_pages;
- extern int soft_offline_page(struct page *page, int flags);
-
- extern void dump_page(struct page *page);
-@@ -1633,5 +1650,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
- unsigned int pages_per_huge_page);
- #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
-
-+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
-+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
-+#else
-+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
-+#endif
-+
- #endif /* __KERNEL__ */
- #endif /* _LINUX_MM_H */
-diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
-index de3a321..8fb84fc 100644
---- a/include/linux/mm_types.h
-+++ b/include/linux/mm_types.h
-@@ -253,7 +253,9 @@ struct vm_area_struct {
- #ifdef CONFIG_NUMA
- struct mempolicy *vm_policy; /* NUMA policy for the VMA */
- #endif
--};
-+
-+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
-+} __randomize_layout;
-
- struct core_thread {
- struct task_struct *task;
-@@ -390,7 +392,25 @@ struct mm_struct {
- #ifdef CONFIG_CPUMASK_OFFSTACK
- struct cpumask cpumask_allocation;
- #endif
--};
-+
-+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
-+ unsigned long pax_flags;
-+#endif
-+
-+#ifdef CONFIG_PAX_DLRESOLVE
-+ unsigned long call_dl_resolve;
-+#endif
-+
-+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
-+ unsigned long call_syscall;
-+#endif
-+
-+#ifdef CONFIG_PAX_ASLR
-+ unsigned long delta_mmap; /* randomized offset */
-+ unsigned long delta_stack; /* randomized offset */
-+#endif
-+
-+} __randomize_layout;
-
- static inline void mm_init_cpumask(struct mm_struct *mm)
- {
-diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
-index 174a844..11483c2 100644
---- a/include/linux/mmc/core.h
-+++ b/include/linux/mmc/core.h
-@@ -76,7 +76,7 @@ struct mmc_command {
- #define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_MASK)
-
- unsigned int retries; /* max number of retries */
-- unsigned int error; /* command error */
-+ int error; /* command error */
-
- /*
- * Standard errno values are used for errors, but some have specific
-diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
-index c5d5278..85cd5ce 100644
---- a/include/linux/mmiotrace.h
-+++ b/include/linux/mmiotrace.h
-@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
- /* Called from ioremap.c */
- extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
- void __iomem *addr);
--extern void mmiotrace_iounmap(volatile void __iomem *addr);
-+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
-
- /* For anyone to insert markers. Remember trailing newline. */
- extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
-@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
- {
- }
-
--static inline void mmiotrace_iounmap(volatile void __iomem *addr)
-+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
- {
- }
-
-@@ -106,6 +106,6 @@ extern void enable_mmiotrace(void);
- extern void disable_mmiotrace(void);
- extern void mmio_trace_rw(struct mmiotrace_rw *rw);
- extern void mmio_trace_mapping(struct mmiotrace_map *map);
--extern int mmio_trace_printk(const char *fmt, va_list args);
-+extern __printf(1, 0) int mmio_trace_printk(const char *fmt, va_list args);
-
- #endif /* _LINUX_MMIOTRACE_H */
-diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
-index ee2baf0..e24a58c 100644
---- a/include/linux/mmu_notifier.h
-+++ b/include/linux/mmu_notifier.h
-@@ -256,12 +256,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
- */
- #define ptep_clear_flush_notify(__vma, __address, __ptep) \
- ({ \
-- pte_t __pte; \
-+ pte_t ___pte; \
- struct vm_area_struct *___vma = __vma; \
- unsigned long ___address = __address; \
-- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
-+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
- mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
-- __pte; \
-+ ___pte; \
- })
-
- #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
-diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
-index 25842b6..6e42df8 100644
---- a/include/linux/mmzone.h
-+++ b/include/linux/mmzone.h
-@@ -371,7 +371,7 @@ struct zone {
- unsigned long flags; /* zone flags, see below */
-
- /* Zone statistics */
-- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
-+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
-
- /*
- * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
-diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
-index 226e0ff..b498493 100644
---- a/include/linux/mod_devicetable.h
-+++ b/include/linux/mod_devicetable.h
-@@ -131,7 +131,7 @@ struct usb_device_id {
- #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
- #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
-
--#define HID_ANY_ID (~0)
-+#define HID_ANY_ID (~0U)
-
- struct hid_device_id {
- __u16 bus;
-@@ -480,7 +480,7 @@ struct dmi_system_id {
- const char *ident;
- struct dmi_strmatch matches[4];
- void *driver_data;
--};
-+} __do_const;
- /*
- * struct dmi_device_id appears during expansion of
- * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
-diff --git a/include/linux/module.h b/include/linux/module.h
-index 3cb7839..2e0d4ee4 100644
---- a/include/linux/module.h
-+++ b/include/linux/module.h
-@@ -17,9 +17,11 @@
- #include <linux/moduleparam.h>
- #include <linux/tracepoint.h>
- #include <linux/export.h>
-+#include <linux/fs.h>
-
- #include <linux/percpu.h>
- #include <asm/module.h>
-+#include <asm/pgtable.h>
-
- #include <trace/events/module.h>
-
-@@ -41,7 +43,7 @@ struct module_kobject {
- struct module *mod;
- struct kobject *drivers_dir;
- struct module_param_attrs *mp;
--};
-+} __randomize_layout;
-
- struct module_attribute {
- struct attribute attr;
-@@ -53,12 +55,13 @@ struct module_attribute {
- int (*test)(struct module *);
- void (*free)(struct module *);
- };
-+typedef struct module_attribute __no_const module_attribute_no_const;
-
- struct module_version_attribute {
- struct module_attribute mattr;
- const char *module_name;
- const char *version;
--} __attribute__ ((__aligned__(sizeof(void *))));
-+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
-
- extern ssize_t __modver_version_show(struct module_attribute *,
- struct module_kobject *, char *);
-@@ -217,7 +220,7 @@ struct module
-
- /* Sysfs stuff. */
- struct module_kobject mkobj;
-- struct module_attribute *modinfo_attrs;
-+ module_attribute_no_const *modinfo_attrs;
- const char *version;
- const char *srcversion;
- struct kobject *holders_dir;
-@@ -261,19 +264,16 @@ struct module
- int (*init)(void);
-
- /* If this is non-NULL, vfree after init() returns */
-- void *module_init;
-+ void *module_init_rx, *module_init_rw;
-
- /* Here is the actual code + data, vfree'd on unload. */
-- void *module_core;
-+ void *module_core_rx, *module_core_rw;
-
- /* Here are the sizes of the init and core sections */
-- unsigned int init_size, core_size;
-+ unsigned int init_size_rw, core_size_rw;
-
- /* The size of the executable code in each section. */
-- unsigned int init_text_size, core_text_size;
--
-- /* Size of RO sections of the module (text+rodata) */
-- unsigned int init_ro_size, core_ro_size;
-+ unsigned int init_size_rx, core_size_rx;
-
- /* Arch-specific module values */
- struct mod_arch_specific arch;
-@@ -329,6 +329,10 @@ struct module
- #ifdef CONFIG_EVENT_TRACING
- struct ftrace_event_call **trace_events;
- unsigned int num_trace_events;
-+ struct file_operations trace_id;
-+ struct file_operations trace_enable;
-+ struct file_operations trace_format;
-+ struct file_operations trace_filter;
- #endif
- #ifdef CONFIG_FTRACE_MCOUNT_RECORD
- unsigned int num_ftrace_callsites;
-@@ -358,7 +362,7 @@ struct module
- ctor_fn_t *ctors;
- unsigned int num_ctors;
- #endif
--};
-+} __randomize_layout;
- #ifndef MODULE_ARCH_INIT
- #define MODULE_ARCH_INIT {}
- #endif
-@@ -379,16 +383,46 @@ bool is_module_address(unsigned long addr);
- bool is_module_percpu_address(unsigned long addr);
- bool is_module_text_address(unsigned long addr);
-
-+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
-+{
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ if (ktla_ktva(addr) >= (unsigned long)start &&
-+ ktla_ktva(addr) < (unsigned long)start + size)
-+ return 1;
-+#endif
-+
-+ return ((void *)addr >= start && (void *)addr < start + size);
-+}
-+
-+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
-+{
-+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
-+}
-+
-+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
-+{
-+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
-+}
-+
-+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
-+{
-+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
-+}
-+
-+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
-+{
-+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
-+}
-+
- static inline int within_module_core(unsigned long addr, struct module *mod)
- {
-- return (unsigned long)mod->module_core <= addr &&
-- addr < (unsigned long)mod->module_core + mod->core_size;
-+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
- }
-
- static inline int within_module_init(unsigned long addr, struct module *mod)
- {
-- return (unsigned long)mod->module_init <= addr &&
-- addr < (unsigned long)mod->module_init + mod->init_size;
-+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
- }
-
- /* Search for module by name: must hold module_mutex. */
-diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
-index b2be02e..6a9fdb1 100644
---- a/include/linux/moduleloader.h
-+++ b/include/linux/moduleloader.h
-@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
- sections. Returns NULL on failure. */
- void *module_alloc(unsigned long size);
-
-+#ifdef CONFIG_PAX_KERNEXEC
-+void *module_alloc_exec(unsigned long size);
-+#else
-+#define module_alloc_exec(x) module_alloc(x)
-+#endif
-+
- /* Free memory returned from module_alloc. */
- void module_free(struct module *mod, void *module_region);
-
-+#ifdef CONFIG_PAX_KERNEXEC
-+void module_free_exec(struct module *mod, void *module_region);
-+#else
-+#define module_free_exec(x, y) module_free((x), (y))
-+#endif
-+
- /* Apply the given relocation to the (simplified) ELF. Return -error
- or 0. */
- int apply_relocate(Elf_Shdr *sechdrs,
-diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
-index 7939f63..c573d38 100644
---- a/include/linux/moduleparam.h
-+++ b/include/linux/moduleparam.h
-@@ -159,13 +159,13 @@ struct kparam_array
- { arg } }
-
- /* Obsolete - use module_param_cb() */
--#define module_param_call(name, set, get, arg, perm) \
-+#define module_param_call(name, _set, _get, arg, perm) \
- static struct kernel_param_ops __param_ops_##name = \
-- { (void *)set, (void *)get }; \
-+ { .set = (void *)_set, .get = (void *)_get }; \
- __module_param_call(MODULE_PARAM_PREFIX, \
- name, &__param_ops_##name, arg, \
- __same_type(arg, bool *), \
-- (perm) + sizeof(__check_old_set_param(set))*0)
-+ (perm) + sizeof(__check_old_set_param(_set))*0)
-
- /* We don't get oldget: it's often a new-style param_get_uint, etc. */
- static inline int
-@@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
- * @len is usually just sizeof(string).
- */
- #define module_param_string(name, string, len, perm) \
-- static const struct kparam_string __param_string_##name \
-+ static const struct kparam_string __param_string_##name __used \
- = { len, string }; \
- __module_param_call(MODULE_PARAM_PREFIX, name, \
- &param_ops_string, \
-@@ -395,7 +395,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
- * module_param_named() for why this might be necessary.
- */
- #define module_param_array_named(name, array, type, nump, perm) \
-- static const struct kparam_array __param_arr_##name \
-+ static const struct kparam_array __param_arr_##name __used \
- = { .max = ARRAY_SIZE(array), .num = nump, \
- .ops = &param_ops_##type, \
- .elemsize = sizeof(array[0]), .elem = array }; \
-diff --git a/include/linux/mount.h b/include/linux/mount.h
-index fc17c4d..3f48d9a 100644
---- a/include/linux/mount.h
-+++ b/include/linux/mount.h
-@@ -87,7 +87,7 @@ struct vfsmount {
- int mnt_expiry_mark; /* true if marked for expiry */
- int mnt_pinned;
- int mnt_ghosts;
--};
-+} __randomize_layout;
-
- struct file; /* forward dec */
-
-diff --git a/include/linux/namei.h b/include/linux/namei.h
-index ffc0213..2c1f2cb 100644
---- a/include/linux/namei.h
-+++ b/include/linux/namei.h
-@@ -24,7 +24,7 @@ struct nameidata {
- unsigned seq;
- int last_type;
- unsigned depth;
-- char *saved_names[MAX_NESTED_LINKS + 1];
-+ const char *saved_names[MAX_NESTED_LINKS + 1];
-
- /* Intent data */
- union {
-@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
- extern struct dentry *lock_rename(struct dentry *, struct dentry *);
- extern void unlock_rename(struct dentry *, struct dentry *);
-
--static inline void nd_set_link(struct nameidata *nd, char *path)
-+static inline void nd_set_link(struct nameidata *nd, const char *path)
- {
- nd->saved_names[nd->depth] = path;
- }
-
--static inline char *nd_get_link(struct nameidata *nd)
-+static inline const char *nd_get_link(const struct nameidata *nd)
- {
- return nd->saved_names[nd->depth];
- }
-diff --git a/include/linux/net.h b/include/linux/net.h
-index bd4f6c7..e9b8bb8 100644
---- a/include/linux/net.h
-+++ b/include/linux/net.h
-@@ -224,7 +224,7 @@ struct net_proto_family {
- int (*create)(struct net *net, struct socket *sock,
- int protocol, int kern);
- struct module *owner;
--};
-+} __do_const;
-
- struct iovec;
- struct kvec;
-diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index 4b04097..43bda9d 100644
---- a/include/linux/netdevice.h
-+++ b/include/linux/netdevice.h
-@@ -949,6 +949,7 @@ struct net_device_ops {
- int (*ndo_set_features)(struct net_device *dev,
- u32 features);
- };
-+typedef struct net_device_ops __no_const net_device_ops_no_const;
-
- /*
- * The DEVICE structure.
-@@ -1088,7 +1089,7 @@ struct net_device {
- int iflink;
-
- struct net_device_stats stats;
-- atomic_long_t rx_dropped; /* dropped packets by core network
-+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
- * Do not use this in drivers.
- */
-
-@@ -2594,7 +2595,7 @@ static inline int netif_is_bond_slave(struct net_device *dev)
- return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
- }
-
--extern struct pernet_operations __net_initdata loopback_net_ops;
-+extern struct pernet_operations __net_initconst loopback_net_ops;
-
- static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
- {
-diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
-index 857f502..350a113 100644
---- a/include/linux/netfilter.h
-+++ b/include/linux/netfilter.h
-@@ -141,7 +141,7 @@ struct nf_sockopt_ops {
- #endif
- /* Use the module struct to lock set/get code in place */
- struct module *owner;
--};
-+} __do_const;
-
- /* Function to register/unregister hook points. */
- int nf_register_hook(struct nf_hook_ops *reg);
-diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
-index 74d3386..e800dbf 100644
---- a/include/linux/netfilter/nfnetlink.h
-+++ b/include/linux/netfilter/nfnetlink.h
-@@ -65,7 +65,7 @@ struct nfnl_callback {
- const struct nlattr * const cda[]);
- const struct nla_policy *policy; /* netlink attribute policy */
- const u_int16_t attr_count; /* number of nlattr's */
--};
-+} __do_const;
-
- struct nfnetlink_subsystem {
- const char *name;
-diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
-new file mode 100644
-index 0000000..33f4af8
---- /dev/null
-+++ b/include/linux/netfilter/xt_gradm.h
-@@ -0,0 +1,9 @@
-+#ifndef _LINUX_NETFILTER_XT_GRADM_H
-+#define _LINUX_NETFILTER_XT_GRADM_H 1
-+
-+struct xt_gradm_mtinfo {
-+ __u16 flags;
-+ __u16 invflags;
-+};
-+
-+#endif
-diff --git a/include/linux/nls.h b/include/linux/nls.h
-index 5dc635f..35f5e11 100644
---- a/include/linux/nls.h
-+++ b/include/linux/nls.h
-@@ -31,7 +31,7 @@ struct nls_table {
- const unsigned char *charset2upper;
- struct module *owner;
- struct nls_table *next;
--};
-+} __do_const;
-
- /* this value hold the maximum octet of charset */
- #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
-diff --git a/include/linux/notifier.h b/include/linux/notifier.h
-index d65746e..62e72c2 100644
---- a/include/linux/notifier.h
-+++ b/include/linux/notifier.h
-@@ -51,7 +51,8 @@ struct notifier_block {
- int (*notifier_call)(struct notifier_block *, unsigned long, void *);
- struct notifier_block __rcu *next;
- int priority;
--};
-+} __do_const;
-+typedef struct notifier_block __no_const notifier_block_no_const;
-
- struct atomic_notifier_head {
- spinlock_t lock;
-diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
-index a4c5624..79d6d88 100644
---- a/include/linux/oprofile.h
-+++ b/include/linux/oprofile.h
-@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
- int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
- char const * name, ulong * val);
-
--/** Create a file for read-only access to an atomic_t. */
-+/** Create a file for read-only access to an atomic_unchecked_t. */
- int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
-- char const * name, atomic_t * val);
-+ char const * name, atomic_unchecked_t * val);
-
- /** create a directory */
- struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
-diff --git a/include/linux/padata.h b/include/linux/padata.h
-index 4633b2f..988bc08 100644
---- a/include/linux/padata.h
-+++ b/include/linux/padata.h
-@@ -129,7 +129,7 @@ struct parallel_data {
- struct padata_instance *pinst;
- struct padata_parallel_queue __percpu *pqueue;
- struct padata_serial_queue __percpu *squeue;
-- atomic_t seq_nr;
-+ atomic_unchecked_t seq_nr;
- atomic_t reorder_objects;
- atomic_t refcnt;
- unsigned int max_seq_nr;
-diff --git a/include/linux/path.h b/include/linux/path.h
-index edc98de..a2c707f 100644
---- a/include/linux/path.h
-+++ b/include/linux/path.h
-@@ -1,13 +1,15 @@
- #ifndef _LINUX_PATH_H
- #define _LINUX_PATH_H
-
-+#include <linux/compiler.h>
-+
- struct dentry;
- struct vfsmount;
-
- struct path {
- struct vfsmount *mnt;
- struct dentry *dentry;
--};
-+} __randomize_layout;
-
- extern void path_get(struct path *);
- extern void path_put(struct path *);
-diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
-index 45fc162..01a4068 100644
---- a/include/linux/pci_hotplug.h
-+++ b/include/linux/pci_hotplug.h
-@@ -80,7 +80,8 @@ struct hotplug_slot_ops {
- int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
- int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
- int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
--};
-+} __do_const;
-+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
-
- /**
- * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
-diff --git a/include/linux/percpu.h b/include/linux/percpu.h
-index 9ca008f..d82f96a 100644
---- a/include/linux/percpu.h
-+++ b/include/linux/percpu.h
-@@ -58,7 +58,7 @@
- * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
- * larger than PERCPU_DYNAMIC_EARLY_SIZE.
- */
--#define PERCPU_DYNAMIC_EARLY_SLOTS 128
-+#define PERCPU_DYNAMIC_EARLY_SLOTS 256
- #define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
-
- /*
-diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
-index 8d5b91e..9209ea4 100644
---- a/include/linux/perf_event.h
-+++ b/include/linux/perf_event.h
-@@ -750,8 +750,8 @@ struct perf_event {
-
- enum perf_event_active_state state;
- unsigned int attach_state;
-- local64_t count;
-- atomic64_t child_count;
-+ local64_t count; /* PaX: fix it one day */
-+ atomic64_unchecked_t child_count;
-
- /*
- * These are the total time in nanoseconds that the event
-@@ -802,8 +802,8 @@ struct perf_event {
- * These accumulate total time (in nanoseconds) that children
- * events have been enabled and running, respectively.
- */
-- atomic64_t child_total_time_enabled;
-- atomic64_t child_total_time_running;
-+ atomic64_unchecked_t child_total_time_enabled;
-+ atomic64_unchecked_t child_total_time_running;
-
- /*
- * Protect attach/detach and child_list:
-@@ -1102,7 +1102,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
- entry->ip[entry->nr++] = ip;
- }
-
--extern int sysctl_perf_event_paranoid;
-+extern int sysctl_perf_event_legitimately_concerned;
- extern int sysctl_perf_event_mlock;
- extern int sysctl_perf_event_sample_rate;
-
-@@ -1110,19 +1110,24 @@ extern int perf_proc_update_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-
-+static inline bool perf_paranoid_any(void)
-+{
-+ return sysctl_perf_event_legitimately_concerned > 2;
-+}
-+
- static inline bool perf_paranoid_tracepoint_raw(void)
- {
-- return sysctl_perf_event_paranoid > -1;
-+ return sysctl_perf_event_legitimately_concerned > -1;
- }
-
- static inline bool perf_paranoid_cpu(void)
- {
-- return sysctl_perf_event_paranoid > 0;
-+ return sysctl_perf_event_legitimately_concerned > 0;
- }
-
- static inline bool perf_paranoid_kernel(void)
- {
-- return sysctl_perf_event_paranoid > 1;
-+ return sysctl_perf_event_legitimately_concerned > 1;
- }
-
- extern void perf_event_init(void);
-@@ -1200,7 +1205,7 @@ static inline void perf_restore_debug_store(void) { }
- */
- #define perf_cpu_notifier(fn) \
- do { \
-- static struct notifier_block fn##_nb __cpuinitdata = \
-+ static struct notifier_block fn##_nb = \
- { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
- fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
- (void *)(unsigned long)smp_processor_id()); \
-diff --git a/include/linux/personality.h b/include/linux/personality.h
-index 8fc7dd1a..c19d89e 100644
---- a/include/linux/personality.h
-+++ b/include/linux/personality.h
-@@ -44,6 +44,7 @@ enum {
- #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
- ADDR_NO_RANDOMIZE | \
- ADDR_COMPAT_LAYOUT | \
-+ ADDR_LIMIT_3GB | \
- MMAP_PAGE_ZERO)
-
- /*
-diff --git a/include/linux/phy.h b/include/linux/phy.h
-index 79f337c..228104f 100644
---- a/include/linux/phy.h
-+++ b/include/linux/phy.h
-@@ -471,7 +471,7 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
- return mdiobus_write(phydev->bus, phydev->addr, regnum, val);
- }
-
--int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id);
-+int get_phy_id(struct mii_bus *bus, int addr, int *phy_id);
- struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
- int phy_device_register(struct phy_device *phy);
- int phy_init_hw(struct phy_device *phydev);
-diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
-index 38d1032..d3f6744 100644
---- a/include/linux/pid_namespace.h
-+++ b/include/linux/pid_namespace.h
-@@ -30,7 +30,7 @@ struct pid_namespace {
- #ifdef CONFIG_BSD_PROCESS_ACCT
- struct bsd_acct_struct *bacct;
- #endif
--};
-+} __randomize_layout;
-
- extern struct pid_namespace init_pid_ns;
-
-diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
-index 8778c26..732cf42 100644
---- a/include/linux/pipe_fs_i.h
-+++ b/include/linux/pipe_fs_i.h
-@@ -47,9 +47,9 @@ struct pipe_buffer {
- struct pipe_inode_info {
- wait_queue_head_t wait;
- unsigned int nrbufs, curbuf, buffers;
-- unsigned int readers;
-- unsigned int writers;
-- unsigned int waiting_writers;
-+ atomic_t readers;
-+ atomic_t writers;
-+ atomic_t waiting_writers;
- unsigned int r_counter;
- unsigned int w_counter;
- struct page *tmp_page;
-diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
-index d3085e7..fd01052 100644
---- a/include/linux/pm_runtime.h
-+++ b/include/linux/pm_runtime.h
-@@ -95,7 +95,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
-
- static inline void pm_runtime_mark_last_busy(struct device *dev)
- {
-- ACCESS_ONCE(dev->power.last_busy) = jiffies;
-+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
- }
-
- #else /* !CONFIG_PM_RUNTIME */
-diff --git a/include/linux/pnp.h b/include/linux/pnp.h
-index 195aafc..49a7bc2 100644
---- a/include/linux/pnp.h
-+++ b/include/linux/pnp.h
-@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
- struct pnp_fixup {
- char id[7];
- void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
--};
-+} __do_const;
-
- /* config parameters */
- #define PNP_CONFIG_NORMAL 0x0001
-diff --git a/include/linux/poison.h b/include/linux/poison.h
-index 79159de..f1233a9 100644
---- a/include/linux/poison.h
-+++ b/include/linux/poison.h
-@@ -19,8 +19,8 @@
- * under normal circumstances, used to verify that nobody uses
- * non-initialized list entries.
- */
--#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
--#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
-+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
-+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
-
- /********** include/linux/timer.h **********/
- /*
-diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
-index b8d4ddd..bb59d8b 100644
---- a/include/linux/ppp-comp.h
-+++ b/include/linux/ppp-comp.h
-@@ -111,7 +111,7 @@ struct compressor {
- struct module *owner;
- /* Extra skb space needed by the compressor algorithm */
- unsigned int comp_extra;
--};
-+} __do_const;
-
- /*
- * The return value from decompress routine is the length of the
-diff --git a/include/linux/prctl.h b/include/linux/prctl.h
-index a3baeb2..b527252 100644
---- a/include/linux/prctl.h
-+++ b/include/linux/prctl.h
-@@ -102,4 +102,19 @@
-
- #define PR_MCE_KILL_GET 34
-
-+/*
-+ * If no_new_privs is set, then operations that grant new privileges (i.e.
-+ * execve) will either fail or not grant them. This affects suid/sgid,
-+ * file capabilities, and LSMs.
-+ *
-+ * Operations that merely manipulate or drop existing privileges (setresuid,
-+ * capset, etc.) will still work. Drop those privileges if you want them gone.
-+ *
-+ * Changing LSM security domain is considered a new privilege. So, for example,
-+ * asking selinux for a specific new context (e.g. with runcon) will result
-+ * in execve returning -EPERM.
-+ */
-+#define PR_SET_NO_NEW_PRIVS 38
-+#define PR_GET_NO_NEW_PRIVS 39
-+
- #endif /* _LINUX_PRCTL_H */
-diff --git a/include/linux/printk.h b/include/linux/printk.h
-index f0e22f7..82dd544 100644
---- a/include/linux/printk.h
-+++ b/include/linux/printk.h
-@@ -94,6 +94,8 @@ void early_printk(const char *fmt, ...);
- extern int printk_needs_cpu(int cpu);
- extern void printk_tick(void);
-
-+extern int kptr_restrict;
-+
- #ifdef CONFIG_PRINTK
- asmlinkage __printf(1, 0)
- int vprintk(const char *fmt, va_list args);
-@@ -112,7 +114,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
-
- extern int printk_delay_msec;
- extern int dmesg_restrict;
--extern int kptr_restrict;
-
- void log_buf_kexec_setup(void);
- void __init setup_log_buf(int early);
-diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
-index 643b96c..c9bfc32 100644
---- a/include/linux/proc_fs.h
-+++ b/include/linux/proc_fs.h
-@@ -74,9 +74,10 @@ struct proc_dir_entry {
- struct completion *pde_unload_completion;
- struct list_head pde_openers; /* who did ->open, but not ->release */
- spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
-+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
- u8 namelen;
- char name[];
--};
-+} __randomize_layout;
-
- enum kcore_type {
- KCORE_TEXT,
-@@ -146,6 +147,7 @@ extern void proc_device_tree_update_prop(struct proc_dir_entry *pde,
- extern struct proc_dir_entry *proc_symlink(const char *,
- struct proc_dir_entry *, const char *);
- extern struct proc_dir_entry *proc_mkdir(const char *,struct proc_dir_entry *);
-+extern struct proc_dir_entry *proc_mkdir_restrict(const char *,struct proc_dir_entry *);
- extern struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
- struct proc_dir_entry *parent);
-
-@@ -155,6 +157,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
- return proc_create_data(name, mode, parent, proc_fops, NULL);
- }
-
-+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
-+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
-+{
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
-+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
-+#else
-+ return proc_create_data(name, mode, parent, proc_fops, NULL);
-+#endif
-+}
-+
-+
- static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
- mode_t mode, struct proc_dir_entry *base,
- read_proc_t *read_proc, void * data)
-@@ -247,7 +262,7 @@ struct proc_ns_operations {
- void *(*get)(struct task_struct *task);
- void (*put)(void *ns);
- int (*install)(struct nsproxy *nsproxy, void *ns);
--};
-+} __do_const;
- extern const struct proc_ns_operations netns_operations;
- extern const struct proc_ns_operations utsns_operations;
- extern const struct proc_ns_operations ipcns_operations;
-@@ -273,7 +288,7 @@ struct proc_inode {
- void *ns;
- const struct proc_ns_operations *ns_ops;
- struct inode vfs_inode;
--};
-+} __randomize_layout;
-
- static inline struct proc_inode *PROC_I(const struct inode *inode)
- {
-diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
-index e49240b..842e20a 100644
---- a/include/linux/ptrace.h
-+++ b/include/linux/ptrace.h
-@@ -62,8 +62,9 @@
- #define PTRACE_O_TRACEEXEC 0x00000010
- #define PTRACE_O_TRACEVFORKDONE 0x00000020
- #define PTRACE_O_TRACEEXIT 0x00000040
-+#define PTRACE_O_TRACESECCOMP 0x00000080
-
--#define PTRACE_O_MASK 0x0000007f
-+#define PTRACE_O_MASK 0x000000ff
-
- /* Wait extended result codes for the above trace options. */
- #define PTRACE_EVENT_FORK 1
-@@ -73,6 +74,7 @@
- #define PTRACE_EVENT_VFORK_DONE 5
- #define PTRACE_EVENT_EXIT 6
- #define PTRACE_EVENT_STOP 7
-+#define PTRACE_EVENT_SECCOMP 8
-
- #include <asm/ptrace.h>
-
-@@ -101,8 +103,9 @@
- #define PT_TRACE_EXEC PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
- #define PT_TRACE_VFORK_DONE PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
- #define PT_TRACE_EXIT PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
-+#define PT_TRACE_SECCOMP PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)
-
--#define PT_TRACE_MASK 0x000003f4
-+#define PT_TRACE_MASK 0x00000bf4
-
- /* single stepping state bits (used on ARM and PA-RISC) */
- #define PT_SINGLESTEP_BIT 31
-@@ -130,10 +133,12 @@ extern void __ptrace_unlink(struct task_struct *child);
- extern void exit_ptrace(struct task_struct *tracer);
- #define PTRACE_MODE_READ 1
- #define PTRACE_MODE_ATTACH 2
--/* Returns 0 on success, -errno on denial. */
--extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
- /* Returns true on success, false on denial. */
- extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
-+/* Returns true on success, false on denial. */
-+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
-+/* Returns true on success, false on denial. */
-+extern bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode);
-
- static inline int ptrace_reparented(struct task_struct *child)
- {
-@@ -198,9 +203,10 @@ static inline void ptrace_event(int event, unsigned long message)
- if (unlikely(ptrace_event_enabled(current, event))) {
- current->ptrace_message = message;
- ptrace_notify((event << 8) | SIGTRAP);
-- } else if (event == PTRACE_EVENT_EXEC && unlikely(current->ptrace)) {
-+ } else if (event == PTRACE_EVENT_EXEC) {
- /* legacy EXEC report via SIGTRAP */
-- send_sig(SIGTRAP, current, 0);
-+ if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
-+ send_sig(SIGTRAP, current, 0);
- }
- }
-
-diff --git a/include/linux/random.h b/include/linux/random.h
-index f5e1311..d51eec7 100644
---- a/include/linux/random.h
-+++ b/include/linux/random.h
-@@ -41,19 +41,27 @@ struct rand_pool_info {
- };
-
- struct rnd_state {
-- __u32 s1, s2, s3;
-+ __u32 s1, s2, s3, s4;
- };
-
- /* Exported functions */
-
- #ifdef __KERNEL__
-
--extern void rand_initialize_irq(int irq);
--
- extern void add_device_randomness(const void *, unsigned int);
-+
-+static inline void add_latent_entropy(void)
-+{
-+
-+#ifdef LATENT_ENTROPY_PLUGIN
-+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
-+#endif
-+
-+}
-+
- extern void add_input_randomness(unsigned int type, unsigned int code,
-- unsigned int value);
--extern void add_interrupt_randomness(int irq, int irq_flags);
-+ unsigned int value) __latent_entropy;
-+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
-
- extern void get_random_bytes(void *buf, int nbytes);
- extern void get_random_bytes_arch(void *buf, int nbytes);
-@@ -67,10 +75,25 @@ extern const struct file_operations random_fops, urandom_fops;
- unsigned int get_random_int(void);
- unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
-
--u32 random32(void);
--void srandom32(u32 seed);
-+u32 prandom_u32(void) __intentional_overflow(-1);
-+void prandom_bytes(void *buf, int nbytes);
-+void prandom_seed(u32 seed);
-+void prandom_reseed_late(void);
-
--u32 prandom32(struct rnd_state *);
-+/*
-+ * These macros are preserved for backward compatibility and should be
-+ * removed as soon as a transition is finished.
-+ */
-+#define random32() prandom_u32()
-+#define srandom32(seed) prandom_seed(seed)
-+
-+u32 prandom_u32_state(struct rnd_state *state) __intentional_overflow(-1);
-+void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
-+
-+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
-+{
-+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
-+}
-
- /*
- * Handle minimum values for seeds
-@@ -81,17 +104,18 @@ static inline u32 __seed(u32 x, u32 m)
- }
-
- /**
-- * prandom32_seed - set seed for prandom32().
-+ * prandom_seed_state - set seed for prandom_u32_state().
- * @state: pointer to state structure to receive the seed.
- * @seed: arbitrary 64-bit value to use as a seed.
- */
--static inline void prandom32_seed(struct rnd_state *state, u64 seed)
-+static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
- {
- u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
-
-- state->s1 = __seed(i, 2);
-- state->s2 = __seed(i, 8);
-- state->s3 = __seed(i, 16);
-+ state->s1 = __seed(i, 2U);
-+ state->s2 = __seed(i, 8U);
-+ state->s3 = __seed(i, 16U);
-+ state->s4 = __seed(i, 128U);
- }
-
- #ifdef CONFIG_ARCH_RANDOM
-@@ -107,6 +131,11 @@ static inline int arch_get_random_int(unsigned int *v)
- }
- #endif
-
-+static inline u32 next_pseudo_random32(u32 seed)
-+{
-+ return seed * 1664525 + 1013904223;
-+}
-+
- #endif /* __KERNEL___ */
-
- #endif /* _LINUX_RANDOM_H */
-diff --git a/include/linux/rculist.h b/include/linux/rculist.h
-index 3863352..4ec4bfb 100644
---- a/include/linux/rculist.h
-+++ b/include/linux/rculist.h
-@@ -39,6 +39,9 @@ static inline void __list_add_rcu(struct list_head *new,
- next->prev = new;
- }
-
-+extern void __pax_list_add_rcu(struct list_head *new,
-+ struct list_head *prev, struct list_head *next);
-+
- /**
- * list_add_rcu - add a new entry to rcu-protected list
- * @new: new entry to be added
-@@ -60,6 +63,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
- __list_add_rcu(new, head, head->next);
- }
-
-+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
-+{
-+ __pax_list_add_rcu(new, head, head->next);
-+}
-+
- /**
- * list_add_tail_rcu - add a new entry to rcu-protected list
- * @new: new entry to be added
-@@ -82,6 +90,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
- __list_add_rcu(new, head->prev, head);
- }
-
-+static inline void pax_list_add_tail_rcu(struct list_head *new,
-+ struct list_head *head)
-+{
-+ __pax_list_add_rcu(new, head->prev, head);
-+}
-+
- /**
- * list_del_rcu - deletes entry from list without re-initialization
- * @entry: the element to delete from the list.
-@@ -112,6 +126,8 @@ static inline void list_del_rcu(struct list_head *entry)
- entry->prev = LIST_POISON2;
- }
-
-+extern void pax_list_del_rcu(struct list_head *entry);
-+
- /**
- * hlist_del_init_rcu - deletes entry from hash list with re-initialization
- * @n: the element to delete from the hash list.
-diff --git a/include/linux/reboot.h b/include/linux/reboot.h
-index e0879a7..a12f962 100644
---- a/include/linux/reboot.h
-+++ b/include/linux/reboot.h
-@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
- * Architecture-specific implementations of sys_reboot commands.
- */
-
--extern void machine_restart(char *cmd);
--extern void machine_halt(void);
--extern void machine_power_off(void);
-+extern void machine_restart(char *cmd) __noreturn;
-+extern void machine_halt(void) __noreturn;
-+extern void machine_power_off(void) __noreturn;
-
- extern void machine_shutdown(void);
- struct pt_regs;
-@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
- */
-
- extern void kernel_restart_prepare(char *cmd);
--extern void kernel_restart(char *cmd);
--extern void kernel_halt(void);
--extern void kernel_power_off(void);
-+extern void kernel_restart(char *cmd) __noreturn;
-+extern void kernel_halt(void) __noreturn;
-+extern void kernel_power_off(void) __noreturn;
-
- extern int C_A_D; /* for sysctl */
- void ctrl_alt_del(void);
-@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
- * Emergency restart, callable from an interrupt handler.
- */
-
--extern void emergency_restart(void);
-+extern void emergency_restart(void) __noreturn;
- #include <asm/emergency-restart.h>
-
- #endif
-diff --git a/include/linux/regset.h b/include/linux/regset.h
-index 686f373..6ade19e 100644
---- a/include/linux/regset.h
-+++ b/include/linux/regset.h
-@@ -160,7 +160,8 @@ struct user_regset {
- unsigned int align;
- unsigned int bias;
- unsigned int core_note_type;
--};
-+} __do_const;
-+typedef struct user_regset __no_const user_regset_no_const;
-
- /**
- * struct user_regset_view - available regsets
-diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
-index 43c7251..dc187f8 100644
---- a/include/linux/reiserfs_fs.h
-+++ b/include/linux/reiserfs_fs.h
-@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
- #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
-
- #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
--#define get_generation(s) atomic_read (&fs_generation(s))
-+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
- #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
- #define __fs_changed(gen,s) (gen != get_generation (s))
- #define fs_changed(gen,s) \
-diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
-index 52c83b6..18ed7eb 100644
---- a/include/linux/reiserfs_fs_sb.h
-+++ b/include/linux/reiserfs_fs_sb.h
-@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
- /* Comment? -Hans */
- wait_queue_head_t s_wait;
- /* To be obsoleted soon by per buffer seals.. -Hans */
-- atomic_t s_generation_counter; // increased by one every time the
-+ atomic_unchecked_t s_generation_counter; // increased by one every time the
- // tree gets re-balanced
- unsigned long s_properties; /* File system properties. Currently holds
- on-disk FS format */
-diff --git a/include/linux/relay.h b/include/linux/relay.h
-index 14a86bc..17d0700 100644
---- a/include/linux/relay.h
-+++ b/include/linux/relay.h
-@@ -159,7 +159,7 @@ struct rchan_callbacks
- * The callback should return 0 if successful, negative if not.
- */
- int (*remove_buf_file)(struct dentry *dentry);
--};
-+} __no_const;
-
- /*
- * CONFIG_RELAY kernel API, kernel/relay.c
-diff --git a/include/linux/rio.h b/include/linux/rio.h
-index 4d50611..c6858a2 100644
---- a/include/linux/rio.h
-+++ b/include/linux/rio.h
-@@ -315,7 +315,7 @@ struct rio_ops {
- int mbox, void *buffer, size_t len);
- int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
- void *(*get_inb_message)(struct rio_mport *mport, int mbox);
--};
-+} __no_const;
-
- #define RIO_RESOURCE_MEM 0x00000100
- #define RIO_RESOURCE_DOORBELL 0x00000200
-diff --git a/include/linux/rmap.h b/include/linux/rmap.h
-index b0df05a..d0803b6 100644
---- a/include/linux/rmap.h
-+++ b/include/linux/rmap.h
-@@ -129,8 +129,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
- void anon_vma_init(void); /* create anon_vma_cachep */
- int anon_vma_prepare(struct vm_area_struct *);
- void unlink_anon_vmas(struct vm_area_struct *);
--int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
--int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
-+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
-+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
- void __anon_vma_link(struct vm_area_struct *);
-
- static inline void anon_vma_merge(struct vm_area_struct *vma,
-diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
-index 9aaf5bf..d5ee2a5 100644
---- a/include/linux/scatterlist.h
-+++ b/include/linux/scatterlist.h
-@@ -3,6 +3,7 @@
-
- #include <asm/types.h>
- #include <asm/scatterlist.h>
-+#include <linux/sched.h>
- #include <linux/mm.h>
- #include <linux/string.h>
- #include <asm/io.h>
-@@ -109,6 +110,12 @@ static inline struct page *sg_page(struct scatterlist *sg)
- static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
- unsigned int buflen)
- {
-+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
-+ if (object_starts_on_stack(buf)) {
-+ void *adjbuf = buf - current->stack + current->lowmem_stack;
-+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
-+ } else
-+#endif
- sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
- }
-
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index cb34ff4..c086c98 100644
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -101,6 +101,7 @@ struct bio_list;
- struct fs_struct;
- struct perf_event_context;
- struct blk_plug;
-+struct linux_binprm;
-
- /*
- * List of flags we want to share for kernel threads,
-@@ -355,7 +356,7 @@ extern char __sched_text_start[], __sched_text_end[];
- extern int in_sched_functions(unsigned long addr);
-
- #define MAX_SCHEDULE_TIMEOUT LONG_MAX
--extern signed long schedule_timeout(signed long timeout);
-+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
- extern signed long schedule_timeout_interruptible(signed long timeout);
- extern signed long schedule_timeout_killable(signed long timeout);
- extern signed long schedule_timeout_uninterruptible(signed long timeout);
-@@ -381,10 +382,23 @@ struct user_namespace;
- #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
-
- extern int sysctl_max_map_count;
-+extern unsigned long sysctl_heap_stack_gap;
-
- #include <linux/aio.h>
-
- #ifdef CONFIG_MMU
-+
-+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
-+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
-+#else
-+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
-+{
-+ return 0;
-+}
-+#endif
-+
-+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long *addr, unsigned long len, unsigned long offset);
-+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
- extern void arch_pick_mmap_layout(struct mm_struct *mm);
- extern unsigned long
- arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
-@@ -403,6 +417,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
- extern void set_dumpable(struct mm_struct *mm, int value);
- extern int get_dumpable(struct mm_struct *mm);
-
-+/* get/set_dumpable() values */
-+#define SUID_DUMPABLE_DISABLED 0
-+#define SUID_DUMPABLE_ENABLED 1
-+#define SUID_DUMPABLE_SAFE 2
-+
- #define SUID_DUMP_DISABLE 0 /* No setuid dumping */
- #define SUID_DUMP_USER 1 /* Dump as user of process */
- #define SUID_DUMP_ROOT 2 /* Dump as root */
-@@ -634,6 +653,17 @@ struct signal_struct {
- #ifdef CONFIG_TASKSTATS
- struct taskstats *stats;
- #endif
-+
-+#ifdef CONFIG_GRKERNSEC
-+ u32 curr_ip;
-+ u32 saved_ip;
-+ u32 gr_saddr;
-+ u32 gr_daddr;
-+ u16 gr_sport;
-+ u16 gr_dport;
-+ u8 used_accept:1;
-+#endif
-+
- #ifdef CONFIG_AUDIT
- unsigned audit_tty;
- struct tty_audit_buf *tty_audit_buf;
-@@ -657,7 +687,7 @@ struct signal_struct {
- struct mutex cred_guard_mutex; /* guard against foreign influences on
- * credential calculations
- * (notably. ptrace) */
--};
-+} __randomize_layout;
-
- /* Context switch must be unlocked if interrupts are to be enabled */
- #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-@@ -715,6 +745,14 @@ struct user_struct {
- struct key *session_keyring; /* UID's default session keyring */
- #endif
-
-+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
-+ unsigned char kernel_banned;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_BRUTE
-+ unsigned char suid_banned;
-+ unsigned long suid_ban_expires;
-+#endif
-+
- /* Hash table maintenance information */
- struct hlist_node uidhash_node;
- uid_t uid;
-@@ -723,7 +761,7 @@ struct user_struct {
- #ifdef CONFIG_PERF_EVENTS
- atomic_long_t locked_vm;
- #endif
--};
-+} __randomize_layout;
-
- extern int uids_sysfs_init(void);
-
-@@ -1129,7 +1167,7 @@ struct sched_class {
- #ifdef CONFIG_FAIR_GROUP_SCHED
- void (*task_move_group) (struct task_struct *p, int on_rq);
- #endif
--};
-+} __do_const;
-
- struct load_weight {
- unsigned long weight, inv_weight;
-@@ -1226,6 +1264,9 @@ enum perf_event_task_context {
- struct task_struct {
- volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
- void *stack;
-+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
-+ void *lowmem_stack;
-+#endif
- atomic_t usage;
- unsigned int flags; /* per process flags, defined below */
- unsigned int ptrace;
-@@ -1306,6 +1347,8 @@ struct task_struct {
- * execve */
- unsigned in_iowait:1;
-
-+ /* task may not gain privileges */
-+ unsigned no_new_privs:1;
-
- /* Revert to default priority/policy when forking */
- unsigned sched_reset_on_fork:1;
-@@ -1346,8 +1389,8 @@ struct task_struct {
- struct list_head thread_group;
-
- struct completion *vfork_done; /* for vfork() */
-- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
-- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
-+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
-+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
-
- cputime_t utime, stime, utimescaled, stimescaled;
- cputime_t gtime;
-@@ -1363,13 +1406,6 @@ struct task_struct {
- struct task_cputime cputime_expires;
- struct list_head cpu_timers[3];
-
--/* process credentials */
-- const struct cred __rcu *real_cred; /* objective and real subjective task
-- * credentials (COW) */
-- const struct cred __rcu *cred; /* effective (overridable) subjective task
-- * credentials (COW) */
-- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
--
- char comm[TASK_COMM_LEN]; /* executable name excluding path
- - access with [gs]et_task_comm (which lock
- it with task_lock())
-@@ -1386,8 +1422,16 @@ struct task_struct {
- #endif
- /* CPU-specific state of this task */
- struct thread_struct thread;
-+/* thread_info moved to task_struct */
-+#ifdef CONFIG_X86
-+ struct thread_info tinfo;
-+#endif
- /* filesystem information */
- struct fs_struct *fs;
-+
-+ const struct cred __rcu *cred; /* effective (overridable) subjective task
-+ * credentials (COW) */
-+
- /* open file information */
- struct files_struct *files;
- /* namespaces */
-@@ -1410,7 +1454,7 @@ struct task_struct {
- uid_t loginuid;
- unsigned int sessionid;
- #endif
-- seccomp_t seccomp;
-+ struct seccomp seccomp;
-
- /* Thread group tracking */
- u32 parent_exec_id;
-@@ -1434,6 +1478,11 @@ struct task_struct {
- struct rt_mutex_waiter *pi_blocked_on;
- #endif
-
-+/* process credentials */
-+ const struct cred __rcu *real_cred; /* objective and real subjective task
-+ * credentials (COW) */
-+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
-+
- #ifdef CONFIG_DEBUG_MUTEXES
- /* mutex deadlock detection */
- struct mutex_waiter *blocked_on;
-@@ -1549,6 +1598,30 @@ struct task_struct {
- unsigned long default_timer_slack_ns;
-
- struct list_head *scm_work_list;
-+
-+#ifdef CONFIG_GRKERNSEC
-+ /* grsecurity */
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ u64 exec_id;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ const struct cred *delayed_cred;
-+#endif
-+ struct dentry *gr_chroot_dentry;
-+ struct acl_subject_label *acl;
-+ struct acl_subject_label *tmpacl;
-+ struct acl_role_label *role;
-+ struct file *exec_file;
-+ unsigned long brute_expires;
-+ u16 acl_role_id;
-+ u8 inherited;
-+ /* is this the task that authenticated to the special role */
-+ u8 acl_sp_role;
-+ u8 is_writable;
-+ u8 brute;
-+ u8 gr_is_chrooted;
-+#endif
-+
- #ifdef CONFIG_FUNCTION_GRAPH_TRACER
- /* Index of current stored address in ret_stack */
- int curr_ret_stack;
-@@ -1560,7 +1633,7 @@ struct task_struct {
- * Number of functions that haven't been traced
- * because of depth overrun.
- */
-- atomic_t trace_overrun;
-+ atomic_unchecked_t trace_overrun;
- /* Pause for the tracing */
- atomic_t tracing_graph_pause;
- #endif
-@@ -1581,7 +1654,54 @@ struct task_struct {
- #ifdef CONFIG_HAVE_HW_BREAKPOINT
- atomic_t ptrace_bp_refcnt;
- #endif
--};
-+} __randomize_layout;
-+
-+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
-+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
-+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
-+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
-+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
-+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
-+
-+#ifdef CONFIG_PAX_SOFTMODE
-+extern int pax_softmode;
-+#endif
-+
-+extern int pax_check_flags(unsigned long *);
-+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
-+
-+/* if tsk != current then task_lock must be held on it */
-+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
-+static inline unsigned long pax_get_flags(struct task_struct *tsk)
-+{
-+ if (likely(tsk->mm))
-+ return tsk->mm->pax_flags;
-+ else
-+ return 0UL;
-+}
-+
-+/* if tsk != current then task_lock must be held on it */
-+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
-+{
-+ if (likely(tsk->mm)) {
-+ tsk->mm->pax_flags = flags;
-+ return 0;
-+ }
-+ return -EINVAL;
-+}
-+#endif
-+
-+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
-+extern void pax_set_initial_flags(struct linux_binprm *bprm);
-+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
-+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
-+#endif
-+
-+struct path;
-+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
-+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
-+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
-+extern void pax_report_refcount_overflow(struct pt_regs *regs);
-
- /* Future-safe accessor for struct task_struct's cpus_allowed. */
- #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-@@ -1689,8 +1809,19 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
- return pid_vnr(task_tgid(tsk));
- }
-
-+/**
-+ * pid_alive - check that a task structure is not stale
-+ * @p: Task structure to be checked.
-+ *
-+ * Test if a process is not yet dead (at most zombie state)
-+ * If pid_alive fails, then pointers within the task structure
-+ * can be stale and must not be dereferenced.
-+ */
-+static inline int pid_alive(const struct task_struct *p)
-+{
-+ return p->pids[PIDTYPE_PID].pid != NULL;
-+}
-
--static int pid_alive(const struct task_struct *p);
- static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
- {
- pid_t pid = 0;
-@@ -1738,19 +1869,6 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
- }
-
- /**
-- * pid_alive - check that a task structure is not stale
-- * @p: Task structure to be checked.
-- *
-- * Test if a process is not yet dead (at most zombie state)
-- * If pid_alive fails, then pointers within the task structure
-- * can be stale and must not be dereferenced.
-- */
--static inline int pid_alive(const struct task_struct *p)
--{
-- return p->pids[PIDTYPE_PID].pid != NULL;
--}
--
--/**
- * is_global_init - check if a task structure is init
- * @tsk: Task structure to be checked.
- *
-@@ -1953,6 +2071,25 @@ extern u64 sched_clock_cpu(int cpu);
-
- extern void sched_clock_init(void);
-
-+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
-+static inline void populate_stack(void)
-+{
-+ struct task_struct *curtask = current;
-+ int c;
-+ int *ptr = curtask->stack;
-+ int *end = curtask->stack + THREAD_SIZE;
-+
-+ while (ptr < end) {
-+ c = *(volatile int *)ptr;
-+ ptr += PAGE_SIZE/sizeof(int);
-+ }
-+}
-+#else
-+static inline void populate_stack(void)
-+{
-+}
-+#endif
-+
- #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
- static inline void sched_clock_tick(void)
- {
-@@ -2116,7 +2253,9 @@ void yield(void);
- extern struct exec_domain default_exec_domain;
-
- union thread_union {
-+#ifndef CONFIG_X86
- struct thread_info thread_info;
-+#endif
- unsigned long stack[THREAD_SIZE/sizeof(long)];
- };
-
-@@ -2149,6 +2288,7 @@ extern struct pid_namespace init_pid_ns;
- */
-
- extern struct task_struct *find_task_by_vpid(pid_t nr);
-+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
- extern struct task_struct *find_task_by_pid_ns(pid_t nr,
- struct pid_namespace *ns);
-
-@@ -2270,6 +2410,12 @@ static inline void mmdrop(struct mm_struct * mm)
- extern void mmput(struct mm_struct *);
- /* Grab a reference to a task's mm, if it is not already going away */
- extern struct mm_struct *get_task_mm(struct task_struct *task);
-+/*
-+ * Grab a reference to a task's mm, if it is not already going away
-+ * and ptrace_may_access with the mode parameter passed to it
-+ * succeeds.
-+ */
-+extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
- /* Remove the current tasks stale references to the old mm_struct */
- extern void mm_release(struct task_struct *, struct mm_struct *);
- /* Allocate a new mm structure and copy contents from tsk->mm */
-@@ -2286,9 +2432,8 @@ extern void __cleanup_sighand(struct sighand_struct *);
- extern void exit_itimers(struct signal_struct *);
- extern void flush_itimer_signals(void);
-
--extern NORET_TYPE void do_group_exit(int);
-+extern __noreturn void do_group_exit(int);
-
--extern void daemonize(const char *, ...);
- extern int allow_signal(int);
- extern int disallow_signal(int);
-
-@@ -2451,9 +2596,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
-
- #endif
-
--static inline int object_is_on_stack(void *obj)
-+static inline int object_starts_on_stack(const void *obj)
- {
-- void *stack = task_stack_page(current);
-+ const void *stack = task_stack_page(current);
-
- return (obj >= stack) && (obj < (stack + THREAD_SIZE));
- }
-diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
-index 899fbb4..1cb4138 100644
---- a/include/linux/screen_info.h
-+++ b/include/linux/screen_info.h
-@@ -43,7 +43,8 @@ struct screen_info {
- __u16 pages; /* 0x32 */
- __u16 vesa_attributes; /* 0x34 */
- __u32 capabilities; /* 0x36 */
-- __u8 _reserved[6]; /* 0x3a */
-+ __u16 vesapm_size; /* 0x3a */
-+ __u8 _reserved[4]; /* 0x3c */
- } __attribute__((packed));
-
- #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
-diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
-index cc7a4e9..306733e 100644
---- a/include/linux/seccomp.h
-+++ b/include/linux/seccomp.h
-@@ -1,25 +1,89 @@
- #ifndef _LINUX_SECCOMP_H
- #define _LINUX_SECCOMP_H
-
-+#include <linux/compiler.h>
-+#include <linux/types.h>
-
-+
-+/* Valid values for seccomp.mode and prctl(PR_SET_SECCOMP, <mode>) */
-+#define SECCOMP_MODE_DISABLED 0 /* seccomp is not in use. */
-+#define SECCOMP_MODE_STRICT 1 /* uses hard-coded filter. */
-+#define SECCOMP_MODE_FILTER 2 /* uses user-supplied filter. */
-+
-+/*
-+ * All BPF programs must return a 32-bit value.
-+ * The bottom 16-bits are for optional return data.
-+ * The upper 16-bits are ordered from least permissive values to most.
-+ *
-+ * The ordering ensures that a min_t() over composed return values always
-+ * selects the least permissive choice.
-+ */
-+#define SECCOMP_RET_KILL 0x00000000U /* kill the task immediately */
-+#define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */
-+#define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */
-+#define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */
-+#define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */
-+
-+/* Masks for the return value sections. */
-+#define SECCOMP_RET_ACTION 0xffff0000U
-+#define SECCOMP_RET_DATA 0x0000ffffU
-+
-+/**
-+ * struct seccomp_data - the format the BPF program executes over.
-+ * @nr: the system call number
-+ * @arch: indicates system call convention as an AUDIT_ARCH_* value
-+ * as defined in <linux/audit.h>.
-+ * @instruction_pointer: at the time of the system call.
-+ * @args: up to 6 system call arguments always stored as 64-bit values
-+ * regardless of the architecture.
-+ */
-+struct seccomp_data {
-+ int nr;
-+ __u32 arch;
-+ __u64 instruction_pointer;
-+ __u64 args[6];
-+};
-+
-+#ifdef __KERNEL__
- #ifdef CONFIG_SECCOMP
-
- #include <linux/thread_info.h>
- #include <asm/seccomp.h>
-
--typedef struct { int mode; } seccomp_t;
-+struct seccomp_filter;
-+/**
-+ * struct seccomp - the state of a seccomp'ed process
-+ *
-+ * @mode: indicates one of the valid values above for controlled
-+ * system calls available to a process.
-+ * @filter: The metadata and ruleset for determining what system calls
-+ * are allowed for a task.
-+ *
-+ * @filter must only be accessed from the context of current as there
-+ * is no locking.
-+ */
-+struct seccomp {
-+ int mode;
-+ struct seccomp_filter *filter;
-+};
-
--extern void __secure_computing(int);
--static inline void secure_computing(int this_syscall)
-+/*
-+ * Direct callers to __secure_computing should be updated as
-+ * CONFIG_HAVE_ARCH_SECCOMP_FILTER propagates.
-+ */
-+extern void __secure_computing(int) __deprecated;
-+extern int __secure_computing_int(int);
-+static inline int secure_computing(int this_syscall)
- {
- if (unlikely(test_thread_flag(TIF_SECCOMP)))
-- __secure_computing(this_syscall);
-+ return __secure_computing_int(this_syscall);
-+ return 0;
- }
-
- extern long prctl_get_seccomp(void);
--extern long prctl_set_seccomp(unsigned long);
-+extern long prctl_set_seccomp(unsigned long, char __user *);
-
--static inline int seccomp_mode(seccomp_t *s)
-+static inline int seccomp_mode(struct seccomp *s)
- {
- return s->mode;
- }
-@@ -28,25 +92,40 @@ static inline int seccomp_mode(seccomp_t *s)
-
- #include <linux/errno.h>
-
--typedef struct { } seccomp_t;
-+struct seccomp { };
-+struct seccomp_filter { };
-
--#define secure_computing(x) do { } while (0)
-+#define secure_computing(x) 0
-
- static inline long prctl_get_seccomp(void)
- {
- return -EINVAL;
- }
-
--static inline long prctl_set_seccomp(unsigned long arg2)
-+static inline long prctl_set_seccomp(unsigned long arg2, char __user *arg3)
- {
- return -EINVAL;
- }
-
--static inline int seccomp_mode(seccomp_t *s)
-+static inline int seccomp_mode(struct seccomp *s)
- {
- return 0;
- }
--
- #endif /* CONFIG_SECCOMP */
-
-+#ifdef CONFIG_SECCOMP_FILTER
-+extern void put_seccomp_filter(struct task_struct *tsk);
-+extern void get_seccomp_filter(struct task_struct *tsk);
-+extern u32 seccomp_bpf_load(int off);
-+#else /* CONFIG_SECCOMP_FILTER */
-+static inline void put_seccomp_filter(struct task_struct *tsk)
-+{
-+ return;
-+}
-+static inline void get_seccomp_filter(struct task_struct *tsk)
-+{
-+ return;
-+}
-+#endif /* CONFIG_SECCOMP_FILTER */
-+#endif /* __KERNEL__ */
- #endif /* _LINUX_SECCOMP_H */
-diff --git a/include/linux/security.h b/include/linux/security.h
-index e8c619d..97e49a6 100644
---- a/include/linux/security.h
-+++ b/include/linux/security.h
-@@ -37,6 +37,7 @@
- #include <linux/xfrm.h>
- #include <linux/slab.h>
- #include <linux/xattr.h>
-+#include <linux/grsecurity.h>
- #include <net/flow.h>
-
- /* Maximum number of letters for an LSM name string */
-@@ -98,8 +99,6 @@ struct seq_file;
- extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
- extern int cap_netlink_recv(struct sk_buff *skb, int cap);
-
--void reset_security_ops(void);
--
- #ifdef CONFIG_MMU
- extern unsigned long mmap_min_addr;
- extern unsigned long dac_mmap_min_addr;
-@@ -130,6 +129,7 @@ struct request_sock;
- #define LSM_UNSAFE_SHARE 1
- #define LSM_UNSAFE_PTRACE 2
- #define LSM_UNSAFE_PTRACE_CAP 4
-+#define LSM_UNSAFE_NO_NEW_PRIVS 8
-
- #ifdef CONFIG_MMU
- /*
-@@ -1654,7 +1654,7 @@ struct security_operations {
- struct audit_context *actx);
- void (*audit_rule_free) (void *lsmrule);
- #endif /* CONFIG_AUDIT */
--};
-+} __randomize_layout;
-
- /* prototypes */
- extern int security_init(void);
-@@ -1676,6 +1676,8 @@ int security_capset(struct cred *new, const struct cred *old,
- const kernel_cap_t *permitted);
- int security_capable(struct user_namespace *ns, const struct cred *cred,
- int cap);
-+int security_capable_noaudit(struct user_namespace *ns, const struct cred *cred,
-+ int cap);
- int security_real_capable(struct task_struct *tsk, struct user_namespace *ns,
- int cap);
- int security_real_capable_noaudit(struct task_struct *tsk,
-@@ -1880,6 +1882,12 @@ static inline int security_capable(struct user_namespace *ns,
- return cap_capable(current, cred, ns, cap, SECURITY_CAP_AUDIT);
- }
-
-+static inline int security_capable_noaudit(struct user_namespace *ns,
-+ const struct cred *cred, int cap)
-+{
-+ return cap_capable(current, cred, ns, cap, SECURITY_CAP_NOAUDIT);
-+}
-+
- static inline int security_real_capable(struct task_struct *tsk, struct user_namespace *ns, int cap)
- {
- int ret;
-diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
-index dc368b8..e895209 100644
---- a/include/linux/semaphore.h
-+++ b/include/linux/semaphore.h
-@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
- }
-
- extern void down(struct semaphore *sem);
--extern int __must_check down_interruptible(struct semaphore *sem);
-+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
- extern int __must_check down_killable(struct semaphore *sem);
- extern int __must_check down_trylock(struct semaphore *sem);
- extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
-diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
-index 0b69a46..39a6b09 100644
---- a/include/linux/seq_file.h
-+++ b/include/linux/seq_file.h
-@@ -24,6 +24,9 @@ struct seq_file {
- struct mutex lock;
- const struct seq_operations *op;
- int poll_event;
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ u64 exec_id;
-+#endif
- void *private;
- };
-
-@@ -33,6 +36,7 @@ struct seq_operations {
- void * (*next) (struct seq_file *m, void *v, loff_t *pos);
- int (*show) (struct seq_file *m, void *v);
- };
-+typedef struct seq_operations __no_const seq_operations_no_const;
-
- #define SEQ_SKIP 1
-
-@@ -76,6 +80,7 @@ static inline void seq_commit(struct seq_file *m, int num)
-
- char *mangle_path(char *s, char *p, char *esc);
- int seq_open(struct file *, const struct seq_operations *);
-+int seq_open_restrict(struct file *, const struct seq_operations *);
- ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
- loff_t seq_lseek(struct file *, loff_t, int);
- int seq_release(struct inode *, struct file *);
-@@ -117,6 +122,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
- }
-
- int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
-+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
- int single_release(struct inode *, struct file *);
- void *__seq_open_private(struct file *, const struct seq_operations *, int);
- int seq_open_private(struct file *, const struct seq_operations *, int);
-diff --git a/include/linux/shm.h b/include/linux/shm.h
-index 92808b8..c28cac4 100644
---- a/include/linux/shm.h
-+++ b/include/linux/shm.h
-@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
-
- /* The task created the shm object. NULL if the task is dead. */
- struct task_struct *shm_creator;
-+#ifdef CONFIG_GRKERNSEC
-+ time_t shm_createtime;
-+ pid_t shm_lapid;
-+#endif
- };
-
- /* shm_mode upper byte flags */
-diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
-index 1b4ea29..9347e29 100644
---- a/include/linux/skbuff.h
-+++ b/include/linux/skbuff.h
-@@ -538,7 +538,7 @@ extern void consume_skb(struct sk_buff *skb);
- extern void __kfree_skb(struct sk_buff *skb);
- extern struct sk_buff *__alloc_skb(unsigned int size,
- gfp_t priority, int fclone, int node);
--static inline struct sk_buff *alloc_skb(unsigned int size,
-+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
- gfp_t priority)
- {
- return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
-@@ -650,7 +650,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
- */
- static inline int skb_queue_empty(const struct sk_buff_head *list)
- {
-- return list->next == (struct sk_buff *)list;
-+ return list->next == (const struct sk_buff *)list;
- }
-
- /**
-@@ -663,7 +663,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
- static inline bool skb_queue_is_last(const struct sk_buff_head *list,
- const struct sk_buff *skb)
- {
-- return skb->next == (struct sk_buff *)list;
-+ return skb->next == (const struct sk_buff *)list;
- }
-
- /**
-@@ -676,7 +676,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
- static inline bool skb_queue_is_first(const struct sk_buff_head *list,
- const struct sk_buff *skb)
- {
-- return skb->prev == (struct sk_buff *)list;
-+ return skb->prev == (const struct sk_buff *)list;
- }
-
- /**
-@@ -1516,7 +1516,7 @@ static inline u32 skb_network_header_len(const struct sk_buff *skb)
- return skb->transport_header - skb->network_header;
- }
-
--static inline int skb_network_offset(const struct sk_buff *skb)
-+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
- {
- return skb_network_header(skb) - skb->data;
- }
-@@ -1571,7 +1571,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
- * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
- */
- #ifndef NET_SKB_PAD
--#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
-+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
- #endif
-
- extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
-@@ -2126,7 +2126,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
- int noblock, int *err);
- extern unsigned int datagram_poll(struct file *file, struct socket *sock,
- struct poll_table_struct *wait);
--extern int skb_copy_datagram_iovec(const struct sk_buff *from,
-+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
- int offset, struct iovec *to,
- int size);
- extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
-@@ -2408,6 +2408,9 @@ static inline void nf_reset(struct sk_buff *skb)
- nf_bridge_put(skb->nf_bridge);
- skb->nf_bridge = NULL;
- #endif
-+#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
-+ skb->nf_trace = 0;
-+#endif
- }
-
- static inline void nf_reset_trace(struct sk_buff *skb)
-diff --git a/include/linux/slab.h b/include/linux/slab.h
-index 67d5d94..c51ce65 100644
---- a/include/linux/slab.h
-+++ b/include/linux/slab.h
-@@ -11,14 +11,29 @@
-
- #include <linux/gfp.h>
- #include <linux/types.h>
-+#include <linux/err.h>
-
- /*
- * Flags to pass to kmem_cache_create().
- * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
- */
- #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
-+
-+#ifdef CONFIG_PAX_USERCOPY_SLABS
-+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
-+#else
-+#define SLAB_USERCOPY 0x00000000UL
-+#endif
-+
- #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
- #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
-+
-+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
-+#else
-+#define SLAB_NO_SANITIZE 0x00000000UL
-+#endif
-+
- #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
- #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
- #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
-@@ -87,10 +102,27 @@
- * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
- * Both make kfree a no-op.
- */
--#define ZERO_SIZE_PTR ((void *)16)
-+#define ZERO_SIZE_PTR \
-+({ \
-+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
-+ (void *)(-MAX_ERRNO-1L); \
-+})
-
--#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
-- (unsigned long)ZERO_SIZE_PTR)
-+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
-+
-+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+#ifdef CONFIG_X86_64
-+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
-+#else
-+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
-+#endif
-+enum pax_sanitize_mode {
-+ PAX_SANITIZE_SLAB_OFF = 0,
-+ PAX_SANITIZE_SLAB_FAST,
-+ PAX_SANITIZE_SLAB_FULL,
-+};
-+extern enum pax_sanitize_mode pax_sanitize_slab;
-+#endif
-
- /*
- * struct kmem_cache related prototypes
-@@ -161,6 +193,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
- void kfree(const void *);
- void kzfree(const void *);
- size_t ksize(const void *);
-+const char *check_heap_object(const void *ptr, unsigned long n);
-+bool is_usercopy_object(const void *ptr);
-
- /*
- * Allocator specific definitions. These are mainly used to establish optimized
-diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
-index d00e0ba..e075bd20 100644
---- a/include/linux/slab_def.h
-+++ b/include/linux/slab_def.h
-@@ -68,10 +68,14 @@ struct kmem_cache {
- unsigned long node_allocs;
- unsigned long node_frees;
- unsigned long node_overflow;
-- atomic_t allochit;
-- atomic_t allocmiss;
-- atomic_t freehit;
-- atomic_t freemiss;
-+ atomic_unchecked_t allochit;
-+ atomic_unchecked_t allocmiss;
-+ atomic_unchecked_t freehit;
-+ atomic_unchecked_t freemiss;
-+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ atomic_unchecked_t sanitized;
-+ atomic_unchecked_t not_sanitized;
-+#endif
-
- /*
- * If debugging is enabled, then the allocator can add additional
-@@ -105,6 +109,11 @@ struct cache_sizes {
- #ifdef CONFIG_ZONE_DMA
- struct kmem_cache *cs_dmacachep;
- #endif
-+
-+#ifdef CONFIG_PAX_USERCOPY_SLABS
-+ struct kmem_cache *cs_usercopycachep;
-+#endif
-+
- };
- extern struct cache_sizes malloc_sizes[];
-
-@@ -152,6 +161,13 @@ found:
- cachep = malloc_sizes[i].cs_dmacachep;
- else
- #endif
-+
-+#ifdef CONFIG_PAX_USERCOPY_SLABS
-+ if (flags & GFP_USERCOPY)
-+ cachep = malloc_sizes[i].cs_usercopycachep;
-+ else
-+#endif
-+
- cachep = malloc_sizes[i].cs_cachep;
-
- ret = kmem_cache_alloc_trace(size, cachep, flags);
-@@ -181,6 +197,7 @@ kmem_cache_alloc_node_trace(size_t size,
- }
- #endif
-
-+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
- static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
- {
- struct kmem_cache *cachep;
-@@ -205,6 +222,13 @@ found:
- cachep = malloc_sizes[i].cs_dmacachep;
- else
- #endif
-+
-+#ifdef CONFIG_PAX_USERCOPY_SLABS
-+ if (flags & GFP_USERCOPY)
-+ cachep = malloc_sizes[i].cs_usercopycachep;
-+ else
-+#endif
-+
- cachep = malloc_sizes[i].cs_cachep;
-
- return kmem_cache_alloc_node_trace(size, cachep, flags, node);
-diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
-index a32bcfd..f0246c3 100644
---- a/include/linux/slub_def.h
-+++ b/include/linux/slub_def.h
-@@ -89,7 +89,7 @@ struct kmem_cache {
- struct kmem_cache_order_objects max;
- struct kmem_cache_order_objects min;
- gfp_t allocflags; /* gfp flags to use on each alloc */
-- int refcount; /* Refcount for slab cache destroy */
-+ atomic_t refcount; /* Refcount for slab cache destroy */
- void (*ctor)(void *);
- int inuse; /* Offset to metadata */
- int align; /* Alignment */
-@@ -150,7 +150,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
- * Sorry that the following has to be that ugly but some versions of GCC
- * have trouble with constant propagation and loops.
- */
--static __always_inline int kmalloc_index(size_t size)
-+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
- {
- if (!size)
- return 0;
-@@ -215,9 +215,9 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
- }
-
- void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
--void *__kmalloc(size_t size, gfp_t flags);
-+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
-
--static __always_inline void *
-+static __always_inline __size_overflow(1) void *
- kmalloc_order(size_t size, gfp_t flags, unsigned int order)
- {
- void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
-diff --git a/include/linux/sonet.h b/include/linux/sonet.h
-index de8832d..0147b46 100644
---- a/include/linux/sonet.h
-+++ b/include/linux/sonet.h
-@@ -61,7 +61,7 @@ struct sonet_stats {
- #include <linux/atomic.h>
-
- struct k_sonet_stats {
--#define __HANDLE_ITEM(i) atomic_t i
-+#define __HANDLE_ITEM(i) atomic_unchecked_t i
- __SONET_ITEMS
- #undef __HANDLE_ITEM
- };
-diff --git a/include/linux/stddef.h b/include/linux/stddef.h
-index 6a40c76..1747b67 100644
---- a/include/linux/stddef.h
-+++ b/include/linux/stddef.h
-@@ -3,14 +3,10 @@
-
- #include <linux/compiler.h>
-
-+#ifdef __KERNEL__
-+
- #undef NULL
--#if defined(__cplusplus)
--#define NULL 0
--#else
- #define NULL ((void *)0)
--#endif
--
--#ifdef __KERNEL__
-
- enum {
- false = 0,
-diff --git a/include/linux/string.h b/include/linux/string.h
-index 8515a4d..3d9feb7 100644
---- a/include/linux/string.h
-+++ b/include/linux/string.h
-@@ -133,7 +133,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
- #endif
-
- extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
-- const void *from, size_t available);
-+ const void *from, size_t available);
-
- /**
- * strstarts - does @str start with @prefix?
-diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
-index 3d8f9c4..349a695 100644
---- a/include/linux/sunrpc/clnt.h
-+++ b/include/linux/sunrpc/clnt.h
-@@ -98,7 +98,7 @@ struct rpc_procinfo {
- unsigned int p_timer; /* Which RTT timer to use */
- u32 p_statidx; /* Which procedure to account */
- char * p_name; /* name of procedure */
--};
-+} __do_const;
-
- #ifdef __KERNEL__
-
-@@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
- {
- switch (sap->sa_family) {
- case AF_INET:
-- return ntohs(((struct sockaddr_in *)sap)->sin_port);
-+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
- case AF_INET6:
-- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
-+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
- }
- return 0;
- }
-@@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
- static inline bool __rpc_copy_addr4(struct sockaddr *dst,
- const struct sockaddr *src)
- {
-- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
-+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
- struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
-
- dsin->sin_family = ssin->sin_family;
-@@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
- if (sa->sa_family != AF_INET6)
- return 0;
-
-- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
-+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
- }
-
- #endif /* __KERNEL__ */
-diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
-index 35b37b1..c39eab4 100644
---- a/include/linux/sunrpc/svc.h
-+++ b/include/linux/sunrpc/svc.h
-@@ -408,7 +408,7 @@ struct svc_procedure {
- unsigned int pc_count; /* call count */
- unsigned int pc_cachetype; /* cache info (NFS) */
- unsigned int pc_xdrressize; /* maximum size of XDR reply */
--};
-+} __do_const;
-
- /*
- * Function prototypes.
-diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
-index c14fe86..d04f36c 100644
---- a/include/linux/sunrpc/svc_rdma.h
-+++ b/include/linux/sunrpc/svc_rdma.h
-@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
- extern unsigned int svcrdma_max_requests;
- extern unsigned int svcrdma_max_req_size;
-
--extern atomic_t rdma_stat_recv;
--extern atomic_t rdma_stat_read;
--extern atomic_t rdma_stat_write;
--extern atomic_t rdma_stat_sq_starve;
--extern atomic_t rdma_stat_rq_starve;
--extern atomic_t rdma_stat_rq_poll;
--extern atomic_t rdma_stat_rq_prod;
--extern atomic_t rdma_stat_sq_poll;
--extern atomic_t rdma_stat_sq_prod;
-+extern atomic_unchecked_t rdma_stat_recv;
-+extern atomic_unchecked_t rdma_stat_read;
-+extern atomic_unchecked_t rdma_stat_write;
-+extern atomic_unchecked_t rdma_stat_sq_starve;
-+extern atomic_unchecked_t rdma_stat_rq_starve;
-+extern atomic_unchecked_t rdma_stat_rq_poll;
-+extern atomic_unchecked_t rdma_stat_rq_prod;
-+extern atomic_unchecked_t rdma_stat_sq_poll;
-+extern atomic_unchecked_t rdma_stat_sq_prod;
-
- #define RPCRDMA_VERSION 1
-
-@@ -292,7 +292,7 @@ svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
- if (wr_ary) {
- rp_ary = (struct rpcrdma_write_array *)
- &wr_ary->
-- wc_array[wr_ary->wc_nchunks].wc_target.rs_length;
-+ wc_array[ntohl(wr_ary->wc_nchunks)].wc_target.rs_length;
-
- goto found_it;
- }
-diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
-index 25d333c..a722ca0 100644
---- a/include/linux/sunrpc/svcauth.h
-+++ b/include/linux/sunrpc/svcauth.h
-@@ -100,7 +100,7 @@ struct auth_ops {
- int (*release)(struct svc_rqst *rq);
- void (*domain_release)(struct auth_domain *);
- int (*set_client)(struct svc_rqst *rq);
--};
-+} __do_const;
-
- #define SVC_GARBAGE 1
- #define SVC_SYSERR 2
-diff --git a/include/linux/swab.h b/include/linux/swab.h
-index ea0c02f..0eed39d 100644
---- a/include/linux/swab.h
-+++ b/include/linux/swab.h
-@@ -43,7 +43,7 @@
- * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
- */
-
--static inline __attribute_const__ __u16 __fswab16(__u16 val)
-+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
- {
- #ifdef __arch_swab16
- return __arch_swab16(val);
-@@ -52,7 +52,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
- #endif
- }
-
--static inline __attribute_const__ __u32 __fswab32(__u32 val)
-+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
- {
- #ifdef __arch_swab32
- return __arch_swab32(val);
-@@ -61,7 +61,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
- #endif
- }
-
--static inline __attribute_const__ __u64 __fswab64(__u64 val)
-+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
- {
- #ifdef __arch_swab64
- return __arch_swab64(val);
-diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
-index 86a24b11..a84e6d1 100644
---- a/include/linux/syscalls.h
-+++ b/include/linux/syscalls.h
-@@ -83,12 +83,19 @@ struct file_handle;
- #define __SC_DECL5(t5, a5, ...) t5 a5, __SC_DECL4(__VA_ARGS__)
- #define __SC_DECL6(t6, a6, ...) t6 a6, __SC_DECL5(__VA_ARGS__)
-
--#define __SC_LONG1(t1, a1) long a1
--#define __SC_LONG2(t2, a2, ...) long a2, __SC_LONG1(__VA_ARGS__)
--#define __SC_LONG3(t3, a3, ...) long a3, __SC_LONG2(__VA_ARGS__)
--#define __SC_LONG4(t4, a4, ...) long a4, __SC_LONG3(__VA_ARGS__)
--#define __SC_LONG5(t5, a5, ...) long a5, __SC_LONG4(__VA_ARGS__)
--#define __SC_LONG6(t6, a6, ...) long a6, __SC_LONG5(__VA_ARGS__)
-+#define __SC_TYPE(t, a) __typeof( \
-+ __builtin_choose_expr( \
-+ sizeof(t) > sizeof(int), \
-+ (t) 0, \
-+ __builtin_choose_expr(__type_is_unsigned(t), 0UL, 0L) \
-+ )) a
-+
-+#define __SC_LONG1(t1, a1) __SC_TYPE(t1, a1)
-+#define __SC_LONG2(t2, a2, ...) __SC_TYPE(t2, a2), __SC_LONG1(__VA_ARGS__)
-+#define __SC_LONG3(t3, a3, ...) __SC_TYPE(t3, a3), __SC_LONG2(__VA_ARGS__)
-+#define __SC_LONG4(t4, a4, ...) __SC_TYPE(t4, a4), __SC_LONG3(__VA_ARGS__)
-+#define __SC_LONG5(t5, a5, ...) __SC_TYPE(t5, a5), __SC_LONG4(__VA_ARGS__)
-+#define __SC_LONG6(t6, a6, ...) __SC_TYPE(t6, a6), __SC_LONG5(__VA_ARGS__)
-
- #define __SC_CAST1(t1, a1) (t1) a1
- #define __SC_CAST2(t2, a2, ...) (t2) a2, __SC_CAST1(__VA_ARGS__)
-@@ -392,11 +399,11 @@ asmlinkage long sys_sync(void);
- asmlinkage long sys_fsync(unsigned int fd);
- asmlinkage long sys_fdatasync(unsigned int fd);
- asmlinkage long sys_bdflush(int func, long data);
--asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
-- char __user *type, unsigned long flags,
-+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
-+ const char __user *type, unsigned long flags,
- void __user *data);
--asmlinkage long sys_umount(char __user *name, int flags);
--asmlinkage long sys_oldumount(char __user *name);
-+asmlinkage long sys_umount(const char __user *name, int flags);
-+asmlinkage long sys_oldumount(const char __user *name);
- asmlinkage long sys_truncate(const char __user *path, long length);
- asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
- asmlinkage long sys_stat(const char __user *filename,
-@@ -608,7 +615,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
- asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
- asmlinkage long sys_send(int, void __user *, size_t, unsigned);
- asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
-- struct sockaddr __user *, int);
-+ struct sockaddr __user *, int) __intentional_overflow(0);
- asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
- asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
- unsigned int vlen, unsigned flags);
-@@ -667,10 +674,10 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
-
- asmlinkage long sys_semget(key_t key, int nsems, int semflg);
- asmlinkage long sys_semop(int semid, struct sembuf __user *sops,
-- unsigned nsops);
-+ long nsops);
- asmlinkage long sys_semctl(int semid, int semnum, int cmd, union semun arg);
- asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops,
-- unsigned nsops,
-+ long nsops,
- const struct timespec __user *timeout);
- asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg);
- asmlinkage long sys_shmget(key_t key, size_t size, int flag);
-diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
-index 27b3b0b..e093dd9 100644
---- a/include/linux/syscore_ops.h
-+++ b/include/linux/syscore_ops.h
-@@ -16,7 +16,7 @@ struct syscore_ops {
- int (*suspend)(void);
- void (*resume)(void);
- void (*shutdown)(void);
--};
-+} __do_const;
-
- extern void register_syscore_ops(struct syscore_ops *ops);
- extern void unregister_syscore_ops(struct syscore_ops *ops);
-diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
-index 703cfa33..98e3375 100644
---- a/include/linux/sysctl.h
-+++ b/include/linux/sysctl.h
-@@ -155,8 +155,6 @@ enum
- KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
- };
-
--
--
- /* CTL_VM names: */
- enum
- {
-@@ -961,17 +959,21 @@ extern void sysctl_head_finish(struct ctl_table_header *prev);
- extern int sysctl_perm(struct ctl_table_root *root,
- struct ctl_table *table, int op);
-
--typedef struct ctl_table ctl_table;
--
- typedef int proc_handler (struct ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-
- extern int proc_dostring(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-+extern int proc_dostring_modpriv(struct ctl_table *, int,
-+ void __user *, size_t *, loff_t *);
- extern int proc_dointvec(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-+extern int proc_dointvec_secure(struct ctl_table *, int,
-+ void __user *, size_t *, loff_t *);
- extern int proc_dointvec_minmax(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-+extern int proc_dointvec_minmax_secure(struct ctl_table *, int,
-+ void __user *, size_t *, loff_t *);
- extern int proc_dointvec_jiffies(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
- extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
-@@ -1045,7 +1047,9 @@ struct ctl_table
- struct ctl_table_poll *poll;
- void *extra1;
- void *extra2;
--};
-+} __do_const __randomize_layout;
-+typedef struct ctl_table __no_const ctl_table_no_const;
-+typedef struct ctl_table ctl_table;
-
- struct ctl_table_root {
- struct list_head root_list;
-diff --git a/include/linux/sysdev.h b/include/linux/sysdev.h
-index 20f63d3..fdd3cbb 100644
---- a/include/linux/sysdev.h
-+++ b/include/linux/sysdev.h
-@@ -98,7 +98,7 @@ struct sysdev_attribute {
- ssize_t (*store)(struct sys_device *, struct sysdev_attribute *,
- const char *, size_t);
- };
--
-+typedef struct sysdev_attribute __no_const sysdev_attribute_no_const;
-
- #define _SYSDEV_ATTR(_name, _mode, _show, _store) \
- { \
-diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
-index 2b9cd8d..b8a7592 100644
---- a/include/linux/sysfs.h
-+++ b/include/linux/sysfs.h
-@@ -30,7 +30,8 @@ struct attribute {
- struct lock_class_key *key;
- struct lock_class_key skey;
- #endif
--};
-+} __do_const;
-+typedef struct attribute __no_const attribute_no_const;
-
- /**
- * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
-@@ -58,8 +59,8 @@ struct attribute_group {
- mode_t (*is_visible)(struct kobject *,
- struct attribute *, int);
- struct attribute **attrs;
--};
--
-+} __do_const;
-+typedef struct attribute_group __no_const attribute_group_no_const;
-
-
- /**
-@@ -104,7 +105,8 @@ struct bin_attribute {
- char *, loff_t, size_t);
- int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
- struct vm_area_struct *vma);
--};
-+} __do_const;
-+typedef struct bin_attribute __no_const bin_attribute_no_const;
-
- /**
- * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
-diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
-index 7faf933..9b85a0c 100644
---- a/include/linux/sysrq.h
-+++ b/include/linux/sysrq.h
-@@ -16,6 +16,7 @@
-
- #include <linux/errno.h>
- #include <linux/types.h>
-+#include <linux/compiler.h>
-
- /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
- #define SYSRQ_DEFAULT_ENABLE 1
-@@ -36,7 +37,7 @@ struct sysrq_key_op {
- char *help_msg;
- char *action_msg;
- int enable_mask;
--};
-+} __do_const;
-
- #ifdef CONFIG_MAGIC_SYSRQ
-
-diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
-index 8d03f07..995ab36 100644
---- a/include/linux/thread_info.h
-+++ b/include/linux/thread_info.h
-@@ -123,6 +123,13 @@ static inline void set_restore_sigmask(void)
- }
- #endif /* TIF_RESTORE_SIGMASK && !HAVE_SET_RESTORE_SIGMASK */
-
-+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
-+
-+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
-+{
-+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
-+}
-+
- #endif /* __KERNEL__ */
-
- #endif /* _LINUX_THREAD_INFO_H */
-diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
-index a71a292..51bd91d 100644
---- a/include/linux/tracehook.h
-+++ b/include/linux/tracehook.h
-@@ -54,12 +54,12 @@ struct linux_binprm;
- /*
- * ptrace report for syscall entry and exit looks identical.
- */
--static inline void ptrace_report_syscall(struct pt_regs *regs)
-+static inline int ptrace_report_syscall(struct pt_regs *regs)
- {
- int ptrace = current->ptrace;
-
- if (!(ptrace & PT_PTRACED))
-- return;
-+ return 0;
-
- ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
-
-@@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
-+
-+ return fatal_signal_pending(current);
- }
-
- /**
-@@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
- static inline __must_check int tracehook_report_syscall_entry(
- struct pt_regs *regs)
- {
-- ptrace_report_syscall(regs);
-- return 0;
-+ return ptrace_report_syscall(regs);
- }
-
- /**
-diff --git a/include/linux/tty.h b/include/linux/tty.h
-index 5dbb3cb..a192962 100644
---- a/include/linux/tty.h
-+++ b/include/linux/tty.h
-@@ -331,7 +331,7 @@ struct tty_struct {
- /* If the tty has a pending do_SAK, queue it here - akpm */
- struct work_struct SAK_work;
- struct tty_port *port;
--};
-+} __randomize_layout;
-
- /* Each of a tty's open files has private_data pointing to tty_file_private */
- struct tty_file_private {
-diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
-index ecdaeb9..e1484a7 100644
---- a/include/linux/tty_driver.h
-+++ b/include/linux/tty_driver.h
-@@ -286,7 +286,7 @@ struct tty_operations {
- void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
- #endif
- const struct file_operations *proc_fops;
--};
-+} __do_const __randomize_layout;
-
- struct tty_driver {
- int magic; /* magic number for this structure */
-@@ -321,7 +321,7 @@ struct tty_driver {
-
- const struct tty_operations *ops;
- struct list_head tty_drivers;
--};
-+} __randomize_layout;
-
- extern struct list_head tty_drivers;
-
-diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
-index ff7dc08..893e1bd 100644
---- a/include/linux/tty_ldisc.h
-+++ b/include/linux/tty_ldisc.h
-@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
-
- struct module *owner;
-
-- int refcount;
-+ atomic_t refcount;
- };
-
- struct tty_ldisc {
-diff --git a/include/linux/types.h b/include/linux/types.h
-index 57a9723..dbe234a 100644
---- a/include/linux/types.h
-+++ b/include/linux/types.h
-@@ -213,10 +213,26 @@ typedef struct {
- int counter;
- } atomic_t;
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+typedef struct {
-+ int counter;
-+} atomic_unchecked_t;
-+#else
-+typedef atomic_t atomic_unchecked_t;
-+#endif
-+
- #ifdef CONFIG_64BIT
- typedef struct {
- long counter;
- } atomic64_t;
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+typedef struct {
-+ long counter;
-+} atomic64_unchecked_t;
-+#else
-+typedef atomic64_t atomic64_unchecked_t;
-+#endif
- #endif
-
- struct list_head {
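
The *_unchecked_t variants are the opt-out side of PAX_REFCOUNT: counts that guard object lifetimes stay atomic_t and gain overflow trapping, while counters that may legitimately wrap are switched to the unchecked types throughout the rest of this patch. A minimal sketch with invented names:

/*
 * Sketch with invented field names: lifetime reference counts keep the
 * checked atomic_t (a REFCOUNT-enabled kernel traps on overflow), while
 * statistics that may legitimately wrap use the unchecked variant. The
 * atomic_*_unchecked() helpers are provided elsewhere in this patch.
 */
struct example_obj {
	atomic_t		refcnt;		/* overflow here is a bug -> trap */
	atomic_unchecked_t	rx_packets;	/* wrap-around is harmless */
};

static void example_rx(struct example_obj *obj)
{
	atomic_inc(&obj->refcnt);
	atomic_inc_unchecked(&obj->rx_packets);
	/* ... deliver the packet ... */
	atomic_dec(&obj->refcnt);
}
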
-diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
-index 5ca0951..ab496a5 100644
---- a/include/linux/uaccess.h
-+++ b/include/linux/uaccess.h
-@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
- long ret; \
- mm_segment_t old_fs = get_fs(); \
- \
-- set_fs(KERNEL_DS); \
- pagefault_disable(); \
-- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
-- pagefault_enable(); \
-+ set_fs(KERNEL_DS); \
-+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
- set_fs(old_fs); \
-+ pagefault_enable(); \
- ret; \
- })
-
-diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
-index 99c1b4d..562e6f3 100644
---- a/include/linux/unaligned/access_ok.h
-+++ b/include/linux/unaligned/access_ok.h
-@@ -4,34 +4,34 @@
- #include <linux/kernel.h>
- #include <asm/byteorder.h>
-
--static inline u16 get_unaligned_le16(const void *p)
-+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
- {
-- return le16_to_cpup((__le16 *)p);
-+ return le16_to_cpup((const __le16 *)p);
- }
-
--static inline u32 get_unaligned_le32(const void *p)
-+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
- {
-- return le32_to_cpup((__le32 *)p);
-+ return le32_to_cpup((const __le32 *)p);
- }
-
--static inline u64 get_unaligned_le64(const void *p)
-+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
- {
-- return le64_to_cpup((__le64 *)p);
-+ return le64_to_cpup((const __le64 *)p);
- }
-
--static inline u16 get_unaligned_be16(const void *p)
-+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
- {
-- return be16_to_cpup((__be16 *)p);
-+ return be16_to_cpup((const __be16 *)p);
- }
-
--static inline u32 get_unaligned_be32(const void *p)
-+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
- {
-- return be32_to_cpup((__be32 *)p);
-+ return be32_to_cpup((const __be32 *)p);
- }
-
--static inline u64 get_unaligned_be64(const void *p)
-+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
- {
-- return be64_to_cpup((__be64 *)p);
-+ return be64_to_cpup((const __be64 *)p);
- }
-
- static inline void put_unaligned_le16(u16 val, void *p)
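
These helpers keep their old behaviour; the hunk only const-qualifies the casts and marks the getters as intentionally overflowing for the size_overflow plugin. A typical caller, with an invented buffer layout:

/*
 * Usage reminder (the offset is made up for the example): callers read
 * possibly unaligned little-endian fields straight out of a byte buffer.
 */
static u16 example_parse_len(const u8 *buf)
{
	return get_unaligned_le16(buf + 2);	/* 16-bit LE length at offset 2 */
}
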
-diff --git a/include/linux/usb.h b/include/linux/usb.h
-index 93629fc..be16802 100644
---- a/include/linux/usb.h
-+++ b/include/linux/usb.h
-@@ -497,7 +497,7 @@ struct usb_device {
- struct usb_device *children[USB_MAXCHILDREN];
-
- u32 quirks;
-- atomic_t urbnum;
-+ atomic_unchecked_t urbnum;
-
- unsigned long active_duration;
-
-@@ -1442,7 +1442,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
-
- extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
- __u8 request, __u8 requesttype, __u16 value, __u16 index,
-- void *data, __u16 size, int timeout);
-+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
- extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
- void *data, int len, int *actual_length, int timeout);
- extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
-diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
-index e5a40c3..d75f068 100644
---- a/include/linux/usb/renesas_usbhs.h
-+++ b/include/linux/usb/renesas_usbhs.h
-@@ -39,7 +39,7 @@ enum {
- */
- struct renesas_usbhs_driver_callback {
- int (*notify_hotplug)(struct platform_device *pdev);
--};
-+} __no_const;
-
- /*
- * callback functions for platform
-diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
-index 76f4396..3e0a4a8 100644
---- a/include/linux/usb/usbnet.h
-+++ b/include/linux/usb/usbnet.h
-@@ -33,6 +33,7 @@ struct usbnet {
- wait_queue_head_t *wait;
- struct mutex phy_mutex;
- unsigned char suspend_count;
-+ unsigned char pkt_cnt, pkt_err;
-
- /* i/o info: pipes etc */
- unsigned in, out;
-@@ -69,6 +70,8 @@ struct usbnet {
- # define EVENT_DEV_WAKING 6
- # define EVENT_DEV_ASLEEP 7
- # define EVENT_DEV_OPEN 8
-+# define EVENT_NO_RUNTIME_PM 9
-+# define EVENT_RX_KILL 10
- };
-
- static inline struct usb_driver *driver_of(struct usb_interface *intf)
-diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
-index faf4679..f4819bf 100644
---- a/include/linux/user_namespace.h
-+++ b/include/linux/user_namespace.h
-@@ -14,7 +14,7 @@ struct user_namespace {
- struct hlist_head uidhash_table[UIDHASH_SZ];
- struct user_struct *creator;
- struct work_struct destroyer;
--};
-+} __randomize_layout;
-
- extern struct user_namespace init_user_ns;
-
-diff --git a/include/linux/utsname.h b/include/linux/utsname.h
-index c714ed7..fb6d16c 100644
---- a/include/linux/utsname.h
-+++ b/include/linux/utsname.h
-@@ -52,7 +52,7 @@ struct uts_namespace {
- struct kref kref;
- struct new_utsname name;
- struct user_namespace *user_ns;
--};
-+} __randomize_layout;
- extern struct uts_namespace init_uts_ns;
-
- #ifdef CONFIG_UTS_NS
-diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
-index 6f8fbcf..4efc177 100644
---- a/include/linux/vermagic.h
-+++ b/include/linux/vermagic.h
-@@ -25,9 +25,42 @@
- #define MODULE_ARCH_VERMAGIC ""
- #endif
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+#define MODULE_PAX_REFCOUNT "REFCOUNT "
-+#else
-+#define MODULE_PAX_REFCOUNT ""
-+#endif
-+
-+#ifdef CONSTIFY_PLUGIN
-+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
-+#else
-+#define MODULE_CONSTIFY_PLUGIN ""
-+#endif
-+
-+#ifdef STACKLEAK_PLUGIN
-+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
-+#else
-+#define MODULE_STACKLEAK_PLUGIN ""
-+#endif
-+
-+#ifdef RANDSTRUCT_PLUGIN
-+#include <generated/randomize_layout_hash.h>
-+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
-+#else
-+#define MODULE_RANDSTRUCT_PLUGIN
-+#endif
-+
-+#ifdef CONFIG_GRKERNSEC
-+#define MODULE_GRSEC "GRSEC "
-+#else
-+#define MODULE_GRSEC ""
-+#endif
-+
- #define VERMAGIC_STRING \
- UTS_RELEASE " " \
- MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
- MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
-- MODULE_ARCH_VERMAGIC
-+ MODULE_ARCH_VERMAGIC \
-+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
-+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
-
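
Each added macro expands either to a tag plus a trailing space or to the empty string, so VERMAGIC_STRING records which hardening features the kernel was built with and the module loader rejects modules whose string differs. A rough illustration (the exact string depends on the configuration):

/*
 * Rough illustration: with CONFIG_PAX_REFCOUNT and CONFIG_GRKERNSEC enabled
 * and the compiler plugins disabled, the concatenation above yields something
 * like
 *
 *	"3.2.69-hardened SMP mod_unload REFCOUNT GRSEC "
 *
 * The release and SMP/mod_unload parts depend on the configuration; the point
 * is that a module built without the matching options carries a different
 * vermagic string and is refused at load time.
 */
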
-diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
-index 45a7698..76e6993 100644
---- a/include/linux/videodev2.h
-+++ b/include/linux/videodev2.h
-@@ -1062,7 +1062,7 @@ struct v4l2_ext_control {
- union {
- __s32 value;
- __s64 value64;
-- char *string;
-+ char __user *string;
- };
- } __attribute__ ((packed));
-
-diff --git a/include/linux/virtio.h b/include/linux/virtio.h
-index e4807af..f12924f 100644
---- a/include/linux/virtio.h
-+++ b/include/linux/virtio.h
-@@ -90,6 +90,10 @@ static inline int virtqueue_add_buf(struct virtqueue *vq,
-
- void virtqueue_kick(struct virtqueue *vq);
-
-+bool virtqueue_kick_prepare(struct virtqueue *vq);
-+
-+void virtqueue_notify(struct virtqueue *vq);
-+
- void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
-
- void virtqueue_disable_cb(struct virtqueue *vq);
-@@ -152,6 +156,7 @@ struct virtio_driver {
- const unsigned int *feature_table;
- unsigned int feature_table_size;
- int (*probe)(struct virtio_device *dev);
-+ void (*scan)(struct virtio_device *dev);
- void (*remove)(struct virtio_device *dev);
- void (*config_changed)(struct virtio_device *dev);
- };
-diff --git a/include/linux/virtio_ids.h b/include/linux/virtio_ids.h
-index 85bb0bb..c5d8455 100644
---- a/include/linux/virtio_ids.h
-+++ b/include/linux/virtio_ids.h
-@@ -34,6 +34,7 @@
- #define VIRTIO_ID_CONSOLE 3 /* virtio console */
- #define VIRTIO_ID_RNG 4 /* virtio ring */
- #define VIRTIO_ID_BALLOON 5 /* virtio balloon */
-+#define VIRTIO_ID_SCSI 8 /* virtio scsi */
- #define VIRTIO_ID_9P 9 /* 9p virtio console */
-
- #endif /* _LINUX_VIRTIO_IDS_H */
-diff --git a/include/linux/virtio_scsi.h b/include/linux/virtio_scsi.h
-new file mode 100644
-index 0000000..d6b4440
---- /dev/null
-+++ b/include/linux/virtio_scsi.h
-@@ -0,0 +1,125 @@
-+#ifndef _LINUX_VIRTIO_SCSI_H
-+#define _LINUX_VIRTIO_SCSI_H
-+/* This header is BSD licensed so anyone can use the definitions to implement
-+ * compatible drivers/servers. */
-+
-+#define VIRTIO_SCSI_CDB_SIZE 32
-+#define VIRTIO_SCSI_SENSE_SIZE 96
-+
-+/* SCSI command request, followed by data-out */
-+struct virtio_scsi_cmd_req {
-+ u8 lun[8]; /* Logical Unit Number */
-+ u64 tag; /* Command identifier */
-+ u8 task_attr; /* Task attribute */
-+ u8 prio;
-+ u8 crn;
-+ u8 cdb[VIRTIO_SCSI_CDB_SIZE];
-+} __packed;
-+
-+/* Response, followed by sense data and data-in */
-+struct virtio_scsi_cmd_resp {
-+ u32 sense_len; /* Sense data length */
-+ u32 resid; /* Residual bytes in data buffer */
-+ u16 status_qualifier; /* Status qualifier */
-+ u8 status; /* Command completion status */
-+ u8 response; /* Response values */
-+ u8 sense[VIRTIO_SCSI_SENSE_SIZE];
-+} __packed;
-+
-+/* Task Management Request */
-+struct virtio_scsi_ctrl_tmf_req {
-+ u32 type;
-+ u32 subtype;
-+ u8 lun[8];
-+ u64 tag;
-+} __packed;
-+
-+struct virtio_scsi_ctrl_tmf_resp {
-+ u8 response;
-+} __packed;
-+
-+/* Asynchronous notification query/subscription */
-+struct virtio_scsi_ctrl_an_req {
-+ u32 type;
-+ u8 lun[8];
-+ u32 event_requested;
-+} __packed;
-+
-+struct virtio_scsi_ctrl_an_resp {
-+ u32 event_actual;
-+ u8 response;
-+} __packed;
-+
-+struct virtio_scsi_event {
-+ u32 event;
-+ u8 lun[8];
-+ u32 reason;
-+} __packed;
-+
-+struct virtio_scsi_config {
-+ u32 num_queues;
-+ u32 seg_max;
-+ u32 max_sectors;
-+ u32 cmd_per_lun;
-+ u32 event_info_size;
-+ u32 sense_size;
-+ u32 cdb_size;
-+ u16 max_channel;
-+ u16 max_target;
-+ u32 max_lun;
-+} __packed;
-+
-+/* Feature Bits */
-+#define VIRTIO_SCSI_F_INOUT 0
-+#define VIRTIO_SCSI_F_HOTPLUG 1
-+#define VIRTIO_SCSI_F_CHANGE 2
-+
-+/* Response codes */
-+#define VIRTIO_SCSI_S_OK 0
-+#define VIRTIO_SCSI_S_OVERRUN 1
-+#define VIRTIO_SCSI_S_ABORTED 2
-+#define VIRTIO_SCSI_S_BAD_TARGET 3
-+#define VIRTIO_SCSI_S_RESET 4
-+#define VIRTIO_SCSI_S_BUSY 5
-+#define VIRTIO_SCSI_S_TRANSPORT_FAILURE 6
-+#define VIRTIO_SCSI_S_TARGET_FAILURE 7
-+#define VIRTIO_SCSI_S_NEXUS_FAILURE 8
-+#define VIRTIO_SCSI_S_FAILURE 9
-+#define VIRTIO_SCSI_S_FUNCTION_SUCCEEDED 10
-+#define VIRTIO_SCSI_S_FUNCTION_REJECTED 11
-+#define VIRTIO_SCSI_S_INCORRECT_LUN 12
-+
-+/* Controlq type codes. */
-+#define VIRTIO_SCSI_T_TMF 0
-+#define VIRTIO_SCSI_T_AN_QUERY 1
-+#define VIRTIO_SCSI_T_AN_SUBSCRIBE 2
-+
-+/* Valid TMF subtypes. */
-+#define VIRTIO_SCSI_T_TMF_ABORT_TASK 0
-+#define VIRTIO_SCSI_T_TMF_ABORT_TASK_SET 1
-+#define VIRTIO_SCSI_T_TMF_CLEAR_ACA 2
-+#define VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET 3
-+#define VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET 4
-+#define VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET 5
-+#define VIRTIO_SCSI_T_TMF_QUERY_TASK 6
-+#define VIRTIO_SCSI_T_TMF_QUERY_TASK_SET 7
-+
-+/* Events. */
-+#define VIRTIO_SCSI_T_EVENTS_MISSED 0x80000000
-+#define VIRTIO_SCSI_T_NO_EVENT 0
-+#define VIRTIO_SCSI_T_TRANSPORT_RESET 1
-+#define VIRTIO_SCSI_T_ASYNC_NOTIFY 2
-+#define VIRTIO_SCSI_T_PARAM_CHANGE 3
-+
-+/* Reasons of transport reset event */
-+#define VIRTIO_SCSI_EVT_RESET_HARD 0
-+#define VIRTIO_SCSI_EVT_RESET_RESCAN 1
-+#define VIRTIO_SCSI_EVT_RESET_REMOVED 2
-+
-+#define VIRTIO_SCSI_S_SIMPLE 0
-+#define VIRTIO_SCSI_S_ORDERED 1
-+#define VIRTIO_SCSI_S_HEAD 2
-+#define VIRTIO_SCSI_S_ACA 3
-+
-+
-+#endif /* _LINUX_VIRTIO_SCSI_H */
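
A hedged sketch of how a driver fills the request header defined above; the LUN byte layout follows the convention used by the in-tree virtio-scsi driver, and the helper itself is invented for illustration:

/*
 * Hedged sketch: filling the request header for one command. The helper and
 * its arguments are invented; the struct and VIRTIO_SCSI_S_SIMPLE come from
 * the new header above.
 */
static void example_fill_req(struct virtio_scsi_cmd_req *req,
			     u8 target, u16 lun, u64 tag)
{
	memset(req, 0, sizeof(*req));
	req->lun[0] = 1;			/* flat addressing */
	req->lun[1] = target;
	req->lun[2] = (lun >> 8) | 0x40;
	req->lun[3] = lun & 0xff;
	req->tag = tag;
	req->task_attr = VIRTIO_SCSI_S_SIMPLE;
	/* req->cdb[] would be filled with the SCSI CDB next */
}
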
-diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
-index 4bde182..1eb2c432 100644
---- a/include/linux/vmalloc.h
-+++ b/include/linux/vmalloc.h
-@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
- #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
- #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
- #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
-+
-+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
-+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
-+#endif
-+
- /* bits [20..32] reserved for arch specific ioremap internals */
-
- /*
-@@ -69,6 +74,10 @@ extern void *vmap(struct page **pages, unsigned int count,
- unsigned long flags, pgprot_t prot);
- extern void vunmap(const void *addr);
-
-+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
-+extern void unmap_process_stacks(struct task_struct *task);
-+#endif
-+
- extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
- unsigned long pgoff);
- void vmalloc_sync_all(void);
-@@ -124,7 +133,7 @@ extern void free_vm_area(struct vm_struct *area);
-
- /* for /dev/kmem */
- extern long vread(char *buf, char *addr, unsigned long count);
--extern long vwrite(char *buf, char *addr, unsigned long count);
-+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
-
- /*
- * Internals. Dont't use..
-diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
-index 65efb92..a90154f 100644
---- a/include/linux/vmstat.h
-+++ b/include/linux/vmstat.h
-@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
- /*
- * Zone based page accounting with per cpu differentials.
- */
--extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
-+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
-
- static inline void zone_page_state_add(long x, struct zone *zone,
- enum zone_stat_item item)
- {
-- atomic_long_add(x, &zone->vm_stat[item]);
-- atomic_long_add(x, &vm_stat[item]);
-+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
-+ atomic_long_add_unchecked(x, &vm_stat[item]);
- }
-
--static inline unsigned long global_page_state(enum zone_stat_item item)
-+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
- {
-- long x = atomic_long_read(&vm_stat[item]);
-+ long x = atomic_long_read_unchecked(&vm_stat[item]);
- #ifdef CONFIG_SMP
- if (x < 0)
- x = 0;
-@@ -106,10 +106,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
- return x;
- }
-
--static inline unsigned long zone_page_state(struct zone *zone,
-+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
- enum zone_stat_item item)
- {
-- long x = atomic_long_read(&zone->vm_stat[item]);
-+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
- #ifdef CONFIG_SMP
- if (x < 0)
- x = 0;
-@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
- static inline unsigned long zone_page_state_snapshot(struct zone *zone,
- enum zone_stat_item item)
- {
-- long x = atomic_long_read(&zone->vm_stat[item]);
-+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
-
- #ifdef CONFIG_SMP
- int cpu;
-@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
-
- static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
- {
-- atomic_long_inc(&zone->vm_stat[item]);
-- atomic_long_inc(&vm_stat[item]);
-+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
-+ atomic_long_inc_unchecked(&vm_stat[item]);
- }
-
- static inline void __inc_zone_page_state(struct page *page,
-@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
-
- static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
- {
-- atomic_long_dec(&zone->vm_stat[item]);
-- atomic_long_dec(&vm_stat[item]);
-+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
-+ atomic_long_dec_unchecked(&vm_stat[item]);
- }
-
- static inline void __dec_zone_page_state(struct page *page,
-diff --git a/include/linux/xattr.h b/include/linux/xattr.h
-index e5d1220..5a87d07 100644
---- a/include/linux/xattr.h
-+++ b/include/linux/xattr.h
-@@ -57,6 +57,11 @@
- #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
- #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
-
-+/* User namespace */
-+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
-+#define XATTR_PAX_FLAGS_SUFFIX "flags"
-+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
-+
- #ifdef __KERNEL__
-
- #include <linux/types.h>
-@@ -73,7 +78,7 @@ struct xattr_handler {
- size_t size, int handler_flags);
- int (*set)(struct dentry *dentry, const char *name, const void *buffer,
- size_t size, int flags, int handler_flags);
--};
-+} __do_const;
-
- struct xattr {
- char *name;
-@@ -82,6 +87,9 @@ struct xattr {
- };
-
- ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
-+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
-+ssize_t pax_getxattr(struct dentry *, void *, size_t);
-+#endif
- ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
- ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
- int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
-diff --git a/include/linux/yam.h b/include/linux/yam.h
-index 7fe2822..512cdc2 100644
---- a/include/linux/yam.h
-+++ b/include/linux/yam.h
-@@ -77,6 +77,6 @@ struct yamdrv_ioctl_cfg {
-
- struct yamdrv_ioctl_mcs {
- int cmd;
-- int bitrate;
-+ unsigned int bitrate;
- unsigned char bits[YAM_FPGA_SIZE];
- };
-diff --git a/include/linux/zlib.h b/include/linux/zlib.h
-index 9c5a6b4..09c9438 100644
---- a/include/linux/zlib.h
-+++ b/include/linux/zlib.h
-@@ -31,6 +31,7 @@
- #define _ZLIB_H
-
- #include <linux/zconf.h>
-+#include <linux/compiler.h>
-
- /* zlib deflate based on ZLIB_VERSION "1.1.3" */
- /* zlib inflate based on ZLIB_VERSION "1.2.3" */
-@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
-
- /* basic functions */
-
--extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
-+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
- /*
- Returns the number of bytes that needs to be allocated for a per-
- stream workspace with the specified parameters. A pointer to this
-diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
-index c7c40f1..5c31482 100644
---- a/include/media/v4l2-dev.h
-+++ b/include/media/v4l2-dev.h
-@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
-
-
- struct v4l2_file_operations {
-- struct module *owner;
-+ struct module * const owner;
- ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
- ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
- unsigned int (*poll) (struct file *, struct poll_table_struct *);
-diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
-index d61febfb..f0094f6 100644
---- a/include/media/v4l2-device.h
-+++ b/include/media/v4l2-device.h
-@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
- this function returns 0. If the name ends with a digit (e.g. cx18),
- then the name will be set to cx18-0 since cx180 looks really odd. */
- int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
-- atomic_t *instance);
-+ atomic_unchecked_t *instance);
-
- /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
- Since the parent disappears this ensures that v4l2_dev doesn't have an
-diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
-index 4d1c74a..8e58054 100644
---- a/include/media/v4l2-ioctl.h
-+++ b/include/media/v4l2-ioctl.h
-@@ -275,7 +275,6 @@ struct v4l2_ioctl_ops {
- bool valid_prio, int cmd, void *arg);
- };
-
--
- /* v4l debugging and diagnostics */
-
- /* Debug bitmask flags to be used on V4L2 */
-diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
-index adcbb20..62c2559 100644
---- a/include/net/9p/transport.h
-+++ b/include/net/9p/transport.h
-@@ -57,7 +57,7 @@ struct p9_trans_module {
- int (*cancel) (struct p9_client *, struct p9_req_t *req);
- int (*zc_request)(struct p9_client *, struct p9_req_t *,
- char *, char *, int , int, int, int);
--};
-+} __do_const;
-
- void v9fs_register_trans(struct p9_trans_module *m);
- void v9fs_unregister_trans(struct p9_trans_module *m);
-diff --git a/include/net/af_unix.h b/include/net/af_unix.h
-index 91ab5b0..23e3e9b 100644
---- a/include/net/af_unix.h
-+++ b/include/net/af_unix.h
-@@ -30,7 +30,7 @@ struct unix_skb_parms {
- #ifdef CONFIG_SECURITY_NETWORK
- u32 secid; /* Security ID */
- #endif
--};
-+} __randomize_layout;
-
- #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
- #define UNIXSID(skb) (&UNIXCB((skb)).secid)
-diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
-index 6cc18f3..b0df15a 100644
---- a/include/net/bluetooth/l2cap.h
-+++ b/include/net/bluetooth/l2cap.h
-@@ -387,7 +387,7 @@ struct l2cap_ops {
- int (*recv) (void *data, struct sk_buff *skb);
- void (*close) (void *data);
- void (*state_change) (void *data, int state);
--};
-+} __do_const;
-
- struct l2cap_conn {
- struct hci_conn *hcon;
-diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
-index 9e5425b..8136ffc 100644
---- a/include/net/caif/cfctrl.h
-+++ b/include/net/caif/cfctrl.h
-@@ -52,7 +52,7 @@ struct cfctrl_rsp {
- void (*radioset_rsp)(void);
- void (*reject_rsp)(struct cflayer *layer, u8 linkid,
- struct cflayer *client_layer);
--};
-+} __no_const;
-
- /* Link Setup Parameters for CAIF-Links. */
- struct cfctrl_link_param {
-@@ -101,8 +101,8 @@ struct cfctrl_request_info {
- struct cfctrl {
- struct cfsrvl serv;
- struct cfctrl_rsp res;
-- atomic_t req_seq_no;
-- atomic_t rsp_seq_no;
-+ atomic_unchecked_t req_seq_no;
-+ atomic_unchecked_t rsp_seq_no;
- struct list_head list;
- /* Protects from simultaneous access to first_req list */
- spinlock_t info_list_lock;
-diff --git a/include/net/flow.h b/include/net/flow.h
-index 2a7eefdd..3250f3b 100644
---- a/include/net/flow.h
-+++ b/include/net/flow.h
-@@ -218,6 +218,6 @@ extern struct flow_cache_object *flow_cache_lookup(
-
- extern void flow_cache_flush(void);
- extern void flow_cache_flush_deferred(void);
--extern atomic_t flow_cache_genid;
-+extern atomic_unchecked_t flow_cache_genid;
-
- #endif
-diff --git a/include/net/genetlink.h b/include/net/genetlink.h
-index 82d8d09..d1e04ff 100644
---- a/include/net/genetlink.h
-+++ b/include/net/genetlink.h
-@@ -116,7 +116,7 @@ struct genl_ops {
- struct netlink_callback *cb);
- int (*done)(struct netlink_callback *cb);
- struct list_head ops_list;
--};
-+} __do_const;
-
- extern int genl_register_family(struct genl_family *family);
- extern int genl_register_family_with_ops(struct genl_family *family,
-diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
-index ca2755f..85ec88c 100644
---- a/include/net/inet_connection_sock.h
-+++ b/include/net/inet_connection_sock.h
-@@ -61,7 +61,7 @@ struct inet_connection_sock_af_ops {
- void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
- int (*bind_conflict)(const struct sock *sk,
- const struct inet_bind_bucket *tb);
--};
-+} __do_const;
-
- /** inet_connection_sock - INET connection oriented sock
- *
-diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
-index 6172a44..d06d507 100644
---- a/include/net/inetpeer.h
-+++ b/include/net/inetpeer.h
-@@ -52,7 +52,7 @@ struct inet_peer {
- */
- union {
- struct {
-- atomic_t rid; /* Frag reception counter */
-+ atomic_unchecked_t rid; /* Frag reception counter */
- __u32 tcp_ts;
- __u32 tcp_ts_stamp;
- };
-diff --git a/include/net/ip.h b/include/net/ip.h
-index 1ee535b..91976cb1 100644
---- a/include/net/ip.h
-+++ b/include/net/ip.h
-@@ -214,7 +214,7 @@ extern struct local_ports {
- } sysctl_local_ports;
- extern void inet_get_local_port_range(int *low, int *high);
-
--extern unsigned long *sysctl_local_reserved_ports;
-+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
- static inline int inet_is_reserved_local_port(int port)
- {
- return test_bit(port, sysctl_local_reserved_ports);
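
The change replaces the dynamically allocated reserved-ports bitmap with a statically sized array; a short sketch of the sizing arithmetic and the (unchanged) lookup:

/*
 * Sizing note: 65536 / 8 / sizeof(unsigned long) is 1024 entries on 64-bit
 * builds (2048 on 32-bit), one bit per possible port, so the bitmap no longer
 * needs a separate allocation. Lookups work exactly as before:
 */
static bool example_port_reserved(int port)
{
	return test_bit(port, sysctl_local_reserved_ports);
}
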
-diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
-index 6e4569f..0c8aa25 100644
---- a/include/net/ip_fib.h
-+++ b/include/net/ip_fib.h
-@@ -144,7 +144,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
-
- #define FIB_RES_SADDR(net, res) \
- ((FIB_RES_NH(res).nh_saddr_genid == \
-- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
-+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
- FIB_RES_NH(res).nh_saddr : \
- fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
- #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
-diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
-index 416dcb0..e203877 100644
---- a/include/net/ip_vs.h
-+++ b/include/net/ip_vs.h
-@@ -509,7 +509,7 @@ struct ip_vs_conn {
- struct ip_vs_conn *control; /* Master control connection */
- atomic_t n_control; /* Number of controlled ones */
- struct ip_vs_dest *dest; /* real server */
-- atomic_t in_pkts; /* incoming packet counter */
-+ atomic_unchecked_t in_pkts; /* incoming packet counter */
-
- /* packet transmitter for different forwarding methods. If it
- mangles the packet, it must return NF_DROP or better NF_STOLEN,
-@@ -647,7 +647,7 @@ struct ip_vs_dest {
- __be16 port; /* port number of the server */
- union nf_inet_addr addr; /* IP address of the server */
- volatile unsigned flags; /* dest status flags */
-- atomic_t conn_flags; /* flags to copy to conn */
-+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
- atomic_t weight; /* server weight */
-
- atomic_t refcnt; /* reference counter */
-@@ -878,11 +878,11 @@ struct netns_ipvs {
- /* ip_vs_lblc */
- int sysctl_lblc_expiration;
- struct ctl_table_header *lblc_ctl_header;
-- struct ctl_table *lblc_ctl_table;
-+ ctl_table_no_const *lblc_ctl_table;
- /* ip_vs_lblcr */
- int sysctl_lblcr_expiration;
- struct ctl_table_header *lblcr_ctl_header;
-- struct ctl_table *lblcr_ctl_table;
-+ ctl_table_no_const *lblcr_ctl_table;
- /* ip_vs_est */
- struct list_head est_list; /* estimator list */
- spinlock_t est_lock;
-diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
-index 59ba38bc..d515662 100644
---- a/include/net/irda/ircomm_tty.h
-+++ b/include/net/irda/ircomm_tty.h
-@@ -35,6 +35,7 @@
- #include <linux/termios.h>
- #include <linux/timer.h>
- #include <linux/tty.h> /* struct tty_struct */
-+#include <asm/local.h>
-
- #include <net/irda/irias_object.h>
- #include <net/irda/ircomm_core.h>
-@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
- unsigned short close_delay;
- unsigned short closing_wait; /* time to wait before closing */
-
-- int open_count;
-- int blocked_open; /* # of blocked opens */
-+ local_t open_count;
-+ local_t blocked_open; /* # of blocked opens */
-
- /* Protect concurent access to :
- * o self->open_count
-diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
-index f2419cf..473679f 100644
---- a/include/net/iucv/af_iucv.h
-+++ b/include/net/iucv/af_iucv.h
-@@ -139,7 +139,7 @@ struct iucv_sock {
- struct iucv_sock_list {
- struct hlist_head head;
- rwlock_t lock;
-- atomic_t autobind_name;
-+ atomic_unchecked_t autobind_name;
- };
-
- unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
-diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
-index df83f69..9b640b8 100644
---- a/include/net/llc_c_ac.h
-+++ b/include/net/llc_c_ac.h
-@@ -87,7 +87,7 @@
- #define LLC_CONN_AC_STOP_SENDACK_TMR 70
- #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
-
--typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
-+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
-
- extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
- extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
-diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
-index 23a4093..6d106df 100644
---- a/include/net/llc_c_ev.h
-+++ b/include/net/llc_c_ev.h
-@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
- return (struct llc_conn_state_ev *)skb->cb;
- }
-
--typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
--typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
-+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
-+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
-
- extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
- extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
-diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
-index 0e79cfb..f46db31 100644
---- a/include/net/llc_c_st.h
-+++ b/include/net/llc_c_st.h
-@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
- u8 next_state;
- llc_conn_ev_qfyr_t *ev_qualifiers;
- llc_conn_action_t *ev_actions;
--};
-+} __do_const;
-
- struct llc_conn_state {
- u8 current_state;
-diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
-index 37a3bbd..55a4241 100644
---- a/include/net/llc_s_ac.h
-+++ b/include/net/llc_s_ac.h
-@@ -23,7 +23,7 @@
- #define SAP_ACT_TEST_IND 9
-
- /* All action functions must look like this */
--typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
-+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
-
- extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
- struct sk_buff *skb);
-diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
-index 567c681..cd73ac02 100644
---- a/include/net/llc_s_st.h
-+++ b/include/net/llc_s_st.h
-@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
- llc_sap_ev_t ev;
- u8 next_state;
- llc_sap_action_t *ev_actions;
--};
-+} __do_const;
-
- struct llc_sap_state {
- u8 curr_state;
-diff --git a/include/net/mac80211.h b/include/net/mac80211.h
-index 1a6201a..66d9531 100644
---- a/include/net/mac80211.h
-+++ b/include/net/mac80211.h
-@@ -3529,7 +3529,7 @@ struct rate_control_ops {
- void (*add_sta_debugfs)(void *priv, void *priv_sta,
- struct dentry *dir);
- void (*remove_sta_debugfs)(void *priv, void *priv_sta);
--};
-+} __do_const;
-
- static inline int rate_supported(struct ieee80211_sta *sta,
- enum ieee80211_band band,
-diff --git a/include/net/neighbour.h b/include/net/neighbour.h
-index 2720884..0dc13cd 100644
---- a/include/net/neighbour.h
-+++ b/include/net/neighbour.h
-@@ -122,7 +122,7 @@ struct neigh_ops {
- void (*error_report)(struct neighbour *, struct sk_buff *);
- int (*output)(struct neighbour *, struct sk_buff *);
- int (*connected_output)(struct neighbour *, struct sk_buff *);
--};
-+} __do_const;
-
- struct pneigh_entry {
- struct pneigh_entry *next;
-@@ -160,7 +160,6 @@ struct neigh_table {
- void (*proxy_redo)(struct sk_buff *skb);
- char *id;
- struct neigh_parms parms;
-- /* HACK. gc_* should follow parms without a gap! */
- int gc_interval;
- int gc_thresh1;
- int gc_thresh2;
-@@ -176,7 +175,7 @@ struct neigh_table {
- struct neigh_statistics __percpu *stats;
- struct neigh_hash_table __rcu *nht;
- struct pneigh_entry **phash_buckets;
--};
-+} __randomize_layout;
-
- /* flags for neigh_update() */
- #define NEIGH_UPDATE_F_OVERRIDE 0x00000001
-diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
-index 3bb6fa0..4ea5d1c 100644
---- a/include/net/net_namespace.h
-+++ b/include/net/net_namespace.h
-@@ -101,7 +101,7 @@ struct net {
- struct netns_xfrm xfrm;
- #endif
- struct netns_ipvs *ipvs;
--};
-+} __randomize_layout;
-
-
- #include <linux/seq_file_net.h>
-@@ -240,10 +240,16 @@ static inline struct net *read_pnet(struct net * const *pnet)
- #define __net_init
- #define __net_exit
- #define __net_initdata
-+#define __net_initconst
- #else
- #define __net_init __init
- #define __net_exit __exit_refok
- #define __net_initdata __initdata
-+#ifdef CONSTIFY_PLUGIN
-+#define __net_initconst __initconst
-+#else
-+#define __net_initconst __initdata
-+#endif
- #endif
-
- struct pernet_operations {
-@@ -253,7 +259,7 @@ struct pernet_operations {
- void (*exit_batch)(struct list_head *net_exit_list);
- int *id;
- size_t size;
--};
-+} __do_const;
-
- /*
- * Use these carefully. If you implement a network device and it
-diff --git a/include/net/netdma.h b/include/net/netdma.h
-index 8ba8ce2..99b7fff 100644
---- a/include/net/netdma.h
-+++ b/include/net/netdma.h
-@@ -24,7 +24,7 @@
- #include <linux/dmaengine.h>
- #include <linux/skbuff.h>
-
--int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
-+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
- struct sk_buff *skb, int offset, struct iovec *to,
- size_t len, struct dma_pinned_list *pinned_list);
-
-diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
-index 252fd10..aa1421f 100644
---- a/include/net/netfilter/nf_queue.h
-+++ b/include/net/netfilter/nf_queue.h
-@@ -22,7 +22,7 @@ struct nf_queue_handler {
- int (*outfn)(struct nf_queue_entry *entry,
- unsigned int queuenum);
- char *name;
--};
-+} __do_const;
-
- extern int nf_register_queue_handler(u_int8_t pf,
- const struct nf_queue_handler *qh);
-diff --git a/include/net/netlink.h b/include/net/netlink.h
-index cb1f350..55e1f96 100644
---- a/include/net/netlink.h
-+++ b/include/net/netlink.h
-@@ -135,6 +135,7 @@
- * nla_get_u16(nla) get payload for a u16 attribute
- * nla_get_u32(nla) get payload for a u32 attribute
- * nla_get_u64(nla) get payload for a u64 attribute
-+ * nla_get_s32(nla) get payload for a s32 attribute
- * nla_get_flag(nla) return 1 if flag is true
- * nla_get_msecs(nla) get payload for a msecs attribute
- *
-@@ -569,7 +570,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
- static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
- {
- if (mark)
-- skb_trim(skb, (unsigned char *) mark - skb->data);
-+ skb_trim(skb, (const unsigned char *) mark - skb->data);
- }
-
- /**
-@@ -998,6 +999,15 @@ static inline __be64 nla_get_be64(const struct nlattr *nla)
- }
-
- /**
-+ * nla_get_s32 - return payload of s32 attribute
-+ * @nla: s32 netlink attribute
-+ */
-+static inline s32 nla_get_s32(const struct nlattr *nla)
-+{
-+ return *(s32 *) nla_data(nla);
-+}
-+
-+/**
- * nla_get_flag - return payload of flag attribute
- * @nla: flag netlink attribute
- */
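
A usage sketch for the new accessor inside a generic netlink ->doit() handler; the attribute index and the handler are hypothetical, only nla_get_s32() comes from this hunk:

/*
 * EXAMPLE_ATTR_PRIO and the handler itself are invented for illustration.
 */
static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
	s32 prio = 0;

	if (info->attrs[EXAMPLE_ATTR_PRIO])
		prio = nla_get_s32(info->attrs[EXAMPLE_ATTR_PRIO]);
	/* ... act on prio ... */
	return 0;
}
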
-diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
-index d786b4f..4c3dd41 100644
---- a/include/net/netns/ipv4.h
-+++ b/include/net/netns/ipv4.h
-@@ -56,8 +56,8 @@ struct netns_ipv4 {
-
- unsigned int sysctl_ping_group_range[2];
-
-- atomic_t rt_genid;
-- atomic_t dev_addr_genid;
-+ atomic_unchecked_t rt_genid;
-+ atomic_unchecked_t dev_addr_genid;
-
- #ifdef CONFIG_IP_MROUTE
- #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
-diff --git a/include/net/protocol.h b/include/net/protocol.h
-index 6f7eb80..f9838be 100644
---- a/include/net/protocol.h
-+++ b/include/net/protocol.h
-@@ -44,7 +44,7 @@ struct net_protocol {
- int (*gro_complete)(struct sk_buff *skb);
- unsigned int no_policy:1,
- netns_ok:1;
--};
-+} __do_const;
-
- #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
- struct inet6_protocol {
-@@ -63,7 +63,7 @@ struct inet6_protocol {
- int (*gro_complete)(struct sk_buff *skb);
-
- unsigned int flags; /* INET6_PROTO_xxx */
--};
-+} __do_const;
-
- #define INET6_PROTO_NOPOLICY 0x1
- #define INET6_PROTO_FINAL 0x2
-diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
-index 3702939..cf9e78e 100644
---- a/include/net/rtnetlink.h
-+++ b/include/net/rtnetlink.h
-@@ -78,7 +78,7 @@ struct rtnl_link_ops {
- int (*get_tx_queues)(struct net *net, struct nlattr *tb[],
- unsigned int *tx_queues,
- unsigned int *real_tx_queues);
--};
-+} __do_const;
-
- extern int __rtnl_link_register(struct rtnl_link_ops *ops);
- extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
-diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
-index e0f1c91..c73f85c 100644
---- a/include/net/sctp/sctp.h
-+++ b/include/net/sctp/sctp.h
-@@ -318,9 +318,9 @@ do { \
-
- #else /* SCTP_DEBUG */
-
--#define SCTP_DEBUG_PRINTK(whatever...)
--#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
--#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
-+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
-+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
-+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
- #define SCTP_ENABLE_DEBUG
- #define SCTP_DISABLE_DEBUG
- #define SCTP_ASSERT(expr, str, func)
-diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
-index 4d1be75..a54d29e 100644
---- a/include/net/sctp/sm.h
-+++ b/include/net/sctp/sm.h
-@@ -86,7 +86,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
- typedef struct {
- sctp_state_fn_t *fn;
- const char *name;
--} sctp_sm_table_entry_t;
-+} __do_const sctp_sm_table_entry_t;
-
- /* A naming convention of "sctp_sf_xxx" applies to all the state functions
- * currently in use.
-@@ -295,7 +295,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
- __u32 sctp_generate_tsn(const struct sctp_endpoint *);
-
- /* Extern declarations for major data structures. */
--extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
-+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
-
-
- /* Get the size of a DATA chunk payload. */
-diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
-index a15432da..9459dcc 100644
---- a/include/net/sctp/structs.h
-+++ b/include/net/sctp/structs.h
-@@ -644,7 +644,7 @@ struct sctp_pf {
- struct sctp_association *asoc);
- void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
- struct sctp_af *af;
--};
-+} __do_const;
-
-
- /* Structure to track chunk fragments that have been acked, but peer
-diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
-index b1c3d1c..895573a 100644
---- a/include/net/secure_seq.h
-+++ b/include/net/secure_seq.h
-@@ -3,6 +3,7 @@
-
- #include <linux/types.h>
-
-+extern void net_secret_init(void);
- extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
- extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
- __be16 dport);
-diff --git a/include/net/sock.h b/include/net/sock.h
-index c8dcbb8..50b02f1 100644
---- a/include/net/sock.h
-+++ b/include/net/sock.h
-@@ -277,7 +277,7 @@ struct sock {
- #ifdef CONFIG_RPS
- __u32 sk_rxhash;
- #endif
-- atomic_t sk_drops;
-+ atomic_unchecked_t sk_drops;
- int sk_rcvbuf;
-
- struct sk_filter __rcu *sk_filter;
-@@ -847,7 +847,7 @@ struct proto {
- #ifdef SOCK_REFCNT_DEBUG
- atomic_t socks;
- #endif
--};
-+} __randomize_layout;
-
- extern int proto_register(struct proto *prot, int alloc_slab);
- extern void proto_unregister(struct proto *prot);
-@@ -927,7 +927,7 @@ struct sock_iocb {
- struct scm_cookie *scm;
- struct msghdr *msg, async_msg;
- struct kiocb *kiocb;
--};
-+} __randomize_layout;
-
- static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
- {
-@@ -1414,7 +1414,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
- }
-
- static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
-- char __user *from, char *to,
-+ char __user *from, unsigned char *to,
- int copy, int offset)
- {
- if (skb->ip_summed == CHECKSUM_NONE) {
-@@ -1676,7 +1676,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
- }
- }
-
--struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
-+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
-
- static inline struct page *sk_stream_alloc_page(struct sock *sk)
- {
-diff --git a/include/net/tcp.h b/include/net/tcp.h
-index e90235f..b943bda 100644
---- a/include/net/tcp.h
-+++ b/include/net/tcp.h
-@@ -426,6 +426,25 @@ extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
- extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
- struct ip_options *opt);
- #ifdef CONFIG_SYN_COOKIES
-+#include <linux/ktime.h>
-+
-+/* Syncookies use a monotonic timer which increments every 60 seconds.
-+ * This counter is used both as a hash input and partially encoded into
-+ * the cookie value. A cookie is only validated further if the delta
-+ * between the current counter value and the encoded one is less than this,
-+ * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
-+ * the counter advances immediately after a cookie is generated).
-+ */
-+#define MAX_SYNCOOKIE_AGE 2
-+
-+static inline u32 tcp_cookie_time(void)
-+{
-+ u64 val = get_jiffies_64();
-+
-+ do_div(val, 60 * HZ);
-+ return val;
-+}
-+
- extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
- __u16 *mss);
- #else
-@@ -463,7 +482,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
- extern void tcp_xmit_retransmit_queue(struct sock *);
- extern void tcp_simple_retransmit(struct sock *);
- extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
--extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
-+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
-
- extern void tcp_send_probe0(struct sock *);
- extern void tcp_send_partial(struct sock *);
-@@ -626,8 +645,8 @@ struct tcp_skb_cb {
- struct inet6_skb_parm h6;
- #endif
- } header; /* For incoming frames */
-- __u32 seq; /* Starting sequence number */
-- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
-+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
-+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
- __u32 when; /* used to compute rtt's */
- __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
- __u8 sacked; /* State flags for SACK/FACK. */
-@@ -640,7 +659,7 @@ struct tcp_skb_cb {
- #define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
- #define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
-
-- __u32 ack_seq; /* Sequence number ACK'd */
-+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
- };
-
- #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
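
A worked sketch of the freshness rule the syncookie comment above describes; the validating helper is invented, while tcp_cookie_time() and MAX_SYNCOOKIE_AGE are taken from the hunk:

/*
 * The cookie encodes the counter value at send time; validation only
 * proceeds while the counter has advanced by fewer than MAX_SYNCOOKIE_AGE
 * ticks (one tick = 60 seconds).
 */
static inline bool example_cookie_fresh(u32 counter_when_sent)
{
	u32 diff = tcp_cookie_time() - counter_when_sent;

	return diff < MAX_SYNCOOKIE_AGE;	/* i.e. at most ~2 minutes old */
}
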
-diff --git a/include/net/xfrm.h b/include/net/xfrm.h
-index 921f627..3744fe8 100644
---- a/include/net/xfrm.h
-+++ b/include/net/xfrm.h
-@@ -282,7 +282,6 @@ struct xfrm_dst;
- struct xfrm_policy_afinfo {
- unsigned short family;
- struct dst_ops *dst_ops;
-- void (*garbage_collect)(struct net *net);
- struct dst_entry *(*dst_lookup)(struct net *net, int tos,
- const xfrm_address_t *saddr,
- const xfrm_address_t *daddr);
-@@ -298,7 +297,7 @@ struct xfrm_policy_afinfo {
- struct net_device *dev,
- const struct flowi *fl);
- struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
--};
-+} __do_const;
-
- extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
- extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
-@@ -334,7 +333,7 @@ struct xfrm_state_afinfo {
- struct sk_buff *skb);
- int (*transport_finish)(struct sk_buff *skb,
- int async);
--};
-+} __do_const;
-
- extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
- extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
-@@ -417,7 +416,7 @@ struct xfrm_mode {
- struct module *owner;
- unsigned int encap;
- int flags;
--};
-+} __do_const;
-
- /* Flags for xfrm_mode. */
- enum {
-@@ -508,7 +507,7 @@ struct xfrm_policy {
- struct timer_list timer;
-
- struct flow_cache_object flo;
-- atomic_t genid;
-+ atomic_unchecked_t genid;
- u32 priority;
- u32 index;
- struct xfrm_mark mark;
-@@ -1141,6 +1140,8 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
- }
- }
-
-+extern void xfrm_garbage_collect_deferred(struct net *net);
-+
- #else
-
- static inline void xfrm_sk_free_policy(struct sock *sk) {}
-@@ -1175,6 +1176,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
- {
- return 1;
- }
-+static inline void xfrm_garbage_collect_deferred(struct net *net)
-+{
-+}
- #endif
-
- static __inline__
-diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
-index 1a046b1..ee0bef0 100644
---- a/include/rdma/iw_cm.h
-+++ b/include/rdma/iw_cm.h
-@@ -122,7 +122,7 @@ struct iw_cm_verbs {
- int backlog);
-
- int (*destroy_listen)(struct iw_cm_id *cm_id);
--};
-+} __no_const;
-
- /**
- * iw_create_cm_id - Create an IW CM identifier.
-diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
-index 5d1a758..1dbf795 100644
---- a/include/scsi/libfc.h
-+++ b/include/scsi/libfc.h
-@@ -748,6 +748,7 @@ struct libfc_function_template {
- */
- void (*disc_stop_final) (struct fc_lport *);
- };
-+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
-
- /**
- * struct fc_disc - Discovery context
-@@ -851,7 +852,7 @@ struct fc_lport {
- struct fc_vport *vport;
-
- /* Operational Information */
-- struct libfc_function_template tt;
-+ libfc_function_template_no_const tt;
- u8 link_up;
- u8 qfull;
- enum fc_lport_state state;
-diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
-index 377ba61..1b6890c 100644
---- a/include/scsi/scsi_device.h
-+++ b/include/scsi/scsi_device.h
-@@ -162,9 +162,9 @@ struct scsi_device {
- unsigned int max_device_blocked; /* what device_blocked counts down from */
- #define SCSI_DEFAULT_DEVICE_BLOCKED 3
-
-- atomic_t iorequest_cnt;
-- atomic_t iodone_cnt;
-- atomic_t ioerr_cnt;
-+ atomic_unchecked_t iorequest_cnt;
-+ atomic_unchecked_t iodone_cnt;
-+ atomic_unchecked_t ioerr_cnt;
-
- struct device sdev_gendev,
- sdev_dev;
-diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
-index 9fd6702..52e04b7 100644
---- a/include/scsi/scsi_driver.h
-+++ b/include/scsi/scsi_driver.h
-@@ -15,7 +15,7 @@ struct scsi_driver {
- struct device_driver gendrv;
-
- void (*rescan)(struct device *);
-- int (*done)(struct scsi_cmnd *);
-+ unsigned int (*done)(struct scsi_cmnd *);
- };
- #define to_scsi_driver(drv) \
- container_of((drv), struct scsi_driver, gendrv)
-diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
-index 2a65167..91e01f8 100644
---- a/include/scsi/scsi_transport_fc.h
-+++ b/include/scsi/scsi_transport_fc.h
-@@ -711,7 +711,7 @@ struct fc_function_template {
- unsigned long show_host_system_hostname:1;
-
- unsigned long disable_target_scan:1;
--};
-+} __do_const;
-
-
- /**
-diff --git a/include/sound/soc.h b/include/sound/soc.h
-index 11cfb59..808afef 100644
---- a/include/sound/soc.h
-+++ b/include/sound/soc.h
-@@ -641,7 +641,7 @@ struct snd_soc_codec_driver {
- /* probe ordering - for components with runtime dependencies */
- int probe_order;
- int remove_order;
--};
-+} __do_const;
-
- /* SoC platform interface */
- struct snd_soc_platform_driver {
-@@ -683,7 +683,7 @@ struct snd_soc_platform_driver {
- /* platform IO - used for platform DAPM */
- unsigned int (*read)(struct snd_soc_platform *, unsigned int);
- int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
--};
-+} __do_const;
-
- struct snd_soc_platform {
- const char *name;
-diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
-index 444cd6b..3327cc5 100644
---- a/include/sound/ymfpci.h
-+++ b/include/sound/ymfpci.h
-@@ -358,7 +358,7 @@ struct snd_ymfpci {
- spinlock_t reg_lock;
- spinlock_t voice_lock;
- wait_queue_head_t interrupt_sleep;
-- atomic_t interrupt_sleep_count;
-+ atomic_unchecked_t interrupt_sleep_count;
- struct snd_info_entry *proc_entry;
- const struct firmware *dsp_microcode;
- const struct firmware *controller_microcode;
-diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
-index 6ee550e..ebec4cc 100644
---- a/include/target/target_core_base.h
-+++ b/include/target/target_core_base.h
-@@ -466,8 +466,8 @@ struct se_cmd {
- atomic_t t_se_count;
- atomic_t t_task_cdbs_left;
- atomic_t t_task_cdbs_ex_left;
-- atomic_t t_task_cdbs_sent;
-- atomic_t t_transport_aborted;
-+ atomic_unchecked_t t_task_cdbs_sent;
-+ atomic_unchecked_t t_transport_aborted;
- atomic_t t_transport_active;
- atomic_t t_transport_complete;
- atomic_t t_transport_queue_active;
-@@ -706,7 +706,7 @@ struct se_device {
- /* Active commands on this virtual SE device */
- atomic_t simple_cmds;
- atomic_t depth_left;
-- atomic_t dev_ordered_id;
-+ atomic_unchecked_t dev_ordered_id;
- atomic_t execute_tasks;
- atomic_t dev_ordered_sync;
- atomic_t dev_qf_count;
-diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
-new file mode 100644
-index 0000000..2efe49d
---- /dev/null
-+++ b/include/trace/events/fs.h
-@@ -0,0 +1,53 @@
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM fs
-+
-+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_FS_H
-+
-+#include <linux/fs.h>
-+#include <linux/tracepoint.h>
-+
-+TRACE_EVENT(do_sys_open,
-+
-+ TP_PROTO(char *filename, int flags, int mode),
-+
-+ TP_ARGS(filename, flags, mode),
-+
-+ TP_STRUCT__entry(
-+ __string( filename, filename )
-+ __field( int, flags )
-+ __field( int, mode )
-+ ),
-+
-+ TP_fast_assign(
-+ __assign_str(filename, filename);
-+ __entry->flags = flags;
-+ __entry->mode = mode;
-+ ),
-+
-+ TP_printk("\"%s\" %x %o",
-+ __get_str(filename), __entry->flags, __entry->mode)
-+);
-+
-+TRACE_EVENT(open_exec,
-+
-+ TP_PROTO(const char *filename),
-+
-+ TP_ARGS(filename),
-+
-+ TP_STRUCT__entry(
-+ __string( filename, filename )
-+ ),
-+
-+ TP_fast_assign(
-+ __assign_str(filename, filename);
-+ ),
-+
-+ TP_printk("\"%s\"",
-+ __get_str(filename))
-+);
-+
-+#endif /* _TRACE_FS_H */
-+
-+/* This part must be outside protection */
-+#include <trace/define_trace.h>
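
For orientation, the tracepoints generated by the definitions above are fired from the VFS; a hedged sketch of the call sites (abbreviated, not part of this hunk):

/*
 * Each TRACE_EVENT() above generates a trace_<name>() call which the VFS
 * side of the patch fires, roughly:
 *
 *	trace_do_sys_open(tmp, flags, mode);	in fs/open.c, do_sys_open()
 *	trace_open_exec(name);			in fs/exec.c, open_exec()
 *
 * Once tracing is enabled the events show up under
 * /sys/kernel/debug/tracing/events/fs/.
 */
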
-diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
-index 1c09820..7f5ec79 100644
---- a/include/trace/events/irq.h
-+++ b/include/trace/events/irq.h
-@@ -36,7 +36,7 @@ struct softirq_action;
- */
- TRACE_EVENT(irq_handler_entry,
-
-- TP_PROTO(int irq, struct irqaction *action),
-+ TP_PROTO(int irq, const struct irqaction *action),
-
- TP_ARGS(irq, action),
-
-@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
- */
- TRACE_EVENT(irq_handler_exit,
-
-- TP_PROTO(int irq, struct irqaction *action, int ret),
-+ TP_PROTO(int irq, const struct irqaction *action, int ret),
-
- TP_ARGS(irq, action, ret),
-
-diff --git a/include/trace/events/random.h b/include/trace/events/random.h
-new file mode 100644
-index 0000000..805af6d
---- /dev/null
-+++ b/include/trace/events/random.h
-@@ -0,0 +1,315 @@
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM random
-+
-+#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_RANDOM_H
-+
-+#include <linux/writeback.h>
-+#include <linux/tracepoint.h>
-+
-+TRACE_EVENT(add_device_randomness,
-+ TP_PROTO(int bytes, unsigned long IP),
-+
-+ TP_ARGS(bytes, IP),
-+
-+ TP_STRUCT__entry(
-+ __field( int, bytes )
-+ __field(unsigned long, IP )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->bytes = bytes;
-+ __entry->IP = IP;
-+ ),
-+
-+ TP_printk("bytes %d caller %pF",
-+ __entry->bytes, (void *)__entry->IP)
-+);
-+
-+DECLARE_EVENT_CLASS(random__mix_pool_bytes,
-+ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
-+
-+ TP_ARGS(pool_name, bytes, IP),
-+
-+ TP_STRUCT__entry(
-+ __field( const char *, pool_name )
-+ __field( int, bytes )
-+ __field(unsigned long, IP )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->pool_name = pool_name;
-+ __entry->bytes = bytes;
-+ __entry->IP = IP;
-+ ),
-+
-+ TP_printk("%s pool: bytes %d caller %pF",
-+ __entry->pool_name, __entry->bytes, (void *)__entry->IP)
-+);
-+
-+DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
-+ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
-+
-+ TP_ARGS(pool_name, bytes, IP)
-+);
-+
-+DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
-+ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
-+
-+ TP_ARGS(pool_name, bytes, IP)
-+);
-+
-+TRACE_EVENT(credit_entropy_bits,
-+ TP_PROTO(const char *pool_name, int bits, int entropy_count,
-+ int entropy_total, unsigned long IP),
-+
-+ TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP),
-+
-+ TP_STRUCT__entry(
-+ __field( const char *, pool_name )
-+ __field( int, bits )
-+ __field( int, entropy_count )
-+ __field( int, entropy_total )
-+ __field(unsigned long, IP )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->pool_name = pool_name;
-+ __entry->bits = bits;
-+ __entry->entropy_count = entropy_count;
-+ __entry->entropy_total = entropy_total;
-+ __entry->IP = IP;
-+ ),
-+
-+ TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
-+ "caller %pF", __entry->pool_name, __entry->bits,
-+ __entry->entropy_count, __entry->entropy_total,
-+ (void *)__entry->IP)
-+);
-+
-+TRACE_EVENT(push_to_pool,
-+ TP_PROTO(const char *pool_name, int pool_bits, int input_bits),
-+
-+ TP_ARGS(pool_name, pool_bits, input_bits),
-+
-+ TP_STRUCT__entry(
-+ __field( const char *, pool_name )
-+ __field( int, pool_bits )
-+ __field( int, input_bits )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->pool_name = pool_name;
-+ __entry->pool_bits = pool_bits;
-+ __entry->input_bits = input_bits;
-+ ),
-+
-+ TP_printk("%s: pool_bits %d input_pool_bits %d",
-+ __entry->pool_name, __entry->pool_bits,
-+ __entry->input_bits)
-+);
-+
-+TRACE_EVENT(debit_entropy,
-+ TP_PROTO(const char *pool_name, int debit_bits),
-+
-+ TP_ARGS(pool_name, debit_bits),
-+
-+ TP_STRUCT__entry(
-+ __field( const char *, pool_name )
-+ __field( int, debit_bits )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->pool_name = pool_name;
-+ __entry->debit_bits = debit_bits;
-+ ),
-+
-+ TP_printk("%s: debit_bits %d", __entry->pool_name,
-+ __entry->debit_bits)
-+);
-+
-+TRACE_EVENT(add_input_randomness,
-+ TP_PROTO(int input_bits),
-+
-+ TP_ARGS(input_bits),
-+
-+ TP_STRUCT__entry(
-+ __field( int, input_bits )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->input_bits = input_bits;
-+ ),
-+
-+ TP_printk("input_pool_bits %d", __entry->input_bits)
-+);
-+
-+TRACE_EVENT(add_disk_randomness,
-+ TP_PROTO(dev_t dev, int input_bits),
-+
-+ TP_ARGS(dev, input_bits),
-+
-+ TP_STRUCT__entry(
-+ __field( dev_t, dev )
-+ __field( int, input_bits )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->dev = dev;
-+ __entry->input_bits = input_bits;
-+ ),
-+
-+ TP_printk("dev %d,%d input_pool_bits %d", MAJOR(__entry->dev),
-+ MINOR(__entry->dev), __entry->input_bits)
-+);
-+
-+TRACE_EVENT(xfer_secondary_pool,
-+ TP_PROTO(const char *pool_name, int xfer_bits, int request_bits,
-+ int pool_entropy, int input_entropy),
-+
-+ TP_ARGS(pool_name, xfer_bits, request_bits, pool_entropy,
-+ input_entropy),
-+
-+ TP_STRUCT__entry(
-+ __field( const char *, pool_name )
-+ __field( int, xfer_bits )
-+ __field( int, request_bits )
-+ __field( int, pool_entropy )
-+ __field( int, input_entropy )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->pool_name = pool_name;
-+ __entry->xfer_bits = xfer_bits;
-+ __entry->request_bits = request_bits;
-+ __entry->pool_entropy = pool_entropy;
-+ __entry->input_entropy = input_entropy;
-+ ),
-+
-+ TP_printk("pool %s xfer_bits %d request_bits %d pool_entropy %d "
-+ "input_entropy %d", __entry->pool_name, __entry->xfer_bits,
-+ __entry->request_bits, __entry->pool_entropy,
-+ __entry->input_entropy)
-+);
-+
-+DECLARE_EVENT_CLASS(random__get_random_bytes,
-+ TP_PROTO(int nbytes, unsigned long IP),
-+
-+ TP_ARGS(nbytes, IP),
-+
-+ TP_STRUCT__entry(
-+ __field( int, nbytes )
-+ __field(unsigned long, IP )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->nbytes = nbytes;
-+ __entry->IP = IP;
-+ ),
-+
-+ TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
-+);
-+
-+DEFINE_EVENT(random__get_random_bytes, get_random_bytes,
-+ TP_PROTO(int nbytes, unsigned long IP),
-+
-+ TP_ARGS(nbytes, IP)
-+);
-+
-+DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch,
-+ TP_PROTO(int nbytes, unsigned long IP),
-+
-+ TP_ARGS(nbytes, IP)
-+);
-+
-+DECLARE_EVENT_CLASS(random__extract_entropy,
-+ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
-+ unsigned long IP),
-+
-+ TP_ARGS(pool_name, nbytes, entropy_count, IP),
-+
-+ TP_STRUCT__entry(
-+ __field( const char *, pool_name )
-+ __field( int, nbytes )
-+ __field( int, entropy_count )
-+ __field(unsigned long, IP )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->pool_name = pool_name;
-+ __entry->nbytes = nbytes;
-+ __entry->entropy_count = entropy_count;
-+ __entry->IP = IP;
-+ ),
-+
-+ TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
-+ __entry->pool_name, __entry->nbytes, __entry->entropy_count,
-+ (void *)__entry->IP)
-+);
-+
-+
-+DEFINE_EVENT(random__extract_entropy, extract_entropy,
-+ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
-+ unsigned long IP),
-+
-+ TP_ARGS(pool_name, nbytes, entropy_count, IP)
-+);
-+
-+DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
-+ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
-+ unsigned long IP),
-+
-+ TP_ARGS(pool_name, nbytes, entropy_count, IP)
-+);
-+
-+TRACE_EVENT(random_read,
-+ TP_PROTO(int got_bits, int need_bits, int pool_left, int input_left),
-+
-+ TP_ARGS(got_bits, need_bits, pool_left, input_left),
-+
-+ TP_STRUCT__entry(
-+ __field( int, got_bits )
-+ __field( int, need_bits )
-+ __field( int, pool_left )
-+ __field( int, input_left )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->got_bits = got_bits;
-+ __entry->need_bits = need_bits;
-+ __entry->pool_left = pool_left;
-+ __entry->input_left = input_left;
-+ ),
-+
-+ TP_printk("got_bits %d still_needed_bits %d "
-+ "blocking_pool_entropy_left %d input_entropy_left %d",
-+		  __entry->got_bits, __entry->need_bits, __entry->pool_left,
-+ __entry->input_left)
-+);
-+
-+TRACE_EVENT(urandom_read,
-+ TP_PROTO(int got_bits, int pool_left, int input_left),
-+
-+ TP_ARGS(got_bits, pool_left, input_left),
-+
-+ TP_STRUCT__entry(
-+ __field( int, got_bits )
-+ __field( int, pool_left )
-+ __field( int, input_left )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->got_bits = got_bits;
-+ __entry->pool_left = pool_left;
-+ __entry->input_left = input_left;
-+ ),
-+
-+ TP_printk("got_bits %d nonblocking_pool_entropy_left %d "
-+ "input_entropy_left %d", __entry->got_bits,
-+ __entry->pool_left, __entry->input_left)
-+);
-+
-+#endif /* _TRACE_RANDOM_H */
-+
-+/* This part must be outside protection */
-+#include <trace/define_trace.h>
-diff --git a/include/video/udlfb.h b/include/video/udlfb.h
-index c41f308..6918de3 100644
---- a/include/video/udlfb.h
-+++ b/include/video/udlfb.h
-@@ -52,10 +52,10 @@ struct dlfb_data {
- u32 pseudo_palette[256];
- int blank_mode; /*one of FB_BLANK_ */
- /* blit-only rendering path metrics, exposed through sysfs */
-- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
-- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
-- atomic_t bytes_sent; /* to usb, after compression including overhead */
-- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
-+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
-+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
-+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
-+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
- };
-
- #define NR_USB_REQUEST_I2C_SUB_IO 0x02
-diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
-index 0993a22..32ba2fe 100644
---- a/include/video/uvesafb.h
-+++ b/include/video/uvesafb.h
-@@ -177,6 +177,7 @@ struct uvesafb_par {
- u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
- u8 pmi_setpal; /* PMI for palette changes */
- u16 *pmi_base; /* protected mode interface location */
-+ u8 *pmi_code; /* protected mode code location */
- void *pmi_start;
- void *pmi_pal;
- u8 *vbe_state_orig; /*
-diff --git a/init/Kconfig b/init/Kconfig
-index b8dc1de..e4ce6c6 100644
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -1215,7 +1215,7 @@ config SLUB_DEBUG
-
- config COMPAT_BRK
- bool "Disable heap randomization"
-- default y
-+ default n
- help
- Randomizing heap placement makes heap exploits harder, but it
- also breaks ancient binaries (including anything libc5 based).
-@@ -1398,7 +1398,7 @@ config INIT_ALL_POSSIBLE
- config STOP_MACHINE
- bool
- default y
-- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
-+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
- help
- Need stop_machine() primitive.
-
-diff --git a/init/do_mounts.c b/init/do_mounts.c
-index d6c229f..4746631 100644
---- a/init/do_mounts.c
-+++ b/init/do_mounts.c
-@@ -325,11 +325,11 @@ static void __init get_fs_names(char *page)
-
- static int __init do_mount_root(char *name, char *fs, int flags, void *data)
- {
-- int err = sys_mount(name, "/root", fs, flags, data);
-+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
- if (err)
- return err;
-
-- sys_chdir((const char __user __force *)"/root");
-+ sys_chdir((const char __force_user*)"/root");
- ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
- printk(KERN_INFO
- "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
-@@ -448,18 +448,18 @@ void __init change_floppy(char *fmt, ...)
- va_start(args, fmt);
- vsprintf(buf, fmt, args);
- va_end(args);
-- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
-+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
- if (fd >= 0) {
- sys_ioctl(fd, FDEJECT, 0);
- sys_close(fd);
- }
- printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
-- fd = sys_open("/dev/console", O_RDWR, 0);
-+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
- if (fd >= 0) {
- sys_ioctl(fd, TCGETS, (long)&termios);
- termios.c_lflag &= ~ICANON;
- sys_ioctl(fd, TCSETSF, (long)&termios);
-- sys_read(fd, &c, 1);
-+ sys_read(fd, (char __user *)&c, 1);
- termios.c_lflag |= ICANON;
- sys_ioctl(fd, TCSETSF, (long)&termios);
- sys_close(fd);
-@@ -553,6 +553,6 @@ void __init prepare_namespace(void)
- mount_root();
- out:
- devtmpfs_mount("dev");
-- sys_mount(".", "/", NULL, MS_MOVE, NULL);
-- sys_chroot((const char __user __force *)".");
-+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
-+ sys_chroot((const char __force_user *)".");
- }
-diff --git a/init/do_mounts.h b/init/do_mounts.h
-index f5b978a..69dbfe8 100644
---- a/init/do_mounts.h
-+++ b/init/do_mounts.h
-@@ -15,15 +15,15 @@ extern int root_mountflags;
-
- static inline int create_dev(char *name, dev_t dev)
- {
-- sys_unlink(name);
-- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
-+ sys_unlink((char __force_user *)name);
-+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
- }
-
- #if BITS_PER_LONG == 32
- static inline u32 bstat(char *name)
- {
- struct stat64 stat;
-- if (sys_stat64(name, &stat) != 0)
-+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
- return 0;
- if (!S_ISBLK(stat.st_mode))
- return 0;
-@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
- static inline u32 bstat(char *name)
- {
- struct stat stat;
-- if (sys_newstat(name, &stat) != 0)
-+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
- return 0;
- if (!S_ISBLK(stat.st_mode))
- return 0;
-diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
-index 3098a38..253064e 100644
---- a/init/do_mounts_initrd.c
-+++ b/init/do_mounts_initrd.c
-@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
- create_dev("/dev/root.old", Root_RAM0);
- /* mount initrd on rootfs' /root */
- mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
-- sys_mkdir("/old", 0700);
-- root_fd = sys_open("/", 0, 0);
-- old_fd = sys_open("/old", 0, 0);
-+ sys_mkdir((const char __force_user *)"/old", 0700);
-+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
-+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
- /* move initrd over / and chdir/chroot in initrd root */
-- sys_chdir("/root");
-- sys_mount(".", "/", NULL, MS_MOVE, NULL);
-- sys_chroot(".");
-+ sys_chdir((const char __force_user *)"/root");
-+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
-+ sys_chroot((const char __force_user *)".");
-
- /*
- * In case that a resume from disk is carried out by linuxrc or one of
-@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
-
- /* move initrd to rootfs' /old */
- sys_fchdir(old_fd);
-- sys_mount("/", ".", NULL, MS_MOVE, NULL);
-+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
- /* switch root and cwd back to / of rootfs */
- sys_fchdir(root_fd);
-- sys_chroot(".");
-+ sys_chroot((const char __force_user *)".");
- sys_close(old_fd);
- sys_close(root_fd);
-
- if (new_decode_dev(real_root_dev) == Root_RAM0) {
-- sys_chdir("/old");
-+ sys_chdir((const char __force_user *)"/old");
- return;
- }
-
-@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
- mount_root();
-
- printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
-- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
-+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
- if (!error)
- printk("okay\n");
- else {
-- int fd = sys_open("/dev/root.old", O_RDWR, 0);
-+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
- if (error == -ENOENT)
- printk("/initrd does not exist. Ignored.\n");
- else
- printk("failed\n");
- printk(KERN_NOTICE "Unmounting old root\n");
-- sys_umount("/old", MNT_DETACH);
-+ sys_umount((char __force_user *)"/old", MNT_DETACH);
- printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
- if (fd < 0) {
- error = fd;
-@@ -116,11 +116,11 @@ int __init initrd_load(void)
- * mounted in the normal path.
- */
- if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
-- sys_unlink("/initrd.image");
-+ sys_unlink((const char __force_user *)"/initrd.image");
- handle_initrd();
- return 1;
- }
- }
-- sys_unlink("/initrd.image");
-+ sys_unlink((const char __force_user *)"/initrd.image");
- return 0;
- }
-diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
-index 32c4799..c27ee74 100644
---- a/init/do_mounts_md.c
-+++ b/init/do_mounts_md.c
-@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
- partitioned ? "_d" : "", minor,
- md_setup_args[ent].device_names);
-
-- fd = sys_open(name, 0, 0);
-+ fd = sys_open((char __force_user *)name, 0, 0);
- if (fd < 0) {
- printk(KERN_ERR "md: open failed - cannot start "
- "array %s\n", name);
-@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
- * array without it
- */
- sys_close(fd);
-- fd = sys_open(name, 0, 0);
-+ fd = sys_open((char __force_user *)name, 0, 0);
- sys_ioctl(fd, BLKRRPART, 0);
- }
- sys_close(fd);
-@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
-
- wait_for_device_probe();
-
-- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
-+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
- if (fd >= 0) {
- sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
- sys_close(fd);
-diff --git a/init/initramfs.c b/init/initramfs.c
-index 2531811..4f036c4 100644
---- a/init/initramfs.c
-+++ b/init/initramfs.c
-@@ -74,7 +74,7 @@ static void __init free_hash(void)
- }
- }
-
--static long __init do_utime(char __user *filename, time_t mtime)
-+static long __init do_utime(__force char __user *filename, time_t mtime)
- {
- struct timespec t[2];
-
-@@ -109,7 +109,7 @@ static void __init dir_utime(void)
- struct dir_entry *de, *tmp;
- list_for_each_entry_safe(de, tmp, &dir_list, list) {
- list_del(&de->list);
-- do_utime(de->name, de->mtime);
-+ do_utime((char __force_user *)de->name, de->mtime);
- kfree(de->name);
- kfree(de);
- }
-@@ -271,7 +271,7 @@ static int __init maybe_link(void)
- if (nlink >= 2) {
- char *old = find_link(major, minor, ino, mode, collected);
- if (old)
-- return (sys_link(old, collected) < 0) ? -1 : 1;
-+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
- }
- return 0;
- }
-@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
- {
- struct stat st;
-
-- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
-+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
- if (S_ISDIR(st.st_mode))
-- sys_rmdir(path);
-+ sys_rmdir((char __force_user *)path);
- else
-- sys_unlink(path);
-+ sys_unlink((char __force_user *)path);
- }
- }
-
-@@ -305,7 +305,7 @@ static int __init do_name(void)
- int openflags = O_WRONLY|O_CREAT;
- if (ml != 1)
- openflags |= O_TRUNC;
-- wfd = sys_open(collected, openflags, mode);
-+ wfd = sys_open((char __force_user *)collected, openflags, mode);
-
- if (wfd >= 0) {
- sys_fchown(wfd, uid, gid);
-@@ -317,17 +317,17 @@ static int __init do_name(void)
- }
- }
- } else if (S_ISDIR(mode)) {
-- sys_mkdir(collected, mode);
-- sys_chown(collected, uid, gid);
-- sys_chmod(collected, mode);
-+ sys_mkdir((char __force_user *)collected, mode);
-+ sys_chown((char __force_user *)collected, uid, gid);
-+ sys_chmod((char __force_user *)collected, mode);
- dir_add(collected, mtime);
- } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
- S_ISFIFO(mode) || S_ISSOCK(mode)) {
- if (maybe_link() == 0) {
-- sys_mknod(collected, mode, rdev);
-- sys_chown(collected, uid, gid);
-- sys_chmod(collected, mode);
-- do_utime(collected, mtime);
-+ sys_mknod((char __force_user *)collected, mode, rdev);
-+ sys_chown((char __force_user *)collected, uid, gid);
-+ sys_chmod((char __force_user *)collected, mode);
-+ do_utime((char __force_user *)collected, mtime);
- }
- }
- return 0;
-@@ -336,15 +336,15 @@ static int __init do_name(void)
- static int __init do_copy(void)
- {
- if (count >= body_len) {
-- sys_write(wfd, victim, body_len);
-+ sys_write(wfd, (char __force_user *)victim, body_len);
- sys_close(wfd);
-- do_utime(vcollected, mtime);
-+ do_utime((char __force_user *)vcollected, mtime);
- kfree(vcollected);
- eat(body_len);
- state = SkipIt;
- return 0;
- } else {
-- sys_write(wfd, victim, count);
-+ sys_write(wfd, (char __force_user *)victim, count);
- body_len -= count;
- eat(count);
- return 1;
-@@ -355,9 +355,9 @@ static int __init do_symlink(void)
- {
- collected[N_ALIGN(name_len) + body_len] = '\0';
- clean_path(collected, 0);
-- sys_symlink(collected + N_ALIGN(name_len), collected);
-- sys_lchown(collected, uid, gid);
-- do_utime(collected, mtime);
-+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
-+ sys_lchown((char __force_user *)collected, uid, gid);
-+ do_utime((char __force_user *)collected, mtime);
- state = SkipIt;
- next_state = Reset;
- return 0;
-@@ -573,7 +573,7 @@ static int __init populate_rootfs(void)
- {
- char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
- if (err)
-- panic(err); /* Failed to decompress INTERNAL initramfs */
-+ panic("%s", err); /* Failed to decompress INTERNAL initramfs */
- if (initrd_start) {
- #ifdef CONFIG_BLK_DEV_RAM
- int fd;
-diff --git a/init/main.c b/init/main.c
-index e937d9b..4700693 100644
---- a/init/main.c
-+++ b/init/main.c
-@@ -97,6 +97,8 @@ static inline void mark_rodata_ro(void) { }
- extern void tc_init(void);
- #endif
-
-+extern void grsecurity_init(void);
-+
- /*
- * Debug helper: via this flag we know that we are in 'early bootup code'
- * where only the boot processor is running with IRQ disabled. This means
-@@ -150,6 +152,74 @@ static int __init set_reset_devices(char *str)
-
- __setup("reset_devices", set_reset_devices);
-
-+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
-+int grsec_proc_gid = CONFIG_GRKERNSEC_PROC_GID;
-+static int __init setup_grsec_proc_gid(char *str)
-+{
-+ grsec_proc_gid = (int)simple_strtol(str, NULL, 0);
-+ return 1;
-+}
-+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
-+int grsec_enable_sysfs_restrict = 1;
-+static int __init setup_grsec_sysfs_restrict(char *str)
-+{
-+ if (!simple_strtol(str, NULL, 0))
-+ grsec_enable_sysfs_restrict = 0;
-+ return 1;
-+}
-+__setup("grsec_sysfs_restrict", setup_grsec_sysfs_restrict);
-+#endif
-+
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+unsigned long pax_user_shadow_base __read_only = 1UL << TASK_SIZE_MAX_SHIFT;
-+EXPORT_SYMBOL(pax_user_shadow_base);
-+extern char pax_enter_kernel_user[];
-+extern char pax_exit_kernel_user[];
-+extern pgdval_t clone_pgd_mask;
-+#endif
-+
-+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+static int __init setup_pax_nouderef(char *str)
-+{
-+#ifdef CONFIG_X86_32
-+ unsigned int cpu;
-+ struct desc_struct *gdt;
-+
-+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
-+ gdt = get_cpu_gdt_table(cpu);
-+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
-+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
-+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
-+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
-+ }
-+ loadsegment(ds, __KERNEL_DS);
-+ loadsegment(es, __KERNEL_DS);
-+ loadsegment(ss, __KERNEL_DS);
-+#else
-+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
-+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
-+ clone_pgd_mask = ~(pgdval_t)0UL;
-+ pax_user_shadow_base = 0UL;
-+#endif
-+
-+ return 0;
-+}
-+early_param("pax_nouderef", setup_pax_nouderef);
-+#endif
-+
-+#ifdef CONFIG_PAX_SOFTMODE
-+int pax_softmode;
-+
-+static int __init setup_pax_softmode(char *str)
-+{
-+ get_option(&str, &pax_softmode);
-+ return 1;
-+}
-+__setup("pax_softmode=", setup_pax_softmode);
-+#endif
-+
- static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
- const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
- static const char *panic_later, *panic_param;
-@@ -683,6 +753,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
- {
- int count = preempt_count();
- int ret;
-+ const char *msg1 = "", *msg2 = "";
-
- if (initcall_debug)
- ret = do_one_initcall_debug(fn);
-@@ -695,17 +766,18 @@ int __init_or_module do_one_initcall(initcall_t fn)
- sprintf(msgbuf, "error code %d ", ret);
-
- if (preempt_count() != count) {
-- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
-+ msg1 = " preemption imbalance";
- preempt_count() = count;
- }
- if (irqs_disabled()) {
-- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
-+ msg2 = " disabled interrupts";
- local_irq_enable();
- }
-- if (msgbuf[0]) {
-- printk("initcall %pF returned with %s\n", fn, msgbuf);
-+ if (msgbuf[0] || *msg1 || *msg2) {
-+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
- }
-
-+ add_latent_entropy();
- return ret;
- }
-
-@@ -754,6 +826,10 @@ static void run_init_process(const char *init_filename)
- kernel_execve(init_filename, argv_init, envp_init);
- }
-
-+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
-+extern int gr_init_ran;
-+#endif
-+
- /* This is a non __init function. Force it to be noinline otherwise gcc
- * makes it inline to init() and it becomes part of init.text section
- */
-@@ -775,6 +851,11 @@ static noinline int init_post(void)
- ramdisk_execute_command);
- }
-
-+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
-+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
-+ gr_init_ran = 1;
-+#endif
-+
- /*
- * We try each of these until one succeeds.
- *
-@@ -827,7 +908,7 @@ static int __init kernel_init(void * unused)
- do_basic_setup();
-
- /* Open the /dev/console on the rootfs, this should never fail */
-- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
-+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
- printk(KERN_WARNING "Warning: unable to open an initial console.\n");
-
- (void) sys_dup(0);
-@@ -840,11 +921,13 @@ static int __init kernel_init(void * unused)
- if (!ramdisk_execute_command)
- ramdisk_execute_command = "/init";
-
-- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
-+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
- ramdisk_execute_command = NULL;
- prepare_namespace();
- }
-
-+ grsecurity_init();
-+
- /*
- * Ok, we have completed the initial bootup, and
- * we're essentially up and running. Get rid of the
-diff --git a/ipc/compat.c b/ipc/compat.c
-index 845a287..6a0666b 100644
---- a/ipc/compat.c
-+++ b/ipc/compat.c
-@@ -672,7 +672,7 @@ long compat_sys_shmctl(int first, int second, void __user *uptr)
- }
-
- long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
-- unsigned nsops, const struct compat_timespec __user *timeout)
-+ compat_long_t nsops, const struct compat_timespec __user *timeout)
- {
- struct timespec __user *ts64 = NULL;
- if (timeout) {
-diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
-index 00fba2b..9afd545 100644
---- a/ipc/ipc_sysctl.c
-+++ b/ipc/ipc_sysctl.c
-@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
- static int proc_ipc_dointvec(ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
- {
-- struct ctl_table ipc_table;
-+ ctl_table_no_const ipc_table;
-
- memcpy(&ipc_table, table, sizeof(ipc_table));
- ipc_table.data = get_ipc(table);
-@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
- static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
- {
-- struct ctl_table ipc_table;
-+ ctl_table_no_const ipc_table;
-
- memcpy(&ipc_table, table, sizeof(ipc_table));
- ipc_table.data = get_ipc(table);
-@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
- static int proc_ipc_callback_dointvec(ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
- {
-- struct ctl_table ipc_table;
-+ ctl_table_no_const ipc_table;
- size_t lenp_bef = *lenp;
- int rc;
-
-@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
- static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
- {
-- struct ctl_table ipc_table;
-+ ctl_table_no_const ipc_table;
- memcpy(&ipc_table, table, sizeof(ipc_table));
- ipc_table.data = get_ipc(table);
-
-@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
- static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
- {
-- struct ctl_table ipc_table;
-+ ctl_table_no_const ipc_table;
- size_t lenp_bef = *lenp;
- int oldval;
- int rc;
-diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
-index 0c09366..c81a8ec 100644
---- a/ipc/mq_sysctl.c
-+++ b/ipc/mq_sysctl.c
-@@ -34,7 +34,7 @@ static void *get_mq(ctl_table *table)
- static int proc_mq_dointvec(ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
- {
-- struct ctl_table mq_table;
-+ ctl_table_no_const mq_table;
- memcpy(&mq_table, table, sizeof(mq_table));
- mq_table.data = get_mq(table);
-
-@@ -44,7 +44,7 @@ static int proc_mq_dointvec(ctl_table *table, int write,
- static int proc_mq_dointvec_minmax(ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
- {
-- struct ctl_table mq_table;
-+ ctl_table_no_const mq_table;
- memcpy(&mq_table, table, sizeof(mq_table));
- mq_table.data = get_mq(table);
-
-diff --git a/ipc/mqueue.c b/ipc/mqueue.c
-index 5b4293d..f179875 100644
---- a/ipc/mqueue.c
-+++ b/ipc/mqueue.c
-@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
- mq_bytes = (mq_msg_tblsz +
- (info->attr.mq_maxmsg * info->attr.mq_msgsize));
-
-+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
- spin_lock(&mq_lock);
- if (u->mq_bytes + mq_bytes < u->mq_bytes ||
- u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
-diff --git a/ipc/msg.c b/ipc/msg.c
-index 25f1a61..58f7ac1 100644
---- a/ipc/msg.c
-+++ b/ipc/msg.c
-@@ -311,18 +311,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
- return security_msg_queue_associate(msq, msgflg);
- }
-
-+static struct ipc_ops msg_ops = {
-+ .getnew = newque,
-+ .associate = msg_security,
-+ .more_checks = NULL
-+};
-+
- SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
- {
- struct ipc_namespace *ns;
-- struct ipc_ops msg_ops;
- struct ipc_params msg_params;
-
- ns = current->nsproxy->ipc_ns;
-
-- msg_ops.getnew = newque;
-- msg_ops.associate = msg_security;
-- msg_ops.more_checks = NULL;
--
- msg_params.key = key;
- msg_params.flg = msgflg;
-
-diff --git a/ipc/sem.c b/ipc/sem.c
-index 5215a81..bbbca2e 100644
---- a/ipc/sem.c
-+++ b/ipc/sem.c
-@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
- return 0;
- }
-
-+static struct ipc_ops sem_ops = {
-+ .getnew = newary,
-+ .associate = sem_security,
-+ .more_checks = sem_more_checks
-+};
-+
- SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
- {
- struct ipc_namespace *ns;
-- struct ipc_ops sem_ops;
- struct ipc_params sem_params;
-
- ns = current->nsproxy->ipc_ns;
-@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
- if (nsems < 0 || nsems > ns->sc_semmsl)
- return -EINVAL;
-
-- sem_ops.getnew = newary;
-- sem_ops.associate = sem_security;
-- sem_ops.more_checks = sem_more_checks;
--
- sem_params.key = key;
- sem_params.flg = semflg;
- sem_params.u.nsems = nsems;
-@@ -1328,7 +1329,7 @@ static int get_queue_result(struct sem_queue *q)
-
-
- SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
-- unsigned, nsops, const struct timespec __user *, timeout)
-+ long, nsops, const struct timespec __user *, timeout)
- {
- int error = -EINVAL;
- struct sem_array *sma;
-@@ -1546,7 +1547,7 @@ out_free:
- }
-
- SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
-- unsigned, nsops)
-+ long, nsops)
- {
- return sys_semtimedop(semid, tsops, nsops, NULL);
- }
-diff --git a/ipc/shm.c b/ipc/shm.c
-index 326a20b..62e6b7e 100644
---- a/ipc/shm.c
-+++ b/ipc/shm.c
-@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
- static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
- #endif
-
-+#ifdef CONFIG_GRKERNSEC
-+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
-+ const time_t shm_createtime, const uid_t cuid,
-+ const int shmid);
-+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
-+ const time_t shm_createtime);
-+#endif
-+
- void shm_init_ns(struct ipc_namespace *ns)
- {
- ns->shm_ctlmax = SHMMAX;
-@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
- shp->shm_lprid = 0;
- shp->shm_atim = shp->shm_dtim = 0;
- shp->shm_ctim = get_seconds();
-+#ifdef CONFIG_GRKERNSEC
-+ {
-+ struct timespec timeval;
-+ do_posix_clock_monotonic_gettime(&timeval);
-+
-+ shp->shm_createtime = timeval.tv_sec;
-+ }
-+#endif
- shp->shm_segsz = size;
- shp->shm_nattch = 0;
- shp->shm_file = file;
-@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
- return 0;
- }
-
-+static struct ipc_ops shm_ops = {
-+ .getnew = newseg,
-+ .associate = shm_security,
-+ .more_checks = shm_more_checks
-+};
-+
- SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
- {
- struct ipc_namespace *ns;
-- struct ipc_ops shm_ops;
- struct ipc_params shm_params;
-
- ns = current->nsproxy->ipc_ns;
-
-- shm_ops.getnew = newseg;
-- shm_ops.associate = shm_security;
-- shm_ops.more_checks = shm_more_checks;
--
- shm_params.key = key;
- shm_params.flg = shmflg;
- shm_params.u.size = size;
-@@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
- f_mode = FMODE_READ | FMODE_WRITE;
- }
- if (shmflg & SHM_EXEC) {
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
-+ goto out;
-+#endif
-+
- prot |= PROT_EXEC;
- acc_mode |= S_IXUGO;
- }
-@@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
- if (err)
- goto out_unlock;
-
-+#ifdef CONFIG_GRKERNSEC
-+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
-+ shp->shm_perm.cuid, shmid) ||
-+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
-+ err = -EACCES;
-+ goto out_unlock;
-+ }
-+#endif
-+
- path = shp->shm_file->f_path;
- path_get(&path);
- shp->shm_nattch++;
-+#ifdef CONFIG_GRKERNSEC
-+ shp->shm_lapid = current->pid;
-+#endif
- size = i_size_read(path.dentry->d_inode);
- shm_unlock(shp);
-
-diff --git a/ipc/util.c b/ipc/util.c
-index 75261a3..7060953 100644
---- a/ipc/util.c
-+++ b/ipc/util.c
-@@ -47,6 +47,8 @@ struct ipc_proc_iface {
- int (*show)(struct seq_file *, void *);
- };
-
-+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
-+
- #ifdef CONFIG_MEMORY_HOTPLUG
-
- static void ipc_memory_notifier(struct work_struct *work)
-@@ -617,6 +619,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
- granted_mode >>= 6;
- else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
- granted_mode >>= 3;
-+
-+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
-+ return -1;
-+
- /* is there some bit set in requested_mode but not in granted_mode? */
- if ((requested_mode & ~granted_mode & 0007) &&
- !ns_capable(ns->user_ns, CAP_IPC_OWNER))
-diff --git a/kernel/acct.c b/kernel/acct.c
-index fa7eb3d..7faf116 100644
---- a/kernel/acct.c
-+++ b/kernel/acct.c
-@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
- */
- flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
- current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
-- file->f_op->write(file, (char *)&ac,
-+ file->f_op->write(file, (char __force_user *)&ac,
- sizeof(acct_t), &file->f_pos);
- current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
- set_fs(fs);
-diff --git a/kernel/audit.c b/kernel/audit.c
-index e14bc74..bdf7f6c 100644
---- a/kernel/audit.c
-+++ b/kernel/audit.c
-@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
- 3) suppressed due to audit_rate_limit
- 4) suppressed due to audit_backlog_limit
- */
--static atomic_t audit_lost = ATOMIC_INIT(0);
-+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
-
- /* The netlink socket. */
- static struct sock *audit_sock;
-@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
- unsigned long now;
- int print;
-
-- atomic_inc(&audit_lost);
-+ atomic_inc_unchecked(&audit_lost);
-
- print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
-
-@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
- printk(KERN_WARNING
- "audit: audit_lost=%d audit_rate_limit=%d "
- "audit_backlog_limit=%d\n",
-- atomic_read(&audit_lost),
-+ atomic_read_unchecked(&audit_lost),
- audit_rate_limit,
- audit_backlog_limit);
- audit_panic(message);
-@@ -690,7 +690,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
- status_set.pid = audit_pid;
- status_set.rate_limit = audit_rate_limit;
- status_set.backlog_limit = audit_backlog_limit;
-- status_set.lost = atomic_read(&audit_lost);
-+ status_set.lost = atomic_read_unchecked(&audit_lost);
- status_set.backlog = skb_queue_len(&audit_skb_queue);
- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
- &status_set, sizeof(status_set));
-@@ -1261,12 +1261,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
- avail = audit_expand(ab,
- max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
- if (!avail)
-- goto out;
-+ goto out_va_end;
- len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
- }
-- va_end(args2);
- if (len > 0)
- skb_put(skb, len);
-+out_va_end:
-+ va_end(args2);
- out:
- return;
- }
-@@ -1307,7 +1308,7 @@ void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf,
- int i, avail, new_len;
- unsigned char *ptr;
- struct sk_buff *skb;
-- static const unsigned char *hex = "0123456789ABCDEF";
-+ static const unsigned char hex[] = "0123456789ABCDEF";
-
- if (!ab)
- return;
-diff --git a/kernel/auditsc.c b/kernel/auditsc.c
-index d1d2843..4408c0d 100644
---- a/kernel/auditsc.c
-+++ b/kernel/auditsc.c
-@@ -67,6 +67,7 @@
- #include <linux/syscalls.h>
- #include <linux/capability.h>
- #include <linux/fs_struct.h>
-+#include <linux/compat.h>
-
- #include "audit.h"
-
-@@ -1062,7 +1063,7 @@ static int audit_log_single_execve_arg(struct audit_context *context,
- * for strings that are too long, we should not have created
- * any.
- */
-- if (unlikely((len == -1) || len > MAX_ARG_STRLEN - 1)) {
-+ if (unlikely(len > MAX_ARG_STRLEN - 1)) {
- WARN_ON(1);
- send_sig(SIGKILL, current, 0);
- return -1;
-@@ -1177,8 +1178,8 @@ static void audit_log_execve_info(struct audit_context *context,
- struct audit_buffer **ab,
- struct audit_aux_data_execve *axi)
- {
-- int i;
-- size_t len, len_sent = 0;
-+ int i, len;
-+ size_t len_sent = 0;
- const char __user *p;
- char *buf;
-
-@@ -2129,7 +2130,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
- }
-
- /* global counter which is incremented every time something logs in */
--static atomic_t session_id = ATOMIC_INIT(0);
-+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
-
- /**
- * audit_set_loginuid - set a task's audit_context loginuid
-@@ -2140,9 +2141,9 @@ static atomic_t session_id = ATOMIC_INIT(0);
- *
- * Called (set) from fs/proc/base.c::proc_loginuid_write().
- */
--int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
-+int __intentional_overflow(-1) audit_set_loginuid(struct task_struct *task, uid_t loginuid)
- {
-- unsigned int sessionid = atomic_inc_return(&session_id);
-+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
- struct audit_context *context = task->audit_context;
-
- if (context && context->in_syscall) {
-@@ -2510,46 +2511,59 @@ void __audit_mmap_fd(int fd, int flags)
- context->type = AUDIT_MMAP;
- }
-
--/**
-- * audit_core_dumps - record information about processes that end abnormally
-- * @signr: signal value
-- *
-- * If a process ends with a core dump, something fishy is going on and we
-- * should record the event for investigation.
-- */
--void audit_core_dumps(long signr)
-+static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
- {
-- struct audit_buffer *ab;
-- u32 sid;
-- uid_t auid = audit_get_loginuid(current), uid;
-+ uid_t auid, uid;
- gid_t gid;
-- unsigned int sessionid = audit_get_sessionid(current);
-+ unsigned int sessionid;
-
-- if (!audit_enabled)
-- return;
--
-- if (signr == SIGQUIT) /* don't care for those */
-- return;
--
-- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
-+ auid = audit_get_loginuid(current);
-+ sessionid = audit_get_sessionid(current);
- current_uid_gid(&uid, &gid);
-+
- audit_log_format(ab, "auid=%u uid=%u gid=%u ses=%u",
- auid, uid, gid, sessionid);
-- security_task_getsecid(current, &sid);
-- if (sid) {
-- char *ctx = NULL;
-- u32 len;
--
-- if (security_secid_to_secctx(sid, &ctx, &len))
-- audit_log_format(ab, " ssid=%u", sid);
-- else {
-- audit_log_format(ab, " subj=%s", ctx);
-- security_release_secctx(ctx, len);
-- }
-- }
-+ audit_log_task_context(ab);
- audit_log_format(ab, " pid=%d comm=", current->pid);
- audit_log_untrustedstring(ab, current->comm);
-+ audit_log_format(ab, " reason=");
-+ audit_log_string(ab, reason);
- audit_log_format(ab, " sig=%ld", signr);
-+}
-+/**
-+ * audit_core_dumps - record information about processes that end abnormally
-+ * @signr: signal value
-+ *
-+ * If a process ends with a core dump, something fishy is going on and we
-+ * should record the event for investigation.
-+ */
-+void audit_core_dumps(long signr)
-+{
-+ struct audit_buffer *ab;
-+
-+ if (!audit_enabled)
-+ return;
-+
-+ if (signr == SIGQUIT) /* don't care for those */
-+ return;
-+
-+ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
-+ audit_log_abend(ab, "memory violation", signr);
-+ audit_log_end(ab);
-+}
-+
-+void __audit_seccomp(unsigned long syscall, long signr, int code)
-+{
-+ struct audit_buffer *ab;
-+
-+ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
-+ audit_log_abend(ab, "seccomp", signr);
-+ audit_log_format(ab, " syscall=%ld", syscall);
-+#ifdef CONFIG_COMPAT
-+ audit_log_format(ab, " compat=%d", is_compat_task());
-+#endif
-+ audit_log_format(ab, " ip=0x%lx", KSTK_EIP(current));
-+ audit_log_format(ab, " code=0x%x", code);
- audit_log_end(ab);
- }
-
-diff --git a/kernel/capability.c b/kernel/capability.c
-index b463871..59495fd 100644
---- a/kernel/capability.c
-+++ b/kernel/capability.c
-@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
- * before modification is attempted and the application
- * fails.
- */
-+ if (tocopy > ARRAY_SIZE(kdata))
-+ return -EFAULT;
-+
- if (copy_to_user(dataptr, kdata, tocopy
- * sizeof(struct __user_cap_data_struct))) {
- return -EFAULT;
-@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
- BUG();
- }
-
-- if (security_capable(ns, current_cred(), cap) == 0) {
-+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
- current->flags |= PF_SUPERPRIV;
- return true;
- }
-@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
- }
- EXPORT_SYMBOL(ns_capable);
-
-+bool ns_capable_nolog(struct user_namespace *ns, int cap)
-+{
-+ if (unlikely(!cap_valid(cap))) {
-+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
-+ BUG();
-+ }
-+
-+ if (security_capable_noaudit(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
-+ current->flags |= PF_SUPERPRIV;
-+ return true;
-+ }
-+ return false;
-+}
-+EXPORT_SYMBOL(ns_capable_nolog);
-+
-+bool capable_nolog(int cap)
-+{
-+ return ns_capable_nolog(&init_user_ns, cap);
-+}
-+EXPORT_SYMBOL(capable_nolog);
-+
- /**
- * task_ns_capable - Determine whether current task has a superior
- * capability targeted at a specific task's user namespace.
-@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
- }
- EXPORT_SYMBOL(task_ns_capable);
-
-+bool task_ns_capable_nolog(struct task_struct *t, int cap)
-+{
-+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
-+}
-+EXPORT_SYMBOL(task_ns_capable_nolog);
-+
- /**
- * nsown_capable - Check superior capability to one's own user_ns
- * @cap: The capability in question
-diff --git a/kernel/cgroup.c b/kernel/cgroup.c
-index eafb6dd..59c908d 100644
---- a/kernel/cgroup.c
-+++ b/kernel/cgroup.c
-@@ -4755,6 +4755,14 @@ static void cgroup_release_agent(struct work_struct *work)
- release_list);
- list_del_init(&cgrp->release_list);
- raw_spin_unlock(&release_list_lock);
-+
-+ /*
-+ * don't bother calling call_usermodehelper if we haven't
-+ * configured a binary to execute
-+ */
-+ if (cgrp->root->release_agent_path[0] == '\0')
-+ goto continue_free;
-+
- pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!pathbuf)
- goto continue_free;
-@@ -5174,7 +5182,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
- struct css_set *cg = link->cg;
- struct task_struct *task;
- int count = 0;
-- seq_printf(seq, "css_set %p\n", cg);
-+ seq_printf(seq, "css_set %pK\n", cg);
- list_for_each_entry(task, &cg->tasks, cg_list) {
- if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
- seq_puts(seq, " ...\n");
-diff --git a/kernel/compat.c b/kernel/compat.c
-index a6d0649..1e3815f 100644
---- a/kernel/compat.c
-+++ b/kernel/compat.c
-@@ -13,6 +13,7 @@
-
- #include <linux/linkage.h>
- #include <linux/compat.h>
-+#include <linux/module.h>
- #include <linux/errno.h>
- #include <linux/time.h>
- #include <linux/signal.h>
-@@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
- mm_segment_t oldfs;
- long ret;
-
-- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
-+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- ret = hrtimer_nanosleep_restart(restart);
-@@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- ret = hrtimer_nanosleep(&tu,
-- rmtp ? (struct timespec __user *)&rmt : NULL,
-+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
- HRTIMER_MODE_REL, CLOCK_MONOTONIC);
- set_fs(oldfs);
-
-@@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
-- ret = sys_sigpending((old_sigset_t __user *) &s);
-+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
- set_fs(old_fs);
- if (ret == 0)
- ret = put_user(s, set);
-@@ -399,7 +400,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
-- ret = sys_old_getrlimit(resource, &r);
-+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
- set_fs(old_fs);
-
- if (!ret) {
-@@ -471,7 +472,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
-- ret = sys_getrusage(who, (struct rusage __user *) &r);
-+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
- set_fs(old_fs);
-
- if (ret)
-@@ -498,8 +499,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
- set_fs (KERNEL_DS);
- ret = sys_wait4(pid,
- (stat_addr ?
-- (unsigned int __user *) &status : NULL),
-- options, (struct rusage __user *) &r);
-+ (unsigned int __force_user *) &status : NULL),
-+ options, (struct rusage __force_user *) &r);
- set_fs (old_fs);
-
- if (ret > 0) {
-@@ -524,8 +525,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
- memset(&info, 0, sizeof(info));
-
- set_fs(KERNEL_DS);
-- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
-- uru ? (struct rusage __user *)&ru : NULL);
-+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
-+ uru ? (struct rusage __force_user *)&ru : NULL);
- set_fs(old_fs);
-
- if ((ret < 0) || (info.si_signo == 0))
-@@ -655,8 +656,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_timer_settime(timer_id, flags,
-- (struct itimerspec __user *) &newts,
-- (struct itimerspec __user *) &oldts);
-+ (struct itimerspec __force_user *) &newts,
-+ (struct itimerspec __force_user *) &oldts);
- set_fs(oldfs);
- if (!err && old && put_compat_itimerspec(old, &oldts))
- return -EFAULT;
-@@ -673,7 +674,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_timer_gettime(timer_id,
-- (struct itimerspec __user *) &ts);
-+ (struct itimerspec __force_user *) &ts);
- set_fs(oldfs);
- if (!err && put_compat_itimerspec(setting, &ts))
- return -EFAULT;
-@@ -692,7 +693,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_clock_settime(which_clock,
-- (struct timespec __user *) &ts);
-+ (struct timespec __force_user *) &ts);
- set_fs(oldfs);
- return err;
- }
-@@ -707,7 +708,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_clock_gettime(which_clock,
-- (struct timespec __user *) &ts);
-+ (struct timespec __force_user *) &ts);
- set_fs(oldfs);
- if (!err && put_compat_timespec(&ts, tp))
- return -EFAULT;
-@@ -727,7 +728,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
-
- oldfs = get_fs();
- set_fs(KERNEL_DS);
-- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
-+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
- set_fs(oldfs);
-
- err = compat_put_timex(utp, &txc);
-@@ -747,7 +748,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_clock_getres(which_clock,
-- (struct timespec __user *) &ts);
-+ (struct timespec __force_user *) &ts);
- set_fs(oldfs);
- if (!err && tp && put_compat_timespec(&ts, tp))
- return -EFAULT;
-@@ -759,9 +760,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
- long err;
- mm_segment_t oldfs;
- struct timespec tu;
-- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
-+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
-
-- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
-+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- err = clock_nanosleep_restart(restart);
-@@ -793,8 +794,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_clock_nanosleep(which_clock, flags,
-- (struct timespec __user *) &in,
-- (struct timespec __user *) &out);
-+ (struct timespec __force_user *) &in,
-+ (struct timespec __force_user *) &out);
- set_fs(oldfs);
-
- if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
-@@ -855,7 +856,8 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
- * bitmap. We must however ensure the end of the
- * kernel bitmap is zeroed.
- */
-- if (nr_compat_longs-- > 0) {
-+ if (nr_compat_longs) {
-+ nr_compat_longs--;
- if (__get_user(um, umask))
- return -EFAULT;
- } else {
-@@ -897,7 +899,8 @@ long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
- * We dont want to write past the end of the userspace
- * bitmap.
- */
-- if (nr_compat_longs-- > 0) {
-+ if (nr_compat_longs) {
-+ nr_compat_longs--;
- if (__put_user(um, umask))
- return -EFAULT;
- }
-diff --git a/kernel/configs.c b/kernel/configs.c
-index 42e8fa0..9e7406b 100644
---- a/kernel/configs.c
-+++ b/kernel/configs.c
-@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
- struct proc_dir_entry *entry;
-
- /* create the current config file */
-+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
-+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
-+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
-+ &ikconfig_file_ops);
-+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
-+ &ikconfig_file_ops);
-+#endif
-+#else
- entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
- &ikconfig_file_ops);
-+#endif
-+
- if (!entry)
- return -ENOMEM;
-
-diff --git a/kernel/cred.c b/kernel/cred.c
-index 48c6fd3..cb63d13 100644
---- a/kernel/cred.c
-+++ b/kernel/cred.c
-@@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
- validate_creds(cred);
- put_cred(cred);
- }
-+
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ cred = (struct cred *) tsk->delayed_cred;
-+ if (cred) {
-+ tsk->delayed_cred = NULL;
-+ validate_creds(cred);
-+ put_cred(cred);
-+ }
-+#endif
- }
-
- /**
-@@ -472,7 +481,7 @@ error_put:
- * Always returns 0 thus allowing this function to be tail-called at the end
- * of, say, sys_setgid().
- */
--int commit_creds(struct cred *new)
-+static int __commit_creds(struct cred *new)
- {
- struct task_struct *task = current;
- const struct cred *old = task->real_cred;
-@@ -491,6 +500,8 @@ int commit_creds(struct cred *new)
-
- get_cred(new); /* we will require a ref for the subj creds too */
-
-+ gr_set_role_label(task, new->uid, new->gid);
-+
- /* dumpability changes */
- if (old->euid != new->euid ||
- old->egid != new->egid ||
-@@ -540,6 +551,107 @@ int commit_creds(struct cred *new)
- put_cred(old);
- return 0;
- }
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+extern int set_user(struct cred *new);
-+
-+void gr_delayed_cred_worker(void)
-+{
-+ const struct cred *new = current->delayed_cred;
-+ struct cred *ncred;
-+
-+ current->delayed_cred = NULL;
-+
-+ if (current_uid() && new != NULL) {
-+ // from doing get_cred on it when queueing this
-+ put_cred(new);
-+ return;
-+ } else if (new == NULL)
-+ return;
-+
-+ ncred = prepare_creds();
-+ if (!ncred)
-+ goto die;
-+ // uids
-+ ncred->uid = new->uid;
-+ ncred->euid = new->euid;
-+ ncred->suid = new->suid;
-+ ncred->fsuid = new->fsuid;
-+ // gids
-+ ncred->gid = new->gid;
-+ ncred->egid = new->egid;
-+ ncred->sgid = new->sgid;
-+ ncred->fsgid = new->fsgid;
-+ // groups
-+ if (set_groups(ncred, new->group_info) < 0) {
-+ abort_creds(ncred);
-+ goto die;
-+ }
-+ // caps
-+ ncred->securebits = new->securebits;
-+ ncred->cap_inheritable = new->cap_inheritable;
-+ ncred->cap_permitted = new->cap_permitted;
-+ ncred->cap_effective = new->cap_effective;
-+ ncred->cap_bset = new->cap_bset;
-+
-+ if (set_user(ncred)) {
-+ abort_creds(ncred);
-+ goto die;
-+ }
-+
-+ // from doing get_cred on it when queueing this
-+ put_cred(new);
-+
-+ __commit_creds(ncred);
-+ return;
-+die:
-+ // from doing get_cred on it when queueing this
-+ put_cred(new);
-+ do_group_exit(SIGKILL);
-+}
-+#endif
-+
-+int commit_creds(struct cred *new)
-+{
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ int ret;
-+ int schedule_it = 0;
-+ struct task_struct *t;
-+ unsigned oldsecurebits = current_cred()->securebits;
-+
-+ /* we won't get called with tasklist_lock held for writing
-+ and interrupts disabled as the cred struct in that case is
-+ init_cred
-+ */
-+ if (grsec_enable_setxid && !current_is_single_threaded() &&
-+ !current_uid() && new->uid) {
-+ schedule_it = 1;
-+ }
-+ ret = __commit_creds(new);
-+ if (schedule_it) {
-+ rcu_read_lock();
-+ read_lock(&tasklist_lock);
-+ for (t = next_thread(current); t != current;
-+ t = next_thread(t)) {
-+ /* we'll check if the thread has uid 0 in
-+ * the delayed worker routine
-+ */
-+ if (task_securebits(t) == oldsecurebits &&
-+ t->delayed_cred == NULL) {
-+ t->delayed_cred = get_cred(new);
-+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
-+ set_tsk_need_resched(t);
-+ }
-+ }
-+ read_unlock(&tasklist_lock);
-+ rcu_read_unlock();
-+ }
-+
-+ return ret;
-+#else
-+ return __commit_creds(new);
-+#endif
-+}
-+
- EXPORT_SYMBOL(commit_creds);
-
- /**
-diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
-index 7fda904..59f620c 100644
---- a/kernel/debug/debug_core.c
-+++ b/kernel/debug/debug_core.c
-@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
- */
- static atomic_t masters_in_kgdb;
- static atomic_t slaves_in_kgdb;
--static atomic_t kgdb_break_tasklet_var;
-+static atomic_unchecked_t kgdb_break_tasklet_var;
- atomic_t kgdb_setting_breakpoint;
-
- struct task_struct *kgdb_usethread;
-@@ -129,7 +129,7 @@ int kgdb_single_step;
- static pid_t kgdb_sstep_pid;
-
- /* to keep track of the CPU which is doing the single stepping*/
--atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
-+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
-
- /*
- * If you are debugging a problem where roundup (the collection of
-@@ -537,7 +537,7 @@ return_normal:
- * kernel will only try for the value of sstep_tries before
- * giving up and continuing on.
- */
-- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
-+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
- (kgdb_info[cpu].task &&
- kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
- atomic_set(&kgdb_active, -1);
-@@ -631,8 +631,8 @@ cpu_master_loop:
- }
-
- kgdb_restore:
-- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
-- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
-+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
-+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
- if (kgdb_info[sstep_cpu].task)
- kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
- else
-@@ -829,18 +829,18 @@ static void kgdb_unregister_callbacks(void)
- static void kgdb_tasklet_bpt(unsigned long ing)
- {
- kgdb_breakpoint();
-- atomic_set(&kgdb_break_tasklet_var, 0);
-+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
- }
-
- static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
-
- void kgdb_schedule_breakpoint(void)
- {
-- if (atomic_read(&kgdb_break_tasklet_var) ||
-+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
- atomic_read(&kgdb_active) != -1 ||
- atomic_read(&kgdb_setting_breakpoint))
- return;
-- atomic_inc(&kgdb_break_tasklet_var);
-+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
- tasklet_schedule(&kgdb_tasklet_breakpoint);
- }
- EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
-diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
-index f56af55..657c675 100644
---- a/kernel/debug/kdb/kdb_main.c
-+++ b/kernel/debug/kdb/kdb_main.c
-@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
- list_for_each_entry(mod, kdb_modules, list) {
-
- kdb_printf("%-20s%8u 0x%p ", mod->name,
-- mod->core_size, (void *)mod);
-+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
- #ifdef CONFIG_MODULE_UNLOAD
- kdb_printf("%4d ", module_refcount(mod));
- #endif
-@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
- kdb_printf(" (Loading)");
- else
- kdb_printf(" (Live)");
-- kdb_printf(" 0x%p", mod->module_core);
-+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
-
- #ifdef CONFIG_MODULE_UNLOAD
- {
-diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 4277095..c1440e1 100644
---- a/kernel/events/core.c
-+++ b/kernel/events/core.c
-@@ -146,8 +146,15 @@ static struct srcu_struct pmus_srcu;
- * 0 - disallow raw tracepoint access for unpriv
- * 1 - disallow cpu events for unpriv
- * 2 - disallow kernel profiling for unpriv
-+ * 3 - disallow all unpriv perf event use
- */
--int sysctl_perf_event_paranoid __read_mostly = 1;
-+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
-+int sysctl_perf_event_legitimately_concerned __read_only = 3;
-+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
-+int sysctl_perf_event_legitimately_concerned __read_only = 2;
-+#else
-+int sysctl_perf_event_legitimately_concerned __read_only = 1;
-+#endif
-
- /* Minimum for 512 kiB + 1 user control page */
- int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
-@@ -174,7 +181,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
- return 0;
- }
-
--static atomic64_t perf_event_id;
-+static atomic64_unchecked_t perf_event_id;
-
- static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
- enum event_type_t event_type);
-@@ -2600,7 +2607,7 @@ static void __perf_event_read(void *info)
-
- static inline u64 perf_event_count(struct perf_event *event)
- {
-- return local64_read(&event->count) + atomic64_read(&event->child_count);
-+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
- }
-
- static u64 perf_event_read(struct perf_event *event)
-@@ -3143,9 +3150,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
- mutex_lock(&event->child_mutex);
- total += perf_event_read(event);
- *enabled += event->total_time_enabled +
-- atomic64_read(&event->child_total_time_enabled);
-+ atomic64_read_unchecked(&event->child_total_time_enabled);
- *running += event->total_time_running +
-- atomic64_read(&event->child_total_time_running);
-+ atomic64_read_unchecked(&event->child_total_time_running);
-
- list_for_each_entry(child, &event->child_list, child_list) {
- total += perf_event_read(child);
-@@ -3556,10 +3563,10 @@ void perf_event_update_userpage(struct perf_event *event)
- userpg->offset -= local64_read(&event->hw.prev_count);
-
- userpg->time_enabled = enabled +
-- atomic64_read(&event->child_total_time_enabled);
-+ atomic64_read_unchecked(&event->child_total_time_enabled);
-
- userpg->time_running = running +
-- atomic64_read(&event->child_total_time_running);
-+ atomic64_read_unchecked(&event->child_total_time_running);
-
- barrier();
- ++userpg->lock;
-@@ -4077,11 +4084,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
- values[n++] = perf_event_count(event);
- if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
- values[n++] = enabled +
-- atomic64_read(&event->child_total_time_enabled);
-+ atomic64_read_unchecked(&event->child_total_time_enabled);
- }
- if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
- values[n++] = running +
-- atomic64_read(&event->child_total_time_running);
-+ atomic64_read_unchecked(&event->child_total_time_running);
- }
- if (read_format & PERF_FORMAT_ID)
- values[n++] = primary_event_id(event);
-@@ -4732,12 +4739,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
- * need to add enough zero bytes after the string to handle
- * the 64bit alignment we do later.
- */
-- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
-+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
- if (!buf) {
- name = strncpy(tmp, "//enomem", sizeof(tmp));
- goto got_name;
- }
-- name = d_path(&file->f_path, buf, PATH_MAX);
-+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
- if (IS_ERR(name)) {
- name = strncpy(tmp, "//toolong", sizeof(tmp));
- goto got_name;
-@@ -6103,7 +6110,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
- event->parent = parent_event;
-
- event->ns = get_pid_ns(current->nsproxy->pid_ns);
-- event->id = atomic64_inc_return(&perf_event_id);
-+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
-
- event->state = PERF_EVENT_STATE_INACTIVE;
-
-@@ -6349,6 +6356,11 @@ SYSCALL_DEFINE5(perf_event_open,
- if (flags & ~PERF_FLAG_ALL)
- return -EINVAL;
-
-+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
-+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
-+ return -EACCES;
-+#endif
-+
- err = perf_copy_attr(attr_uptr, &attr);
- if (err)
- return err;
-@@ -6647,10 +6659,10 @@ static void sync_child_event(struct perf_event *child_event,
- /*
- * Add back the child's count to the parent's count:
- */
-- atomic64_add(child_val, &parent_event->child_count);
-- atomic64_add(child_event->total_time_enabled,
-+ atomic64_add_unchecked(child_val, &parent_event->child_count);
-+ atomic64_add_unchecked(child_event->total_time_enabled,
- &parent_event->child_total_time_enabled);
-- atomic64_add(child_event->total_time_running,
-+ atomic64_add_unchecked(child_event->total_time_running,
- &parent_event->child_total_time_running);
-
- /*
-diff --git a/kernel/events/internal.h b/kernel/events/internal.h
-index a2101bb..f2e0354 100644
---- a/kernel/events/internal.h
-+++ b/kernel/events/internal.h
-@@ -78,7 +78,7 @@ static unsigned long perf_data_size(struct ring_buffer *rb)
-
- static inline void
- __output_copy(struct perf_output_handle *handle,
-- const void *buf, unsigned int len)
-+ const void *buf, unsigned long len)
- {
- do {
- unsigned long size = min_t(unsigned long, handle->size, len);
-diff --git a/kernel/exit.c b/kernel/exit.c
-index fde15f9..99f1b97 100644
---- a/kernel/exit.c
-+++ b/kernel/exit.c
-@@ -168,6 +168,10 @@ void release_task(struct task_struct * p)
- struct task_struct *leader;
- int zap_leader;
- repeat:
-+#ifdef CONFIG_NET
-+ gr_del_task_from_ip_table(p);
-+#endif
-+
- /* don't need to get the RCU readlock here - the process is dead and
- * can't be modifying its own credentials. But shut RCU-lockdep up */
- rcu_read_lock();
-@@ -380,7 +384,7 @@ int allow_signal(int sig)
- * know it'll be handled, so that they don't get converted to
- * SIGKILL or just silently dropped.
- */
-- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
-+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- return 0;
-@@ -402,56 +406,6 @@ int disallow_signal(int sig)
-
- EXPORT_SYMBOL(disallow_signal);
-
--/*
-- * Put all the gunge required to become a kernel thread without
-- * attached user resources in one place where it belongs.
-- */
--
--void daemonize(const char *name, ...)
--{
-- va_list args;
-- sigset_t blocked;
--
-- va_start(args, name);
-- vsnprintf(current->comm, sizeof(current->comm), name, args);
-- va_end(args);
--
-- /*
-- * If we were started as result of loading a module, close all of the
-- * user space pages. We don't need them, and if we didn't close them
-- * they would be locked into memory.
-- */
-- exit_mm(current);
-- /*
-- * We don't want to have TIF_FREEZE set if the system-wide hibernation
-- * or suspend transition begins right now.
-- */
-- current->flags |= (PF_NOFREEZE | PF_KTHREAD);
--
-- if (current->nsproxy != &init_nsproxy) {
-- get_nsproxy(&init_nsproxy);
-- switch_task_namespaces(current, &init_nsproxy);
-- }
-- set_special_pids(&init_struct_pid);
-- proc_clear_tty(current);
--
-- /* Block and flush all signals */
-- sigfillset(&blocked);
-- sigprocmask(SIG_BLOCK, &blocked, NULL);
-- flush_signals(current);
--
-- /* Become as one with the init task */
--
-- daemonize_fs_struct();
-- exit_files(current);
-- current->files = init_task.files;
-- atomic_inc(&current->files->count);
--
-- reparent_to_kthreadd();
--}
--
--EXPORT_SYMBOL(daemonize);
--
- static void close_files(struct files_struct * files)
- {
- int i, j;
-@@ -881,6 +835,8 @@ NORET_TYPE void do_exit(long code)
- struct task_struct *tsk = current;
- int group_dead;
-
-+ set_fs(USER_DS);
-+
- profile_task_exit(tsk);
-
- WARN_ON(blk_needs_flush_plug(tsk));
-@@ -897,7 +853,6 @@ NORET_TYPE void do_exit(long code)
- * mm_release()->clear_child_tid() from writing to a user-controlled
- * kernel address.
- */
-- set_fs(USER_DS);
-
- ptrace_event(PTRACE_EVENT_EXIT, code);
-
-@@ -959,6 +914,9 @@ NORET_TYPE void do_exit(long code)
- tsk->exit_code = code;
- taskstats_exit(tsk, group_dead);
-
-+ gr_acl_handle_psacct(tsk, code);
-+ gr_acl_handle_exit();
-+
- exit_mm(tsk);
-
- if (group_dead)
-@@ -1072,7 +1030,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
- * Take down every thread in the group. This is called by fatal signals
- * as well as by sys_exit_group (below).
- */
--NORET_TYPE void
-+__noreturn void
- do_group_exit(int exit_code)
- {
- struct signal_struct *sig = current->signal;
-diff --git a/kernel/fork.c b/kernel/fork.c
-index 29b4604..ee14dbd 100644
---- a/kernel/fork.c
-+++ b/kernel/fork.c
-@@ -34,6 +34,7 @@
- #include <linux/cgroup.h>
- #include <linux/security.h>
- #include <linux/hugetlb.h>
-+#include <linux/seccomp.h>
- #include <linux/swap.h>
- #include <linux/syscalls.h>
- #include <linux/jiffies.h>
-@@ -137,6 +138,49 @@ static inline void free_thread_info(struct thread_info *ti)
- }
- #endif
-
-+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
-+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
-+ int node, void **lowmem_stack)
-+{
-+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
-+ void *ret = NULL;
-+ unsigned int i;
-+
-+ *lowmem_stack = alloc_thread_info_node(tsk, node);
-+ if (*lowmem_stack == NULL)
-+ goto out;
-+
-+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
-+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
-+
-+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
-+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
-+ if (ret == NULL) {
-+ free_thread_info(*lowmem_stack);
-+ *lowmem_stack = NULL;
-+ }
-+
-+out:
-+ return ret;
-+}
-+
-+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
-+{
-+ unmap_process_stacks(tsk);
-+}
-+#else
-+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
-+ int node, void **lowmem_stack)
-+{
-+ return alloc_thread_info_node(tsk, node);
-+}
-+
-+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
-+{
-+ free_thread_info(ti);
-+}
-+#endif
-+
- /* SLAB cache for signal_struct structures (tsk->signal) */
- static struct kmem_cache *signal_cachep;
-
-@@ -155,19 +199,24 @@ struct kmem_cache *vm_area_cachep;
- /* SLAB cache for mm_struct structures (tsk->mm) */
- static struct kmem_cache *mm_cachep;
-
--static void account_kernel_stack(struct thread_info *ti, int account)
-+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
- {
-+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
-+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
-+#else
- struct zone *zone = page_zone(virt_to_page(ti));
-+#endif
-
- mod_zone_page_state(zone, NR_KERNEL_STACK, account);
- }
-
- void free_task(struct task_struct *tsk)
- {
-- account_kernel_stack(tsk->stack, -1);
-- free_thread_info(tsk->stack);
-+ account_kernel_stack(tsk, tsk->stack, -1);
-+ gr_free_thread_info(tsk, tsk->stack);
- rt_mutex_debug_task_free(tsk);
- ftrace_graph_exit_task(tsk);
-+ put_seccomp_filter(tsk);
- free_task_struct(tsk);
- }
- EXPORT_SYMBOL(free_task);
-@@ -254,6 +303,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
- struct task_struct *tsk;
- struct thread_info *ti;
- unsigned long *stackend;
-+ void *lowmem_stack;
- int node = tsk_fork_get_node(orig);
- int err;
-
-@@ -263,26 +313,34 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
- if (!tsk)
- return NULL;
-
-- ti = alloc_thread_info_node(tsk, node);
-+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
- if (!ti) {
- free_task_struct(tsk);
- return NULL;
- }
-
- err = arch_dup_task_struct(tsk, orig);
-- if (err)
-- goto out;
-
-+ /*
-+ * We defer looking at err, because we will need this setup
-+ * for the clean up path to work correctly.
-+ */
- tsk->stack = ti;
--
-+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
-+ tsk->lowmem_stack = lowmem_stack;
-+#endif
- setup_thread_stack(tsk, orig);
-+
-+ if (err)
-+ goto out;
-+
- clear_user_return_notifier(tsk);
- clear_tsk_need_resched(tsk);
- stackend = end_of_stack(tsk);
- *stackend = STACK_END_MAGIC; /* for overflow detection */
-
- #ifdef CONFIG_CC_STACKPROTECTOR
-- tsk->stack_canary = get_random_int();
-+ tsk->stack_canary = pax_get_random_long();
- #endif
-
- /*
-@@ -295,24 +353,89 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
- #endif
- tsk->splice_pipe = NULL;
-
-- account_kernel_stack(ti, 1);
-+ account_kernel_stack(tsk, ti, 1);
-
- return tsk;
-
- out:
-- free_thread_info(ti);
-+ gr_free_thread_info(tsk, ti);
- free_task_struct(tsk);
- return NULL;
- }
-
- #ifdef CONFIG_MMU
--static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
-+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
- {
-- struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
-- struct rb_node **rb_link, *rb_parent;
-- int retval;
-+ struct vm_area_struct *tmp;
- unsigned long charge;
- struct mempolicy *pol;
-+ struct file *file;
-+
-+ charge = 0;
-+ if (mpnt->vm_flags & VM_ACCOUNT) {
-+ unsigned long len;
-+ len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
-+ if (security_vm_enough_memory(len))
-+ goto fail_nomem;
-+ charge = len;
-+ }
-+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
-+ if (!tmp)
-+ goto fail_nomem;
-+ *tmp = *mpnt;
-+ tmp->vm_mm = mm;
-+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
-+ pol = mpol_dup(vma_policy(mpnt));
-+ if (IS_ERR(pol))
-+ goto fail_nomem_policy;
-+ vma_set_policy(tmp, pol);
-+ if (anon_vma_fork(tmp, mpnt))
-+ goto fail_nomem_anon_vma_fork;
-+ tmp->vm_flags &= ~VM_LOCKED;
-+ tmp->vm_next = tmp->vm_prev = NULL;
-+ tmp->vm_mirror = NULL;
-+ file = tmp->vm_file;
-+ if (file) {
-+ struct inode *inode = file->f_path.dentry->d_inode;
-+ struct address_space *mapping = file->f_mapping;
-+
-+ get_file(file);
-+ if (tmp->vm_flags & VM_DENYWRITE)
-+ atomic_dec(&inode->i_writecount);
-+ mutex_lock(&mapping->i_mmap_mutex);
-+ if (tmp->vm_flags & VM_SHARED)
-+ mapping->i_mmap_writable++;
-+ flush_dcache_mmap_lock(mapping);
-+ /* insert tmp into the share list, just after mpnt */
-+ vma_prio_tree_add(tmp, mpnt);
-+ flush_dcache_mmap_unlock(mapping);
-+ mutex_unlock(&mapping->i_mmap_mutex);
-+ }
-+
-+ /*
-+ * Clear hugetlb-related page reserves for children. This only
-+ * affects MAP_PRIVATE mappings. Faults generated by the child
-+ * are not guaranteed to succeed, even if read-only
-+ */
-+ if (is_vm_hugetlb_page(tmp))
-+ reset_vma_resv_huge_pages(tmp);
-+
-+ return tmp;
-+
-+fail_nomem_anon_vma_fork:
-+ mpol_put(pol);
-+fail_nomem_policy:
-+ kmem_cache_free(vm_area_cachep, tmp);
-+fail_nomem:
-+ vm_unacct_memory(charge);
-+ return NULL;
-+}
-+
-+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
-+{
-+ struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
-+ struct rb_node **rb_link, *rb_parent;
-+ int retval;
-
- down_write(&oldmm->mmap_sem);
- flush_cache_dup_mm(oldmm);
-@@ -324,8 +447,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
- mm->locked_vm = 0;
- mm->mmap = NULL;
- mm->mmap_cache = NULL;
-- mm->free_area_cache = oldmm->mmap_base;
-- mm->cached_hole_size = ~0UL;
-+ mm->free_area_cache = oldmm->free_area_cache;
-+ mm->cached_hole_size = oldmm->cached_hole_size;
- mm->map_count = 0;
- cpumask_clear(mm_cpumask(mm));
- mm->mm_rb = RB_ROOT;
-@@ -341,63 +464,16 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
-
- prev = NULL;
- for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
-- struct file *file;
--
- if (mpnt->vm_flags & VM_DONTCOPY) {
-- long pages = vma_pages(mpnt);
-- mm->total_vm -= pages;
- vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
-- -pages);
-+ -vma_pages(mpnt));
- continue;
- }
-- charge = 0;
-- if (mpnt->vm_flags & VM_ACCOUNT) {
-- unsigned long len;
-- len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
-- if (security_vm_enough_memory(len))
-- goto fail_nomem;
-- charge = len;
-+ tmp = dup_vma(mm, mpnt);
-+ if (!tmp) {
-+ retval = -ENOMEM;
-+ goto out;
- }
-- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
-- if (!tmp)
-- goto fail_nomem;
-- *tmp = *mpnt;
-- INIT_LIST_HEAD(&tmp->anon_vma_chain);
-- pol = mpol_dup(vma_policy(mpnt));
-- retval = PTR_ERR(pol);
-- if (IS_ERR(pol))
-- goto fail_nomem_policy;
-- vma_set_policy(tmp, pol);
-- tmp->vm_mm = mm;
-- if (anon_vma_fork(tmp, mpnt))
-- goto fail_nomem_anon_vma_fork;
-- tmp->vm_flags &= ~VM_LOCKED;
-- tmp->vm_next = tmp->vm_prev = NULL;
-- file = tmp->vm_file;
-- if (file) {
-- struct inode *inode = file->f_path.dentry->d_inode;
-- struct address_space *mapping = file->f_mapping;
--
-- get_file(file);
-- if (tmp->vm_flags & VM_DENYWRITE)
-- atomic_dec(&inode->i_writecount);
-- mutex_lock(&mapping->i_mmap_mutex);
-- if (tmp->vm_flags & VM_SHARED)
-- mapping->i_mmap_writable++;
-- flush_dcache_mmap_lock(mapping);
-- /* insert tmp into the share list, just after mpnt */
-- vma_prio_tree_add(tmp, mpnt);
-- flush_dcache_mmap_unlock(mapping);
-- mutex_unlock(&mapping->i_mmap_mutex);
-- }
--
-- /*
-- * Clear hugetlb-related page reserves for children. This only
-- * affects MAP_PRIVATE mappings. Faults generated by the child
-- * are not guaranteed to succeed, even if read-only
-- */
-- if (is_vm_hugetlb_page(tmp))
-- reset_vma_resv_huge_pages(tmp);
-
- /*
- * Link in the new vma and copy the page table entries.
-@@ -420,6 +496,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
- if (retval)
- goto out;
- }
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
-+ struct vm_area_struct *mpnt_m;
-+
-+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
-+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
-+
-+ if (!mpnt->vm_mirror)
-+ continue;
-+
-+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
-+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
-+ mpnt->vm_mirror = mpnt_m;
-+ } else {
-+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
-+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
-+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
-+ mpnt->vm_mirror->vm_mirror = mpnt;
-+ }
-+ }
-+ BUG_ON(mpnt_m);
-+ }
-+#endif
-+
- /* a new mm has just been created */
- arch_dup_mmap(oldmm, mm);
- retval = 0;
-@@ -428,14 +529,6 @@ out:
- flush_tlb_mm(oldmm);
- up_write(&oldmm->mmap_sem);
- return retval;
--fail_nomem_anon_vma_fork:
-- mpol_put(pol);
--fail_nomem_policy:
-- kmem_cache_free(vm_area_cachep, tmp);
--fail_nomem:
-- retval = -ENOMEM;
-- vm_unacct_memory(charge);
-- goto out;
- }
-
- static inline int mm_alloc_pgd(struct mm_struct *mm)
-@@ -647,6 +740,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
- }
- EXPORT_SYMBOL_GPL(get_task_mm);
-
-+struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
-+{
-+ struct mm_struct *mm;
-+ int err;
-+
-+ err = mutex_lock_killable(&task->signal->cred_guard_mutex);
-+ if (err)
-+ return ERR_PTR(err);
-+
-+ mm = get_task_mm(task);
-+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
-+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
-+ mmput(mm);
-+ mm = ERR_PTR(-EACCES);
-+ }
-+ mutex_unlock(&task->signal->cred_guard_mutex);
-+
-+ return mm;
-+}
-+
- /* Please note the differences between mmput and mm_release.
- * mmput is called whenever we stop holding onto a mm_struct,
- * error success whatever.
-@@ -832,13 +945,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
- spin_unlock(&fs->lock);
- return -EAGAIN;
- }
-- fs->users++;
-+ atomic_inc(&fs->users);
- spin_unlock(&fs->lock);
- return 0;
- }
- tsk->fs = copy_fs_struct(fs);
- if (!tsk->fs)
- return -ENOMEM;
-+ /* Carry through gr_chroot_dentry and is_chrooted instead
-+ of recomputing it here. Already copied when the task struct
-+ is duplicated. This allows pivot_root to not be treated as
-+ a chroot
-+ */
-+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
-+
- return 0;
- }
-
-@@ -1047,7 +1167,7 @@ static void posix_cpu_timers_init(struct task_struct *tsk)
- * parts of the process environment (as per the clone
- * flags). The actual kick-off is left to the caller.
- */
--static struct task_struct *copy_process(unsigned long clone_flags,
-+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
- unsigned long stack_start,
- struct pt_regs *regs,
- unsigned long stack_size,
-@@ -1096,6 +1216,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
- goto fork_out;
-
- ftrace_graph_init_task(p);
-+ get_seccomp_filter(p);
-
- rt_mutex_init_task(p);
-
-@@ -1104,10 +1225,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
- DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
- #endif
- retval = -EAGAIN;
-+
-+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
-+
- if (atomic_read(&p->real_cred->user->processes) >=
- task_rlimit(p, RLIMIT_NPROC)) {
-- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
-- p->real_cred->user != INIT_USER)
-+ if (p->real_cred->user != INIT_USER &&
-+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
- goto bad_fork_free;
- }
- current->flags &= ~PF_NPROC_EXCEEDED;
-@@ -1341,6 +1465,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
- goto bad_fork_free_pid;
- }
-
-+ /* synchronizes with gr_set_acls()
-+ we need to call this past the point of no return for fork()
-+ */
-+ gr_copy_label(p);
-+
- if (clone_flags & CLONE_THREAD) {
- current->signal->nr_threads++;
- atomic_inc(&current->signal->live);
-@@ -1424,6 +1553,8 @@ bad_fork_cleanup_count:
- bad_fork_free:
- free_task(p);
- fork_out:
-+ gr_log_forkfail(retval);
-+
- return ERR_PTR(retval);
- }
-
-@@ -1510,6 +1641,7 @@ long do_fork(unsigned long clone_flags,
-
- p = copy_process(clone_flags, stack_start, regs, stack_size,
- child_tidptr, NULL, trace);
-+ add_latent_entropy();
- /*
- * Do this prior waking up the new thread - the thread pointer
- * might get invalid after that point, if the thread exits quickly.
-@@ -1526,6 +1658,8 @@ long do_fork(unsigned long clone_flags,
- if (clone_flags & CLONE_PARENT_SETTID)
- put_user(nr, parent_tidptr);
-
-+ gr_handle_brute_check();
-+
- if (clone_flags & CLONE_VFORK) {
- p->vfork_done = &vfork;
- init_completion(&vfork);
-@@ -1598,7 +1732,7 @@ void __init proc_caches_init(void)
- mm_cachep = kmem_cache_create("mm_struct",
- sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
-- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
-+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
- mmap_init();
- nsproxy_cache_init();
- }
-@@ -1637,7 +1771,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
- return 0;
-
- /* don't need lock here; in the worst case we'll do useless copy */
-- if (fs->users == 1)
-+ if (atomic_read(&fs->users) == 1)
- return 0;
-
- *new_fsp = copy_fs_struct(fs);
-@@ -1726,7 +1860,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
- fs = current->fs;
- spin_lock(&fs->lock);
- current->fs = new_fs;
-- if (--fs->users)
-+ gr_set_chroot_entries(current, &current->fs->root);
-+ if (atomic_dec_return(&fs->users))
- new_fs = NULL;
- else
- new_fs = fs;
-diff --git a/kernel/futex.c b/kernel/futex.c
-index 7481595..64a53fb 100644
---- a/kernel/futex.c
-+++ b/kernel/futex.c
-@@ -54,6 +54,7 @@
- #include <linux/mount.h>
- #include <linux/pagemap.h>
- #include <linux/syscalls.h>
-+#include <linux/ptrace.h>
- #include <linux/signal.h>
- #include <linux/export.h>
- #include <linux/magic.h>
-@@ -97,7 +98,7 @@ struct futex_pi_state {
- atomic_t refcount;
-
- union futex_key key;
--};
-+} __randomize_layout;
-
- /**
- * struct futex_q - The hashed futex queue entry, one per waiting task
-@@ -131,7 +132,7 @@ struct futex_q {
- struct rt_mutex_waiter *rt_waiter;
- union futex_key *requeue_pi_key;
- u32 bitset;
--};
-+} __randomize_layout;
-
- static const struct futex_q futex_q_init = {
- /* list gets initialized in queue_me()*/
-@@ -190,6 +191,8 @@ static void get_futex_key_refs(union futex_key *key)
- case FUT_OFF_MMSHARED:
- atomic_inc(&key->private.mm->mm_count);
- break;
-+ default:
-+ smp_mb(); /* explicit MB (B) */
- }
- }
-
-@@ -240,6 +243,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
- struct page *page, *page_head;
- int err, ro = 0;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
-+ return -EFAULT;
-+#endif
-+
- /*
- * The futex address must be "naturally" aligned.
- */
-@@ -438,7 +446,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
-
- static int get_futex_value_locked(u32 *dest, u32 __user *from)
- {
-- int ret;
-+ unsigned long ret;
-
- pagefault_disable();
- ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
-@@ -2878,6 +2886,7 @@ static int __init futex_init(void)
- {
- u32 curval;
- int i;
-+ mm_segment_t oldfs;
-
- /*
- * This will fail and we want it. Some arch implementations do
-@@ -2889,8 +2898,11 @@ static int __init futex_init(void)
- * implementation, the non-functional ones will return
- * -ENOSYS.
- */
-+ oldfs = get_fs();
-+ set_fs(USER_DS);
- if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
- futex_cmpxchg_enabled = 1;
-+ set_fs(oldfs);
-
- for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
- plist_head_init(&futex_queues[i].chain);
-diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
-index a9642d5..51eb98c 100644
---- a/kernel/futex_compat.c
-+++ b/kernel/futex_compat.c
-@@ -31,7 +31,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
- return 0;
- }
-
--static void __user *futex_uaddr(struct robust_list __user *entry,
-+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
- compat_long_t futex_offset)
- {
- compat_uptr_t base = ptr_to_compat(entry);
-diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
-index 9b22d03..6295b62 100644
---- a/kernel/gcov/base.c
-+++ b/kernel/gcov/base.c
-@@ -102,11 +102,6 @@ void gcov_enable_events(void)
- }
-
- #ifdef CONFIG_MODULES
--static inline int within(void *addr, void *start, unsigned long size)
--{
-- return ((addr >= start) && (addr < start + size));
--}
--
- /* Update list and generate events when modules are unloaded. */
- static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
- void *data)
-@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
- prev = NULL;
- /* Remove entries located in module from linked list. */
- for (info = gcov_info_head; info; info = info->next) {
-- if (within(info, mod->module_core, mod->core_size)) {
-+ if (within_module_core_rw((unsigned long)info, mod)) {
- if (prev)
- prev->next = info->next;
- else
-diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
-index 20e88af..ec1b0d2 100644
---- a/kernel/hrtimer.c
-+++ b/kernel/hrtimer.c
-@@ -1436,7 +1436,7 @@ void hrtimer_peek_ahead_timers(void)
- local_irq_restore(flags);
- }
-
--static void run_hrtimer_softirq(struct softirq_action *h)
-+static __latent_entropy void run_hrtimer_softirq(void)
- {
- struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-
-@@ -1778,7 +1778,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata hrtimers_nb = {
-+static struct notifier_block hrtimers_nb = {
- .notifier_call = hrtimer_cpu_notify,
- };
-
-diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
-index 127a32e..129057f 100644
---- a/kernel/irq/manage.c
-+++ b/kernel/irq/manage.c
-@@ -814,7 +814,7 @@ static int irq_thread(void *data)
- raw_spin_unlock_irq(&desc->lock);
- action_ret = handler_fn(desc, action);
- if (action_ret == IRQ_HANDLED)
-- atomic_inc(&desc->threads_handled);
-+ atomic_inc_unchecked(&desc->threads_handled);
- }
-
- wake = atomic_dec_and_test(&desc->threads_active);
-@@ -900,22 +900,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
- return -ENOSYS;
- if (!try_module_get(desc->owner))
- return -ENODEV;
-- /*
-- * Some drivers like serial.c use request_irq() heavily,
-- * so we have to be careful not to interfere with a
-- * running system.
-- */
-- if (new->flags & IRQF_SAMPLE_RANDOM) {
-- /*
-- * This function might sleep, we want to call it first,
-- * outside of the atomic block.
-- * Yes, this might clear the entropy pool if the wrong
-- * driver is attempted to be loaded, without actually
-- * installing a new handler, but is this really a problem,
-- * only the sysadmin is able to do this.
-- */
-- rand_initialize_irq(irq);
-- }
-
- /*
- * Check whether the interrupt nests into another interrupt
-@@ -1361,7 +1345,6 @@ EXPORT_SYMBOL(free_irq);
- * Flags:
- *
- * IRQF_SHARED Interrupt is shared
-- * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
- * IRQF_TRIGGER_* Specify active edge(s) or level
- *
- */
-diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
-index 6d426eb..01b2d87 100644
---- a/kernel/irq/spurious.c
-+++ b/kernel/irq/spurious.c
-@@ -331,7 +331,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
- * count. We just care about the count being
- * different than the one we saw before.
- */
-- handled = atomic_read(&desc->threads_handled);
-+ handled = atomic_read_unchecked(&desc->threads_handled);
- handled |= SPURIOUS_DEFERRED;
- if (handled != desc->threads_handled_last) {
- action_ret = IRQ_HANDLED;
-diff --git a/kernel/jump_label.c b/kernel/jump_label.c
-index 66ff710..794bc5a 100644
---- a/kernel/jump_label.c
-+++ b/kernel/jump_label.c
-@@ -13,6 +13,7 @@
- #include <linux/sort.h>
- #include <linux/err.h>
- #include <linux/jump_label.h>
-+#include <linux/mm.h>
-
- #ifdef HAVE_JUMP_LABEL
-
-@@ -55,7 +56,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
-
- size = (((unsigned long)stop - (unsigned long)start)
- / sizeof(struct jump_entry));
-+ pax_open_kernel();
- sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
-+ pax_close_kernel();
- }
-
- static void jump_label_update(struct jump_label_key *key, int enable);
-@@ -303,10 +306,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
- struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
- struct jump_entry *iter;
-
-+ pax_open_kernel();
- for (iter = iter_start; iter < iter_stop; iter++) {
- if (within_module_init(iter->code, mod))
- iter->code = 0;
- }
-+ pax_close_kernel();
- }
-
- static int
-diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
-index 079f1d3..d712c9c 100644
---- a/kernel/kallsyms.c
-+++ b/kernel/kallsyms.c
-@@ -11,6 +11,9 @@
- * Changed the compression method from stem compression to "table lookup"
- * compression (see scripts/kallsyms.c for a more complete description)
- */
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+#define __INCLUDED_BY_HIDESYM 1
-+#endif
- #include <linux/kallsyms.h>
- #include <linux/module.h>
- #include <linux/init.h>
-@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
-
- static inline int is_kernel_inittext(unsigned long addr)
- {
-+ if (system_state != SYSTEM_BOOTING)
-+ return 0;
-+
- if (addr >= (unsigned long)_sinittext
- && addr <= (unsigned long)_einittext)
- return 1;
- return 0;
- }
-
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+#ifdef CONFIG_MODULES
-+static inline int is_module_text(unsigned long addr)
-+{
-+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
-+ return 1;
-+
-+ addr = ktla_ktva(addr);
-+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
-+}
-+#else
-+static inline int is_module_text(unsigned long addr)
-+{
-+ return 0;
-+}
-+#endif
-+#endif
-+
- static inline int is_kernel_text(unsigned long addr)
- {
- if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
-@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
-
- static inline int is_kernel(unsigned long addr)
- {
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
-+ return 1;
-+
-+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
-+#else
- if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
-+#endif
-+
- return 1;
- return in_gate_area_no_mm(addr);
- }
-
- static int is_ksym_addr(unsigned long addr)
- {
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ if (is_module_text(addr))
-+ return 0;
-+#endif
-+
- if (all_var)
- return is_kernel(addr);
-
-@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
-
- static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
- {
-- iter->name[0] = '\0';
- iter->nameoff = get_symbol_offset(new_pos);
- iter->pos = new_pos;
- }
-@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
- {
- struct kallsym_iter *iter = m->private;
-
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ if (current_uid())
-+ return 0;
-+#endif
-+
- /* Some debugging symbols have no name. Ignore them. */
- if (!iter->name[0])
- return 0;
-@@ -515,6 +558,7 @@ static int s_show(struct seq_file *m, void *p)
- */
- type = iter->exported ? toupper(iter->type) :
- tolower(iter->type);
-+
- seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
- type, iter->name, iter->module_name);
- } else
-@@ -540,7 +584,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
- struct kallsym_iter *iter;
- int ret;
-
-- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
-+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
- if (!iter)
- return -ENOMEM;
- reset_iter(iter, 0);
-diff --git a/kernel/kexec.c b/kernel/kexec.c
-index dc7bc08..4601964 100644
---- a/kernel/kexec.c
-+++ b/kernel/kexec.c
-@@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
- unsigned long flags)
- {
- struct compat_kexec_segment in;
-- struct kexec_segment out, __user *ksegments;
-+ struct kexec_segment out;
-+ struct kexec_segment __user *ksegments;
- unsigned long i, result;
-
- /* Don't allow clients that don't understand the native
-diff --git a/kernel/kmod.c b/kernel/kmod.c
-index a16dac1..e744189 100644
---- a/kernel/kmod.c
-+++ b/kernel/kmod.c
-@@ -64,7 +64,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
- kfree(info->argv);
- }
-
--static int call_modprobe(char *module_name, int wait)
-+static int call_modprobe(char *module_name, char *module_param, int wait)
- {
- static char *envp[] = {
- "HOME=/",
-@@ -73,7 +73,7 @@ static int call_modprobe(char *module_name, int wait)
- NULL
- };
-
-- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
-+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
- if (!argv)
- goto out;
-
-@@ -85,7 +85,8 @@ static int call_modprobe(char *module_name, int wait)
- argv[1] = "-q";
- argv[2] = "--";
- argv[3] = module_name; /* check free_modprobe_argv() */
-- argv[4] = NULL;
-+ argv[4] = module_param;
-+ argv[5] = NULL;
-
- return call_usermodehelper_fns(modprobe_path, argv, envp,
- wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
-@@ -110,9 +111,8 @@ out:
- * If module auto-loading support is disabled then this function
- * becomes a no-operation.
- */
--int __request_module(bool wait, const char *fmt, ...)
-+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
- {
-- va_list args;
- char module_name[MODULE_NAME_LEN];
- unsigned int max_modprobes;
- int ret;
-@@ -120,9 +120,7 @@ int __request_module(bool wait, const char *fmt, ...)
- #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
- static int kmod_loop_msg;
-
-- va_start(args, fmt);
-- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
-- va_end(args);
-+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
- if (ret >= MODULE_NAME_LEN)
- return -ENAMETOOLONG;
-
-@@ -130,6 +128,20 @@ int __request_module(bool wait, const char *fmt, ...)
- if (ret)
- return ret;
-
-+#ifdef CONFIG_GRKERNSEC_MODHARDEN
-+ if (!current_uid()) {
-+ /* hack to workaround consolekit/udisks stupidity */
-+ read_lock(&tasklist_lock);
-+ if (!strcmp(current->comm, "mount") &&
-+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
-+ read_unlock(&tasklist_lock);
-+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
-+ return -EPERM;
-+ }
-+ read_unlock(&tasklist_lock);
-+ }
-+#endif
-+
- /* If modprobe needs a service that is in a module, we get a recursive
- * loop. Limit the number of running kmod threads to max_threads/2 or
- * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
-@@ -158,11 +170,52 @@ int __request_module(bool wait, const char *fmt, ...)
-
- trace_module_request(module_name, wait, _RET_IP_);
-
-- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
-+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
-
- atomic_dec(&kmod_concurrent);
- return ret;
- }
-+
-+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
-+{
-+ va_list args;
-+ int ret;
-+
-+ va_start(args, fmt);
-+ ret = ____request_module(wait, module_param, fmt, args);
-+ va_end(args);
-+
-+ return ret;
-+}
-+
-+int __request_module(bool wait, const char *fmt, ...)
-+{
-+ va_list args;
-+ int ret;
-+
-+#ifdef CONFIG_GRKERNSEC_MODHARDEN
-+ if (current_uid()) {
-+ char module_param[MODULE_NAME_LEN];
-+
-+ memset(module_param, 0, sizeof(module_param));
-+
-+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
-+
-+ va_start(args, fmt);
-+ ret = ____request_module(wait, module_param, fmt, args);
-+ va_end(args);
-+
-+ return ret;
-+ }
-+#endif
-+
-+ va_start(args, fmt);
-+ ret = ____request_module(wait, NULL, fmt, args);
-+ va_end(args);
-+
-+ return ret;
-+}
-+
- EXPORT_SYMBOL(__request_module);
- #endif /* CONFIG_MODULES */
-
-@@ -188,6 +241,21 @@ static int ____call_usermodehelper(void *data)
- */
- set_user_nice(current, 0);
-
-+#ifdef CONFIG_GRKERNSEC
-+ /* this is race-free as far as userland is concerned as we copied
-+ out the path to be used prior to this point and are now operating
-+ on that copy
-+ */
-+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
-+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
-+ strncmp(sub_info->path, "/usr/libexec/", 13) && strncmp(sub_info->path, "/usr/bin/", 9) &&
-+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
-+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of permitted system paths\n", sub_info->path);
-+ retval = -EPERM;
-+ goto fail;
-+ }
-+#endif
-+
- retval = -ENOMEM;
- new = prepare_kernel_cred(current);
- if (!new)
-@@ -221,6 +289,10 @@ fail:
-
- void call_usermodehelper_freeinfo(struct subprocess_info *info)
- {
-+#ifdef CONFIG_GRKERNSEC
-+ kfree(info->path);
-+ info->path = info->origpath;
-+#endif
- if (info->cleanup)
- (*info->cleanup)(info);
- kfree(info);
-@@ -265,7 +337,7 @@ static int wait_for_helper(void *data)
- *
- * Thus the __user pointer cast is valid here.
- */
-- sys_wait4(pid, (int __user *)&ret, 0, NULL);
-+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
-
- /*
- * If ret is 0, either ____call_usermodehelper failed and the
-@@ -413,7 +485,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
- goto out;
-
- INIT_WORK(&sub_info->work, __call_usermodehelper);
-+#ifdef CONFIG_GRKERNSEC
-+ sub_info->origpath = path;
-+ sub_info->path = kstrdup(path, gfp_mask);
-+#else
- sub_info->path = path;
-+#endif
- sub_info->argv = argv;
- sub_info->envp = envp;
- out:
-@@ -512,7 +589,7 @@ EXPORT_SYMBOL(call_usermodehelper_exec);
- static int proc_cap_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
- {
-- struct ctl_table t;
-+ ctl_table_no_const t;
- unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
- kernel_cap_t new_cap;
- int err, i;
-diff --git a/kernel/kprobes.c b/kernel/kprobes.c
-index bc90b87..32da385 100644
---- a/kernel/kprobes.c
-+++ b/kernel/kprobes.c
-@@ -31,6 +31,9 @@
- * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
- * <prasanna@in.ibm.com> added function-return probes.
- */
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+#define __INCLUDED_BY_HIDESYM 1
-+#endif
- #include <linux/kprobes.h>
- #include <linux/hash.h>
- #include <linux/init.h>
-@@ -185,7 +188,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
- * kernel image and loaded module images reside. This is required
- * so x86_64 can correctly handle the %rip-relative fixups.
- */
-- kip->insns = module_alloc(PAGE_SIZE);
-+ kip->insns = module_alloc_exec(PAGE_SIZE);
- if (!kip->insns) {
- kfree(kip);
- return NULL;
-@@ -225,7 +228,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
- */
- if (!list_is_singular(&kip->list)) {
- list_del(&kip->list);
-- module_free(NULL, kip->insns);
-+ module_free_exec(NULL, kip->insns);
- kfree(kip);
- }
- return 1;
-@@ -1955,7 +1958,7 @@ static int __init init_kprobes(void)
- {
- int i, err = 0;
- unsigned long offset = 0, size = 0;
-- char *modname, namebuf[128];
-+ char *modname, namebuf[KSYM_NAME_LEN];
- const char *symbol_name;
- void *addr;
- struct kprobe_blackpoint *kb;
-@@ -2040,11 +2043,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
- kprobe_type = "k";
-
- if (sym)
-- seq_printf(pi, "%p %s %s+0x%x %s ",
-+ seq_printf(pi, "%pK %s %s+0x%x %s ",
- p->addr, kprobe_type, sym, offset,
- (modname ? modname : " "));
- else
-- seq_printf(pi, "%p %s %p ",
-+ seq_printf(pi, "%pK %s %pK ",
- p->addr, kprobe_type, p->addr);
-
- if (!pp)
-@@ -2081,7 +2084,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
- const char *sym = NULL;
- unsigned int i = *(loff_t *) v;
- unsigned long offset = 0;
-- char *modname, namebuf[128];
-+ char *modname, namebuf[KSYM_NAME_LEN];
-
- head = &kprobe_table[i];
- preempt_disable();
-@@ -2204,7 +2207,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
- {
- char buf[32];
-- int buf_size;
-+ size_t buf_size;
-
- buf_size = min(count, (sizeof(buf)-1));
- if (copy_from_user(buf, user_buf, buf_size))
-diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
-index 4e316e1..a2879b6 100644
---- a/kernel/ksysfs.c
-+++ b/kernel/ksysfs.c
-@@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
- {
- if (count+1 > UEVENT_HELPER_PATH_LEN)
- return -ENOENT;
-+ if (!capable(CAP_SYS_ADMIN))
-+ return -EPERM;
- memcpy(uevent_helper, buf, count);
- uevent_helper[count] = '\0';
- if (count && uevent_helper[count-1] == '\n')
-@@ -156,7 +158,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
- return count;
- }
-
--static struct bin_attribute notes_attr = {
-+static bin_attribute_no_const notes_attr __read_only = {
- .attr = {
- .name = "notes",
- .mode = S_IRUGO,
-diff --git a/kernel/lockdep.c b/kernel/lockdep.c
-index b2e08c9..01d8049 100644
---- a/kernel/lockdep.c
-+++ b/kernel/lockdep.c
-@@ -592,6 +592,10 @@ static int static_obj(void *obj)
- end = (unsigned long) &_end,
- addr = (unsigned long) obj;
-
-+#ifdef CONFIG_PAX_KERNEXEC
-+ start = ktla_ktva(start);
-+#endif
-+
- /*
- * static variable?
- */
-@@ -731,6 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
- if (!static_obj(lock->key)) {
- debug_locks_off();
- printk("INFO: trying to register non-static key.\n");
-+ printk("lock:%pS key:%pS.\n", lock, lock->key);
- printk("the code is fine but needs lockdep annotation.\n");
- printk("turning off the locking correctness validator.\n");
- dump_stack();
-@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
- if (!class)
- return 0;
- }
-- atomic_inc((atomic_t *)&class->ops);
-+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
- if (very_verbose(class)) {
- printk("\nacquire class [%p] %s", class->key, class->name);
- if (class->name_version > 1)
-diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
-index 91c32a0..7b88d63 100644
---- a/kernel/lockdep_proc.c
-+++ b/kernel/lockdep_proc.c
-@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
-
- static void print_name(struct seq_file *m, struct lock_class *class)
- {
-- char str[128];
-+ char str[KSYM_NAME_LEN];
- const char *name = class->name;
-
- if (!name) {
-@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
- return 0;
- }
-
-- seq_printf(m, "%p", class->key);
-+ seq_printf(m, "%pK", class->key);
- #ifdef CONFIG_DEBUG_LOCKDEP
- seq_printf(m, " OPS:%8ld", class->ops);
- #endif
-@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
-
- list_for_each_entry(entry, &class->locks_after, entry) {
- if (entry->distance == 1) {
-- seq_printf(m, " -> [%p] ", entry->class->key);
-+ seq_printf(m, " -> [%pK] ", entry->class->key);
- print_name(m, entry->class);
- seq_puts(m, "\n");
- }
-@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
- if (!class->key)
- continue;
-
-- seq_printf(m, "[%p] ", class->key);
-+ seq_printf(m, "[%pK] ", class->key);
- print_name(m, class);
- seq_puts(m, "\n");
- }
-@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
- if (!i)
- seq_line(m, '-', 40-namelen, namelen);
-
-- snprintf(ip, sizeof(ip), "[<%p>]",
-+ snprintf(ip, sizeof(ip), "[<%pK>]",
- (void *)class->contention_point[i]);
- seq_printf(m, "%40s %14lu %29s %pS\n",
- name, stats->contention_point[i],
-@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
- if (!i)
- seq_line(m, '-', 40-namelen, namelen);
-
-- snprintf(ip, sizeof(ip), "[<%p>]",
-+ snprintf(ip, sizeof(ip), "[<%pK>]",
- (void *)class->contending_point[i]);
- seq_printf(m, "%40s %14lu %29s %pS\n",
- name, stats->contending_point[i],
-diff --git a/kernel/module.c b/kernel/module.c
-index 95ecd9f..db549a6 100644
---- a/kernel/module.c
-+++ b/kernel/module.c
-@@ -58,6 +58,7 @@
- #include <linux/jump_label.h>
- #include <linux/pfn.h>
- #include <linux/bsearch.h>
-+#include <linux/grsecurity.h>
-
- #define CREATE_TRACE_POINTS
- #include <trace/events/module.h>
-@@ -110,7 +111,7 @@ struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
-
-
- /* Block module loading/unloading? */
--int modules_disabled = 0;
-+int modules_disabled __read_only = 0;
-
- /* Waiting for a module to finish initializing? */
- static DECLARE_WAIT_QUEUE_HEAD(module_wq);
-@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
-
- /* Bounds of module allocation, for speeding __module_address.
- * Protected by module_mutex. */
--static unsigned long module_addr_min = -1UL, module_addr_max = 0;
-+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
-+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
-
- int register_module_notifier(struct notifier_block * nb)
- {
-@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
- return true;
-
- list_for_each_entry_rcu(mod, &modules, list) {
-- struct symsearch arr[] = {
-+ struct symsearch modarr[] = {
- { mod->syms, mod->syms + mod->num_syms, mod->crcs,
- NOT_GPL_ONLY, false },
- { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
-@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
- #endif
- };
-
-- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
-+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
- return true;
- }
- return false;
-@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
- static int percpu_modalloc(struct module *mod,
- unsigned long size, unsigned long align)
- {
-- if (align > PAGE_SIZE) {
-+ if (align-1 >= PAGE_SIZE) {
- printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
- mod->name, align, PAGE_SIZE);
- align = PAGE_SIZE;
-@@ -1062,13 +1064,29 @@ static int check_version(Elf_Shdr *sechdrs,
- goto bad_version;
- }
-
-- printk(KERN_WARNING "%s: no symbol version for %s\n",
-- mod->name, symname);
-+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
-+ /*
-+	 * avoid potentially printing gibberish on attempted load
-+ * of a module randomized with a different seed
-+ */
-+ pr_warn("no symbol version for %s\n", symname);
-+#else
-+ pr_warn("%s: no symbol version for %s\n", mod->name, symname);
-+#endif
- return 0;
-
- bad_version:
-+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
-+ /*
-+	 * avoid potentially printing gibberish on attempted load
-+ * of a module randomized with a different seed
-+ */
-+ printk("attempted module disagrees about version of symbol %s\n",
-+ symname);
-+#else
- printk("%s: disagrees about version of symbol %s\n",
- mod->name, symname);
-+#endif
- return 0;
- }
-
-@@ -1183,7 +1201,7 @@ resolve_symbol_wait(struct module *mod,
- */
- #ifdef CONFIG_SYSFS
-
--#ifdef CONFIG_KALLSYMS
-+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
- static inline bool sect_empty(const Elf_Shdr *sect)
- {
- return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
-@@ -1323,7 +1341,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
- {
- unsigned int notes, loaded, i;
- struct module_notes_attrs *notes_attrs;
-- struct bin_attribute *nattr;
-+ bin_attribute_no_const *nattr;
-
- /* failed to create section attributes, so can't create notes */
- if (!mod->sect_attrs)
-@@ -1435,7 +1453,7 @@ static void del_usage_links(struct module *mod)
- static int module_add_modinfo_attrs(struct module *mod)
- {
- struct module_attribute *attr;
-- struct module_attribute *temp_attr;
-+ module_attribute_no_const *temp_attr;
- int error = 0;
- int i;
-
-@@ -1649,21 +1667,21 @@ static void set_section_ro_nx(void *base,
-
- static void unset_module_core_ro_nx(struct module *mod)
- {
-- set_page_attributes(mod->module_core + mod->core_text_size,
-- mod->module_core + mod->core_size,
-+ set_page_attributes(mod->module_core_rw,
-+ mod->module_core_rw + mod->core_size_rw,
- set_memory_x);
-- set_page_attributes(mod->module_core,
-- mod->module_core + mod->core_ro_size,
-+ set_page_attributes(mod->module_core_rx,
-+ mod->module_core_rx + mod->core_size_rx,
- set_memory_rw);
- }
-
- static void unset_module_init_ro_nx(struct module *mod)
- {
-- set_page_attributes(mod->module_init + mod->init_text_size,
-- mod->module_init + mod->init_size,
-+ set_page_attributes(mod->module_init_rw,
-+ mod->module_init_rw + mod->init_size_rw,
- set_memory_x);
-- set_page_attributes(mod->module_init,
-- mod->module_init + mod->init_ro_size,
-+ set_page_attributes(mod->module_init_rx,
-+ mod->module_init_rx + mod->init_size_rx,
- set_memory_rw);
- }
-
-@@ -1674,14 +1692,14 @@ void set_all_modules_text_rw(void)
-
- mutex_lock(&module_mutex);
- list_for_each_entry_rcu(mod, &modules, list) {
-- if ((mod->module_core) && (mod->core_text_size)) {
-- set_page_attributes(mod->module_core,
-- mod->module_core + mod->core_text_size,
-+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
-+ set_page_attributes(mod->module_core_rx,
-+ mod->module_core_rx + mod->core_size_rx,
- set_memory_rw);
- }
-- if ((mod->module_init) && (mod->init_text_size)) {
-- set_page_attributes(mod->module_init,
-- mod->module_init + mod->init_text_size,
-+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
-+ set_page_attributes(mod->module_init_rx,
-+ mod->module_init_rx + mod->init_size_rx,
- set_memory_rw);
- }
- }
-@@ -1695,14 +1713,14 @@ void set_all_modules_text_ro(void)
-
- mutex_lock(&module_mutex);
- list_for_each_entry_rcu(mod, &modules, list) {
-- if ((mod->module_core) && (mod->core_text_size)) {
-- set_page_attributes(mod->module_core,
-- mod->module_core + mod->core_text_size,
-+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
-+ set_page_attributes(mod->module_core_rx,
-+ mod->module_core_rx + mod->core_size_rx,
- set_memory_ro);
- }
-- if ((mod->module_init) && (mod->init_text_size)) {
-- set_page_attributes(mod->module_init,
-- mod->module_init + mod->init_text_size,
-+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
-+ set_page_attributes(mod->module_init_rx,
-+ mod->module_init_rx + mod->init_size_rx,
- set_memory_ro);
- }
- }
-@@ -1748,16 +1766,19 @@ static void free_module(struct module *mod)
-
- /* This may be NULL, but that's OK */
- unset_module_init_ro_nx(mod);
-- module_free(mod, mod->module_init);
-+ module_free(mod, mod->module_init_rw);
-+ module_free_exec(mod, mod->module_init_rx);
- kfree(mod->args);
- percpu_modfree(mod);
-
- /* Free lock-classes: */
-- lockdep_free_key_range(mod->module_core, mod->core_size);
-+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
-+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
-
- /* Finally, free the core (containing the module structure) */
- unset_module_core_ro_nx(mod);
-- module_free(mod, mod->module_core);
-+ module_free_exec(mod, mod->module_core_rx);
-+ module_free(mod, mod->module_core_rw);
-
- #ifdef CONFIG_MPU
- update_protections(current->mm);
-@@ -1826,10 +1847,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
- unsigned int i;
- int ret = 0;
- const struct kernel_symbol *ksym;
-+#ifdef CONFIG_GRKERNSEC_MODHARDEN
-+ int is_fs_load = 0;
-+ int register_filesystem_found = 0;
-+ char *p;
-+
-+ p = strstr(mod->args, "grsec_modharden_fs");
-+ if (p) {
-+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
-+ /* copy \0 as well */
-+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
-+ is_fs_load = 1;
-+ }
-+#endif
-
- for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
- const char *name = info->strtab + sym[i].st_name;
-
-+#ifdef CONFIG_GRKERNSEC_MODHARDEN
-+ /* it's a real shame this will never get ripped and copied
-+ upstream! ;(
-+ */
-+ if (is_fs_load && !strcmp(name, "register_filesystem"))
-+ register_filesystem_found = 1;
-+#endif
-+
- switch (sym[i].st_shndx) {
- case SHN_COMMON:
- /* We compiled with -fno-common. These are not
-@@ -1850,7 +1892,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
- ksym = resolve_symbol_wait(mod, info, name);
- /* Ok if resolved. */
- if (ksym && !IS_ERR(ksym)) {
-+ pax_open_kernel();
- sym[i].st_value = ksym->value;
-+ pax_close_kernel();
- break;
- }
-
-@@ -1869,11 +1913,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
- secbase = (unsigned long)mod_percpu(mod);
- else
- secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
-+ pax_open_kernel();
- sym[i].st_value += secbase;
-+ pax_close_kernel();
- break;
- }
- }
-
-+#ifdef CONFIG_GRKERNSEC_MODHARDEN
-+ if (is_fs_load && !register_filesystem_found) {
-+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
-+ ret = -EPERM;
-+ }
-+#endif
-+
- return ret;
- }
-
-@@ -1977,22 +2030,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
- || s->sh_entsize != ~0UL
- || strstarts(sname, ".init"))
- continue;
-- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
-+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
-+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
-+ else
-+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
- DEBUGP("\t%s\n", name);
- }
-- switch (m) {
-- case 0: /* executable */
-- mod->core_size = debug_align(mod->core_size);
-- mod->core_text_size = mod->core_size;
-- break;
-- case 1: /* RO: text and ro-data */
-- mod->core_size = debug_align(mod->core_size);
-- mod->core_ro_size = mod->core_size;
-- break;
-- case 3: /* whole core */
-- mod->core_size = debug_align(mod->core_size);
-- break;
-- }
- }
-
- DEBUGP("Init section allocation order:\n");
-@@ -2006,23 +2049,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
- || s->sh_entsize != ~0UL
- || !strstarts(sname, ".init"))
- continue;
-- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
-- | INIT_OFFSET_MASK);
-+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
-+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
-+ else
-+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
-+ s->sh_entsize |= INIT_OFFSET_MASK;
- DEBUGP("\t%s\n", sname);
- }
-- switch (m) {
-- case 0: /* executable */
-- mod->init_size = debug_align(mod->init_size);
-- mod->init_text_size = mod->init_size;
-- break;
-- case 1: /* RO: text and ro-data */
-- mod->init_size = debug_align(mod->init_size);
-- mod->init_ro_size = mod->init_size;
-- break;
-- case 3: /* whole init */
-- mod->init_size = debug_align(mod->init_size);
-- break;
-- }
- }
- }
-
-@@ -2187,7 +2220,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
-
- /* Put symbol section at end of init part of module. */
- symsect->sh_flags |= SHF_ALLOC;
-- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
-+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
- info->index.sym) | INIT_OFFSET_MASK;
- DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
-
-@@ -2206,19 +2239,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
- }
-
- /* Append room for core symbols at end of core part. */
-- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
-- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
-+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
-+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
-
- /* Put string table section at end of init part of module. */
- strsect->sh_flags |= SHF_ALLOC;
-- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
-+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
- info->index.str) | INIT_OFFSET_MASK;
- DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
-
- /* Append room for core symbols' strings at end of core part. */
-- info->stroffs = mod->core_size;
-+ info->stroffs = mod->core_size_rx;
- __set_bit(0, info->strmap);
-- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
-+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
- }
-
- static void add_kallsyms(struct module *mod, const struct load_info *info)
-@@ -2234,11 +2267,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
- /* Make sure we get permanent strtab: don't use info->strtab. */
- mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
-
-+ pax_open_kernel();
-+
- /* Set types up while we still have access to sections. */
- for (i = 0; i < mod->num_symtab; i++)
- mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
-
-- mod->core_symtab = dst = mod->module_core + info->symoffs;
-+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
- src = mod->symtab;
- for (ndst = i = 0; i < mod->num_symtab; i++) {
- if (i == 0 ||
-@@ -2251,10 +2286,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
- }
- mod->core_num_syms = ndst;
-
-- mod->core_strtab = s = mod->module_core + info->stroffs;
-+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
- for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
- if (test_bit(i, info->strmap))
- *++s = mod->strtab[i];
-+
-+ pax_close_kernel();
- }
- #else
- static inline void layout_symtab(struct module *mod, struct load_info *info)
-@@ -2288,17 +2325,33 @@ void * __weak module_alloc(unsigned long size)
- return size == 0 ? NULL : vmalloc_exec(size);
- }
-
--static void *module_alloc_update_bounds(unsigned long size)
-+static void *module_alloc_update_bounds_rw(unsigned long size)
- {
- void *ret = module_alloc(size);
-
- if (ret) {
- mutex_lock(&module_mutex);
- /* Update module bounds. */
-- if ((unsigned long)ret < module_addr_min)
-- module_addr_min = (unsigned long)ret;
-- if ((unsigned long)ret + size > module_addr_max)
-- module_addr_max = (unsigned long)ret + size;
-+ if ((unsigned long)ret < module_addr_min_rw)
-+ module_addr_min_rw = (unsigned long)ret;
-+ if ((unsigned long)ret + size > module_addr_max_rw)
-+ module_addr_max_rw = (unsigned long)ret + size;
-+ mutex_unlock(&module_mutex);
-+ }
-+ return ret;
-+}
-+
-+static void *module_alloc_update_bounds_rx(unsigned long size)
-+{
-+ void *ret = module_alloc_exec(size);
-+
-+ if (ret) {
-+ mutex_lock(&module_mutex);
-+ /* Update module bounds. */
-+ if ((unsigned long)ret < module_addr_min_rx)
-+ module_addr_min_rx = (unsigned long)ret;
-+ if ((unsigned long)ret + size > module_addr_max_rx)
-+ module_addr_max_rx = (unsigned long)ret + size;
- mutex_unlock(&module_mutex);
- }
- return ret;
-@@ -2458,8 +2511,15 @@ static struct module *setup_load_info(struct load_info *info)
- mod = (void *)info->sechdrs[info->index.mod].sh_addr;
-
- if (info->index.sym == 0) {
-- printk(KERN_WARNING "%s: module has no symbols (stripped?)\n",
-- mod->name);
-+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
-+ /*
-+		 * avoid potentially printing gibberish on attempted load
-+ * of a module randomized with a different seed
-+ */
-+ pr_warn("module has no symbols (stripped?)\n");
-+#else
-+ pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
-+#endif
- return ERR_PTR(-ENOEXEC);
- }
-
-@@ -2475,8 +2535,14 @@ static struct module *setup_load_info(struct load_info *info)
- static int check_modinfo(struct module *mod, struct load_info *info)
- {
- const char *modmagic = get_modinfo(info, "vermagic");
-+ const char *license = get_modinfo(info, "license");
- int err;
-
-+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ if (!license || !license_is_gpl_compatible(license))
-+ return -ENOEXEC;
-+#endif
-+
- /* This is allowed: modprobe --force will invalidate it. */
- if (!modmagic) {
- err = try_to_force_load(mod, "bad vermagic");
-@@ -2499,7 +2565,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
- }
-
- /* Set up license info based on the info section */
-- set_license(mod, get_modinfo(info, "license"));
-+ set_license(mod, license);
-
- return 0;
- }
-@@ -2593,7 +2659,7 @@ static int move_module(struct module *mod, struct load_info *info)
- void *ptr;
-
- /* Do the allocs. */
-- ptr = module_alloc_update_bounds(mod->core_size);
-+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
- /*
- * The pointer to this block is stored in the module structure
- * which is inside the block. Just mark it as not being a
-@@ -2603,10 +2669,10 @@ static int move_module(struct module *mod, struct load_info *info)
- if (!ptr)
- return -ENOMEM;
-
-- memset(ptr, 0, mod->core_size);
-- mod->module_core = ptr;
-+ memset(ptr, 0, mod->core_size_rw);
-+ mod->module_core_rw = ptr;
-
-- ptr = module_alloc_update_bounds(mod->init_size);
-+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
- /*
- * The pointer to this block is stored in the module structure
- * which is inside the block. This block doesn't need to be
-@@ -2614,12 +2680,39 @@ static int move_module(struct module *mod, struct load_info *info)
- * after the module is initialized.
- */
- kmemleak_ignore(ptr);
-- if (!ptr && mod->init_size) {
-- module_free(mod, mod->module_core);
-+ if (!ptr && mod->init_size_rw) {
-+ module_free(mod, mod->module_core_rw);
- return -ENOMEM;
- }
-- memset(ptr, 0, mod->init_size);
-- mod->module_init = ptr;
-+ memset(ptr, 0, mod->init_size_rw);
-+ mod->module_init_rw = ptr;
-+
-+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
-+ kmemleak_not_leak(ptr);
-+ if (!ptr) {
-+ module_free(mod, mod->module_init_rw);
-+ module_free(mod, mod->module_core_rw);
-+ return -ENOMEM;
-+ }
-+
-+ pax_open_kernel();
-+ memset(ptr, 0, mod->core_size_rx);
-+ pax_close_kernel();
-+ mod->module_core_rx = ptr;
-+
-+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
-+ kmemleak_ignore(ptr);
-+ if (!ptr && mod->init_size_rx) {
-+ module_free_exec(mod, mod->module_core_rx);
-+ module_free(mod, mod->module_init_rw);
-+ module_free(mod, mod->module_core_rw);
-+ return -ENOMEM;
-+ }
-+
-+ pax_open_kernel();
-+ memset(ptr, 0, mod->init_size_rx);
-+ pax_close_kernel();
-+ mod->module_init_rx = ptr;
-
- /* Transfer each section which specifies SHF_ALLOC */
- DEBUGP("final section addresses:\n");
-@@ -2630,16 +2723,45 @@ static int move_module(struct module *mod, struct load_info *info)
- if (!(shdr->sh_flags & SHF_ALLOC))
- continue;
-
-- if (shdr->sh_entsize & INIT_OFFSET_MASK)
-- dest = mod->module_init
-- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
-- else
-- dest = mod->module_core + shdr->sh_entsize;
-+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
-+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
-+ dest = mod->module_init_rw
-+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
-+ else
-+ dest = mod->module_init_rx
-+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
-+ } else {
-+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
-+ dest = mod->module_core_rw + shdr->sh_entsize;
-+ else
-+ dest = mod->module_core_rx + shdr->sh_entsize;
-+ }
-+
-+ if (shdr->sh_type != SHT_NOBITS) {
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+#ifdef CONFIG_X86_64
-+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
-+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
-+#endif
-+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
-+ pax_open_kernel();
-+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
-+ pax_close_kernel();
-+ } else
-+#endif
-
-- if (shdr->sh_type != SHT_NOBITS)
- memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
-+ }
- /* Update sh_addr to point to copy in image. */
-- shdr->sh_addr = (unsigned long)dest;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ if (shdr->sh_flags & SHF_EXECINSTR)
-+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
-+ else
-+#endif
-+
-+ shdr->sh_addr = (unsigned long)dest;
- DEBUGP("\t0x%lx %s\n",
- shdr->sh_addr, info->secstrings + shdr->sh_name);
- }
-@@ -2694,12 +2816,12 @@ static void flush_module_icache(const struct module *mod)
- * Do it before processing of module parameters, so the module
- * can provide parameter accessor functions of its own.
- */
-- if (mod->module_init)
-- flush_icache_range((unsigned long)mod->module_init,
-- (unsigned long)mod->module_init
-- + mod->init_size);
-- flush_icache_range((unsigned long)mod->module_core,
-- (unsigned long)mod->module_core + mod->core_size);
-+ if (mod->module_init_rx)
-+ flush_icache_range((unsigned long)mod->module_init_rx,
-+ (unsigned long)mod->module_init_rx
-+ + mod->init_size_rx);
-+ flush_icache_range((unsigned long)mod->module_core_rx,
-+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
-
- set_fs(old_fs);
- }
-@@ -2779,8 +2901,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
- {
- kfree(info->strmap);
- percpu_modfree(mod);
-- module_free(mod, mod->module_init);
-- module_free(mod, mod->module_core);
-+ module_free_exec(mod, mod->module_init_rx);
-+ module_free_exec(mod, mod->module_core_rx);
-+ module_free(mod, mod->module_init_rw);
-+ module_free(mod, mod->module_core_rw);
- }
-
- int __weak module_finalize(const Elf_Ehdr *hdr,
-@@ -2844,9 +2968,38 @@ static struct module *load_module(void __user *umod,
- if (err)
- goto free_unload;
-
-+ /* Now copy in args */
-+ mod->args = strndup_user(uargs, ~0UL >> 1);
-+ if (IS_ERR(mod->args)) {
-+ err = PTR_ERR(mod->args);
-+ goto free_unload;
-+ }
-+
- /* Set up MODINFO_ATTR fields */
- setup_modinfo(mod, &info);
-
-+#ifdef CONFIG_GRKERNSEC_MODHARDEN
-+ {
-+ char *p, *p2;
-+
-+ if (strstr(mod->args, "grsec_modharden_netdev")) {
-+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
-+ err = -EPERM;
-+ goto free_modinfo;
-+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
-+ p += sizeof("grsec_modharden_normal") - 1;
-+ p2 = strstr(p, "_");
-+ if (p2) {
-+ *p2 = '\0';
-+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
-+ *p2 = '_';
-+ }
-+ err = -EPERM;
-+ goto free_modinfo;
-+ }
-+ }
-+#endif
-+
- /* Fix up syms, so that st_value is a pointer to location. */
- err = simplify_symbols(mod, &info);
- if (err < 0)
-@@ -2862,13 +3015,6 @@ static struct module *load_module(void __user *umod,
-
- flush_module_icache(mod);
-
-- /* Now copy in args */
-- mod->args = strndup_user(uargs, ~0UL >> 1);
-- if (IS_ERR(mod->args)) {
-- err = PTR_ERR(mod->args);
-- goto free_arch_cleanup;
-- }
--
- /* Mark state as coming so strong_try_module_get() ignores us. */
- mod->state = MODULE_STATE_COMING;
-
-@@ -2929,11 +3075,10 @@ static struct module *load_module(void __user *umod,
- unlock:
- mutex_unlock(&module_mutex);
- synchronize_sched();
-- kfree(mod->args);
-- free_arch_cleanup:
- module_arch_cleanup(mod);
- free_modinfo:
- free_modinfo(mod);
-+ kfree(mod->args);
- free_unload:
- module_unload_free(mod);
- free_module:
-@@ -2974,16 +3119,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
- MODULE_STATE_COMING, mod);
-
- /* Set RO and NX regions for core */
-- set_section_ro_nx(mod->module_core,
-- mod->core_text_size,
-- mod->core_ro_size,
-- mod->core_size);
-+ set_section_ro_nx(mod->module_core_rx,
-+ mod->core_size_rx,
-+ mod->core_size_rx,
-+ mod->core_size_rx);
-
- /* Set RO and NX regions for init */
-- set_section_ro_nx(mod->module_init,
-- mod->init_text_size,
-- mod->init_ro_size,
-- mod->init_size);
-+ set_section_ro_nx(mod->module_init_rx,
-+ mod->init_size_rx,
-+ mod->init_size_rx,
-+ mod->init_size_rx);
-
- do_mod_ctors(mod);
- /* Start the module */
-@@ -3029,11 +3174,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
- mod->strtab = mod->core_strtab;
- #endif
- unset_module_init_ro_nx(mod);
-- module_free(mod, mod->module_init);
-- mod->module_init = NULL;
-- mod->init_size = 0;
-- mod->init_ro_size = 0;
-- mod->init_text_size = 0;
-+ module_free(mod, mod->module_init_rw);
-+ module_free_exec(mod, mod->module_init_rx);
-+ mod->module_init_rw = NULL;
-+ mod->module_init_rx = NULL;
-+ mod->init_size_rw = 0;
-+ mod->init_size_rx = 0;
- mutex_unlock(&module_mutex);
-
- return 0;
-@@ -3064,10 +3210,16 @@ static const char *get_ksymbol(struct module *mod,
- unsigned long nextval;
-
- /* At worst, next value is at end of module */
-- if (within_module_init(addr, mod))
-- nextval = (unsigned long)mod->module_init+mod->init_text_size;
-+ if (within_module_init_rx(addr, mod))
-+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
-+ else if (within_module_init_rw(addr, mod))
-+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
-+ else if (within_module_core_rx(addr, mod))
-+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
-+ else if (within_module_core_rw(addr, mod))
-+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
- else
-- nextval = (unsigned long)mod->module_core+mod->core_text_size;
-+ return NULL;
-
- /* Scan for closest preceding symbol, and next symbol. (ELF
- starts real symbols at 1). */
-@@ -3315,7 +3467,7 @@ static int m_show(struct seq_file *m, void *p)
- char buf[8];
-
- seq_printf(m, "%s %u",
-- mod->name, mod->init_size + mod->core_size);
-+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
- print_unload_info(m, mod);
-
- /* Informative for users. */
-@@ -3324,7 +3476,7 @@ static int m_show(struct seq_file *m, void *p)
- mod->state == MODULE_STATE_COMING ? "Loading":
- "Live");
- /* Used by oprofile and other similar tools. */
-- seq_printf(m, " 0x%pK", mod->module_core);
-+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
-
- /* Taints info */
- if (mod->taints)
-@@ -3360,7 +3512,17 @@ static const struct file_operations proc_modules_operations = {
-
- static int __init proc_modules_init(void)
- {
-+#ifndef CONFIG_GRKERNSEC_HIDESYM
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
-+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
-+#else
- proc_create("modules", 0, NULL, &proc_modules_operations);
-+#endif
-+#else
-+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
-+#endif
- return 0;
- }
- module_init(proc_modules_init);
-@@ -3419,12 +3581,12 @@ struct module *__module_address(unsigned long addr)
- {
- struct module *mod;
-
-- if (addr < module_addr_min || addr > module_addr_max)
-+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
-+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
- return NULL;
-
- list_for_each_entry_rcu(mod, &modules, list)
-- if (within_module_core(addr, mod)
-- || within_module_init(addr, mod))
-+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
- return mod;
- return NULL;
- }
-@@ -3458,11 +3620,20 @@ bool is_module_text_address(unsigned long addr)
- */
- struct module *__module_text_address(unsigned long addr)
- {
-- struct module *mod = __module_address(addr);
-+ struct module *mod;
-+
-+#ifdef CONFIG_X86_32
-+ addr = ktla_ktva(addr);
-+#endif
-+
-+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
-+ return NULL;
-+
-+ mod = __module_address(addr);
-+
- if (mod) {
- /* Make sure it's within the text section. */
-- if (!within(addr, mod->module_init, mod->init_text_size)
-- && !within(addr, mod->module_core, mod->core_text_size))
-+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
- mod = NULL;
- }
- return mod;
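
The rw/rx split above comes down to keeping two independent address-bound pairs, one per allocator, and updating them under the module mutex. A minimal userspace sketch of that bookkeeping, with malloc() standing in for module_alloc()/module_alloc_exec() (names and values here are illustrative, not part of the patch):

#include <stdio.h>
#include <stdlib.h>

static unsigned long min_rw = ~0UL, max_rw;
static unsigned long min_rx = ~0UL, max_rx;

static void *alloc_update_bounds(size_t size, unsigned long *min, unsigned long *max)
{
	void *p = malloc(size);          /* stands in for module_alloc()/module_alloc_exec() */

	if (p) {
		unsigned long addr = (unsigned long)p;

		if (addr < *min)
			*min = addr;
		if (addr + size > *max)
			*max = addr + size;
	}
	return p;
}

int main(void)
{
	void *data = alloc_update_bounds(128, &min_rw, &max_rw);
	void *text = alloc_update_bounds(256, &min_rx, &max_rx);

	printf("rw: [%#lx, %#lx)  rx: [%#lx, %#lx)\n", min_rw, max_rw, min_rx, max_rx);
	free(data);
	free(text);
	return 0;
}
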
-diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
-index 7e3443f..b2a1e6b 100644
---- a/kernel/mutex-debug.c
-+++ b/kernel/mutex-debug.c
-@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
- }
-
- void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-- struct thread_info *ti)
-+ struct task_struct *task)
- {
- SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
-
- /* Mark the current thread as blocked on the lock: */
-- ti->task->blocked_on = waiter;
-+ task->blocked_on = waiter;
- }
-
- void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-- struct thread_info *ti)
-+ struct task_struct *task)
- {
- DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
-- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
-- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
-- ti->task->blocked_on = NULL;
-+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
-+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
-+ task->blocked_on = NULL;
-
- list_del_init(&waiter->list);
- waiter->task = NULL;
-diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
-index 0799fd3..d06ae3b 100644
---- a/kernel/mutex-debug.h
-+++ b/kernel/mutex-debug.h
-@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
- extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
- extern void debug_mutex_add_waiter(struct mutex *lock,
- struct mutex_waiter *waiter,
-- struct thread_info *ti);
-+ struct task_struct *task);
- extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-- struct thread_info *ti);
-+ struct task_struct *task);
- extern void debug_mutex_unlock(struct mutex *lock);
- extern void debug_mutex_init(struct mutex *lock, const char *name,
- struct lock_class_key *key);
-diff --git a/kernel/mutex.c b/kernel/mutex.c
-index 89096dd..f91ebc5 100644
---- a/kernel/mutex.c
-+++ b/kernel/mutex.c
-@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
- spin_lock_mutex(&lock->wait_lock, flags);
-
- debug_mutex_lock_common(lock, &waiter);
-- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
-+ debug_mutex_add_waiter(lock, &waiter, task);
-
- /* add waiting tasks to the end of the waitqueue (FIFO): */
- list_add_tail(&waiter.list, &lock->wait_list);
-@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
- * TASK_UNINTERRUPTIBLE case.)
- */
- if (unlikely(signal_pending_state(state, task))) {
-- mutex_remove_waiter(lock, &waiter,
-- task_thread_info(task));
-+ mutex_remove_waiter(lock, &waiter, task);
- mutex_release(&lock->dep_map, 1, ip);
- spin_unlock_mutex(&lock->wait_lock, flags);
-
-@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
- done:
- lock_acquired(&lock->dep_map, ip);
- /* got the lock - rejoice! */
-- mutex_remove_waiter(lock, &waiter, current_thread_info());
-+ mutex_remove_waiter(lock, &waiter, task);
- mutex_set_owner(lock);
-
- /* set it to 0 if there are no waiters left: */
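
In effect, the mutex-debug change above records the blocking task directly on the waiter and asserts consistency on removal, without going through thread_info first. A stripped-down, illustrative model (the struct names are stand-ins, not kernel types):

#include <assert.h>
#include <stddef.h>

struct task { void *blocked_on; };
struct waiter { struct task *task; };

static void add_waiter(struct waiter *w, struct task *t)
{
	w->task = t;
	t->blocked_on = w;           /* mark the task as blocked on this waiter */
}

static void remove_waiter(struct waiter *w, struct task *t)
{
	assert(w->task == t);        /* mirrors the DEBUG_LOCKS_WARN_ON() checks */
	assert(t->blocked_on == w);
	t->blocked_on = NULL;
	w->task = NULL;
}

int main(void)
{
	struct task t = { NULL };
	struct waiter w = { NULL };

	add_waiter(&w, &t);
	remove_waiter(&w, &t);
	return 0;
}
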
-diff --git a/kernel/notifier.c b/kernel/notifier.c
-index 2d5cc4c..d9ea600 100644
---- a/kernel/notifier.c
-+++ b/kernel/notifier.c
-@@ -5,6 +5,7 @@
- #include <linux/rcupdate.h>
- #include <linux/vmalloc.h>
- #include <linux/reboot.h>
-+#include <linux/mm.h>
-
- /*
- * Notifier list for kernel code which wants to be called
-@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
- while ((*nl) != NULL) {
- if (n->priority > (*nl)->priority)
- break;
-- nl = &((*nl)->next);
-+ nl = (struct notifier_block **)&((*nl)->next);
- }
-- n->next = *nl;
-+ pax_open_kernel();
-+ *(const void **)&n->next = *nl;
- rcu_assign_pointer(*nl, n);
-+ pax_close_kernel();
- return 0;
- }
-
-@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
- return 0;
- if (n->priority > (*nl)->priority)
- break;
-- nl = &((*nl)->next);
-+ nl = (struct notifier_block **)&((*nl)->next);
- }
-- n->next = *nl;
-+ pax_open_kernel();
-+ *(const void **)&n->next = *nl;
- rcu_assign_pointer(*nl, n);
-+ pax_close_kernel();
- return 0;
- }
-
-@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
- {
- while ((*nl) != NULL) {
- if ((*nl) == n) {
-+ pax_open_kernel();
- rcu_assign_pointer(*nl, n->next);
-+ pax_close_kernel();
- return 0;
- }
-- nl = &((*nl)->next);
-+ nl = (struct notifier_block **)&((*nl)->next);
- }
- return -ENOENT;
- }
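
For context, notifier chains are singly linked lists kept sorted by descending priority; registration walks the chain until it finds an entry with lower priority and splices the new block in front of it. A small userspace sketch of that insertion (the kernel additionally publishes the pointer with rcu_assign_pointer(), and the patch wraps the writes in pax_open_kernel()/pax_close_kernel()):

#include <stdio.h>

struct nb {
	int priority;
	struct nb *next;
};

static void chain_register(struct nb **nl, struct nb *n)
{
	while (*nl != NULL) {
		if (n->priority > (*nl)->priority)
			break;
		nl = &(*nl)->next;
	}
	n->next = *nl;
	*nl = n;                     /* the kernel uses rcu_assign_pointer() here */
}

int main(void)
{
	struct nb a = { .priority = 0 }, b = { .priority = 10 };
	struct nb *head = NULL;

	chain_register(&head, &a);
	chain_register(&head, &b);   /* higher priority, ends up first */
	printf("head priority: %d\n", head->priority);
	return 0;
}
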
-diff --git a/kernel/padata.c b/kernel/padata.c
-index b452599..5d68f4e 100644
---- a/kernel/padata.c
-+++ b/kernel/padata.c
-@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
- padata->pd = pd;
- padata->cb_cpu = cb_cpu;
-
-- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
-- atomic_set(&pd->seq_nr, -1);
-+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
-+ atomic_set_unchecked(&pd->seq_nr, -1);
-
-- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
-+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
-
- target_cpu = padata_cpu_hash(padata);
- queue = per_cpu_ptr(pd->pqueue, target_cpu);
-@@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
- padata_init_pqueues(pd);
- padata_init_squeues(pd);
- setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
-- atomic_set(&pd->seq_nr, -1);
-+ atomic_set_unchecked(&pd->seq_nr, -1);
- atomic_set(&pd->reorder_objects, 0);
- atomic_set(&pd->refcnt, 0);
- pd->pinst = pinst;
-diff --git a/kernel/panic.c b/kernel/panic.c
-index 3458469..3ed0694 100644
---- a/kernel/panic.c
-+++ b/kernel/panic.c
-@@ -65,6 +65,14 @@ NORET_TYPE void panic(const char * fmt, ...)
- int state = 0;
-
- /*
-+ * Disable local interrupts. This will prevent panic_smp_self_stop
-+ * from deadlocking the first cpu that invokes the panic, since
-+ * there is nothing to prevent an interrupt handler (that runs
-+ * after the panic_lock is acquired) from invoking panic again.
-+ */
-+ local_irq_disable();
-+
-+ /*
- * It's possible to come here directly from a panic-assertion and
- * not have preempt disabled. Some functions called from here want
- * preempt to be disabled. No point enabling it later though...
-@@ -78,7 +86,11 @@ NORET_TYPE void panic(const char * fmt, ...)
- va_end(args);
- printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
- #ifdef CONFIG_DEBUG_BUGVERBOSE
-- dump_stack();
-+ /*
-+ * Avoid nested stack-dumping if a panic occurs during oops processing
-+ */
-+ if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
-+ dump_stack();
- #endif
-
- /*
-@@ -382,7 +394,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
- const char *board;
-
- printk(KERN_WARNING "------------[ cut here ]------------\n");
-- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
-+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
- board = dmi_get_system_info(DMI_PRODUCT_NAME);
- if (board)
- printk(KERN_WARNING "Hardware name: %s\n", board);
-@@ -437,7 +449,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
- */
- void __stack_chk_fail(void)
- {
-- panic("stack-protector: Kernel stack is corrupted in: %p\n",
-+ dump_stack();
-+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
- __builtin_return_address(0));
- }
- EXPORT_SYMBOL(__stack_chk_fail);
-diff --git a/kernel/pid.c b/kernel/pid.c
-index fa5f722..0c93e57 100644
---- a/kernel/pid.c
-+++ b/kernel/pid.c
-@@ -33,6 +33,7 @@
- #include <linux/rculist.h>
- #include <linux/bootmem.h>
- #include <linux/hash.h>
-+#include <linux/security.h>
- #include <linux/pid_namespace.h>
- #include <linux/init_task.h>
- #include <linux/syscalls.h>
-@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
-
- int pid_max = PID_MAX_DEFAULT;
-
--#define RESERVED_PIDS 300
-+#define RESERVED_PIDS 500
-
- int pid_max_min = RESERVED_PIDS + 1;
- int pid_max_max = PID_MAX_LIMIT;
-@@ -418,10 +419,18 @@ EXPORT_SYMBOL(pid_task);
- */
- struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
- {
-+ struct task_struct *task;
-+
- rcu_lockdep_assert(rcu_read_lock_held(),
- "find_task_by_pid_ns() needs rcu_read_lock()"
- " protection");
-- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
-+
-+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
-+
-+ if (gr_pid_is_chrooted(task))
-+ return NULL;
-+
-+ return task;
- }
-
- struct task_struct *find_task_by_vpid(pid_t vnr)
-@@ -429,6 +438,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
- return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
- }
-
-+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
-+{
-+ rcu_lockdep_assert(rcu_read_lock_held(),
-+ "find_task_by_pid_ns() needs rcu_read_lock()"
-+ " protection");
-+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
-+}
-+
- struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
- {
- struct pid *pid;
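
The chroot filtering added above follows a simple pattern: resolve the id first, then apply a policy predicate and return NULL when it rejects the result. A hypothetical userspace sketch of that shape (the chrooted flag stands in for gr_pid_is_chrooted(); the data is made up):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct task { int pid; bool chrooted; };

static struct task tasks[] = { { 1, false }, { 100, true } };

static struct task *find_task(int pid)
{
	for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
		if (tasks[i].pid == pid)
			return &tasks[i];
	return NULL;
}

static struct task *find_task_filtered(int pid)
{
	struct task *t = find_task(pid);

	if (t && t->chrooted)        /* policy check layered on the raw lookup */
		return NULL;
	return t;
}

int main(void)
{
	printf("pid 1:   %p\n", (void *)find_task_filtered(1));
	printf("pid 100: %p\n", (void *)find_task_filtered(100)); /* hidden by the filter */
	return 0;
}
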
-diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
-index 962c291..31cf69d7 100644
---- a/kernel/posix-cpu-timers.c
-+++ b/kernel/posix-cpu-timers.c
-@@ -6,9 +6,11 @@
- #include <linux/posix-timers.h>
- #include <linux/errno.h>
- #include <linux/math64.h>
-+#include <linux/security.h>
- #include <asm/uaccess.h>
- #include <linux/kernel_stat.h>
- #include <trace/events/timer.h>
-+#include <linux/random.h>
-
- /*
- * Called after updating RLIMIT_CPU to run cpu timer and update
-@@ -511,6 +513,8 @@ static void cleanup_timers(struct list_head *head,
- */
- void posix_cpu_timers_exit(struct task_struct *tsk)
- {
-+ add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
-+ sizeof(unsigned long long));
- cleanup_timers(tsk->cpu_timers,
- tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
-
-@@ -1625,14 +1629,14 @@ struct k_clock clock_posix_cpu = {
-
- static __init int init_posix_cpu_timers(void)
- {
-- struct k_clock process = {
-+ static struct k_clock process = {
- .clock_getres = process_cpu_clock_getres,
- .clock_get = process_cpu_clock_get,
- .timer_create = process_cpu_timer_create,
- .nsleep = process_cpu_nsleep,
- .nsleep_restart = process_cpu_nsleep_restart,
- };
-- struct k_clock thread = {
-+ static struct k_clock thread = {
- .clock_getres = thread_cpu_clock_getres,
- .clock_get = thread_cpu_clock_get,
- .timer_create = thread_cpu_timer_create,
-diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
-index 02824a5..92dc581 100644
---- a/kernel/posix-timers.c
-+++ b/kernel/posix-timers.c
-@@ -43,6 +43,7 @@
- #include <linux/idr.h>
- #include <linux/posix-clock.h>
- #include <linux/posix-timers.h>
-+#include <linux/grsecurity.h>
- #include <linux/syscalls.h>
- #include <linux/wait.h>
- #include <linux/workqueue.h>
-@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
- * which we beg off on and pass to do_sys_settimeofday().
- */
-
--static struct k_clock posix_clocks[MAX_CLOCKS];
-+static struct k_clock *posix_clocks[MAX_CLOCKS];
-
- /*
- * These ones are defined below.
-@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
- */
- static __init int init_posix_timers(void)
- {
-- struct k_clock clock_realtime = {
-+ static struct k_clock clock_realtime = {
- .clock_getres = hrtimer_get_res,
- .clock_get = posix_clock_realtime_get,
- .clock_set = posix_clock_realtime_set,
-@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
- .timer_get = common_timer_get,
- .timer_del = common_timer_del,
- };
-- struct k_clock clock_monotonic = {
-+ static struct k_clock clock_monotonic = {
- .clock_getres = hrtimer_get_res,
- .clock_get = posix_ktime_get_ts,
- .nsleep = common_nsleep,
-@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
- .timer_get = common_timer_get,
- .timer_del = common_timer_del,
- };
-- struct k_clock clock_monotonic_raw = {
-+ static struct k_clock clock_monotonic_raw = {
- .clock_getres = hrtimer_get_res,
- .clock_get = posix_get_monotonic_raw,
- };
-- struct k_clock clock_realtime_coarse = {
-+ static struct k_clock clock_realtime_coarse = {
- .clock_getres = posix_get_coarse_res,
- .clock_get = posix_get_realtime_coarse,
- };
-- struct k_clock clock_monotonic_coarse = {
-+ static struct k_clock clock_monotonic_coarse = {
- .clock_getres = posix_get_coarse_res,
- .clock_get = posix_get_monotonic_coarse,
- };
-- struct k_clock clock_boottime = {
-+ static struct k_clock clock_boottime = {
- .clock_getres = hrtimer_get_res,
- .clock_get = posix_get_boottime,
- .nsleep = common_nsleep,
-@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
- return;
- }
-
-- posix_clocks[clock_id] = *new_clock;
-+ posix_clocks[clock_id] = new_clock;
- }
- EXPORT_SYMBOL_GPL(posix_timers_register_clock);
-
-@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
- return (id & CLOCKFD_MASK) == CLOCKFD ?
- &clock_posix_dynamic : &clock_posix_cpu;
-
-- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
-+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
- return NULL;
-- return &posix_clocks[id];
-+ return posix_clocks[id];
- }
-
- static int common_timer_create(struct k_itimer *new_timer)
-@@ -539,7 +540,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
- struct k_clock *kc = clockid_to_kclock(which_clock);
- struct k_itimer *new_timer;
- int error, new_timer_id;
-- sigevent_t event;
-+ sigevent_t event = { };
- int it_id_set = IT_ID_NOT_SET;
-
- if (!kc)
-@@ -967,6 +968,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
- if (copy_from_user(&new_tp, tp, sizeof (*tp)))
- return -EFAULT;
-
-+ /* only the CLOCK_REALTIME clock can be set; all other clocks
-+ have their clock_set fptr set to a nosettime dummy function.
-+ CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
-+ call common_clock_set, which calls do_sys_settimeofday, which
-+ we hook
-+ */
-+
- return kc->clock_set(which_clock, &new_tp);
- }
-
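
The conversion above changes posix_clocks from an array of struct copies into an array of pointers at static (now effectively read-only) k_clock definitions, and hardens the lookup to reject empty slots. A rough userspace analogue of that registry (names and callbacks are illustrative only):

#include <stdio.h>
#include <stddef.h>

#define MAX_CLOCKS 16

struct k_ops { int (*getres)(void); };

static const struct k_ops *registry[MAX_CLOCKS];

static int dummy_getres(void) { return 0; }
static const struct k_ops clock_realtime = { .getres = dummy_getres };

static void register_clock(int id, const struct k_ops *ops)
{
	if (id >= 0 && id < MAX_CLOCKS)
		registry[id] = ops;          /* store the pointer, not a copy */
}

static const struct k_ops *clockid_to_ops(int id)
{
	if (id < 0 || id >= MAX_CLOCKS || !registry[id] || !registry[id]->getres)
		return NULL;
	return registry[id];
}

int main(void)
{
	register_clock(0, &clock_realtime);
	printf("clock 0: %s\n", clockid_to_ops(0) ? "valid" : "invalid");
	printf("clock 5: %s\n", clockid_to_ops(5) ? "valid" : "invalid");
	return 0;
}
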
-diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
-index deb5461..9fc0e9b 100644
---- a/kernel/power/Kconfig
-+++ b/kernel/power/Kconfig
-@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
- config HIBERNATION
- bool "Hibernation (aka 'suspend to disk')"
- depends on SWAP && ARCH_HIBERNATION_POSSIBLE
-+ depends on !GRKERNSEC_KMEM
-+ depends on !PAX_MEMORY_SANITIZE
- select HIBERNATE_CALLBACKS
- select LZO_COMPRESS
- select LZO_DECOMPRESS
-diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
-index d523593..68197a4 100644
---- a/kernel/power/poweroff.c
-+++ b/kernel/power/poweroff.c
-@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
- .enable_mask = SYSRQ_ENABLE_BOOT,
- };
-
--static int pm_sysrq_init(void)
-+static int __init pm_sysrq_init(void)
- {
- register_sysrq_key('o', &sysrq_poweroff_op);
- return 0;
-diff --git a/kernel/power/process.c b/kernel/power/process.c
-index 3d4b954..11af930 100644
---- a/kernel/power/process.c
-+++ b/kernel/power/process.c
-@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
- u64 elapsed_csecs64;
- unsigned int elapsed_csecs;
- bool wakeup = false;
-+ bool timedout = false;
-
- do_gettimeofday(&start);
-
-@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
-
- while (true) {
- todo = 0;
-+ if (time_after(jiffies, end_time))
-+ timedout = true;
- read_lock(&tasklist_lock);
- do_each_thread(g, p) {
- if (frozen(p) || !freezable(p))
-@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
- * try_to_stop() after schedule() in ptrace/signal
- * stop sees TIF_FREEZE.
- */
-- if (!task_is_stopped_or_traced(p) &&
-- !freezer_should_skip(p))
-+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
- todo++;
-+ if (timedout) {
-+ printk(KERN_ERR "Task refusing to freeze:\n");
-+ sched_show_task(p);
-+ }
-+ }
- } while_each_thread(g, p);
- read_unlock(&tasklist_lock);
-
-@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
- todo += wq_busy;
- }
-
-- if (!todo || time_after(jiffies, end_time))
-+ if (!todo || timedout)
- break;
-
- if (pm_wakeup_pending()) {
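
The freezer change above notes the deadline once per pass and, only after it has expired, names whatever is still blocking before the loop gives up. A compact userspace sketch of that shape (time() standing in for jiffies, a counter standing in for the task scan; purely illustrative):

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

int main(void)
{
	time_t deadline = time(NULL) - 1;    /* pretend the freeze window already expired */
	int todo = 2;                        /* tasks that still refuse to freeze */
	bool timedout = false;

	while (true) {
		if (time(NULL) > deadline)   /* time_after(jiffies, end_time) in the patch */
			timedout = true;

		if (todo && timedout)
			fprintf(stderr, "task refusing to freeze (todo=%d)\n", todo);

		if (!todo || timedout)       /* bail out on success or timeout */
			break;
		todo--;
	}
	return 0;
}
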
-diff --git a/kernel/printk.c b/kernel/printk.c
-index c073f43..ced569b 100644
---- a/kernel/printk.c
-+++ b/kernel/printk.c
-@@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
- if (from_file && type != SYSLOG_ACTION_OPEN)
- return 0;
-
-+#ifdef CONFIG_GRKERNSEC_DMESG
-+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
-+ return -EPERM;
-+#endif
-+
- if (syslog_action_restricted(type)) {
- if (capable(CAP_SYSLOG))
- return 0;
-diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
-new file mode 100644
-index 0000000..ba98f34
---- /dev/null
-+++ b/kernel/printk/printk.c
-@@ -0,0 +1,2912 @@
-+/*
-+ * linux/kernel/printk.c
-+ *
-+ * Copyright (C) 1991, 1992 Linus Torvalds
-+ *
-+ * Modified to make sys_syslog() more flexible: added commands to
-+ * return the last 4k of kernel messages, regardless of whether
-+ * they've been read or not. Added option to suppress kernel printk's
-+ * to the console. Added hook for sending the console messages
-+ * elsewhere, in preparation for a serial line console (someday).
-+ * Ted Ts'o, 2/11/93.
-+ * Modified for sysctl support, 1/8/97, Chris Horn.
-+ * Fixed SMP synchronization, 08/08/99, Manfred Spraul
-+ * manfred@colorfullife.com
-+ * Rewrote bits to get rid of console_lock
-+ * 01Mar01 Andrew Morton
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/tty.h>
-+#include <linux/tty_driver.h>
-+#include <linux/console.h>
-+#include <linux/init.h>
-+#include <linux/jiffies.h>
-+#include <linux/nmi.h>
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/interrupt.h> /* For in_interrupt() */
-+#include <linux/delay.h>
-+#include <linux/smp.h>
-+#include <linux/security.h>
-+#include <linux/bootmem.h>
-+#include <linux/memblock.h>
-+#include <linux/aio.h>
-+#include <linux/syscalls.h>
-+#include <linux/kexec.h>
-+#include <linux/kdb.h>
-+#include <linux/ratelimit.h>
-+#include <linux/kmsg_dump.h>
-+#include <linux/syslog.h>
-+#include <linux/cpu.h>
-+#include <linux/notifier.h>
-+#include <linux/rculist.h>
-+#include <linux/poll.h>
-+#include <linux/irq_work.h>
-+#include <linux/utsname.h>
-+
-+#include <asm/uaccess.h>
-+
-+#define CREATE_TRACE_POINTS
-+#include <trace/events/printk.h>
-+
-+#include "console_cmdline.h"
-+#include "braille.h"
-+
-+/* printk's without a loglevel use this.. */
-+#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
-+
-+/* We show everything that is MORE important than this.. */
-+#define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */
-+#define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */
-+
-+int console_printk[4] = {
-+ DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */
-+ DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */
-+ MINIMUM_CONSOLE_LOGLEVEL, /* minimum_console_loglevel */
-+ DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */
-+};
-+
-+/*
-+ * Low level drivers may need that to know if they can schedule in
-+ * their unblank() callback or not. So let's export it.
-+ */
-+int oops_in_progress;
-+EXPORT_SYMBOL(oops_in_progress);
-+
-+/*
-+ * console_sem protects the console_drivers list, and also
-+ * provides serialisation for access to the entire console
-+ * driver system.
-+ */
-+static DEFINE_SEMAPHORE(console_sem);
-+struct console *console_drivers;
-+EXPORT_SYMBOL_GPL(console_drivers);
-+
-+#ifdef CONFIG_LOCKDEP
-+static struct lockdep_map console_lock_dep_map = {
-+ .name = "console_lock"
-+};
-+#endif
-+
-+/*
-+ * This is used for debugging the mess that is the VT code by
-+ * keeping track of whether we have the console semaphore held. It's
-+ * definitely not the perfect debug tool (we don't know if _WE_
-+ * hold it and are racing), but it helps track down those weird code
-+ * paths in the console code where we end up in places I want
-+ * locked without the console semaphore held
-+ */
-+static int console_locked, console_suspended;
-+
-+/*
-+ * If exclusive_console is non-NULL then only this console is to be printed to.
-+ */
-+static struct console *exclusive_console;
-+
-+/*
-+ * Array of consoles built from command line options (console=)
-+ */
-+
-+#define MAX_CMDLINECONSOLES 8
-+
-+static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
-+
-+static int selected_console = -1;
-+static int preferred_console = -1;
-+int console_set_on_cmdline;
-+EXPORT_SYMBOL(console_set_on_cmdline);
-+
-+/* Flag: console code may call schedule() */
-+static int console_may_schedule;
-+
-+/*
-+ * The printk log buffer consists of a chain of concatenated variable
-+ * length records. Every record starts with a record header, containing
-+ * the overall length of the record.
-+ *
-+ * The heads to the first and last entry in the buffer, as well as the
-+ * sequence numbers of both of these entries, are maintained when messages
-+ * are stored.
-+ *
-+ * If the heads indicate available messages, the length in the header
-+ * gives the start of the next message. A length == 0 for the next message
-+ * indicates a wrap-around to the beginning of the buffer.
-+ *
-+ * Every record carries the monotonic timestamp in microseconds, as well as
-+ * the standard userspace syslog level and syslog facility. The usual
-+ * kernel messages use LOG_KERN; userspace-injected messages always carry
-+ * a matching syslog facility, by default LOG_USER. The origin of every
-+ * message can be reliably determined that way.
-+ *
-+ * The human readable log message directly follows the message header. The
-+ * length of the message text is stored in the header, the stored message
-+ * is not terminated.
-+ *
-+ * Optionally, a message can carry a dictionary of properties (key/value pairs),
-+ * to provide userspace with a machine-readable message context.
-+ *
-+ * Examples for well-defined, commonly used property names are:
-+ * DEVICE=b12:8 device identifier
-+ * b12:8 block dev_t
-+ * c127:3 char dev_t
-+ * n8 netdev ifindex
-+ * +sound:card0 subsystem:devname
-+ * SUBSYSTEM=pci driver-core subsystem name
-+ *
-+ * Valid characters in property names are [a-zA-Z0-9.-_]. The plain text value
-+ * follows directly after a '=' character. Every property is terminated by
-+ * a '\0' character. The last property is not terminated.
-+ *
-+ * Example of a message structure:
-+ * 0000 ff 8f 00 00 00 00 00 00 monotonic time in nsec
-+ * 0008 34 00 record is 52 bytes long
-+ * 000a 0b 00 text is 11 bytes long
-+ * 000c 1f 00 dictionary is 23 bytes long
-+ * 000e 03 00 LOG_KERN (facility) LOG_ERR (level)
-+ * 0010 69 74 27 73 20 61 20 6c "it's a l"
-+ * 69 6e 65 "ine"
-+ * 001b 44 45 56 49 43 "DEVIC"
-+ * 45 3d 62 38 3a 32 00 44 "E=b8:2\0D"
-+ * 52 49 56 45 52 3d 62 75 "RIVER=bu"
-+ * 67 "g"
-+ * 0032 00 00 00 padding to next message header
-+ *
-+ * The 'struct printk_log' buffer header must never be directly exported to
-+ * userspace, it is a kernel-private implementation detail that might
-+ * need to be changed in the future, when the requirements change.
-+ *
-+ * /dev/kmsg exports the structured data in the following line format:
-+ * "level,sequnum,timestamp;<message text>\n"
-+ *
-+ * The optional key/value pairs are attached as continuation lines starting
-+ * with a space character and terminated by a newline. All possible
-+ * non-printable characters are escaped in the "\xff" notation.
-+ *
-+ * Users of the export format should ignore possible additional values
-+ * separated by ',', and find the message after the ';' character.
-+ */
-+
-+enum log_flags {
-+ LOG_NOCONS = 1, /* already flushed, do not print to console */
-+ LOG_NEWLINE = 2, /* text ended with a newline */
-+ LOG_PREFIX = 4, /* text started with a prefix */
-+ LOG_CONT = 8, /* text is a fragment of a continuation line */
-+};
-+
-+struct printk_log {
-+ u64 ts_nsec; /* timestamp in nanoseconds */
-+ u16 len; /* length of entire record */
-+ u16 text_len; /* length of text buffer */
-+ u16 dict_len; /* length of dictionary buffer */
-+ u8 facility; /* syslog facility */
-+ u8 flags:5; /* internal record flags */
-+ u8 level:3; /* syslog level */
-+};
-+
-+/*
-+ * The logbuf_lock protects kmsg buffer, indices, counters. It is also
-+ * used in interesting ways to provide interlocking in console_unlock();
-+ */
-+static DEFINE_RAW_SPINLOCK(logbuf_lock);
-+
-+#ifdef CONFIG_PRINTK
-+DECLARE_WAIT_QUEUE_HEAD(log_wait);
-+/* the next printk record to read by syslog(READ) or /proc/kmsg */
-+static u64 syslog_seq;
-+static u32 syslog_idx;
-+static enum log_flags syslog_prev;
-+static size_t syslog_partial;
-+
-+/* index and sequence number of the first record stored in the buffer */
-+static u64 log_first_seq;
-+static u32 log_first_idx;
-+
-+/* index and sequence number of the next record to store in the buffer */
-+static u64 log_next_seq;
-+static u32 log_next_idx;
-+
-+/* the next printk record to write to the console */
-+static u64 console_seq;
-+static u32 console_idx;
-+static enum log_flags console_prev;
-+
-+/* the next printk record to read after the last 'clear' command */
-+static u64 clear_seq;
-+static u32 clear_idx;
-+
-+#define PREFIX_MAX 32
-+#define LOG_LINE_MAX 1024 - PREFIX_MAX
-+
-+/* record buffer */
-+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-+#define LOG_ALIGN 4
-+#else
-+#define LOG_ALIGN __alignof__(struct printk_log)
-+#endif
-+#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
-+static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
-+static char *log_buf = __log_buf;
-+static u32 log_buf_len = __LOG_BUF_LEN;
-+
-+/* cpu currently holding logbuf_lock */
-+static volatile unsigned int logbuf_cpu = UINT_MAX;
-+
-+/* human readable text of the record */
-+static char *log_text(const struct printk_log *msg)
-+{
-+ return (char *)msg + sizeof(struct printk_log);
-+}
-+
-+/* optional key/value pair dictionary attached to the record */
-+static char *log_dict(const struct printk_log *msg)
-+{
-+ return (char *)msg + sizeof(struct printk_log) + msg->text_len;
-+}
-+
-+/* get record by index; idx must point to valid msg */
-+static struct printk_log *log_from_idx(u32 idx)
-+{
-+ struct printk_log *msg = (struct printk_log *)(log_buf + idx);
-+
-+ /*
-+ * A length == 0 record is the end of buffer marker. Wrap around and
-+ * read the message at the start of the buffer.
-+ */
-+ if (!msg->len)
-+ return (struct printk_log *)log_buf;
-+ return msg;
-+}
-+
-+/* get next record; idx must point to valid msg */
-+static u32 log_next(u32 idx)
-+{
-+ struct printk_log *msg = (struct printk_log *)(log_buf + idx);
-+
-+ /* length == 0 indicates the end of the buffer; wrap */
-+ /*
-+ * A length == 0 record is the end of buffer marker. Wrap around and
-+ * read the message at the start of the buffer as *this* one, and
-+ * return the one after that.
-+ */
-+ if (!msg->len) {
-+ msg = (struct printk_log *)log_buf;
-+ return msg->len;
-+ }
-+ return idx + msg->len;
-+}
-+
-+/* insert record into the buffer, discard old ones, update heads */
-+static void log_store(int facility, int level,
-+ enum log_flags flags, u64 ts_nsec,
-+ const char *dict, u16 dict_len,
-+ const char *text, u16 text_len)
-+{
-+ struct printk_log *msg;
-+ u32 size, pad_len;
-+
-+ /* number of '\0' padding bytes to next message */
-+ size = sizeof(struct printk_log) + text_len + dict_len;
-+ pad_len = (-size) & (LOG_ALIGN - 1);
-+ size += pad_len;
-+
-+ while (log_first_seq < log_next_seq) {
-+ u32 free;
-+
-+ if (log_next_idx > log_first_idx)
-+ free = max(log_buf_len - log_next_idx, log_first_idx);
-+ else
-+ free = log_first_idx - log_next_idx;
-+
-+ if (free > size + sizeof(struct printk_log))
-+ break;
-+
-+ /* drop old messages until we have enough contiguous space */
-+ log_first_idx = log_next(log_first_idx);
-+ log_first_seq++;
-+ }
-+
-+ if (log_next_idx + size + sizeof(struct printk_log) >= log_buf_len) {
-+ /*
-+ * This message + an additional empty header does not fit
-+ * at the end of the buffer. Add an empty header with len == 0
-+ * to signify a wrap around.
-+ */
-+ memset(log_buf + log_next_idx, 0, sizeof(struct printk_log));
-+ log_next_idx = 0;
-+ }
-+
-+ /* fill message */
-+ msg = (struct printk_log *)(log_buf + log_next_idx);
-+ memcpy(log_text(msg), text, text_len);
-+ msg->text_len = text_len;
-+ memcpy(log_dict(msg), dict, dict_len);
-+ msg->dict_len = dict_len;
-+ msg->facility = facility;
-+ msg->level = level & 7;
-+ msg->flags = flags & 0x1f;
-+ if (ts_nsec > 0)
-+ msg->ts_nsec = ts_nsec;
-+ else
-+ msg->ts_nsec = local_clock();
-+ memset(log_dict(msg) + dict_len, 0, pad_len);
-+ msg->len = sizeof(struct printk_log) + text_len + dict_len + pad_len;
-+
-+ /* insert message */
-+ log_next_idx += msg->len;
-+ log_next_seq++;
-+}
-+
-+#ifdef CONFIG_SECURITY_DMESG_RESTRICT
-+int dmesg_restrict __read_only = 1;
-+#else
-+int dmesg_restrict __read_only;
-+#endif
-+
-+static int syslog_action_restricted(int type)
-+{
-+ if (dmesg_restrict)
-+ return 1;
-+ /*
-+ * Unless restricted, we allow "read all" and "get buffer size"
-+ * for everybody.
-+ */
-+ return type != SYSLOG_ACTION_READ_ALL &&
-+ type != SYSLOG_ACTION_SIZE_BUFFER;
-+}
-+
-+static int check_syslog_permissions(int type, bool from_file)
-+{
-+ /*
-+ * If this is from /proc/kmsg and we've already opened it, then we've
-+ * already done the capabilities checks at open time.
-+ */
-+ if (from_file && type != SYSLOG_ACTION_OPEN)
-+ return 0;
-+
-+#ifdef CONFIG_GRKERNSEC_DMESG
-+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
-+ return -EPERM;
-+#endif
-+
-+ if (syslog_action_restricted(type)) {
-+ if (capable(CAP_SYSLOG))
-+ return 0;
-+ /*
-+ * For historical reasons, accept CAP_SYS_ADMIN too, with
-+ * a warning.
-+ */
-+ if (capable(CAP_SYS_ADMIN)) {
-+ pr_warn_once("%s (%d): Attempt to access syslog with "
-+ "CAP_SYS_ADMIN but no CAP_SYSLOG "
-+ "(deprecated).\n",
-+ current->comm, task_pid_nr(current));
-+ return 0;
-+ }
-+ return -EPERM;
-+ }
-+ return security_syslog(type);
-+}
-+
-+
-+/* /dev/kmsg - userspace message inject/listen interface */
-+struct devkmsg_user {
-+ u64 seq;
-+ u32 idx;
-+ enum log_flags prev;
-+ struct mutex lock;
-+ char buf[8192];
-+};
-+
-+static ssize_t devkmsg_writev(struct kiocb *iocb, const struct iovec *iv,
-+ unsigned long count, loff_t pos)
-+{
-+ char *buf, *line;
-+ int i;
-+ int level = default_message_loglevel;
-+ int facility = 1; /* LOG_USER */
-+ size_t len = iov_length(iv, count);
-+ ssize_t ret = len;
-+
-+ if (len > LOG_LINE_MAX)
-+ return -EINVAL;
-+ buf = kmalloc(len+1, GFP_KERNEL);
-+ if (buf == NULL)
-+ return -ENOMEM;
-+
-+ line = buf;
-+ for (i = 0; i < count; i++) {
-+ if (copy_from_user(line, iv[i].iov_base, iv[i].iov_len)) {
-+ ret = -EFAULT;
-+ goto out;
-+ }
-+ line += iv[i].iov_len;
-+ }
-+
-+ /*
-+ * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
-+ * the decimal value represents 32 bits; the lower 3 bits are the log
-+ * level, the rest is the log facility.
-+ *
-+ * If no prefix or no userspace facility is specified, we
-+ * enforce LOG_USER, to be able to reliably distinguish
-+ * kernel-generated messages from userspace-injected ones.
-+ */
-+ line = buf;
-+ if (line[0] == '<') {
-+ char *endp = NULL;
-+
-+ i = simple_strtoul(line+1, &endp, 10);
-+ if (endp && endp[0] == '>') {
-+ level = i & 7;
-+ if (i >> 3)
-+ facility = i >> 3;
-+ endp++;
-+ len -= endp - line;
-+ line = endp;
-+ }
-+ }
-+ line[len] = '\0';
-+
-+ printk_emit(facility, level, NULL, 0, "%s", line);
-+out:
-+ kfree(buf);
-+ return ret;
-+}
-+
-+static ssize_t devkmsg_read(struct file *file, char __user *buf,
-+ size_t count, loff_t *ppos)
-+{
-+ struct devkmsg_user *user = file->private_data;
-+ struct printk_log *msg;
-+ u64 ts_usec;
-+ size_t i;
-+ char cont = '-';
-+ size_t len;
-+ ssize_t ret;
-+
-+ if (!user)
-+ return -EBADF;
-+
-+ ret = mutex_lock_interruptible(&user->lock);
-+ if (ret)
-+ return ret;
-+ raw_spin_lock_irq(&logbuf_lock);
-+ while (user->seq == log_next_seq) {
-+ if (file->f_flags & O_NONBLOCK) {
-+ ret = -EAGAIN;
-+ raw_spin_unlock_irq(&logbuf_lock);
-+ goto out;
-+ }
-+
-+ raw_spin_unlock_irq(&logbuf_lock);
-+ ret = wait_event_interruptible(log_wait,
-+ user->seq != log_next_seq);
-+ if (ret)
-+ goto out;
-+ raw_spin_lock_irq(&logbuf_lock);
-+ }
-+
-+ if (user->seq < log_first_seq) {
-+ /* our last seen message is gone, return error and reset */
-+ user->idx = log_first_idx;
-+ user->seq = log_first_seq;
-+ ret = -EPIPE;
-+ raw_spin_unlock_irq(&logbuf_lock);
-+ goto out;
-+ }
-+
-+ msg = log_from_idx(user->idx);
-+ ts_usec = msg->ts_nsec;
-+ do_div(ts_usec, 1000);
-+
-+ /*
-+ * If we couldn't merge continuation line fragments during the print,
-+ * export the stored flags to allow an optional external merge of the
-+ * records. Merging the records isn't necessarily always correct, like
-+ * when we hit a race during printing. In most cases though, it produces
-+ * more readable output. 'c' in the record flags marks the first
-+ * fragment of a line, '+' the following.
-+ */
-+ if (msg->flags & LOG_CONT && !(user->prev & LOG_CONT))
-+ cont = 'c';
-+ else if ((msg->flags & LOG_CONT) ||
-+ ((user->prev & LOG_CONT) && !(msg->flags & LOG_PREFIX)))
-+ cont = '+';
-+
-+ len = sprintf(user->buf, "%u,%llu,%llu,%c;",
-+ (msg->facility << 3) | msg->level,
-+ user->seq, ts_usec, cont);
-+ user->prev = msg->flags;
-+
-+ /* escape non-printable characters */
-+ for (i = 0; i < msg->text_len; i++) {
-+ unsigned char c = log_text(msg)[i];
-+
-+ if (c < ' ' || c >= 127 || c == '\\')
-+ len += sprintf(user->buf + len, "\\x%02x", c);
-+ else
-+ user->buf[len++] = c;
-+ }
-+ user->buf[len++] = '\n';
-+
-+ if (msg->dict_len) {
-+ bool line = true;
-+
-+ for (i = 0; i < msg->dict_len; i++) {
-+ unsigned char c = log_dict(msg)[i];
-+
-+ if (line) {
-+ user->buf[len++] = ' ';
-+ line = false;
-+ }
-+
-+ if (c == '\0') {
-+ user->buf[len++] = '\n';
-+ line = true;
-+ continue;
-+ }
-+
-+ if (c < ' ' || c >= 127 || c == '\\') {
-+ len += sprintf(user->buf + len, "\\x%02x", c);
-+ continue;
-+ }
-+
-+ user->buf[len++] = c;
-+ }
-+ user->buf[len++] = '\n';
-+ }
-+
-+ user->idx = log_next(user->idx);
-+ user->seq++;
-+ raw_spin_unlock_irq(&logbuf_lock);
-+
-+ if (len > count) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (copy_to_user(buf, user->buf, len)) {
-+ ret = -EFAULT;
-+ goto out;
-+ }
-+ ret = len;
-+out:
-+ mutex_unlock(&user->lock);
-+ return ret;
-+}
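
The export loop above keeps /dev/kmsg output single-line and unambiguous by escaping control bytes, DEL and high bytes, and backslash as \xNN. A self-contained sketch of the same escaping rule (sample input is made up):

#include <stdio.h>

int main(void)
{
	const unsigned char msg[] = "tab\there\\end\x01";
	char out[64];
	size_t len = 0;

	for (size_t i = 0; msg[i]; i++) {
		unsigned char c = msg[i];

		if (c < ' ' || c >= 127 || c == '\\')
			len += (size_t)sprintf(out + len, "\\x%02x", c);
		else
			out[len++] = (char)c;
	}
	out[len] = '\0';
	printf("%s\n", out);
	return 0;
}
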
-+
-+static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
-+{
-+ struct devkmsg_user *user = file->private_data;
-+ loff_t ret = 0;
-+
-+ if (!user)
-+ return -EBADF;
-+ if (offset)
-+ return -ESPIPE;
-+
-+ raw_spin_lock_irq(&logbuf_lock);
-+ switch (whence) {
-+ case SEEK_SET:
-+ /* the first record */
-+ user->idx = log_first_idx;
-+ user->seq = log_first_seq;
-+ break;
-+ case SEEK_DATA:
-+ /*
-+ * The first record after the last SYSLOG_ACTION_CLEAR,
-+ * as issued by 'dmesg -c'. Reading /dev/kmsg itself
-+ * changes no global state, and does not clear anything.
-+ */
-+ user->idx = clear_idx;
-+ user->seq = clear_seq;
-+ break;
-+ case SEEK_END:
-+ /* after the last record */
-+ user->idx = log_next_idx;
-+ user->seq = log_next_seq;
-+ break;
-+ default:
-+ ret = -EINVAL;
-+ }
-+ raw_spin_unlock_irq(&logbuf_lock);
-+ return ret;
-+}
-+
-+static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
-+{
-+ struct devkmsg_user *user = file->private_data;
-+ int ret = 0;
-+
-+ if (!user)
-+ return POLLERR|POLLNVAL;
-+
-+ poll_wait(file, &log_wait, wait);
-+
-+ raw_spin_lock_irq(&logbuf_lock);
-+ if (user->seq < log_next_seq) {
-+ /* return error when data has vanished underneath us */
-+ if (user->seq < log_first_seq)
-+ ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
-+ else
-+ ret = POLLIN|POLLRDNORM;
-+ }
-+ raw_spin_unlock_irq(&logbuf_lock);
-+
-+ return ret;
-+}
-+
-+static int devkmsg_open(struct inode *inode, struct file *file)
-+{
-+ struct devkmsg_user *user;
-+ int err;
-+
-+ /* write-only does not need any file context */
-+ if ((file->f_flags & O_ACCMODE) == O_WRONLY)
-+ return 0;
-+
-+ err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
-+ SYSLOG_FROM_READER);
-+ if (err)
-+ return err;
-+
-+ user = kmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
-+ if (!user)
-+ return -ENOMEM;
-+
-+ mutex_init(&user->lock);
-+
-+ raw_spin_lock_irq(&logbuf_lock);
-+ user->idx = log_first_idx;
-+ user->seq = log_first_seq;
-+ raw_spin_unlock_irq(&logbuf_lock);
-+
-+ file->private_data = user;
-+ return 0;
-+}
-+
-+static int devkmsg_release(struct inode *inode, struct file *file)
-+{
-+ struct devkmsg_user *user = file->private_data;
-+
-+ if (!user)
-+ return 0;
-+
-+ mutex_destroy(&user->lock);
-+ kfree(user);
-+ return 0;
-+}
-+
-+const struct file_operations kmsg_fops = {
-+ .open = devkmsg_open,
-+ .read = devkmsg_read,
-+ .aio_write = devkmsg_writev,
-+ .llseek = devkmsg_llseek,
-+ .poll = devkmsg_poll,
-+ .release = devkmsg_release,
-+};
-+
-+#ifdef CONFIG_KEXEC
-+/*
-+ * This appends the listed symbols to /proc/vmcore
-+ *
-+ * /proc/vmcore is used by various utilities, like crash and makedumpfile to
-+ * obtain access to symbols that are otherwise very difficult to locate. These
-+ * symbols are specifically used so that utilities can access and extract the
-+ * dmesg log from a vmcore file after a crash.
-+ */
-+void log_buf_kexec_setup(void)
-+{
-+ VMCOREINFO_SYMBOL(log_buf);
-+ VMCOREINFO_SYMBOL(log_buf_len);
-+ VMCOREINFO_SYMBOL(log_first_idx);
-+ VMCOREINFO_SYMBOL(log_next_idx);
-+ /*
-+ * Export struct printk_log size and field offsets. User space tools can
-+ * parse it and detect any changes to structure down the line.
-+ */
-+ VMCOREINFO_STRUCT_SIZE(printk_log);
-+ VMCOREINFO_OFFSET(printk_log, ts_nsec);
-+ VMCOREINFO_OFFSET(printk_log, len);
-+ VMCOREINFO_OFFSET(printk_log, text_len);
-+ VMCOREINFO_OFFSET(printk_log, dict_len);
-+}
-+#endif
-+
-+/* requested log_buf_len from kernel cmdline */
-+static unsigned long __initdata new_log_buf_len;
-+
-+/* save requested log_buf_len since it's too early to process it */
-+static int __init log_buf_len_setup(char *str)
-+{
-+ unsigned size = memparse(str, &str);
-+
-+ if (size)
-+ size = roundup_pow_of_two(size);
-+ if (size > log_buf_len)
-+ new_log_buf_len = size;
-+
-+ return 0;
-+}
-+early_param("log_buf_len", log_buf_len_setup);
-+
-+void __init setup_log_buf(int early)
-+{
-+ unsigned long flags;
-+ char *new_log_buf;
-+ int free;
-+
-+ if (!new_log_buf_len)
-+ return;
-+
-+ if (early) {
-+ new_log_buf =
-+ memblock_virt_alloc(new_log_buf_len, PAGE_SIZE);
-+ } else {
-+ new_log_buf = memblock_virt_alloc_nopanic(new_log_buf_len, 0);
-+ }
-+
-+ if (unlikely(!new_log_buf)) {
-+ pr_err("log_buf_len: %ld bytes not available\n",
-+ new_log_buf_len);
-+ return;
-+ }
-+
-+ raw_spin_lock_irqsave(&logbuf_lock, flags);
-+ log_buf_len = new_log_buf_len;
-+ log_buf = new_log_buf;
-+ new_log_buf_len = 0;
-+ free = __LOG_BUF_LEN - log_next_idx;
-+ memcpy(log_buf, __log_buf, __LOG_BUF_LEN);
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-+
-+ pr_info("log_buf_len: %d\n", log_buf_len);
-+ pr_info("early log buf free: %d(%d%%)\n",
-+ free, (free * 100) / __LOG_BUF_LEN);
-+}
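
For reference, the log_buf_len= handling above only replaces the static buffer when the requested size, rounded up to a power of two, exceeds the built-in one. A small sketch of that decision (the constants are made-up examples, not the kernel's helpers):

#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned long builtin_len = 1UL << 17;   /* e.g. CONFIG_LOG_BUF_SHIFT=17 */
	unsigned long requested = 200000;        /* "log_buf_len=200000" on the cmdline */
	unsigned long size = roundup_pow_of_two(requested);

	if (size > builtin_len)
		printf("replacing %lu-byte buffer with %lu bytes\n", builtin_len, size);
	return 0;
}
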
-+
-+static bool __read_mostly ignore_loglevel;
-+
-+static int __init ignore_loglevel_setup(char *str)
-+{
-+ ignore_loglevel = 1;
-+ pr_info("debug: ignoring loglevel setting.\n");
-+
-+ return 0;
-+}
-+
-+early_param("ignore_loglevel", ignore_loglevel_setup);
-+module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
-+MODULE_PARM_DESC(ignore_loglevel, "ignore loglevel setting, to "
-+ "print all kernel messages to the console.");
-+
-+#ifdef CONFIG_BOOT_PRINTK_DELAY
-+
-+static int boot_delay; /* msecs delay after each printk during bootup */
-+static unsigned long long loops_per_msec; /* based on boot_delay */
-+
-+static int __init boot_delay_setup(char *str)
-+{
-+ unsigned long lpj;
-+
-+ lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */
-+ loops_per_msec = (unsigned long long)lpj / 1000 * HZ;
-+
-+ get_option(&str, &boot_delay);
-+ if (boot_delay > 10 * 1000)
-+ boot_delay = 0;
-+
-+ pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
-+ "HZ: %d, loops_per_msec: %llu\n",
-+ boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
-+ return 0;
-+}
-+early_param("boot_delay", boot_delay_setup);
-+
-+static void boot_delay_msec(int level)
-+{
-+ unsigned long long k;
-+ unsigned long timeout;
-+
-+ if ((boot_delay == 0 || system_state != SYSTEM_BOOTING)
-+ || (level >= console_loglevel && !ignore_loglevel)) {
-+ return;
-+ }
-+
-+ k = (unsigned long long)loops_per_msec * boot_delay;
-+
-+ timeout = jiffies + msecs_to_jiffies(boot_delay);
-+ while (k) {
-+ k--;
-+ cpu_relax();
-+ /*
-+ * use (volatile) jiffies to prevent
-+ * compiler reduction; loop termination via jiffies
-+ * is secondary and may or may not happen.
-+ */
-+ if (time_after(jiffies, timeout))
-+ break;
-+ touch_nmi_watchdog();
-+ }
-+}
-+#else
-+static inline void boot_delay_msec(int level)
-+{
-+}
-+#endif
-+
-+#if defined(CONFIG_PRINTK_TIME)
-+static bool printk_time = 1;
-+#else
-+static bool printk_time;
-+#endif
-+module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
-+
-+static size_t print_time(u64 ts, char *buf)
-+{
-+ unsigned long rem_nsec;
-+
-+ if (!printk_time)
-+ return 0;
-+
-+ rem_nsec = do_div(ts, 1000000000);
-+
-+ if (!buf)
-+ return snprintf(NULL, 0, "[%5lu.000000] ", (unsigned long)ts);
-+
-+ return sprintf(buf, "[%5lu.%06lu] ",
-+ (unsigned long)ts, rem_nsec / 1000);
-+}
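
The timestamp prefix above is produced by splitting the 64-bit nanosecond clock into whole seconds and a microsecond remainder (do_div() performs the division in place in the kernel). A tiny standalone equivalent, with an arbitrary sample value:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t ts_nsec = 5123456789ULL;            /* 5.123456789 s of uptime */
	uint64_t rem_nsec = ts_nsec % 1000000000ULL; /* do_div() in the kernel */
	uint64_t secs = ts_nsec / 1000000000ULL;

	printf("[%5" PRIu64 ".%06" PRIu64 "] \n", secs, rem_nsec / 1000);
	return 0;
}
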
-+
-+static size_t print_prefix(const struct printk_log *msg, bool syslog, char *buf)
-+{
-+ size_t len = 0;
-+ unsigned int prefix = (msg->facility << 3) | msg->level;
-+
-+ if (syslog) {
-+ if (buf) {
-+ len += sprintf(buf, "<%u>", prefix);
-+ } else {
-+ len += 3;
-+ if (prefix > 999)
-+ len += 3;
-+ else if (prefix > 99)
-+ len += 2;
-+ else if (prefix > 9)
-+ len++;
-+ }
-+ }
-+
-+ len += print_time(msg->ts_nsec, buf ? buf + len : NULL);
-+ return len;
-+}
-+
-+static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
-+ bool syslog, char *buf, size_t size)
-+{
-+ const char *text = log_text(msg);
-+ size_t text_size = msg->text_len;
-+ bool prefix = true;
-+ bool newline = true;
-+ size_t len = 0;
-+
-+ if ((prev & LOG_CONT) && !(msg->flags & LOG_PREFIX))
-+ prefix = false;
-+
-+ if (msg->flags & LOG_CONT) {
-+ if ((prev & LOG_CONT) && !(prev & LOG_NEWLINE))
-+ prefix = false;
-+
-+ if (!(msg->flags & LOG_NEWLINE))
-+ newline = false;
-+ }
-+
-+ do {
-+ const char *next = memchr(text, '\n', text_size);
-+ size_t text_len;
-+
-+ if (next) {
-+ text_len = next - text;
-+ next++;
-+ text_size -= next - text;
-+ } else {
-+ text_len = text_size;
-+ }
-+
-+ if (buf) {
-+ if (print_prefix(msg, syslog, NULL) +
-+ text_len + 1 >= size - len)
-+ break;
-+
-+ if (prefix)
-+ len += print_prefix(msg, syslog, buf + len);
-+ memcpy(buf + len, text, text_len);
-+ len += text_len;
-+ if (next || newline)
-+ buf[len++] = '\n';
-+ } else {
-+ /* SYSLOG_ACTION_* buffer size only calculation */
-+ if (prefix)
-+ len += print_prefix(msg, syslog, NULL);
-+ len += text_len;
-+ if (next || newline)
-+ len++;
-+ }
-+
-+ prefix = true;
-+ text = next;
-+ } while (text);
-+
-+ return len;
-+}
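
msg_print_text() and print_prefix() above follow a common two-pass convention: called with a NULL buffer they only report the length they would produce, so callers can size a buffer (or answer SYSLOG_ACTION_SIZE_UNREAD) before formatting. A minimal illustration of the pattern (render() and its format are invented for the example):

#include <stdio.h>
#include <stdlib.h>

static size_t render(char *buf, const char *text, unsigned prefix)
{
	if (!buf)                                     /* size-only calculation */
		return (size_t)snprintf(NULL, 0, "<%u>%s\n", prefix, text);
	return (size_t)sprintf(buf, "<%u>%s\n", prefix, text);
}

int main(void)
{
	size_t need = render(NULL, "it's a line", 3); /* pass 1: measure */
	char *buf = malloc(need + 1);

	if (!buf)
		return 1;
	render(buf, "it's a line", 3);                /* pass 2: format */
	fputs(buf, stdout);
	free(buf);
	return 0;
}
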
-+
-+static int syslog_print(char __user *buf, int size)
-+{
-+ char *text;
-+ struct printk_log *msg;
-+ int len = 0;
-+
-+ text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
-+ if (!text)
-+ return -ENOMEM;
-+
-+ while (size > 0) {
-+ size_t n;
-+ size_t skip;
-+
-+ raw_spin_lock_irq(&logbuf_lock);
-+ if (syslog_seq < log_first_seq) {
-+ /* messages are gone, move to first one */
-+ syslog_seq = log_first_seq;
-+ syslog_idx = log_first_idx;
-+ syslog_prev = 0;
-+ syslog_partial = 0;
-+ }
-+ if (syslog_seq == log_next_seq) {
-+ raw_spin_unlock_irq(&logbuf_lock);
-+ break;
-+ }
-+
-+ skip = syslog_partial;
-+ msg = log_from_idx(syslog_idx);
-+ n = msg_print_text(msg, syslog_prev, true, text,
-+ LOG_LINE_MAX + PREFIX_MAX);
-+ if (n - syslog_partial <= size) {
-+ /* message fits into buffer, move forward */
-+ syslog_idx = log_next(syslog_idx);
-+ syslog_seq++;
-+ syslog_prev = msg->flags;
-+ n -= syslog_partial;
-+ syslog_partial = 0;
-+ } else if (!len){
-+ /* partial read(), remember position */
-+ n = size;
-+ syslog_partial += n;
-+ } else
-+ n = 0;
-+ raw_spin_unlock_irq(&logbuf_lock);
-+
-+ if (!n)
-+ break;
-+
-+ if (copy_to_user(buf, text + skip, n)) {
-+ if (!len)
-+ len = -EFAULT;
-+ break;
-+ }
-+
-+ len += n;
-+ size -= n;
-+ buf += n;
-+ }
-+
-+ kfree(text);
-+ return len;
-+}
-+
-+static int syslog_print_all(char __user *buf, int size, bool clear)
-+{
-+ char *text;
-+ int len = 0;
-+
-+ text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
-+ if (!text)
-+ return -ENOMEM;
-+
-+ raw_spin_lock_irq(&logbuf_lock);
-+ if (buf) {
-+ u64 next_seq;
-+ u64 seq;
-+ u32 idx;
-+ enum log_flags prev;
-+
-+ if (clear_seq < log_first_seq) {
-+ /* messages are gone, move to first available one */
-+ clear_seq = log_first_seq;
-+ clear_idx = log_first_idx;
-+ }
-+
-+ /*
-+ * Find first record that fits, including all following records,
-+ * into the user-provided buffer for this dump.
-+ */
-+ seq = clear_seq;
-+ idx = clear_idx;
-+ prev = 0;
-+ while (seq < log_next_seq) {
-+ struct printk_log *msg = log_from_idx(idx);
-+
-+ len += msg_print_text(msg, prev, true, NULL, 0);
-+ prev = msg->flags;
-+ idx = log_next(idx);
-+ seq++;
-+ }
-+
-+ /* move first record forward until length fits into the buffer */
-+ seq = clear_seq;
-+ idx = clear_idx;
-+ prev = 0;
-+ while (len > size && seq < log_next_seq) {
-+ struct printk_log *msg = log_from_idx(idx);
-+
-+ len -= msg_print_text(msg, prev, true, NULL, 0);
-+ prev = msg->flags;
-+ idx = log_next(idx);
-+ seq++;
-+ }
-+
-+ /* last message fitting into this dump */
-+ next_seq = log_next_seq;
-+
-+ len = 0;
-+ while (len >= 0 && seq < next_seq) {
-+ struct printk_log *msg = log_from_idx(idx);
-+ int textlen;
-+
-+ textlen = msg_print_text(msg, prev, true, text,
-+ LOG_LINE_MAX + PREFIX_MAX);
-+ if (textlen < 0) {
-+ len = textlen;
-+ break;
-+ }
-+ idx = log_next(idx);
-+ seq++;
-+ prev = msg->flags;
-+
-+ raw_spin_unlock_irq(&logbuf_lock);
-+ if (copy_to_user(buf + len, text, textlen))
-+ len = -EFAULT;
-+ else
-+ len += textlen;
-+ raw_spin_lock_irq(&logbuf_lock);
-+
-+ if (seq < log_first_seq) {
-+ /* messages are gone, move to next one */
-+ seq = log_first_seq;
-+ idx = log_first_idx;
-+ prev = 0;
-+ }
-+ }
-+ }
-+
-+ if (clear) {
-+ clear_seq = log_next_seq;
-+ clear_idx = log_next_idx;
-+ }
-+ raw_spin_unlock_irq(&logbuf_lock);
-+
-+ kfree(text);
-+ return len;
-+}
-+
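/*
 * Illustration (editor's sketch, not from the patch): a stand-alone model
 * of the sizing strategy syslog_print_all() uses above -- total every
 * record since the last clear, then advance the starting record until
 * what remains fits the caller's buffer.  Record lengths here are made up.
 */
#include <stdio.h>

int main(void)
{
	size_t rec[] = { 40, 120, 75, 300, 52 };	/* per-record text lengths */
	size_t n = sizeof(rec) / sizeof(rec[0]);
	size_t size = 400;				/* user-supplied buffer size */
	size_t len = 0, first = 0, i;

	for (i = 0; i < n; i++)
		len += rec[i];			/* pass 1: length of everything   */
	while (len > size && first < n)
		len -= rec[first++];		/* pass 2: drop the oldest records */
	printf("copy records %zu..%zu, %zu bytes\n", first, n - 1, len);
	return 0;
}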
-+int do_syslog(int type, char __user *buf, int len, bool from_file)
-+{
-+ bool clear = false;
-+ static int saved_console_loglevel = -1;
-+ int error;
-+
-+ error = check_syslog_permissions(type, from_file);
-+ if (error)
-+ goto out;
-+
-+ error = security_syslog(type);
-+ if (error)
-+ return error;
-+
-+ switch (type) {
-+ case SYSLOG_ACTION_CLOSE: /* Close log */
-+ break;
-+ case SYSLOG_ACTION_OPEN: /* Open log */
-+ break;
-+ case SYSLOG_ACTION_READ: /* Read from log */
-+ error = -EINVAL;
-+ if (!buf || len < 0)
-+ goto out;
-+ error = 0;
-+ if (!len)
-+ goto out;
-+ if (!access_ok(VERIFY_WRITE, buf, len)) {
-+ error = -EFAULT;
-+ goto out;
-+ }
-+ error = wait_event_interruptible(log_wait,
-+ syslog_seq != log_next_seq);
-+ if (error)
-+ goto out;
-+ error = syslog_print(buf, len);
-+ break;
-+ /* Read/clear last kernel messages */
-+ case SYSLOG_ACTION_READ_CLEAR:
-+ clear = true;
-+ /* FALL THRU */
-+ /* Read last kernel messages */
-+ case SYSLOG_ACTION_READ_ALL:
-+ error = -EINVAL;
-+ if (!buf || len < 0)
-+ goto out;
-+ error = 0;
-+ if (!len)
-+ goto out;
-+ if (!access_ok(VERIFY_WRITE, buf, len)) {
-+ error = -EFAULT;
-+ goto out;
-+ }
-+ error = syslog_print_all(buf, len, clear);
-+ break;
-+ /* Clear ring buffer */
-+ case SYSLOG_ACTION_CLEAR:
-+ syslog_print_all(NULL, 0, true);
-+ break;
-+ /* Disable logging to console */
-+ case SYSLOG_ACTION_CONSOLE_OFF:
-+ if (saved_console_loglevel == -1)
-+ saved_console_loglevel = console_loglevel;
-+ console_loglevel = minimum_console_loglevel;
-+ break;
-+ /* Enable logging to console */
-+ case SYSLOG_ACTION_CONSOLE_ON:
-+ if (saved_console_loglevel != -1) {
-+ console_loglevel = saved_console_loglevel;
-+ saved_console_loglevel = -1;
-+ }
-+ break;
-+ /* Set level of messages printed to console */
-+ case SYSLOG_ACTION_CONSOLE_LEVEL:
-+ error = -EINVAL;
-+ if (len < 1 || len > 8)
-+ goto out;
-+ if (len < minimum_console_loglevel)
-+ len = minimum_console_loglevel;
-+ console_loglevel = len;
-+ /* Implicitly re-enable logging to console */
-+ saved_console_loglevel = -1;
-+ error = 0;
-+ break;
-+ /* Number of chars in the log buffer */
-+ case SYSLOG_ACTION_SIZE_UNREAD:
-+ raw_spin_lock_irq(&logbuf_lock);
-+ if (syslog_seq < log_first_seq) {
-+ /* messages are gone, move to first one */
-+ syslog_seq = log_first_seq;
-+ syslog_idx = log_first_idx;
-+ syslog_prev = 0;
-+ syslog_partial = 0;
-+ }
-+ if (from_file) {
-+ /*
-+			 * Short-cut for poll() on /proc/kmsg, which simply checks
-+ * for pending data, not the size; return the count of
-+ * records, not the length.
-+ */
-+ error = log_next_idx - syslog_idx;
-+ } else {
-+ u64 seq = syslog_seq;
-+ u32 idx = syslog_idx;
-+ enum log_flags prev = syslog_prev;
-+
-+ error = 0;
-+ while (seq < log_next_seq) {
-+ struct printk_log *msg = log_from_idx(idx);
-+
-+ error += msg_print_text(msg, prev, true, NULL, 0);
-+ idx = log_next(idx);
-+ seq++;
-+ prev = msg->flags;
-+ }
-+ error -= syslog_partial;
-+ }
-+ raw_spin_unlock_irq(&logbuf_lock);
-+ break;
-+ /* Size of the log buffer */
-+ case SYSLOG_ACTION_SIZE_BUFFER:
-+ error = log_buf_len;
-+ break;
-+ default:
-+ error = -EINVAL;
-+ break;
-+ }
-+out:
-+ return error;
-+}
-+
-+SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
-+{
-+ return do_syslog(type, buf, len, SYSLOG_FROM_READER);
-+}
-+
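/*
 * Illustration (editor's sketch, not from the patch): exercising the
 * syslog(2) interface implemented by do_syslog() above from user space,
 * via glibc's klogctl(3) wrapper.  Action values 10 (SIZE_BUFFER) and
 * 3 (READ_ALL) correspond to the SYSLOG_ACTION_* constants; error
 * handling is intentionally minimal.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/klog.h>

int main(void)
{
	int size = klogctl(10, NULL, 0);	/* SYSLOG_ACTION_SIZE_BUFFER */
	char *buf;
	int len;

	if (size <= 0)
		return 1;
	buf = malloc(size);
	if (!buf)
		return 1;
	len = klogctl(3, buf, size);		/* SYSLOG_ACTION_READ_ALL */
	if (len > 0)
		fwrite(buf, 1, len, stdout);
	free(buf);
	return len < 0;
}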
-+/*
-+ * Call the console drivers, asking them to write out
-+ * log_buf[start] to log_buf[end - 1].
-+ * The console_lock must be held.
-+ */
-+static void call_console_drivers(int level, const char *text, size_t len)
-+{
-+ struct console *con;
-+
-+ trace_console(text, len);
-+
-+ if (level >= console_loglevel && !ignore_loglevel)
-+ return;
-+ if (!console_drivers)
-+ return;
-+
-+ for_each_console(con) {
-+ if (exclusive_console && con != exclusive_console)
-+ continue;
-+ if (!(con->flags & CON_ENABLED))
-+ continue;
-+ if (!con->write)
-+ continue;
-+ if (!cpu_online(smp_processor_id()) &&
-+ !(con->flags & CON_ANYTIME))
-+ continue;
-+ con->write(con, text, len);
-+ }
-+}
-+
-+/*
-+ * Zap console related locks when oopsing. Only zap at most once
-+ * every 10 seconds, to leave time for slow consoles to print a
-+ * full oops.
-+ */
-+static void zap_locks(void)
-+{
-+ static unsigned long oops_timestamp;
-+
-+ if (time_after_eq(jiffies, oops_timestamp) &&
-+ !time_after(jiffies, oops_timestamp + 30 * HZ))
-+ return;
-+
-+ oops_timestamp = jiffies;
-+
-+ debug_locks_off();
-+ /* If a crash is occurring, make sure we can't deadlock */
-+ raw_spin_lock_init(&logbuf_lock);
-+ /* And make sure that we print immediately */
-+ sema_init(&console_sem, 1);
-+}
-+
-+/* Check if we have any console registered that can be called early in boot. */
-+static int have_callable_console(void)
-+{
-+ struct console *con;
-+
-+ for_each_console(con)
-+ if (con->flags & CON_ANYTIME)
-+ return 1;
-+
-+ return 0;
-+}
-+
-+/*
-+ * Can we actually use the console at this time on this cpu?
-+ *
-+ * Console drivers may assume that per-cpu resources have
-+ * been allocated. So unless they're explicitly marked as
-+ * being able to cope (CON_ANYTIME) don't call them until
-+ * this CPU is officially up.
-+ */
-+static inline int can_use_console(unsigned int cpu)
-+{
-+ return cpu_online(cpu) || have_callable_console();
-+}
-+
-+/*
-+ * Try to get console ownership to actually show the kernel
-+ * messages from a 'printk'. Return true (and with the
-+ * console_lock held, and 'console_locked' set) if it
-+ * is successful, false otherwise.
-+ *
-+ * This gets called with the 'logbuf_lock' spinlock held and
-+ * interrupts disabled. It should return with 'lockbuf_lock'
-+ * released but interrupts still disabled.
-+ */
-+static int console_trylock_for_printk(unsigned int cpu)
-+ __releases(&logbuf_lock)
-+{
-+ int retval = 0, wake = 0;
-+
-+ if (console_trylock()) {
-+ retval = 1;
-+
-+ /*
-+ * If we can't use the console, we need to release
-+ * the console semaphore by hand to avoid flushing
-+ * the buffer. We need to hold the console semaphore
-+ * in order to do this test safely.
-+ */
-+ if (!can_use_console(cpu)) {
-+ console_locked = 0;
-+ wake = 1;
-+ retval = 0;
-+ }
-+ }
-+ logbuf_cpu = UINT_MAX;
-+ raw_spin_unlock(&logbuf_lock);
-+ if (wake)
-+ up(&console_sem);
-+ return retval;
-+}
-+
-+int printk_delay_msec __read_mostly;
-+
-+static inline void printk_delay(void)
-+{
-+ if (unlikely(printk_delay_msec)) {
-+ int m = printk_delay_msec;
-+
-+ while (m--) {
-+ mdelay(1);
-+ touch_nmi_watchdog();
-+ }
-+ }
-+}
-+
-+/*
-+ * Continuation lines are buffered, and not committed to the record buffer
-+ * until the line is complete, or a race forces it. The line fragments
-+ * though, are printed immediately to the consoles to ensure everything has
-+ * reached the console in case of a kernel crash.
-+ */
-+static struct cont {
-+ char buf[LOG_LINE_MAX];
-+ size_t len; /* length == 0 means unused buffer */
-+ size_t cons; /* bytes written to console */
-+	struct task_struct *owner;	/* task of first print */
-+ u64 ts_nsec; /* time of first print */
-+ u8 level; /* log level of first message */
-+	u8 facility;	/* log facility of first message */
-+ enum log_flags flags; /* prefix, newline flags */
-+ bool flushed:1; /* buffer sealed and committed */
-+} cont;
-+
-+static void cont_flush(enum log_flags flags)
-+{
-+ if (cont.flushed)
-+ return;
-+ if (cont.len == 0)
-+ return;
-+
-+ if (cont.cons) {
-+ /*
-+ * If a fragment of this line was directly flushed to the
-+ * console; wait for the console to pick up the rest of the
-+ * line. LOG_NOCONS suppresses a duplicated output.
-+ */
-+ log_store(cont.facility, cont.level, flags | LOG_NOCONS,
-+ cont.ts_nsec, NULL, 0, cont.buf, cont.len);
-+ cont.flags = flags;
-+ cont.flushed = true;
-+ } else {
-+ /*
-+ * If no fragment of this line ever reached the console,
-+ * just submit it to the store and free the buffer.
-+ */
-+ log_store(cont.facility, cont.level, flags, 0,
-+ NULL, 0, cont.buf, cont.len);
-+ cont.len = 0;
-+ }
-+}
-+
-+static bool cont_add(int facility, int level, const char *text, size_t len)
-+{
-+ if (cont.len && cont.flushed)
-+ return false;
-+
-+ if (cont.len + len > sizeof(cont.buf)) {
-+ /* the line gets too long, split it up in separate records */
-+ cont_flush(LOG_CONT);
-+ return false;
-+ }
-+
-+ if (!cont.len) {
-+ cont.facility = facility;
-+ cont.level = level;
-+ cont.owner = current;
-+ cont.ts_nsec = local_clock();
-+ cont.flags = 0;
-+ cont.cons = 0;
-+ cont.flushed = false;
-+ }
-+
-+ memcpy(cont.buf + cont.len, text, len);
-+ cont.len += len;
-+
-+ if (cont.len > (sizeof(cont.buf) * 80) / 100)
-+ cont_flush(LOG_CONT);
-+
-+ return true;
-+}
-+
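/*
 * Illustration (editor's sketch, not from the patch): a user-space model
 * of the continuation-buffer policy in cont_add()/cont_flush() above -- a
 * fragment is appended only if it fits, and the pending line is flushed
 * once the buffer crosses 80% so it cannot be held open indefinitely.
 * Names and sizes are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static char cbuf[128];
static size_t clen;

static bool model_cont_add(const char *text, size_t len)
{
	if (clen + len > sizeof(cbuf))
		return false;		/* caller stores it as its own record instead */
	memcpy(cbuf + clen, text, len);
	clen += len;
	if (clen > (sizeof(cbuf) * 80) / 100) {
		printf("flush: %.*s\n", (int)clen, cbuf);
		clen = 0;
	}
	return true;
}

int main(void)
{
	const char *frags[] = {
		"partial line without a newline, ",
		"more text from the same task, ",
		"and enough to cross the 80% flush threshold",
	};
	size_t i;

	for (i = 0; i < sizeof(frags) / sizeof(frags[0]); i++)
		model_cont_add(frags[i], strlen(frags[i]));
	return 0;
}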
-+static size_t cont_print_text(char *text, size_t size)
-+{
-+ size_t textlen = 0;
-+ size_t len;
-+
-+ if (cont.cons == 0 && (console_prev & LOG_NEWLINE)) {
-+ textlen += print_time(cont.ts_nsec, text);
-+ size -= textlen;
-+ }
-+
-+ len = cont.len - cont.cons;
-+ if (len > 0) {
-+ if (len+1 > size)
-+ len = size-1;
-+ memcpy(text + textlen, cont.buf + cont.cons, len);
-+ textlen += len;
-+ cont.cons = cont.len;
-+ }
-+
-+ if (cont.flushed) {
-+ if (cont.flags & LOG_NEWLINE)
-+ text[textlen++] = '\n';
-+ /* got everything, release buffer */
-+ cont.len = 0;
-+ }
-+ return textlen;
-+}
-+
-+asmlinkage int vprintk_emit(int facility, int level,
-+ const char *dict, size_t dictlen,
-+ const char *fmt, va_list args)
-+{
-+ static int recursion_bug;
-+ static char textbuf[LOG_LINE_MAX];
-+ char *text = textbuf;
-+ size_t text_len;
-+ enum log_flags lflags = 0;
-+ unsigned long flags;
-+ int this_cpu;
-+ int printed_len = 0;
-+
-+ boot_delay_msec(level);
-+ printk_delay();
-+
-+ /* This stops the holder of console_sem just where we want him */
-+ local_irq_save(flags);
-+ this_cpu = smp_processor_id();
-+
-+ /*
-+ * Ouch, printk recursed into itself!
-+ */
-+ if (unlikely(logbuf_cpu == this_cpu)) {
-+ /*
-+ * If a crash is occurring during printk() on this CPU,
-+ * then try to get the crash message out but make sure
-+ * we can't deadlock. Otherwise just return to avoid the
-+ * recursion and return - but flag the recursion so that
-+ * it can be printed at the next appropriate moment:
-+ */
-+ if (!oops_in_progress && !lockdep_recursing(current)) {
-+ recursion_bug = 1;
-+ goto out_restore_irqs;
-+ }
-+ zap_locks();
-+ }
-+
-+ lockdep_off();
-+ raw_spin_lock(&logbuf_lock);
-+ logbuf_cpu = this_cpu;
-+
-+ if (recursion_bug) {
-+ static const char recursion_msg[] =
-+ "BUG: recent printk recursion!";
-+
-+ recursion_bug = 0;
-+ printed_len += strlen(recursion_msg);
-+ /* emit KERN_CRIT message */
-+ log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
-+ NULL, 0, recursion_msg, printed_len);
-+ }
-+
-+ /*
-+ * The printf needs to come first; we need the syslog
-+ * prefix which might be passed-in as a parameter.
-+ */
-+ text_len = vscnprintf(text, sizeof(textbuf), fmt, args);
-+
-+ /* mark and strip a trailing newline */
-+ if (text_len && text[text_len-1] == '\n') {
-+ text_len--;
-+ lflags |= LOG_NEWLINE;
-+ }
-+
-+ /* strip kernel syslog prefix and extract log level or control flags */
-+ if (facility == 0) {
-+ int kern_level = printk_get_level(text);
-+
-+ if (kern_level) {
-+ const char *end_of_header = printk_skip_level(text);
-+ switch (kern_level) {
-+ case '0' ... '7':
-+ if (level == -1)
-+ level = kern_level - '0';
-+ case 'd': /* KERN_DEFAULT */
-+ lflags |= LOG_PREFIX;
-+ case 'c': /* KERN_CONT */
-+ break;
-+ }
-+ text_len -= end_of_header - text;
-+ text = (char *)end_of_header;
-+ }
-+ }
-+
-+ if (level == -1)
-+ level = default_message_loglevel;
-+
-+ if (dict)
-+ lflags |= LOG_PREFIX|LOG_NEWLINE;
-+
-+ if (!(lflags & LOG_NEWLINE)) {
-+ /*
-+ * Flush the conflicting buffer. An earlier newline was missing,
-+ * or another task also prints continuation lines.
-+ */
-+ if (cont.len && (lflags & LOG_PREFIX || cont.owner != current))
-+ cont_flush(LOG_NEWLINE);
-+
-+ /* buffer line if possible, otherwise store it right away */
-+ if (!cont_add(facility, level, text, text_len))
-+ log_store(facility, level, lflags | LOG_CONT, 0,
-+ dict, dictlen, text, text_len);
-+ } else {
-+ bool stored = false;
-+
-+ /*
-+ * If an earlier newline was missing and it was the same task,
-+ * either merge it with the current buffer and flush, or if
-+ * there was a race with interrupts (prefix == true) then just
-+ * flush it out and store this line separately.
-+ * If the preceding printk was from a different task and missed
-+ * a newline, flush and append the newline.
-+ */
-+ if (cont.len) {
-+ if (cont.owner == current && !(lflags & LOG_PREFIX))
-+ stored = cont_add(facility, level, text,
-+ text_len);
-+ cont_flush(LOG_NEWLINE);
-+ }
-+
-+ if (!stored)
-+ log_store(facility, level, lflags, 0,
-+ dict, dictlen, text, text_len);
-+ }
-+ printed_len += text_len;
-+
-+ /*
-+ * Try to acquire and then immediately release the console semaphore.
-+ * The release will print out buffers and wake up /dev/kmsg and syslog()
-+ * users.
-+ *
-+ * The console_trylock_for_printk() function will release 'logbuf_lock'
-+ * regardless of whether it actually gets the console semaphore or not.
-+ */
-+ if (console_trylock_for_printk(this_cpu))
-+ console_unlock();
-+
-+ lockdep_on();
-+out_restore_irqs:
-+ local_irq_restore(flags);
-+
-+ return printed_len;
-+}
-+EXPORT_SYMBOL(vprintk_emit);
-+
-+asmlinkage int vprintk(const char *fmt, va_list args)
-+{
-+ return vprintk_emit(0, -1, NULL, 0, fmt, args);
-+}
-+EXPORT_SYMBOL(vprintk);
-+
-+asmlinkage int printk_emit(int facility, int level,
-+ const char *dict, size_t dictlen,
-+ const char *fmt, ...)
-+{
-+ va_list args;
-+ int r;
-+
-+ va_start(args, fmt);
-+ r = vprintk_emit(facility, level, dict, dictlen, fmt, args);
-+ va_end(args);
-+
-+ return r;
-+}
-+EXPORT_SYMBOL(printk_emit);
-+
-+/**
-+ * printk - print a kernel message
-+ * @fmt: format string
-+ *
-+ * This is printk(). It can be called from any context. We want it to work.
-+ *
-+ * We try to grab the console_lock. If we succeed, it's easy - we log the
-+ * output and call the console drivers. If we fail to get the semaphore, we
-+ * place the output into the log buffer and return. The current holder of
-+ * the console_sem will notice the new output in console_unlock(); and will
-+ * send it to the consoles before releasing the lock.
-+ *
-+ * One effect of this deferred printing is that code which calls printk() and
-+ * then changes console_loglevel may break. This is because console_loglevel
-+ * is inspected when the actual printing occurs.
-+ *
-+ * See also:
-+ * printf(3)
-+ *
-+ * See the vsnprintf() documentation for format string extensions over C99.
-+ */
-+asmlinkage int printk(const char *fmt, ...)
-+{
-+ va_list args;
-+ int r;
-+
-+#ifdef CONFIG_KGDB_KDB
-+ if (unlikely(kdb_trap_printk)) {
-+ va_start(args, fmt);
-+ r = vkdb_printf(fmt, args);
-+ va_end(args);
-+ return r;
-+ }
-+#endif
-+ va_start(args, fmt);
-+ r = vprintk_emit(0, -1, NULL, 0, fmt, args);
-+ va_end(args);
-+
-+ return r;
-+}
-+EXPORT_SYMBOL(printk);
-+
-+#else /* CONFIG_PRINTK */
-+
-+#define LOG_LINE_MAX 0
-+#define PREFIX_MAX 0
-+#define LOG_LINE_MAX 0
-+static u64 syslog_seq;
-+static u32 syslog_idx;
-+static u64 console_seq;
-+static u32 console_idx;
-+static enum log_flags syslog_prev;
-+static u64 log_first_seq;
-+static u32 log_first_idx;
-+static u64 log_next_seq;
-+static enum log_flags console_prev;
-+static struct cont {
-+ size_t len;
-+ size_t cons;
-+ u8 level;
-+ bool flushed:1;
-+} cont;
-+static struct printk_log *log_from_idx(u32 idx) { return NULL; }
-+static u32 log_next(u32 idx) { return 0; }
-+static void call_console_drivers(int level, const char *text, size_t len) {}
-+static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
-+ bool syslog, char *buf, size_t size) { return 0; }
-+static size_t cont_print_text(char *text, size_t size) { return 0; }
-+
-+#endif /* CONFIG_PRINTK */
-+
-+#ifdef CONFIG_EARLY_PRINTK
-+struct console *early_console;
-+
-+void early_vprintk(const char *fmt, va_list ap)
-+{
-+ if (early_console) {
-+ char buf[512];
-+ int n = vscnprintf(buf, sizeof(buf), fmt, ap);
-+
-+ early_console->write(early_console, buf, n);
-+ }
-+}
-+
-+asmlinkage void early_printk(const char *fmt, ...)
-+{
-+ va_list ap;
-+
-+ va_start(ap, fmt);
-+ early_vprintk(fmt, ap);
-+ va_end(ap);
-+}
-+#endif
-+
-+static int __add_preferred_console(char *name, int idx, char *options,
-+ char *brl_options)
-+{
-+ struct console_cmdline *c;
-+ int i;
-+
-+ /*
-+ * See if this tty is not yet registered, and
-+ * if we have a slot free.
-+ */
-+ for (i = 0, c = console_cmdline;
-+ i < MAX_CMDLINECONSOLES && c->name[0];
-+ i++, c++) {
-+ if (strcmp(c->name, name) == 0 && c->index == idx) {
-+ if (!brl_options)
-+ selected_console = i;
-+ return 0;
-+ }
-+ }
-+ if (i == MAX_CMDLINECONSOLES)
-+ return -E2BIG;
-+ if (!brl_options)
-+ selected_console = i;
-+ strlcpy(c->name, name, sizeof(c->name));
-+ c->options = options;
-+ braille_set_options(c, brl_options);
-+
-+ c->index = idx;
-+ return 0;
-+}
-+/*
-+ * Set up a list of consoles. Called from init/main.c
-+ */
-+static int __init console_setup(char *str)
-+{
-+ char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for index */
-+ char *s, *options, *brl_options = NULL;
-+ int idx;
-+
-+ if (_braille_console_setup(&str, &brl_options))
-+ return 1;
-+
-+ /*
-+ * Decode str into name, index, options.
-+ */
-+ if (str[0] >= '0' && str[0] <= '9') {
-+ strcpy(buf, "ttyS");
-+ strncpy(buf + 4, str, sizeof(buf) - 5);
-+ } else {
-+ strncpy(buf, str, sizeof(buf) - 1);
-+ }
-+ buf[sizeof(buf) - 1] = 0;
-+ if ((options = strchr(str, ',')) != NULL)
-+ *(options++) = 0;
-+#ifdef __sparc__
-+ if (!strcmp(str, "ttya"))
-+ strcpy(buf, "ttyS0");
-+ if (!strcmp(str, "ttyb"))
-+ strcpy(buf, "ttyS1");
-+#endif
-+ for (s = buf; *s; s++)
-+ if ((*s >= '0' && *s <= '9') || *s == ',')
-+ break;
-+ idx = simple_strtoul(s, NULL, 10);
-+ *s = 0;
-+
-+ __add_preferred_console(buf, idx, options, brl_options);
-+ console_set_on_cmdline = 1;
-+ return 1;
-+}
-+__setup("console=", console_setup);
-+
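/*
 * Illustration (editor's sketch, not from the patch): how console_setup()
 * above splits a console= argument into name, index and options.  A
 * simplified user-space rendering; the real code also accepts a bare
 * numeric shorthand for ttyS and the sparc ttya/ttyb aliases.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char buf[64] = "ttyS0,115200n8";	/* as in console=ttyS0,115200n8 */
	char *options = strchr(buf, ',');
	char *s;
	int idx;

	if (options)
		*options++ = '\0';
	for (s = buf; *s; s++)
		if (*s >= '0' && *s <= '9')
			break;
	idx = (int)strtoul(s, NULL, 10);
	*s = '\0';
	printf("name=%s index=%d options=%s\n", buf, idx, options ? options : "");
	return 0;
}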
-+/**
-+ * add_preferred_console - add a device to the list of preferred consoles.
-+ * @name: device name
-+ * @idx: device index
-+ * @options: options for this console
-+ *
-+ * The last preferred console added will be used for kernel messages
-+ * and stdin/out/err for init. Normally this is used by console_setup
-+ * above to handle user-supplied console arguments; however it can also
-+ * be used by arch-specific code either to override the user or more
-+ * commonly to provide a default console (ie from PROM variables) when
-+ * the user has not supplied one.
-+ */
-+int add_preferred_console(char *name, int idx, char *options)
-+{
-+ return __add_preferred_console(name, idx, options, NULL);
-+}
-+
-+int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options)
-+{
-+ struct console_cmdline *c;
-+ int i;
-+
-+ for (i = 0, c = console_cmdline;
-+ i < MAX_CMDLINECONSOLES && c->name[0];
-+ i++, c++)
-+ if (strcmp(c->name, name) == 0 && c->index == idx) {
-+ strlcpy(c->name, name_new, sizeof(c->name));
-+ c->name[sizeof(c->name) - 1] = 0;
-+ c->options = options;
-+ c->index = idx_new;
-+ return i;
-+ }
-+ /* not found */
-+ return -1;
-+}
-+
-+bool console_suspend_enabled = 1;
-+EXPORT_SYMBOL(console_suspend_enabled);
-+
-+static int __init console_suspend_disable(char *str)
-+{
-+ console_suspend_enabled = 0;
-+ return 1;
-+}
-+__setup("no_console_suspend", console_suspend_disable);
-+module_param_named(console_suspend, console_suspend_enabled,
-+ bool, S_IRUGO | S_IWUSR);
-+MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
-+ " and hibernate operations");
-+
-+/**
-+ * suspend_console - suspend the console subsystem
-+ *
-+ * This disables printk() while we go into suspend states
-+ */
-+void suspend_console(void)
-+{
-+ if (!console_suspend_enabled)
-+ return;
-+ printk("Suspending console(s) (use no_console_suspend to debug)\n");
-+ console_lock();
-+ console_suspended = 1;
-+ up(&console_sem);
-+}
-+
-+void resume_console(void)
-+{
-+ if (!console_suspend_enabled)
-+ return;
-+ down(&console_sem);
-+ console_suspended = 0;
-+ console_unlock();
-+}
-+
-+/**
-+ * console_cpu_notify - print deferred console messages after CPU hotplug
-+ * @self: notifier struct
-+ * @action: CPU hotplug event
-+ * @hcpu: unused
-+ *
-+ * If printk() is called from a CPU that is not online yet, the messages
-+ * will be spooled but will not show up on the console. This function is
-+ * called when a new CPU comes online (or fails to come up), and ensures
-+ * that any such output gets printed.
-+ */
-+static int console_cpu_notify(struct notifier_block *self,
-+ unsigned long action, void *hcpu)
-+{
-+ switch (action) {
-+ case CPU_ONLINE:
-+ case CPU_DEAD:
-+ case CPU_DOWN_FAILED:
-+ case CPU_UP_CANCELED:
-+ console_lock();
-+ console_unlock();
-+ }
-+ return NOTIFY_OK;
-+}
-+
-+/**
-+ * console_lock - lock the console system for exclusive use.
-+ *
-+ * Acquires a lock which guarantees that the caller has
-+ * exclusive access to the console system and the console_drivers list.
-+ *
-+ * Can sleep, returns nothing.
-+ */
-+void console_lock(void)
-+{
-+ might_sleep();
-+
-+ down(&console_sem);
-+ if (console_suspended)
-+ return;
-+ console_locked = 1;
-+ console_may_schedule = 1;
-+ mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);
-+}
-+EXPORT_SYMBOL(console_lock);
-+
-+/**
-+ * console_trylock - try to lock the console system for exclusive use.
-+ *
-+ * Tries to acquire a lock which guarantees that the caller has
-+ * exclusive access to the console system and the console_drivers list.
-+ *
-+ * returns 1 on success, and 0 on failure to acquire the lock.
-+ */
-+int console_trylock(void)
-+{
-+ if (down_trylock(&console_sem))
-+ return 0;
-+ if (console_suspended) {
-+ up(&console_sem);
-+ return 0;
-+ }
-+ console_locked = 1;
-+ console_may_schedule = 0;
-+ mutex_acquire(&console_lock_dep_map, 0, 1, _RET_IP_);
-+ return 1;
-+}
-+EXPORT_SYMBOL(console_trylock);
-+
-+int is_console_locked(void)
-+{
-+ return console_locked;
-+}
-+
-+static void console_cont_flush(char *text, size_t size)
-+{
-+ unsigned long flags;
-+ size_t len;
-+
-+ raw_spin_lock_irqsave(&logbuf_lock, flags);
-+
-+ if (!cont.len)
-+ goto out;
-+
-+ /*
-+ * We still queue earlier records, likely because the console was
-+ * busy. The earlier ones need to be printed before this one, we
-+ * did not flush any fragment so far, so just let it queue up.
-+ */
-+ if (console_seq < log_next_seq && !cont.cons)
-+ goto out;
-+
-+ len = cont_print_text(text, size);
-+ raw_spin_unlock(&logbuf_lock);
-+ stop_critical_timings();
-+ call_console_drivers(cont.level, text, len);
-+ start_critical_timings();
-+ local_irq_restore(flags);
-+ return;
-+out:
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-+}
-+
-+/**
-+ * console_unlock - unlock the console system
-+ *
-+ * Releases the console_lock which the caller holds on the console system
-+ * and the console driver list.
-+ *
-+ * While the console_lock was held, console output may have been buffered
-+ * by printk(). If this is the case, console_unlock(); emits
-+ * the output prior to releasing the lock.
-+ *
-+ * If there is output waiting, we wake /dev/kmsg and syslog() users.
-+ *
-+ * console_unlock(); may be called from any context.
-+ */
-+void console_unlock(void)
-+{
-+ static char text[LOG_LINE_MAX + PREFIX_MAX];
-+ static u64 seen_seq;
-+ unsigned long flags;
-+ bool wake_klogd = false;
-+ bool retry;
-+
-+ if (console_suspended) {
-+ up(&console_sem);
-+ return;
-+ }
-+
-+ console_may_schedule = 0;
-+
-+ /* flush buffered message fragment immediately to console */
-+ console_cont_flush(text, sizeof(text));
-+again:
-+ for (;;) {
-+ struct printk_log *msg;
-+ size_t len;
-+ int level;
-+
-+ raw_spin_lock_irqsave(&logbuf_lock, flags);
-+ if (seen_seq != log_next_seq) {
-+ wake_klogd = true;
-+ seen_seq = log_next_seq;
-+ }
-+
-+ if (console_seq < log_first_seq) {
-+ /* messages are gone, move to first one */
-+ console_seq = log_first_seq;
-+ console_idx = log_first_idx;
-+ console_prev = 0;
-+ }
-+skip:
-+ if (console_seq == log_next_seq)
-+ break;
-+
-+ msg = log_from_idx(console_idx);
-+ if (msg->flags & LOG_NOCONS) {
-+ /*
-+ * Skip record we have buffered and already printed
-+ * directly to the console when we received it.
-+ */
-+ console_idx = log_next(console_idx);
-+ console_seq++;
-+ /*
-+ * We will get here again when we register a new
-+ * CON_PRINTBUFFER console. Clear the flag so we
-+ * will properly dump everything later.
-+ */
-+ msg->flags &= ~LOG_NOCONS;
-+ console_prev = msg->flags;
-+ goto skip;
-+ }
-+
-+ level = msg->level;
-+ len = msg_print_text(msg, console_prev, false,
-+ text, sizeof(text));
-+ console_idx = log_next(console_idx);
-+ console_seq++;
-+ console_prev = msg->flags;
-+ raw_spin_unlock(&logbuf_lock);
-+
-+ stop_critical_timings(); /* don't trace print latency */
-+ call_console_drivers(level, text, len);
-+ start_critical_timings();
-+ local_irq_restore(flags);
-+ }
-+ console_locked = 0;
-+ mutex_release(&console_lock_dep_map, 1, _RET_IP_);
-+
-+ /* Release the exclusive_console once it is used */
-+ if (unlikely(exclusive_console))
-+ exclusive_console = NULL;
-+
-+ raw_spin_unlock(&logbuf_lock);
-+
-+ up(&console_sem);
-+
-+ /*
-+ * Someone could have filled up the buffer again, so re-check if there's
-+ * something to flush. In case we cannot trylock the console_sem again,
-+ * there's a new owner and the console_unlock() from them will do the
-+ * flush, no worries.
-+ */
-+ raw_spin_lock(&logbuf_lock);
-+ retry = console_seq != log_next_seq;
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-+
-+ if (retry && console_trylock())
-+ goto again;
-+
-+ if (wake_klogd)
-+ wake_up_klogd();
-+}
-+EXPORT_SYMBOL(console_unlock);
-+
-+/**
-+ * console_conditional_schedule - yield the CPU if required
-+ *
-+ * If the console code is currently allowed to sleep, and
-+ * if this CPU should yield the CPU to another task, do
-+ * so here.
-+ *
-+ * Must be called within console_lock();.
-+ */
-+void __sched console_conditional_schedule(void)
-+{
-+ if (console_may_schedule)
-+ cond_resched();
-+}
-+EXPORT_SYMBOL(console_conditional_schedule);
-+
-+void console_unblank(void)
-+{
-+ struct console *c;
-+
-+ /*
-+ * console_unblank can no longer be called in interrupt context unless
-+	 * oops_in_progress is set to 1.
-+ */
-+ if (oops_in_progress) {
-+ if (down_trylock(&console_sem) != 0)
-+ return;
-+ } else
-+ console_lock();
-+
-+ console_locked = 1;
-+ console_may_schedule = 0;
-+ for_each_console(c)
-+ if ((c->flags & CON_ENABLED) && c->unblank)
-+ c->unblank();
-+ console_unlock();
-+}
-+
-+/*
-+ * Return the console tty driver structure and its associated index
-+ */
-+struct tty_driver *console_device(int *index)
-+{
-+ struct console *c;
-+ struct tty_driver *driver = NULL;
-+
-+ console_lock();
-+ for_each_console(c) {
-+ if (!c->device)
-+ continue;
-+ driver = c->device(c, index);
-+ if (driver)
-+ break;
-+ }
-+ console_unlock();
-+ return driver;
-+}
-+
-+/*
-+ * Prevent further output on the passed console device so that (for example)
-+ * serial drivers can disable console output before suspending a port, and can
-+ * re-enable output afterwards.
-+ */
-+void console_stop(struct console *console)
-+{
-+ console_lock();
-+ console->flags &= ~CON_ENABLED;
-+ console_unlock();
-+}
-+EXPORT_SYMBOL(console_stop);
-+
-+void console_start(struct console *console)
-+{
-+ console_lock();
-+ console->flags |= CON_ENABLED;
-+ console_unlock();
-+}
-+EXPORT_SYMBOL(console_start);
-+
-+static int __read_mostly keep_bootcon;
-+
-+static int __init keep_bootcon_setup(char *str)
-+{
-+ keep_bootcon = 1;
-+ pr_info("debug: skip boot console de-registration.\n");
-+
-+ return 0;
-+}
-+
-+early_param("keep_bootcon", keep_bootcon_setup);
-+
-+/*
-+ * The console driver calls this routine during kernel initialization
-+ * to register the console printing procedure with printk() and to
-+ * print any messages that were printed by the kernel before the
-+ * console driver was initialized.
-+ *
-+ * This can happen pretty early during the boot process (because of
-+ * early_printk) - sometimes before setup_arch() completes - be careful
-+ * of what kernel features are used - they may not be initialised yet.
-+ *
-+ * There are two types of consoles - bootconsoles (early_printk) and
-+ * "real" consoles (everything which is not a bootconsole) which are
-+ * handled differently.
-+ * - Any number of bootconsoles can be registered at any time.
-+ * - As soon as a "real" console is registered, all bootconsoles
-+ * will be unregistered automatically.
-+ * - Once a "real" console is registered, any attempt to register a
-+ *   bootconsole will be rejected.
-+ */
-+void register_console(struct console *newcon)
-+{
-+ int i;
-+ unsigned long flags;
-+ struct console *bcon = NULL;
-+ struct console_cmdline *c;
-+
-+ if (console_drivers)
-+ for_each_console(bcon)
-+ if (WARN(bcon == newcon,
-+ "console '%s%d' already registered\n",
-+ bcon->name, bcon->index))
-+ return;
-+
-+ /*
-+ * before we register a new CON_BOOT console, make sure we don't
-+ * already have a valid console
-+ */
-+ if (console_drivers && newcon->flags & CON_BOOT) {
-+ /* find the last or real console */
-+ for_each_console(bcon) {
-+ if (!(bcon->flags & CON_BOOT)) {
-+ pr_info("Too late to register bootconsole %s%d\n",
-+ newcon->name, newcon->index);
-+ return;
-+ }
-+ }
-+ }
-+
-+ if (console_drivers && console_drivers->flags & CON_BOOT)
-+ bcon = console_drivers;
-+
-+ if (preferred_console < 0 || bcon || !console_drivers)
-+ preferred_console = selected_console;
-+
-+ if (newcon->early_setup)
-+ newcon->early_setup();
-+
-+ /*
-+ * See if we want to use this console driver. If we
-+ * didn't select a console we take the first one
-+ * that registers here.
-+ */
-+ if (preferred_console < 0) {
-+ if (newcon->index < 0)
-+ newcon->index = 0;
-+ if (newcon->setup == NULL ||
-+ newcon->setup(newcon, NULL) == 0) {
-+ newcon->flags |= CON_ENABLED;
-+ if (newcon->device) {
-+ newcon->flags |= CON_CONSDEV;
-+ preferred_console = 0;
-+ }
-+ }
-+ }
-+
-+ /*
-+ * See if this console matches one we selected on
-+ * the command line.
-+ */
-+ for (i = 0, c = console_cmdline;
-+ i < MAX_CMDLINECONSOLES && c->name[0];
-+ i++, c++) {
-+ BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
-+ if (strcmp(c->name, newcon->name) != 0)
-+ continue;
-+ if (newcon->index >= 0 &&
-+ newcon->index != c->index)
-+ continue;
-+ if (newcon->index < 0)
-+ newcon->index = c->index;
-+
-+ if (_braille_register_console(newcon, c))
-+ return;
-+
-+ if (newcon->setup &&
-+ newcon->setup(newcon, console_cmdline[i].options) != 0)
-+ break;
-+ newcon->flags |= CON_ENABLED;
-+ newcon->index = c->index;
-+ if (i == selected_console) {
-+ newcon->flags |= CON_CONSDEV;
-+ preferred_console = selected_console;
-+ }
-+ break;
-+ }
-+
-+ if (!(newcon->flags & CON_ENABLED))
-+ return;
-+
-+ /*
-+ * If we have a bootconsole, and are switching to a real console,
-+ * don't print everything out again, since when the boot console, and
-+	 * don't print everything out again, since when the boot console and
-+ * see the beginning boot messages twice
-+ */
-+ if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV))
-+ newcon->flags &= ~CON_PRINTBUFFER;
-+
-+ /*
-+ * Put this console in the list - keep the
-+ * preferred driver at the head of the list.
-+ */
-+ console_lock();
-+ if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
-+ newcon->next = console_drivers;
-+ console_drivers = newcon;
-+ if (newcon->next)
-+ newcon->next->flags &= ~CON_CONSDEV;
-+ } else {
-+ newcon->next = console_drivers->next;
-+ console_drivers->next = newcon;
-+ }
-+ if (newcon->flags & CON_PRINTBUFFER) {
-+ /*
-+ * console_unlock(); will print out the buffered messages
-+ * for us.
-+ */
-+ raw_spin_lock_irqsave(&logbuf_lock, flags);
-+ console_seq = syslog_seq;
-+ console_idx = syslog_idx;
-+ console_prev = syslog_prev;
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-+ /*
-+ * We're about to replay the log buffer. Only do this to the
-+ * just-registered console to avoid excessive message spam to
-+ * the already-registered consoles.
-+ */
-+ exclusive_console = newcon;
-+ }
-+ console_unlock();
-+ console_sysfs_notify();
-+
-+ /*
-+ * By unregistering the bootconsoles after we enable the real console
-+ * we get the "console xxx enabled" message on all the consoles -
-+ * boot consoles, real consoles, etc - this is to ensure that end
-+ * users know there might be something in the kernel's log buffer that
-+ * went to the bootconsole (that they do not see on the real console)
-+ */
-+ pr_info("%sconsole [%s%d] enabled\n",
-+ (newcon->flags & CON_BOOT) ? "boot" : "" ,
-+ newcon->name, newcon->index);
-+ if (bcon &&
-+ ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
-+ !keep_bootcon) {
-+ /* We need to iterate through all boot consoles, to make
-+ * sure we print everything out, before we unregister them.
-+ */
-+ for_each_console(bcon)
-+ if (bcon->flags & CON_BOOT)
-+ unregister_console(bcon);
-+ }
-+}
-+EXPORT_SYMBOL(register_console);
-+
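/*
 * Illustration (editor's sketch, not from the patch): the minimum a driver
 * hands to register_console() above.  Kernel-context sketch with
 * hypothetical names; the write hook here only discards, a real driver
 * would push the bytes to its hardware.
 */
#include <linux/console.h>

static void examplecon_write(struct console *con, const char *s, unsigned int count)
{
	/* copy s[0..count-1] to the output device here */
}

static struct console examplecon = {
	.name	= "examplecon",
	.write	= examplecon_write,
	.flags	= CON_PRINTBUFFER,	/* replay the buffered log once registered */
	.index	= -1,			/* let the core pick or match the index */
};

/* called from the driver's init path: register_console(&examplecon); */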
-+int unregister_console(struct console *console)
-+{
-+ struct console *a, *b;
-+ int res;
-+
-+ pr_info("%sconsole [%s%d] disabled\n",
-+ (console->flags & CON_BOOT) ? "boot" : "" ,
-+ console->name, console->index);
-+
-+ res = _braille_unregister_console(console);
-+ if (res)
-+ return res;
-+
-+ res = 1;
-+ console_lock();
-+ if (console_drivers == console) {
-+ console_drivers=console->next;
-+ res = 0;
-+ } else if (console_drivers) {
-+ for (a=console_drivers->next, b=console_drivers ;
-+ a; b=a, a=b->next) {
-+ if (a == console) {
-+ b->next = a->next;
-+ res = 0;
-+ break;
-+ }
-+ }
-+ }
-+
-+ /*
-+ * If this isn't the last console and it has CON_CONSDEV set, we
-+ * need to set it on the next preferred console.
-+ */
-+ if (console_drivers != NULL && console->flags & CON_CONSDEV)
-+ console_drivers->flags |= CON_CONSDEV;
-+
-+ console_unlock();
-+ console_sysfs_notify();
-+ return res;
-+}
-+EXPORT_SYMBOL(unregister_console);
-+
-+static int __init printk_late_init(void)
-+{
-+ struct console *con;
-+
-+ for_each_console(con) {
-+ if (!keep_bootcon && con->flags & CON_BOOT) {
-+ unregister_console(con);
-+ }
-+ }
-+ hotcpu_notifier(console_cpu_notify, 0);
-+ return 0;
-+}
-+late_initcall(printk_late_init);
-+
-+#if defined CONFIG_PRINTK
-+/*
-+ * Delayed printk version, for scheduler-internal messages:
-+ */
-+#define PRINTK_BUF_SIZE 512
-+
-+#define PRINTK_PENDING_WAKEUP 0x01
-+#define PRINTK_PENDING_SCHED 0x02
-+
-+static DEFINE_PER_CPU(int, printk_pending);
-+static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
-+
-+static void wake_up_klogd_work_func(struct irq_work *irq_work)
-+{
-+ int pending = __this_cpu_xchg(printk_pending, 0);
-+
-+ if (pending & PRINTK_PENDING_SCHED) {
-+ char *buf = __get_cpu_var(printk_sched_buf);
-+ pr_warn("[sched_delayed] %s", buf);
-+ }
-+
-+ if (pending & PRINTK_PENDING_WAKEUP)
-+ wake_up_interruptible(&log_wait);
-+}
-+
-+static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
-+ .func = wake_up_klogd_work_func,
-+ .flags = IRQ_WORK_LAZY,
-+};
-+
-+void wake_up_klogd(void)
-+{
-+ preempt_disable();
-+ if (waitqueue_active(&log_wait)) {
-+ this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
-+ irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
-+ }
-+ preempt_enable();
-+}
-+
-+int printk_deferred(const char *fmt, ...)
-+{
-+ unsigned long flags;
-+ va_list args;
-+ char *buf;
-+ int r;
-+
-+ local_irq_save(flags);
-+ buf = __get_cpu_var(printk_sched_buf);
-+
-+ va_start(args, fmt);
-+ r = vsnprintf(buf, PRINTK_BUF_SIZE, fmt, args);
-+ va_end(args);
-+
-+ __this_cpu_or(printk_pending, PRINTK_PENDING_SCHED);
-+ irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
-+ local_irq_restore(flags);
-+
-+ return r;
-+}
-+
-+/*
-+ * printk rate limiting, lifted from the networking subsystem.
-+ *
-+ * This enforces a rate limit: not more than 10 kernel messages
-+ * every 5s to make a denial-of-service attack impossible.
-+ */
-+DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
-+
-+int __printk_ratelimit(const char *func)
-+{
-+ return ___ratelimit(&printk_ratelimit_state, func);
-+}
-+EXPORT_SYMBOL(__printk_ratelimit);
-+
-+/**
-+ * printk_timed_ratelimit - caller-controlled printk ratelimiting
-+ * @caller_jiffies: pointer to caller's state
-+ * @interval_msecs: minimum interval between prints
-+ *
-+ * printk_timed_ratelimit() returns true if more than @interval_msecs
-+ * milliseconds have elapsed since the last time printk_timed_ratelimit()
-+ * returned true.
-+ */
-+bool printk_timed_ratelimit(unsigned long *caller_jiffies,
-+ unsigned int interval_msecs)
-+{
-+ if (*caller_jiffies == 0
-+ || !time_in_range(jiffies, *caller_jiffies,
-+ *caller_jiffies
-+ + msecs_to_jiffies(interval_msecs))) {
-+ *caller_jiffies = jiffies;
-+ return true;
-+ }
-+ return false;
-+}
-+EXPORT_SYMBOL(printk_timed_ratelimit);
-+
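/*
 * Illustration (editor's sketch, not from the patch): the usual caller
 * pattern for printk_timed_ratelimit() above -- at most one message per
 * second from this call site, with the state held by the caller.
 * Kernel-context sketch, hypothetical function name.
 */
#include <linux/kernel.h>

static void report_fifo_overrun(void)
{
	static unsigned long last_warned;	/* 0 means "never warned yet" */

	if (printk_timed_ratelimit(&last_warned, 1000))
		printk(KERN_WARNING "example: FIFO overrun, data dropped\n");
}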
-+static DEFINE_SPINLOCK(dump_list_lock);
-+static LIST_HEAD(dump_list);
-+
-+/**
-+ * kmsg_dump_register - register a kernel log dumper.
-+ * @dumper: pointer to the kmsg_dumper structure
-+ *
-+ * Adds a kernel log dumper to the system. The dump callback in the
-+ * structure will be called when the kernel oopses or panics and must be
-+ * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
-+ */
-+int kmsg_dump_register(struct kmsg_dumper *dumper)
-+{
-+ unsigned long flags;
-+ int err = -EBUSY;
-+
-+ /* The dump callback needs to be set */
-+ if (!dumper->dump)
-+ return -EINVAL;
-+
-+ spin_lock_irqsave(&dump_list_lock, flags);
-+ /* Don't allow registering multiple times */
-+ if (!dumper->registered) {
-+ dumper->registered = 1;
-+ list_add_tail_rcu(&dumper->list, &dump_list);
-+ err = 0;
-+ }
-+ spin_unlock_irqrestore(&dump_list_lock, flags);
-+
-+ return err;
-+}
-+EXPORT_SYMBOL_GPL(kmsg_dump_register);
-+
-+/**
-+ * kmsg_dump_unregister - unregister a kmsg dumper.
-+ * @dumper: pointer to the kmsg_dumper structure
-+ *
-+ * Removes a dump device from the system. Returns zero on success and
-+ * %-EINVAL otherwise.
-+ */
-+int kmsg_dump_unregister(struct kmsg_dumper *dumper)
-+{
-+ unsigned long flags;
-+ int err = -EINVAL;
-+
-+ spin_lock_irqsave(&dump_list_lock, flags);
-+ if (dumper->registered) {
-+ dumper->registered = 0;
-+ list_del_rcu(&dumper->list);
-+ err = 0;
-+ }
-+ spin_unlock_irqrestore(&dump_list_lock, flags);
-+ synchronize_rcu();
-+
-+ return err;
-+}
-+EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
-+
-+static bool always_kmsg_dump;
-+module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
-+
-+/**
-+ * kmsg_dump - dump kernel log to kernel message dumpers.
-+ * @reason: the reason (oops, panic etc) for dumping
-+ *
-+ * Call each of the registered dumper's dump() callback, which can
-+ * retrieve the kmsg records with kmsg_dump_get_line() or
-+ * kmsg_dump_get_buffer().
-+ */
-+void kmsg_dump(enum kmsg_dump_reason reason)
-+{
-+ struct kmsg_dumper *dumper;
-+ unsigned long flags;
-+
-+ if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
-+ return;
-+
-+ rcu_read_lock();
-+ list_for_each_entry_rcu(dumper, &dump_list, list) {
-+ if (dumper->max_reason && reason > dumper->max_reason)
-+ continue;
-+
-+ /* initialize iterator with data about the stored records */
-+ dumper->active = true;
-+
-+ raw_spin_lock_irqsave(&logbuf_lock, flags);
-+ dumper->cur_seq = clear_seq;
-+ dumper->cur_idx = clear_idx;
-+ dumper->next_seq = log_next_seq;
-+ dumper->next_idx = log_next_idx;
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-+
-+ /* invoke dumper which will iterate over records */
-+ dumper->dump(dumper, reason);
-+
-+ /* reset iterator */
-+ dumper->active = false;
-+ }
-+ rcu_read_unlock();
-+}
-+
-+/**
-+ * kmsg_dump_get_line_nolock - retrieve one kmsg log line (unlocked version)
-+ * @dumper: registered kmsg dumper
-+ * @syslog: include the "<4>" prefixes
-+ * @line: buffer to copy the line to
-+ * @size: maximum size of the buffer
-+ * @len: length of line placed into buffer
-+ *
-+ * Start at the beginning of the kmsg buffer, with the oldest kmsg
-+ * record, and copy one record into the provided buffer.
-+ *
-+ * Consecutive calls will return the next available record moving
-+ * towards the end of the buffer with the youngest messages.
-+ *
-+ * A return value of FALSE indicates that there are no more records to
-+ * read.
-+ *
-+ * The function is similar to kmsg_dump_get_line(), but grabs no locks.
-+ */
-+bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
-+ char *line, size_t size, size_t *len)
-+{
-+ struct printk_log *msg;
-+ size_t l = 0;
-+ bool ret = false;
-+
-+ if (!dumper->active)
-+ goto out;
-+
-+ if (dumper->cur_seq < log_first_seq) {
-+ /* messages are gone, move to first available one */
-+ dumper->cur_seq = log_first_seq;
-+ dumper->cur_idx = log_first_idx;
-+ }
-+
-+ /* last entry */
-+ if (dumper->cur_seq >= log_next_seq)
-+ goto out;
-+
-+ msg = log_from_idx(dumper->cur_idx);
-+ l = msg_print_text(msg, 0, syslog, line, size);
-+
-+ dumper->cur_idx = log_next(dumper->cur_idx);
-+ dumper->cur_seq++;
-+ ret = true;
-+out:
-+ if (len)
-+ *len = l;
-+ return ret;
-+}
-+
-+/**
-+ * kmsg_dump_get_line - retrieve one kmsg log line
-+ * @dumper: registered kmsg dumper
-+ * @syslog: include the "<4>" prefixes
-+ * @line: buffer to copy the line to
-+ * @size: maximum size of the buffer
-+ * @len: length of line placed into buffer
-+ *
-+ * Start at the beginning of the kmsg buffer, with the oldest kmsg
-+ * record, and copy one record into the provided buffer.
-+ *
-+ * Consecutive calls will return the next available record moving
-+ * towards the end of the buffer with the youngest messages.
-+ *
-+ * A return value of FALSE indicates that there are no more records to
-+ * read.
-+ */
-+bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
-+ char *line, size_t size, size_t *len)
-+{
-+ unsigned long flags;
-+ bool ret;
-+
-+ raw_spin_lock_irqsave(&logbuf_lock, flags);
-+ ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
-+
-+/**
-+ * kmsg_dump_get_buffer - copy kmsg log lines
-+ * @dumper: registered kmsg dumper
-+ * @syslog: include the "<4>" prefixes
-+ * @buf: buffer to copy the line to
-+ * @size: maximum size of the buffer
-+ * @len: length of line placed into buffer
-+ *
-+ * Start at the end of the kmsg buffer and fill the provided buffer
-+ * with as many of the *youngest* kmsg records as fit into it.
-+ * If the buffer is large enough, all available kmsg records will be
-+ * copied with a single call.
-+ *
-+ * Consecutive calls will fill the buffer with the next block of
-+ * available older records, not including the earlier retrieved ones.
-+ *
-+ * A return value of FALSE indicates that there are no more records to
-+ * read.
-+ */
-+bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
-+ char *buf, size_t size, size_t *len)
-+{
-+ unsigned long flags;
-+ u64 seq;
-+ u32 idx;
-+ u64 next_seq;
-+ u32 next_idx;
-+ enum log_flags prev;
-+ size_t l = 0;
-+ bool ret = false;
-+
-+ if (!dumper->active)
-+ goto out;
-+
-+ raw_spin_lock_irqsave(&logbuf_lock, flags);
-+ if (dumper->cur_seq < log_first_seq) {
-+ /* messages are gone, move to first available one */
-+ dumper->cur_seq = log_first_seq;
-+ dumper->cur_idx = log_first_idx;
-+ }
-+
-+ /* last entry */
-+ if (dumper->cur_seq >= dumper->next_seq) {
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-+ goto out;
-+ }
-+
-+ /* calculate length of entire buffer */
-+ seq = dumper->cur_seq;
-+ idx = dumper->cur_idx;
-+ prev = 0;
-+ while (seq < dumper->next_seq) {
-+ struct printk_log *msg = log_from_idx(idx);
-+
-+ l += msg_print_text(msg, prev, true, NULL, 0);
-+ idx = log_next(idx);
-+ seq++;
-+ prev = msg->flags;
-+ }
-+
-+ /* move first record forward until length fits into the buffer */
-+ seq = dumper->cur_seq;
-+ idx = dumper->cur_idx;
-+ prev = 0;
-+ while (l > size && seq < dumper->next_seq) {
-+ struct printk_log *msg = log_from_idx(idx);
-+
-+ l -= msg_print_text(msg, prev, true, NULL, 0);
-+ idx = log_next(idx);
-+ seq++;
-+ prev = msg->flags;
-+ }
-+
-+	/* last message in next iteration */
-+ next_seq = seq;
-+ next_idx = idx;
-+
-+ l = 0;
-+ while (seq < dumper->next_seq) {
-+ struct printk_log *msg = log_from_idx(idx);
-+
-+ l += msg_print_text(msg, prev, syslog, buf + l, size - l);
-+ idx = log_next(idx);
-+ seq++;
-+ prev = msg->flags;
-+ }
-+
-+ dumper->next_seq = next_seq;
-+ dumper->next_idx = next_idx;
-+ ret = true;
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-+out:
-+ if (len)
-+ *len = l;
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
-+
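/*
 * Illustration (editor's sketch, not from the patch): a minimal kmsg
 * dumper driving the kmsg_dump_get_line() iterator above.  Kernel-context
 * sketch with hypothetical names; a real dumper would write each line to
 * persistent storage instead of just counting bytes.
 */
#include <linux/kmsg_dump.h>
#include <linux/printk.h>

static void example_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
{
	static char line[1024];
	size_t len, total = 0;

	while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
		total += len;	/* stand-in for writing 'line' somewhere safe */

	pr_info("example dumper: captured %zu bytes (reason %d)\n", total, reason);
}

static struct kmsg_dumper example_dumper = {
	.dump = example_dump,
};

/* registered from module init: kmsg_dump_register(&example_dumper); */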
-+/**
-+ * kmsg_dump_rewind_nolock - reset the iterator (unlocked version)
-+ * @dumper: registered kmsg dumper
-+ *
-+ * Reset the dumper's iterator so that kmsg_dump_get_line() and
-+ * kmsg_dump_get_buffer() can be called again and used multiple
-+ * times within the same dumper.dump() callback.
-+ *
-+ * The function is similar to kmsg_dump_rewind(), but grabs no locks.
-+ */
-+void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
-+{
-+ dumper->cur_seq = clear_seq;
-+ dumper->cur_idx = clear_idx;
-+ dumper->next_seq = log_next_seq;
-+ dumper->next_idx = log_next_idx;
-+}
-+
-+/**
-+ * kmsg_dump_rewind - reset the iterator
-+ * @dumper: registered kmsg dumper
-+ *
-+ * Reset the dumper's iterator so that kmsg_dump_get_line() and
-+ * kmsg_dump_get_buffer() can be called again and used multiple
-+ * times within the same dumper.dump() callback.
-+ */
-+void kmsg_dump_rewind(struct kmsg_dumper *dumper)
-+{
-+ unsigned long flags;
-+
-+ raw_spin_lock_irqsave(&logbuf_lock, flags);
-+ kmsg_dump_rewind_nolock(dumper);
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-+}
-+EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
-+
-+static char dump_stack_arch_desc_str[128];
-+
-+/**
-+ * dump_stack_set_arch_desc - set arch-specific str to show with task dumps
-+ * @fmt: printf-style format string
-+ * @...: arguments for the format string
-+ *
-+ * The configured string will be printed right after utsname during task
-+ * dumps. Usually used to add arch-specific system identifiers. If an
-+ * arch wants to make use of such an ID string, it should initialize this
-+ * as soon as possible during boot.
-+ */
-+void __init dump_stack_set_arch_desc(const char *fmt, ...)
-+{
-+ va_list args;
-+
-+ va_start(args, fmt);
-+ vsnprintf(dump_stack_arch_desc_str, sizeof(dump_stack_arch_desc_str),
-+ fmt, args);
-+ va_end(args);
-+}
-+
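/*
 * Illustration (editor's sketch, not from the patch): typical early-boot
 * use of dump_stack_set_arch_desc() above -- arch or platform code records
 * a board identifier once, and every later task dump prints it as the
 * "Hardware name:" line.  Kernel-context sketch, hypothetical strings.
 */
#include <linux/init.h>
#include <linux/printk.h>

static void __init example_setup_arch(void)
{
	dump_stack_set_arch_desc("ExampleBoard rev %u (ACME SoC)", 2);
}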
-+/**
-+ * dump_stack_print_info - print generic debug info for dump_stack()
-+ * @log_lvl: log level
-+ *
-+ * Arch-specific dump_stack() implementations can use this function to
-+ * print out the same debug information as the generic dump_stack().
-+ */
-+void dump_stack_print_info(const char *log_lvl)
-+{
-+ printk("%sCPU: %d PID: %d Comm: %.20s %s %s %.*s\n",
-+ log_lvl, raw_smp_processor_id(), current->pid, current->comm,
-+ print_tainted(), init_utsname()->release,
-+ (int)strcspn(init_utsname()->version, " "),
-+ init_utsname()->version);
-+
-+ if (dump_stack_arch_desc_str[0] != '\0')
-+ printk("%sHardware name: %s\n",
-+ log_lvl, dump_stack_arch_desc_str);
-+
-+ print_worker_info(log_lvl, current);
-+}
-+
-+/**
-+ * show_regs_print_info - print generic debug info for show_regs()
-+ * @log_lvl: log level
-+ *
-+ * show_regs() implementations can use this function to print out generic
-+ * debug information.
-+ */
-+void show_regs_print_info(const char *log_lvl)
-+{
-+ dump_stack_print_info(log_lvl);
-+
-+ printk("%stask: %p ti: %p task.ti: %p\n",
-+ log_lvl, current, current_thread_info(),
-+ task_thread_info(current));
-+}
-+
-+#endif
-diff --git a/kernel/profile.c b/kernel/profile.c
-index 76b8e77..a2930e8 100644
---- a/kernel/profile.c
-+++ b/kernel/profile.c
-@@ -39,7 +39,7 @@ struct profile_hit {
- /* Oprofile timer tick hook */
- static int (*timer_hook)(struct pt_regs *) __read_mostly;
-
--static atomic_t *prof_buffer;
-+static atomic_unchecked_t *prof_buffer;
- static unsigned long prof_len, prof_shift;
-
- int prof_on __read_mostly;
-@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
- hits[i].pc = 0;
- continue;
- }
-- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
-+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
- hits[i].hits = hits[i].pc = 0;
- }
- }
-@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
- * Add the current hit(s) and flush the write-queue out
- * to the global buffer:
- */
-- atomic_add(nr_hits, &prof_buffer[pc]);
-+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
- for (i = 0; i < NR_PROFILE_HIT; ++i) {
-- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
-+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
- hits[i].pc = hits[i].hits = 0;
- }
- out:
-@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
- {
- unsigned long pc;
- pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
-- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
-+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
- }
- #endif /* !CONFIG_SMP */
-
-@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
- return -EFAULT;
- buf++; p++; count--; read++;
- }
-- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
-+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
- if (copy_to_user(buf, (void *)pnt, count))
- return -EFAULT;
- read += count;
-@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
- }
- #endif
- profile_discard_flip_buffers();
-- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
-+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
- return count;
- }
-
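/*
 * Editor's sketch (illustration only): the profile.c hunk above follows
 * the usual PaX/grsecurity REFCOUNT pattern -- plain atomic_t arithmetic
 * is instrumented to trap on overflow, so pure statistics counters that
 * may legitimately wrap, such as the prof_buffer hit counts, are switched
 * to the unchecked variants.  The counter name below is hypothetical and
 * this only builds with the grsecurity patch applied.
 */
#include <linux/atomic.h>

static atomic_unchecked_t example_hits = ATOMIC_INIT(0);

static void example_record_hit(void)
{
	atomic_inc_unchecked(&example_hits);	/* wrapping is harmless, no overflow trap */
}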
-diff --git a/kernel/ptrace.c b/kernel/ptrace.c
-index f79803a..0dcc1be 100644
---- a/kernel/ptrace.c
-+++ b/kernel/ptrace.c
-@@ -211,7 +211,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
- return ret;
- }
-
--int __ptrace_may_access(struct task_struct *task, unsigned int mode)
-+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
-+ unsigned int log)
- {
- const struct cred *cred = current_cred(), *tcred;
-
-@@ -237,7 +238,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
- cred->gid == tcred->sgid &&
- cred->gid == tcred->gid))
- goto ok;
-- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
-+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
-+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
- goto ok;
- rcu_read_unlock();
- return -EPERM;
-@@ -247,7 +249,8 @@ ok:
- if (task->mm)
- dumpable = get_dumpable(task->mm);
- if (dumpable != SUID_DUMP_USER &&
-- !task_ns_capable(task, CAP_SYS_PTRACE))
-+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
-+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
- return -EPERM;
-
- return security_ptrace_access_check(task, mode);
-@@ -257,7 +260,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
- {
- int err;
- task_lock(task);
-- err = __ptrace_may_access(task, mode);
-+ err = __ptrace_may_access(task, mode, 0);
-+ task_unlock(task);
-+ return !err;
-+}
-+
-+bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode)
-+{
-+ return __ptrace_may_access(task, mode, 0);
-+}
-+
-+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
-+{
-+ int err;
-+ task_lock(task);
-+ err = __ptrace_may_access(task, mode, 1);
- task_unlock(task);
- return !err;
- }
-@@ -302,7 +319,7 @@ static int ptrace_attach(struct task_struct *task, long request,
- goto out;
-
- task_lock(task);
-- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
-+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
- task_unlock(task);
- if (retval)
- goto unlock_creds;
-@@ -317,7 +334,7 @@ static int ptrace_attach(struct task_struct *task, long request,
- task->ptrace = PT_PTRACED;
- if (seize)
- task->ptrace |= PT_SEIZED;
-- if (task_ns_capable(task, CAP_SYS_PTRACE))
-+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
- task->ptrace |= PT_PTRACE_CAP;
-
- __ptrace_link(task, current);
-@@ -523,7 +540,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
- break;
- return -EIO;
- }
-- if (copy_to_user(dst, buf, retval))
-+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
- return -EFAULT;
- copied += retval;
- src += retval;
-@@ -583,6 +600,9 @@ static int ptrace_setoptions(struct task_struct *child, unsigned long data)
- if (data & PTRACE_O_TRACEEXIT)
- child->ptrace |= PT_TRACE_EXIT;
-
-+ if (data & PTRACE_O_TRACESECCOMP)
-+ child->ptrace |= PT_TRACE_SECCOMP;
-+
- return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
- }
-
-@@ -720,7 +740,7 @@ int ptrace_request(struct task_struct *child, long request,
- bool seized = child->ptrace & PT_SEIZED;
- int ret = -EIO;
- siginfo_t siginfo, *si;
-- void __user *datavp = (void __user *) data;
-+ void __user *datavp = (__force void __user *) data;
- unsigned long __user *datalp = datavp;
- unsigned long flags;
-
-@@ -922,14 +942,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
- goto out;
- }
-
-+ if (gr_handle_ptrace(child, request)) {
-+ ret = -EPERM;
-+ goto out_put_task_struct;
-+ }
-+
- if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
- ret = ptrace_attach(child, request, data);
- /*
- * Some architectures need to do book-keeping after
- * a ptrace attach.
- */
-- if (!ret)
-+ if (!ret) {
- arch_ptrace_attach(child);
-+ gr_audit_ptrace(child);
-+ }
- goto out_put_task_struct;
- }
-
-@@ -957,7 +984,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
- copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
- if (copied != sizeof(tmp))
- return -EIO;
-- return put_user(tmp, (unsigned long __user *)data);
-+ return put_user(tmp, (__force unsigned long __user *)data);
- }
-
- int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
-@@ -1051,7 +1078,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
- }
-
- asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
-- compat_long_t addr, compat_long_t data)
-+ compat_ulong_t addr, compat_ulong_t data)
- {
- struct task_struct *child;
- long ret;
-@@ -1067,14 +1094,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
- goto out;
- }
-
-+ if (gr_handle_ptrace(child, request)) {
-+ ret = -EPERM;
-+ goto out_put_task_struct;
-+ }
-+
- if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
- ret = ptrace_attach(child, request, data);
- /*
- * Some architectures need to do book-keeping after
- * a ptrace attach.
- */
-- if (!ret)
-+ if (!ret) {
- arch_ptrace_attach(child);
-+ gr_audit_ptrace(child);
-+ }
- goto out_put_task_struct;
- }
-
-diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
-index 636af6d..90b936f 100644
---- a/kernel/rcutiny.c
-+++ b/kernel/rcutiny.c
-@@ -46,7 +46,7 @@
- struct rcu_ctrlblk;
- static void invoke_rcu_callbacks(void);
- static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
--static void rcu_process_callbacks(struct softirq_action *unused);
-+static void rcu_process_callbacks(void);
- static void __call_rcu(struct rcu_head *head,
- void (*func)(struct rcu_head *rcu),
- struct rcu_ctrlblk *rcp);
-@@ -186,7 +186,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
- RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
- }
-
--static void rcu_process_callbacks(struct softirq_action *unused)
-+static __latent_entropy void rcu_process_callbacks(void)
- {
- __rcu_process_callbacks(&rcu_sched_ctrlblk);
- __rcu_process_callbacks(&rcu_bh_ctrlblk);
-diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
-index 2b0484a..07955ab 100644
---- a/kernel/rcutiny_plugin.h
-+++ b/kernel/rcutiny_plugin.h
-@@ -907,7 +907,7 @@ static int rcu_kthread(void *arg)
- have_rcu_kthread_work = morework;
- local_irq_restore(flags);
- if (work)
-- rcu_process_callbacks(NULL);
-+ rcu_process_callbacks();
- schedule_timeout_interruptible(1); /* Leave CPU for others. */
- }
-
-diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
-index 764825c..3aa6ac4 100644
---- a/kernel/rcutorture.c
-+++ b/kernel/rcutorture.c
-@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
- { 0 };
- static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
- { 0 };
--static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
--static atomic_t n_rcu_torture_alloc;
--static atomic_t n_rcu_torture_alloc_fail;
--static atomic_t n_rcu_torture_free;
--static atomic_t n_rcu_torture_mberror;
--static atomic_t n_rcu_torture_error;
-+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
-+static atomic_unchecked_t n_rcu_torture_alloc;
-+static atomic_unchecked_t n_rcu_torture_alloc_fail;
-+static atomic_unchecked_t n_rcu_torture_free;
-+static atomic_unchecked_t n_rcu_torture_mberror;
-+static atomic_unchecked_t n_rcu_torture_error;
- static long n_rcu_torture_boost_ktrerror;
- static long n_rcu_torture_boost_rterror;
- static long n_rcu_torture_boost_failure;
-@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
-
- spin_lock_bh(&rcu_torture_lock);
- if (list_empty(&rcu_torture_freelist)) {
-- atomic_inc(&n_rcu_torture_alloc_fail);
-+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
- spin_unlock_bh(&rcu_torture_lock);
- return NULL;
- }
-- atomic_inc(&n_rcu_torture_alloc);
-+ atomic_inc_unchecked(&n_rcu_torture_alloc);
- p = rcu_torture_freelist.next;
- list_del_init(p);
- spin_unlock_bh(&rcu_torture_lock);
-@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
- static void
- rcu_torture_free(struct rcu_torture *p)
- {
-- atomic_inc(&n_rcu_torture_free);
-+ atomic_inc_unchecked(&n_rcu_torture_free);
- spin_lock_bh(&rcu_torture_lock);
- list_add_tail(&p->rtort_free, &rcu_torture_freelist);
- spin_unlock_bh(&rcu_torture_lock);
-@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
- i = rp->rtort_pipe_count;
- if (i > RCU_TORTURE_PIPE_LEN)
- i = RCU_TORTURE_PIPE_LEN;
-- atomic_inc(&rcu_torture_wcount[i]);
-+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
- if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
- rp->rtort_mbtest = 0;
- rcu_torture_free(rp);
-@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
- i = rp->rtort_pipe_count;
- if (i > RCU_TORTURE_PIPE_LEN)
- i = RCU_TORTURE_PIPE_LEN;
-- atomic_inc(&rcu_torture_wcount[i]);
-+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
- if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
- rp->rtort_mbtest = 0;
- list_del(&rp->rtort_free);
-@@ -872,7 +872,7 @@ rcu_torture_writer(void *arg)
- i = old_rp->rtort_pipe_count;
- if (i > RCU_TORTURE_PIPE_LEN)
- i = RCU_TORTURE_PIPE_LEN;
-- atomic_inc(&rcu_torture_wcount[i]);
-+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
- old_rp->rtort_pipe_count++;
- cur_ops->deferred_free(old_rp);
- }
-@@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused)
- return;
- }
- if (p->rtort_mbtest == 0)
-- atomic_inc(&n_rcu_torture_mberror);
-+ atomic_inc_unchecked(&n_rcu_torture_mberror);
- spin_lock(&rand_lock);
- cur_ops->read_delay(&rand);
- n_rcu_torture_timers++;
-@@ -1001,7 +1001,7 @@ rcu_torture_reader(void *arg)
- continue;
- }
- if (p->rtort_mbtest == 0)
-- atomic_inc(&n_rcu_torture_mberror);
-+ atomic_inc_unchecked(&n_rcu_torture_mberror);
- cur_ops->read_delay(&rand);
- preempt_disable();
- pipe_count = p->rtort_pipe_count;
-@@ -1060,16 +1060,16 @@ rcu_torture_printk(char *page)
- rcu_torture_current,
- rcu_torture_current_version,
- list_empty(&rcu_torture_freelist),
-- atomic_read(&n_rcu_torture_alloc),
-- atomic_read(&n_rcu_torture_alloc_fail),
-- atomic_read(&n_rcu_torture_free),
-- atomic_read(&n_rcu_torture_mberror),
-+ atomic_read_unchecked(&n_rcu_torture_alloc),
-+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
-+ atomic_read_unchecked(&n_rcu_torture_free),
-+ atomic_read_unchecked(&n_rcu_torture_mberror),
- n_rcu_torture_boost_ktrerror,
- n_rcu_torture_boost_rterror,
- n_rcu_torture_boost_failure,
- n_rcu_torture_boosts,
- n_rcu_torture_timers);
-- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
-+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
- n_rcu_torture_boost_ktrerror != 0 ||
- n_rcu_torture_boost_rterror != 0 ||
- n_rcu_torture_boost_failure != 0)
-@@ -1077,7 +1077,7 @@ rcu_torture_printk(char *page)
- cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
- if (i > 1) {
- cnt += sprintf(&page[cnt], "!!! ");
-- atomic_inc(&n_rcu_torture_error);
-+ atomic_inc_unchecked(&n_rcu_torture_error);
- WARN_ON_ONCE(1);
- }
- cnt += sprintf(&page[cnt], "Reader Pipe: ");
-@@ -1091,7 +1091,7 @@ rcu_torture_printk(char *page)
- cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
- for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
- cnt += sprintf(&page[cnt], " %d",
-- atomic_read(&rcu_torture_wcount[i]));
-+ atomic_read_unchecked(&rcu_torture_wcount[i]));
- }
- cnt += sprintf(&page[cnt], "\n");
- if (cur_ops->stats)
-@@ -1401,7 +1401,7 @@ rcu_torture_cleanup(void)
-
- if (cur_ops->cleanup)
- cur_ops->cleanup();
-- if (atomic_read(&n_rcu_torture_error))
-+ if (atomic_read_unchecked(&n_rcu_torture_error))
- rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
- else
- rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
-@@ -1465,17 +1465,17 @@ rcu_torture_init(void)
-
- rcu_torture_current = NULL;
- rcu_torture_current_version = 0;
-- atomic_set(&n_rcu_torture_alloc, 0);
-- atomic_set(&n_rcu_torture_alloc_fail, 0);
-- atomic_set(&n_rcu_torture_free, 0);
-- atomic_set(&n_rcu_torture_mberror, 0);
-- atomic_set(&n_rcu_torture_error, 0);
-+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
-+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
-+ atomic_set_unchecked(&n_rcu_torture_free, 0);
-+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
-+ atomic_set_unchecked(&n_rcu_torture_error, 0);
- n_rcu_torture_boost_ktrerror = 0;
- n_rcu_torture_boost_rterror = 0;
- n_rcu_torture_boost_failure = 0;
- n_rcu_torture_boosts = 0;
- for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
-- atomic_set(&rcu_torture_wcount[i], 0);
-+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
- for_each_possible_cpu(cpu) {
- for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
- per_cpu(rcu_torture_count, cpu)[i] = 0;
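/*
 * Illustrative sketch, not part of the patch above: the rcutorture and
 * rcutree hunks follow the PaX REFCOUNT convention, where counters whose
 * wraparound is harmless (statistics, sequence numbers) become
 * atomic_unchecked_t so the overflow check only guards real reference
 * counts.  The two hypothetical counters below show the split.
 */
static atomic_t example_refcount = ATOMIC_INIT(1);         /* overflow would be a bug */
static atomic_unchecked_t example_events = ATOMIC_INIT(0); /* wraparound is harmless */

static inline void example_get(void)
{
	atomic_inc(&example_refcount);            /* checked under PAX_REFCOUNT */
}

static inline void example_count_event(void)
{
	atomic_inc_unchecked(&example_events);    /* exempt from the overflow check */
}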
-diff --git a/kernel/rcutree.c b/kernel/rcutree.c
-index 1aa52af..d2875ad 100644
---- a/kernel/rcutree.c
-+++ b/kernel/rcutree.c
-@@ -369,9 +369,9 @@ void rcu_enter_nohz(void)
- trace_rcu_dyntick("Start");
- /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
- smp_mb__before_atomic_inc(); /* See above. */
-- atomic_inc(&rdtp->dynticks);
-+ atomic_inc_unchecked(&rdtp->dynticks);
- smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
-- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
-+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
- local_irq_restore(flags);
- }
-
-@@ -393,10 +393,10 @@ void rcu_exit_nohz(void)
- return;
- }
- smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
-- atomic_inc(&rdtp->dynticks);
-+ atomic_inc_unchecked(&rdtp->dynticks);
- /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
- smp_mb__after_atomic_inc(); /* See above. */
-- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
-+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
- trace_rcu_dyntick("End");
- local_irq_restore(flags);
- }
-@@ -413,14 +413,14 @@ void rcu_nmi_enter(void)
- struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
-
- if (rdtp->dynticks_nmi_nesting == 0 &&
-- (atomic_read(&rdtp->dynticks) & 0x1))
-+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
- return;
- rdtp->dynticks_nmi_nesting++;
- smp_mb__before_atomic_inc(); /* Force delay from prior write. */
-- atomic_inc(&rdtp->dynticks);
-+ atomic_inc_unchecked(&rdtp->dynticks);
- /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
- smp_mb__after_atomic_inc(); /* See above. */
-- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
-+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
- }
-
- /**
-@@ -439,9 +439,9 @@ void rcu_nmi_exit(void)
- return;
- /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
- smp_mb__before_atomic_inc(); /* See above. */
-- atomic_inc(&rdtp->dynticks);
-+ atomic_inc_unchecked(&rdtp->dynticks);
- smp_mb__after_atomic_inc(); /* Force delay to next write. */
-- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
-+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
- }
-
- /**
-@@ -476,7 +476,7 @@ void rcu_irq_exit(void)
- */
- static int dyntick_save_progress_counter(struct rcu_data *rdp)
- {
-- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
-+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
- return 0;
- }
-
-@@ -491,7 +491,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
- unsigned int curr;
- unsigned int snap;
-
-- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
-+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
- snap = (unsigned int)rdp->dynticks_snap;
-
- /*
-@@ -1554,7 +1554,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
- /*
- * Do RCU core processing for the current CPU.
- */
--static void rcu_process_callbacks(struct softirq_action *unused)
-+static __latent_entropy void rcu_process_callbacks(void)
- {
- trace_rcu_utilization("Start RCU core");
- __rcu_process_callbacks(&rcu_sched_state,
-diff --git a/kernel/rcutree.h b/kernel/rcutree.h
-index 849ce9e..74bc9de 100644
---- a/kernel/rcutree.h
-+++ b/kernel/rcutree.h
-@@ -86,7 +86,7 @@
- struct rcu_dynticks {
- int dynticks_nesting; /* Track irq/process nesting level. */
- int dynticks_nmi_nesting; /* Track NMI nesting level. */
-- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
-+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
- };
-
- /* RCU's kthread states for tracing. */
-diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
-index 4b9b9f8..2326053 100644
---- a/kernel/rcutree_plugin.h
-+++ b/kernel/rcutree_plugin.h
-@@ -842,7 +842,7 @@ void synchronize_rcu_expedited(void)
-
- /* Clean up and exit. */
- smp_mb(); /* ensure expedited GP seen before counter increment. */
-- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
-+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
- unlock_mb_ret:
- mutex_unlock(&sync_rcu_preempt_exp_mutex);
- mb_ret:
-@@ -1815,8 +1815,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
-
- #else /* #ifndef CONFIG_SMP */
-
--static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
--static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
-+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
-+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
-
- static int synchronize_sched_expedited_cpu_stop(void *data)
- {
-@@ -1871,7 +1871,7 @@ void synchronize_sched_expedited(void)
- int firstsnap, s, snap, trycount = 0;
-
- /* Note that atomic_inc_return() implies full memory barrier. */
-- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
-+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
- get_online_cpus();
-
- /*
-@@ -1892,7 +1892,7 @@ void synchronize_sched_expedited(void)
- }
-
- /* Check to see if someone else did our work for us. */
-- s = atomic_read(&sync_sched_expedited_done);
-+ s = atomic_read_unchecked(&sync_sched_expedited_done);
- if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
- smp_mb(); /* ensure test happens before caller kfree */
- return;
-@@ -1907,7 +1907,7 @@ void synchronize_sched_expedited(void)
- * grace period works for us.
- */
- get_online_cpus();
-- snap = atomic_read(&sync_sched_expedited_started) - 1;
-+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
- smp_mb(); /* ensure read is before try_stop_cpus(). */
- }
-
-@@ -1918,12 +1918,12 @@ void synchronize_sched_expedited(void)
- * than we did beat us to the punch.
- */
- do {
-- s = atomic_read(&sync_sched_expedited_done);
-+ s = atomic_read_unchecked(&sync_sched_expedited_done);
- if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
- smp_mb(); /* ensure test happens before caller kfree */
- break;
- }
-- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
-+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
-
- put_online_cpus();
- }
-@@ -1985,7 +1985,7 @@ int rcu_needs_cpu(int cpu)
- for_each_online_cpu(thatcpu) {
- if (thatcpu == cpu)
- continue;
-- snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
-+ snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
- thatcpu).dynticks);
- smp_mb(); /* Order sampling of snap with end of grace period. */
- if ((snap & 0x1) != 0) {
-diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
-index 9feffa4..54058df 100644
---- a/kernel/rcutree_trace.c
-+++ b/kernel/rcutree_trace.c
-@@ -69,7 +69,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
- rdp->qs_pending);
- #ifdef CONFIG_NO_HZ
- seq_printf(m, " dt=%d/%d/%d df=%lu",
-- atomic_read(&rdp->dynticks->dynticks),
-+ atomic_read_unchecked(&rdp->dynticks->dynticks),
- rdp->dynticks->dynticks_nesting,
- rdp->dynticks->dynticks_nmi_nesting,
- rdp->dynticks_fqs);
-@@ -143,7 +143,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
- rdp->qs_pending);
- #ifdef CONFIG_NO_HZ
- seq_printf(m, ",%d,%d,%d,%lu",
-- atomic_read(&rdp->dynticks->dynticks),
-+ atomic_read_unchecked(&rdp->dynticks->dynticks),
- rdp->dynticks->dynticks_nesting,
- rdp->dynticks->dynticks_nmi_nesting,
- rdp->dynticks_fqs);
-diff --git a/kernel/resource.c b/kernel/resource.c
-index 08aa28e..b958c1c 100644
---- a/kernel/resource.c
-+++ b/kernel/resource.c
-@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
-
- static int __init ioresources_init(void)
- {
-+#ifdef CONFIG_GRKERNSEC_PROC_ADD
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
-+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
-+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
-+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
-+#endif
-+#else
- proc_create("ioports", 0, NULL, &proc_ioports_operations);
- proc_create("iomem", 0, NULL, &proc_iomem_operations);
-+#endif
- return 0;
- }
- __initcall(ioresources_init);
-diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
-index 3d9f31c..7fefc9e 100644
---- a/kernel/rtmutex-tester.c
-+++ b/kernel/rtmutex-tester.c
-@@ -20,7 +20,7 @@
- #define MAX_RT_TEST_MUTEXES 8
-
- static spinlock_t rttest_lock;
--static atomic_t rttest_event;
-+static atomic_unchecked_t rttest_event;
-
- struct test_thread_data {
- int opcode;
-@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
-
- case RTTEST_LOCKCONT:
- td->mutexes[td->opdata] = 1;
-- td->event = atomic_add_return(1, &rttest_event);
-+ td->event = atomic_add_return_unchecked(1, &rttest_event);
- return 0;
-
- case RTTEST_RESET:
-@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
- return 0;
-
- case RTTEST_RESETEVENT:
-- atomic_set(&rttest_event, 0);
-+ atomic_set_unchecked(&rttest_event, 0);
- return 0;
-
- default:
-@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
- return ret;
-
- td->mutexes[id] = 1;
-- td->event = atomic_add_return(1, &rttest_event);
-+ td->event = atomic_add_return_unchecked(1, &rttest_event);
- rt_mutex_lock(&mutexes[id]);
-- td->event = atomic_add_return(1, &rttest_event);
-+ td->event = atomic_add_return_unchecked(1, &rttest_event);
- td->mutexes[id] = 4;
- return 0;
-
-@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
- return ret;
-
- td->mutexes[id] = 1;
-- td->event = atomic_add_return(1, &rttest_event);
-+ td->event = atomic_add_return_unchecked(1, &rttest_event);
- ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
-- td->event = atomic_add_return(1, &rttest_event);
-+ td->event = atomic_add_return_unchecked(1, &rttest_event);
- td->mutexes[id] = ret ? 0 : 4;
- return ret ? -EINTR : 0;
-
-@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
- if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
- return ret;
-
-- td->event = atomic_add_return(1, &rttest_event);
-+ td->event = atomic_add_return_unchecked(1, &rttest_event);
- rt_mutex_unlock(&mutexes[id]);
-- td->event = atomic_add_return(1, &rttest_event);
-+ td->event = atomic_add_return_unchecked(1, &rttest_event);
- td->mutexes[id] = 0;
- return 0;
-
-@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
- break;
-
- td->mutexes[dat] = 2;
-- td->event = atomic_add_return(1, &rttest_event);
-+ td->event = atomic_add_return_unchecked(1, &rttest_event);
- break;
-
- default:
-@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
- return;
-
- td->mutexes[dat] = 3;
-- td->event = atomic_add_return(1, &rttest_event);
-+ td->event = atomic_add_return_unchecked(1, &rttest_event);
- break;
-
- case RTTEST_LOCKNOWAIT:
-@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
- return;
-
- td->mutexes[dat] = 1;
-- td->event = atomic_add_return(1, &rttest_event);
-+ td->event = atomic_add_return_unchecked(1, &rttest_event);
- return;
-
- default:
-diff --git a/kernel/sched.c b/kernel/sched.c
-index fe33d0f..b5d0b8a 100644
---- a/kernel/sched.c
-+++ b/kernel/sched.c
-@@ -3307,8 +3307,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
- next->active_mm = oldmm;
- atomic_inc(&oldmm->mm_count);
- enter_lazy_tlb(oldmm, next);
-- } else
-+ } else {
- switch_mm(oldmm, mm, next);
-+ populate_stack();
-+ }
-
- if (!prev->mm) {
- prev->active_mm = NULL;
-@@ -5050,7 +5052,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
- */
--long __sched
-+long __sched __intentional_overflow(-1)
- wait_for_completion_interruptible_timeout(struct completion *x,
- unsigned long timeout)
- {
-@@ -5067,7 +5069,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
- *
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
- */
--int __sched wait_for_completion_killable(struct completion *x)
-+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
- {
- long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
- if (t == -ERESTARTSYS)
-@@ -5088,7 +5090,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
- */
--long __sched
-+long __sched __intentional_overflow(-1)
- wait_for_completion_killable_timeout(struct completion *x,
- unsigned long timeout)
- {
-@@ -5300,6 +5302,8 @@ int can_nice(const struct task_struct *p, const int nice)
- /* convert nice value [19,-20] to rlimit style value [1,40] */
- int nice_rlim = 20 - nice;
-
-+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
-+
- return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
- capable(CAP_SYS_NICE));
- }
-@@ -5333,7 +5337,8 @@ SYSCALL_DEFINE1(nice, int, increment)
- if (nice > 19)
- nice = 19;
-
-- if (increment < 0 && !can_nice(current, nice))
-+ if (increment < 0 && (!can_nice(current, nice) ||
-+ gr_handle_chroot_nice()))
- return -EPERM;
-
- retval = security_task_setnice(current, nice);
-@@ -5490,6 +5495,7 @@ recheck:
- unsigned long rlim_rtprio =
- task_rlimit(p, RLIMIT_RTPRIO);
-
-+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
- /* can't set/change the rt policy */
- if (policy != p->policy && !rlim_rtprio)
- return -EPERM;
-@@ -6524,8 +6530,10 @@ void idle_task_exit(void)
-
- BUG_ON(cpu_online(smp_processor_id()));
-
-- if (mm != &init_mm)
-+ if (mm != &init_mm) {
- switch_mm(mm, &init_mm, current);
-+ populate_stack();
-+ }
- mmdrop(mm);
- }
-
-@@ -6632,7 +6640,7 @@ static void unthrottle_offline_cfs_rqs(struct rq *rq) {}
-
- #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
-
--static struct ctl_table sd_ctl_dir[] = {
-+static ctl_table_no_const sd_ctl_dir[] __read_only = {
- {
- .procname = "sched_domain",
- .mode = 0555,
-@@ -6649,17 +6657,17 @@ static struct ctl_table sd_ctl_root[] = {
- {}
- };
-
--static struct ctl_table *sd_alloc_ctl_entry(int n)
-+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
- {
-- struct ctl_table *entry =
-+ ctl_table_no_const *entry =
- kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
-
- return entry;
- }
-
--static void sd_free_ctl_entry(struct ctl_table **tablep)
-+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
- {
-- struct ctl_table *entry;
-+ ctl_table_no_const *entry;
-
- /*
- * In the intermediate directories, both the child directory and
-@@ -6667,22 +6675,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
- * will always be set. In the lowest directory the names are
- * static strings and all have proc handlers.
- */
-- for (entry = *tablep; entry->mode; entry++) {
-- if (entry->child)
-- sd_free_ctl_entry(&entry->child);
-+ for (entry = tablep; entry->mode; entry++) {
-+ if (entry->child) {
-+ sd_free_ctl_entry(entry->child);
-+ pax_open_kernel();
-+ entry->child = NULL;
-+ pax_close_kernel();
-+ }
- if (entry->proc_handler == NULL)
- kfree(entry->procname);
- }
-
-- kfree(*tablep);
-- *tablep = NULL;
-+ kfree(tablep);
- }
-
- static int min_load_idx = 0;
- static int max_load_idx = CPU_LOAD_IDX_MAX-1;
-
- static void
--set_table_entry(struct ctl_table *entry,
-+set_table_entry(ctl_table_no_const *entry,
- const char *procname, void *data, int maxlen,
- mode_t mode, proc_handler *proc_handler,
- bool load_idx)
-@@ -6702,7 +6713,7 @@ set_table_entry(struct ctl_table *entry,
- static struct ctl_table *
- sd_alloc_ctl_domain_table(struct sched_domain *sd)
- {
-- struct ctl_table *table = sd_alloc_ctl_entry(13);
-+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
-
- if (table == NULL)
- return NULL;
-@@ -6737,9 +6748,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
- return table;
- }
-
--static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
-+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
- {
-- struct ctl_table *entry, *table;
-+ ctl_table_no_const *entry, *table;
- struct sched_domain *sd;
- int domain_num = 0, i;
- char buf[32];
-@@ -6766,11 +6777,13 @@ static struct ctl_table_header *sd_sysctl_header;
- static void register_sched_domain_sysctl(void)
- {
- int i, cpu_num = num_possible_cpus();
-- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
-+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
- char buf[32];
-
- WARN_ON(sd_ctl_dir[0].child);
-+ pax_open_kernel();
- sd_ctl_dir[0].child = entry;
-+ pax_close_kernel();
-
- if (entry == NULL)
- return;
-@@ -6793,8 +6806,12 @@ static void unregister_sched_domain_sysctl(void)
- if (sd_sysctl_header)
- unregister_sysctl_table(sd_sysctl_header);
- sd_sysctl_header = NULL;
-- if (sd_ctl_dir[0].child)
-- sd_free_ctl_entry(&sd_ctl_dir[0].child);
-+ if (sd_ctl_dir[0].child) {
-+ sd_free_ctl_entry(sd_ctl_dir[0].child);
-+ pax_open_kernel();
-+ sd_ctl_dir[0].child = NULL;
-+ pax_close_kernel();
-+ }
- }
- #else
- static void register_sched_domain_sysctl(void)
-@@ -6892,7 +6909,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
- * happens before everything else. This has to be lower priority than
- * the notifier in the perf_event subsystem, though.
- */
--static struct notifier_block __cpuinitdata migration_notifier = {
-+static struct notifier_block migration_notifier = {
- .notifier_call = migration_call,
- .priority = CPU_PRI_MIGRATION,
- };
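/*
 * Illustrative sketch, not part of the patch above: the sched.c hunks show
 * the pattern this patch uses for PaX constification.  Data marked
 * __read_only (or typed ctl_table_no_const) sits in read-only memory after
 * boot, so the rare legitimate writes are bracketed with pax_open_kernel()
 * and pax_close_kernel().  The variable below is hypothetical.
 */
static struct ctl_table_header *example_header __read_only;

static void example_set_header(struct ctl_table_header *hdr)
{
	pax_open_kernel();
	example_header = hdr;	/* write to otherwise read-only data */
	pax_close_kernel();
}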
-diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
-index cb0a950..47dfae9 100644
---- a/kernel/sched_autogroup.c
-+++ b/kernel/sched_autogroup.c
-@@ -7,7 +7,7 @@
-
- unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
- static struct autogroup autogroup_default;
--static atomic_t autogroup_seq_nr;
-+static atomic_unchecked_t autogroup_seq_nr;
-
- static void __init autogroup_init(struct task_struct *init_task)
- {
-@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
-
- kref_init(&ag->kref);
- init_rwsem(&ag->lock);
-- ag->id = atomic_inc_return(&autogroup_seq_nr);
-+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
- ag->tg = tg;
- #ifdef CONFIG_RT_GROUP_SCHED
- /*
-diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
-index 4c6dae1..0050e446 100644
---- a/kernel/sched_fair.c
-+++ b/kernel/sched_fair.c
-@@ -4803,7 +4803,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
- * run_rebalance_domains is triggered when needed from the scheduler tick.
- * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
- */
--static void run_rebalance_domains(struct softirq_action *h)
-+static __latent_entropy void run_rebalance_domains(void)
- {
- int this_cpu = smp_processor_id();
- struct rq *this_rq = cpu_rq(this_cpu);
-diff --git a/kernel/seccomp.c b/kernel/seccomp.c
-index 57d4b13..bc84054 100644
---- a/kernel/seccomp.c
-+++ b/kernel/seccomp.c
-@@ -3,15 +3,353 @@
- *
- * Copyright 2004-2005 Andrea Arcangeli <andrea@cpushare.com>
- *
-- * This defines a simple but solid secure-computing mode.
-+ * Copyright (C) 2012 Google, Inc.
-+ * Will Drewry <wad@chromium.org>
-+ *
-+ * This defines a simple but solid secure-computing facility.
-+ *
-+ * Mode 1 uses a fixed list of allowed system calls.
-+ * Mode 2 allows user-defined system call filters in the form
-+ * of Berkeley Packet Filters/Linux Socket Filters.
- */
-
--#include <linux/seccomp.h>
--#include <linux/sched.h>
-+#include <linux/atomic.h>
-+#include <linux/audit.h>
- #include <linux/compat.h>
-+#include <linux/filter.h>
-+#include <linux/ptrace.h>
-+#include <linux/sched.h>
-+#include <linux/seccomp.h>
-+#include <linux/security.h>
-+#include <linux/slab.h>
-+#include <linux/uaccess.h>
-+
-+#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
-+#include <asm/syscall.h>
-+#endif
-
- /* #define SECCOMP_DEBUG 1 */
--#define NR_SECCOMP_MODES 1
-+
-+#ifdef CONFIG_SECCOMP_FILTER
-+/**
-+ * struct seccomp_filter - container for seccomp BPF programs
-+ *
-+ * @usage: reference count to manage the object lifetime.
-+ * get/put helpers should be used when accessing an instance
-+ * outside of a lifetime-guarded section. In general, this
-+ * is only needed for handling filters shared across tasks.
-+ * @prev: points to a previously installed, or inherited, filter
-+ * @len: the number of instructions in the program
-+ * @insns: the BPF program instructions to evaluate
-+ *
-+ * seccomp_filter objects are organized in a tree linked via the @prev
-+ * pointer. For any task, it appears to be a singly-linked list starting
-+ * with current->seccomp.filter, the most recently attached or inherited filter.
-+ * However, multiple filters may share a @prev node, by way of fork(), which
-+ * results in a unidirectional tree existing in memory. This is similar to
-+ * how namespaces work.
-+ *
-+ * seccomp_filter objects should never be modified after being attached
-+ * to a task_struct (other than @usage).
-+ */
-+struct seccomp_filter {
-+ atomic_t usage;
-+ struct seccomp_filter *prev;
-+ unsigned short len; /* Instruction count */
-+ struct sock_filter insns[];
-+};
-+
-+/* Limit any path through the tree to 256KB worth of instructions. */
-+#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
-+
-+/**
-+ * get_u32 - returns a u32 offset into data
-+ * @data: an unsigned 64-bit value
-+ * @index: 0 or 1 to return the first or second 32-bits
-+ *
-+ * This inline exists to hide the length of unsigned long.
-+ * If a 32-bit unsigned long is passed in, it will be extended
-+ * and the top 32-bits will be 0. If it is a 64-bit unsigned
-+ * long, then whatever data is resident will be properly returned.
-+ */
-+static inline u32 get_u32(u64 data, int index)
-+{
-+ return ((u32 *)&data)[index];
-+}
-+
-+/* Helper for bpf_load below. */
-+#define BPF_DATA(_name) offsetof(struct seccomp_data, _name)
-+/**
-+ * bpf_load: checks and returns a pointer to the requested offset
-+ * @off: offset into struct seccomp_data to load from
-+ *
-+ * Returns the requested 32-bits of data.
-+ * seccomp_chk_filter() should assure that @off is 32-bit aligned
-+ * and not out of bounds. Failure to do so is a BUG.
-+ */
-+u32 seccomp_bpf_load(int off)
-+{
-+ struct pt_regs *regs = task_pt_regs(current);
-+ if (off == BPF_DATA(nr))
-+ return syscall_get_nr(current, regs);
-+ if (off == BPF_DATA(arch))
-+ return syscall_get_arch(current, regs);
-+ if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) {
-+ unsigned long value;
-+ int arg = (off - BPF_DATA(args[0])) / sizeof(u64);
-+ int index = !!(off % sizeof(u64));
-+ syscall_get_arguments(current, regs, arg, 1, &value);
-+ return get_u32(value, index);
-+ }
-+ if (off == BPF_DATA(instruction_pointer))
-+ return get_u32(KSTK_EIP(current), 0);
-+ if (off == BPF_DATA(instruction_pointer) + sizeof(u32))
-+ return get_u32(KSTK_EIP(current), 1);
-+ /* seccomp_chk_filter should make this impossible. */
-+ BUG();
-+}
-+
-+/**
-+ * seccomp_chk_filter - verify seccomp filter code
-+ * @filter: filter to verify
-+ * @flen: length of filter
-+ *
-+ * Takes a previously checked filter (by sk_chk_filter) and
-+ * redirects all filter code that loads struct sk_buff data
-+ * and related data through seccomp_bpf_load. It also
-+ * enforces length and alignment checking of those loads.
-+ *
-+ * Returns 0 if the rule set is legal or -EINVAL if not.
-+ */
-+static int seccomp_chk_filter(struct sock_filter *filter, unsigned int flen)
-+{
-+ int pc;
-+ for (pc = 0; pc < flen; pc++) {
-+ struct sock_filter *ftest = &filter[pc];
-+ u16 code = ftest->code;
-+ u32 k = ftest->k;
-+ switch (code) {
-+ case BPF_S_LD_W_ABS:
-+ ftest->code = BPF_S_ANC_SECCOMP_LD_W;
-+ /* 32-bit aligned and not out of bounds. */
-+ if (k >= sizeof(struct seccomp_data) || k & 3)
-+ return -EINVAL;
-+ continue;
-+ case BPF_S_LD_W_LEN:
-+ ftest->code = BPF_S_LD_IMM;
-+ ftest->k = sizeof(struct seccomp_data);
-+ continue;
-+ case BPF_S_LDX_W_LEN:
-+ ftest->code = BPF_S_LDX_IMM;
-+ ftest->k = sizeof(struct seccomp_data);
-+ continue;
-+ /* Explicitly include allowed calls. */
-+ case BPF_S_RET_K:
-+ case BPF_S_RET_A:
-+ case BPF_S_ALU_ADD_K:
-+ case BPF_S_ALU_ADD_X:
-+ case BPF_S_ALU_SUB_K:
-+ case BPF_S_ALU_SUB_X:
-+ case BPF_S_ALU_MUL_K:
-+ case BPF_S_ALU_MUL_X:
-+ case BPF_S_ALU_DIV_X:
-+ case BPF_S_ALU_AND_K:
-+ case BPF_S_ALU_AND_X:
-+ case BPF_S_ALU_OR_K:
-+ case BPF_S_ALU_OR_X:
-+ case BPF_S_ALU_LSH_K:
-+ case BPF_S_ALU_LSH_X:
-+ case BPF_S_ALU_RSH_K:
-+ case BPF_S_ALU_RSH_X:
-+ case BPF_S_ALU_NEG:
-+ case BPF_S_LD_IMM:
-+ case BPF_S_LDX_IMM:
-+ case BPF_S_MISC_TAX:
-+ case BPF_S_MISC_TXA:
-+ case BPF_S_ALU_DIV_K:
-+ case BPF_S_LD_MEM:
-+ case BPF_S_LDX_MEM:
-+ case BPF_S_ST:
-+ case BPF_S_STX:
-+ case BPF_S_JMP_JA:
-+ case BPF_S_JMP_JEQ_K:
-+ case BPF_S_JMP_JEQ_X:
-+ case BPF_S_JMP_JGE_K:
-+ case BPF_S_JMP_JGE_X:
-+ case BPF_S_JMP_JGT_K:
-+ case BPF_S_JMP_JGT_X:
-+ case BPF_S_JMP_JSET_K:
-+ case BPF_S_JMP_JSET_X:
-+ continue;
-+ default:
-+ return -EINVAL;
-+ }
-+ }
-+ return 0;
-+}
-+
-+/**
-+ * seccomp_run_filters - evaluates all seccomp filters against @syscall
-+ * @syscall: number of the current system call
-+ *
-+ * Returns valid seccomp BPF response codes.
-+ */
-+static u32 seccomp_run_filters(int syscall)
-+{
-+ struct seccomp_filter *f;
-+ u32 ret = SECCOMP_RET_ALLOW;
-+
-+ /* Ensure unexpected behavior doesn't result in failing open. */
-+ if (WARN_ON(current->seccomp.filter == NULL))
-+ return SECCOMP_RET_KILL;
-+
-+ /*
-+ * All filters are evaluated in order of youngest to oldest. The lowest
-+ * BPF return value (ignoring the DATA) always takes priority.
-+ */
-+ for (f = current->seccomp.filter; f; f = f->prev) {
-+ u32 cur_ret = sk_run_filter(NULL, f->insns);
-+ if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
-+ ret = cur_ret;
-+ }
-+ return ret;
-+}
-+
-+/**
-+ * seccomp_attach_filter: Attaches a seccomp filter to current.
-+ * @fprog: BPF program to install
-+ *
-+ * Returns 0 on success or an errno on failure.
-+ */
-+static long seccomp_attach_filter(struct sock_fprog *fprog)
-+{
-+ struct seccomp_filter *filter;
-+ unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
-+ unsigned long total_insns = fprog->len;
-+ long ret;
-+
-+ if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
-+ return -EINVAL;
-+
-+ for (filter = current->seccomp.filter; filter; filter = filter->prev)
-+ total_insns += filter->len + 4; /* include a 4 instr penalty */
-+ if (total_insns > MAX_INSNS_PER_PATH)
-+ return -ENOMEM;
-+
-+ /*
-+ * Installing a seccomp filter requires that the task have
-+ * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
-+ * This avoids scenarios where unprivileged tasks can affect the
-+ * behavior of privileged children.
-+ */
-+ if (!current->no_new_privs &&
-+ security_real_capable_noaudit(current, current_user_ns(),
-+ CAP_SYS_ADMIN) != 0)
-+ return -EACCES;
-+
-+ /* Allocate a new seccomp_filter */
-+ filter = kzalloc(sizeof(struct seccomp_filter) + fp_size, GFP_KERNEL);
-+ if (!filter)
-+ return -ENOMEM;
-+ atomic_set(&filter->usage, 1);
-+ filter->len = fprog->len;
-+
-+ /* Copy the instructions from fprog. */
-+ ret = -EFAULT;
-+ if (copy_from_user(filter->insns, fprog->filter, fp_size))
-+ goto fail;
-+
-+ /* Check and rewrite the fprog via the skb checker */
-+ ret = sk_chk_filter(filter->insns, filter->len);
-+ if (ret)
-+ goto fail;
-+
-+ /* Check and rewrite the fprog for seccomp use */
-+ ret = seccomp_chk_filter(filter->insns, filter->len);
-+ if (ret)
-+ goto fail;
-+
-+ /*
-+ * If there is an existing filter, make it the prev and don't drop its
-+ * task reference.
-+ */
-+ filter->prev = current->seccomp.filter;
-+ current->seccomp.filter = filter;
-+ return 0;
-+fail:
-+ kfree(filter);
-+ return ret;
-+}
-+
-+/**
-+ * seccomp_attach_user_filter - attaches a user-supplied sock_fprog
-+ * @user_filter: pointer to the user data containing a sock_fprog.
-+ *
-+ * Returns 0 on success and non-zero otherwise.
-+ */
-+long seccomp_attach_user_filter(char __user *user_filter)
-+{
-+ struct sock_fprog fprog;
-+ long ret = -EFAULT;
-+
-+#ifdef CONFIG_COMPAT
-+ if (is_compat_task()) {
-+ struct compat_sock_fprog fprog32;
-+ if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
-+ goto out;
-+ fprog.len = fprog32.len;
-+ fprog.filter = compat_ptr(fprog32.filter);
-+ } else /* falls through to the if below. */
-+#endif
-+ if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
-+ goto out;
-+ ret = seccomp_attach_filter(&fprog);
-+out:
-+ return ret;
-+}
-+
-+/* get_seccomp_filter - increments the reference count of the filter on @tsk */
-+void get_seccomp_filter(struct task_struct *tsk)
-+{
-+ struct seccomp_filter *orig = tsk->seccomp.filter;
-+ if (!orig)
-+ return;
-+ /* Reference count is bounded by the number of total processes. */
-+ atomic_inc(&orig->usage);
-+}
-+
-+/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
-+void put_seccomp_filter(struct task_struct *tsk)
-+{
-+ struct seccomp_filter *orig = tsk->seccomp.filter;
-+ /* Clean up single-reference branches iteratively. */
-+ while (orig && atomic_dec_and_test(&orig->usage)) {
-+ struct seccomp_filter *freeme = orig;
-+ orig = orig->prev;
-+ kfree(freeme);
-+ }
-+}
-+
-+/**
-+ * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
-+ * @syscall: syscall number to send to userland
-+ * @reason: filter-supplied reason code to send to userland (via si_errno)
-+ *
-+ * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
-+ */
-+static void seccomp_send_sigsys(int syscall, int reason)
-+{
-+ struct siginfo info;
-+ memset(&info, 0, sizeof(info));
-+ info.si_signo = SIGSYS;
-+ info.si_code = SYS_SECCOMP;
-+ info.si_call_addr = (void __user *)KSTK_EIP(current);
-+ info.si_errno = reason;
-+ info.si_arch = syscall_get_arch(current, task_pt_regs(current));
-+ info.si_syscall = syscall;
-+ force_sig_info(SIGSYS, &info, current);
-+}
-+#endif /* CONFIG_SECCOMP_FILTER */
-
- /*
- * Secure computing mode 1 allows only read/write/exit/sigreturn.
-@@ -32,11 +370,21 @@ static int mode1_syscalls_32[] = {
-
- void __secure_computing(int this_syscall)
- {
-+ /* Filter calls should never use this function. */
-+ BUG_ON(current->seccomp.mode == SECCOMP_MODE_FILTER);
-+ __secure_computing_int(this_syscall);
-+}
-+
-+int __secure_computing_int(int this_syscall)
-+{
- int mode = current->seccomp.mode;
-- int * syscall;
-+ int exit_sig = 0;
-+ int *syscall;
-+ u32 ret = SECCOMP_RET_KILL;
-+ int data;
-
- switch (mode) {
-- case 1:
-+ case SECCOMP_MODE_STRICT:
- syscall = mode1_syscalls;
- #ifdef CONFIG_COMPAT
- if (is_compat_task())
-@@ -44,9 +392,44 @@ void __secure_computing(int this_syscall)
- #endif
- do {
- if (*syscall == this_syscall)
-- return;
-+ return 0;
- } while (*++syscall);
-+ exit_sig = SIGKILL;
- break;
-+#ifdef CONFIG_SECCOMP_FILTER
-+ case SECCOMP_MODE_FILTER:
-+ ret = seccomp_run_filters(this_syscall);
-+ data = ret & SECCOMP_RET_DATA;
-+ switch (ret & SECCOMP_RET_ACTION) {
-+ case SECCOMP_RET_ERRNO:
-+ /* Set the low-order 16-bits as a errno. */
-+ syscall_set_return_value(current, task_pt_regs(current),
-+ -data, 0);
-+ goto skip;
-+ case SECCOMP_RET_TRAP:
-+ /* Show the handler the original registers. */
-+ syscall_rollback(current, task_pt_regs(current));
-+ /* Let the filter pass back 16 bits of data. */
-+ seccomp_send_sigsys(this_syscall, data);
-+ goto skip;
-+ case SECCOMP_RET_TRACE:
-+ /* Skip these calls if there is no tracer. */
-+ if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP))
-+ goto skip;
-+ /* Allow the BPF to provide the event message */
-+ ptrace_event(PTRACE_EVENT_SECCOMP, data);
-+ if (fatal_signal_pending(current))
-+ break;
-+ return 0;
-+ case SECCOMP_RET_ALLOW:
-+ return 0;
-+ case SECCOMP_RET_KILL:
-+ default:
-+ break;
-+ }
-+ exit_sig = SIGSYS;
-+ break;
-+#endif
- default:
- BUG();
- }
-@@ -54,7 +437,11 @@ void __secure_computing(int this_syscall)
- #ifdef SECCOMP_DEBUG
- dump_stack();
- #endif
-- do_exit(SIGKILL);
-+ __audit_seccomp(this_syscall, exit_sig, ret);
-+ do_exit(exit_sig);
-+skip:
-+ audit_seccomp(this_syscall, exit_sig, ret);
-+ return -1;
- }
-
- long prctl_get_seccomp(void)
-@@ -62,25 +449,48 @@ long prctl_get_seccomp(void)
- return current->seccomp.mode;
- }
-
--long prctl_set_seccomp(unsigned long seccomp_mode)
-+/**
-+ * prctl_set_seccomp: configures current->seccomp.mode
-+ * @seccomp_mode: requested mode to use
-+ * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
-+ *
-+ * This function may be called repeatedly with a @seccomp_mode of
-+ * SECCOMP_MODE_FILTER to install additional filters. Every filter
-+ * successfully installed will be evaluated (in reverse order) for each system
-+ * call the task makes.
-+ *
-+ * Once current->seccomp.mode is non-zero, it may not be changed.
-+ *
-+ * Returns 0 on success or -EINVAL on failure.
-+ */
-+long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
- {
-- long ret;
-+ long ret = -EINVAL;
-
-- /* can set it only once to be even more secure */
-- ret = -EPERM;
-- if (unlikely(current->seccomp.mode))
-+ if (current->seccomp.mode &&
-+ current->seccomp.mode != seccomp_mode)
- goto out;
-
-- ret = -EINVAL;
-- if (seccomp_mode && seccomp_mode <= NR_SECCOMP_MODES) {
-- current->seccomp.mode = seccomp_mode;
-- set_thread_flag(TIF_SECCOMP);
-+ switch (seccomp_mode) {
-+ case SECCOMP_MODE_STRICT:
-+ ret = 0;
- #ifdef TIF_NOTSC
- disable_TSC();
- #endif
-- ret = 0;
-+ break;
-+#ifdef CONFIG_SECCOMP_FILTER
-+ case SECCOMP_MODE_FILTER:
-+ ret = seccomp_attach_user_filter(filter);
-+ if (ret)
-+ goto out;
-+ break;
-+#endif
-+ default:
-+ goto out;
- }
-
-- out:
-+ current->seccomp.mode = seccomp_mode;
-+ set_thread_flag(TIF_SECCOMP);
-+out:
- return ret;
- }
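/*
 * Illustrative sketch, not part of the patch above: with SECCOMP_MODE_FILTER
 * in place, userspace installs a classic-BPF policy through prctl().  The
 * program below returns EPERM for one syscall and allows everything else;
 * it sets no_new_privs first (also added by this patch) so no CAP_SYS_ADMIN
 * is needed.  On a 3.2-era userspace the PR_* and SECCOMP_* constants may
 * have to be defined locally; everything named here is an assumption about
 * the surrounding program, not part of the patch.
 */
#include <errno.h>
#include <stddef.h>
#include <sys/prctl.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

static int deny_one_syscall(int nr)
{
	struct sock_filter insns[] = {
		/* load seccomp_data.nr (a full policy would also check .arch) */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, nr, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;
	return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
}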
-diff --git a/kernel/signal.c b/kernel/signal.c
-index 3ecf574..0541e21 100644
---- a/kernel/signal.c
-+++ b/kernel/signal.c
-@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
-
- int print_fatal_signals __read_mostly;
-
--static void __user *sig_handler(struct task_struct *t, int sig)
-+static __sighandler_t sig_handler(struct task_struct *t, int sig)
- {
- return t->sighand->action[sig - 1].sa.sa_handler;
- }
-
--static int sig_handler_ignored(void __user *handler, int sig)
-+static int sig_handler_ignored(__sighandler_t handler, int sig)
- {
- /* Is it explicitly or implicitly ignored? */
- return handler == SIG_IGN ||
-@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
- static int sig_task_ignored(struct task_struct *t, int sig,
- int from_ancestor_ns)
- {
-- void __user *handler;
-+ __sighandler_t handler;
-
- handler = sig_handler(t, sig);
-
-@@ -159,7 +159,7 @@ void recalc_sigpending(void)
-
- #define SYNCHRONOUS_MASK \
- (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
-- sigmask(SIGTRAP) | sigmask(SIGFPE))
-+ sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
-
- int next_signal(struct sigpending *pending, sigset_t *mask)
- {
-@@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
- atomic_inc(&user->sigpending);
- rcu_read_unlock();
-
-+ if (!override_rlimit)
-+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
-+
- if (override_rlimit ||
- atomic_read(&user->sigpending) <=
- task_rlimit(t, RLIMIT_SIGPENDING)) {
-@@ -491,7 +494,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
-
- int unhandled_signal(struct task_struct *tsk, int sig)
- {
-- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
-+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
- if (is_global_init(tsk))
- return 1;
- if (handler != SIG_IGN && handler != SIG_DFL)
-@@ -812,6 +815,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
- }
- }
-
-+ /* allow glibc communication via tgkill to other threads in our
-+ thread group */
-+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
-+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
-+ && gr_handle_signal(t, sig))
-+ return -EPERM;
-+
- return security_task_kill(t, info, sig, 0);
- }
-
-@@ -1162,7 +1172,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
- return send_signal(sig, info, p, 1);
- }
-
--static int
-+int
- specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
- {
- return send_signal(sig, info, t, 0);
-@@ -1199,6 +1209,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
- unsigned long int flags;
- int ret, blocked, ignored;
- struct k_sigaction *action;
-+ int is_unhandled = 0;
-
- spin_lock_irqsave(&t->sighand->siglock, flags);
- action = &t->sighand->action[sig-1];
-@@ -1213,9 +1224,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
- }
- if (action->sa.sa_handler == SIG_DFL)
- t->signal->flags &= ~SIGNAL_UNKILLABLE;
-+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
-+ is_unhandled = 1;
- ret = specific_send_sig_info(sig, info, t);
- spin_unlock_irqrestore(&t->sighand->siglock, flags);
-
-+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
-+ normal operation */
-+ if (is_unhandled) {
-+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
-+ gr_handle_crash(t, sig);
-+ }
-+
- return ret;
- }
-
-@@ -1282,8 +1302,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
- ret = check_kill_permission(sig, info, p);
- rcu_read_unlock();
-
-- if (!ret && sig)
-+ if (!ret && sig) {
- ret = do_send_sig_info(sig, info, p, true);
-+ if (!ret)
-+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
-+ }
-
- return ret;
- }
-@@ -2631,6 +2654,13 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_ptr, &to->si_ptr);
- break;
-+#ifdef __ARCH_SIGSYS
-+ case __SI_SYS:
-+ err |= __put_user(from->si_call_addr, &to->si_call_addr);
-+ err |= __put_user(from->si_syscall, &to->si_syscall);
-+ err |= __put_user(from->si_arch, &to->si_arch);
-+ break;
-+#endif
- default: /* this is just in case for now ... */
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
-@@ -2765,7 +2795,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
- int error = -ESRCH;
-
- rcu_read_lock();
-- p = find_task_by_vpid(pid);
-+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
-+ /* allow glibc communication via tgkill to other threads in our
-+ thread group */
-+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
-+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
-+ p = find_task_by_vpid_unrestricted(pid);
-+ else
-+#endif
-+ p = find_task_by_vpid(pid);
- if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
- error = check_kill_permission(sig, info, p);
- /*
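/*
 * Illustrative sketch, not part of the patch above: a filter returning
 * SECCOMP_RET_TRAP makes seccomp_send_sigsys() deliver SIGSYS with
 * si_code == SYS_SECCOMP, and the signal.c hunk copies si_call_addr,
 * si_syscall and si_arch out to userspace.  The handler below only shows
 * how those fields would be read; libc support for them varies, so treat
 * the field access as an assumption.
 */
#include <signal.h>
#include <stdio.h>

static void sigsys_handler(int sig, siginfo_t *info, void *ucontext)
{
	/* si_errno carries the 16-bit SECCOMP_RET_DATA the filter attached;
	 * fprintf() is not async-signal-safe and is used here only for brevity. */
	fprintf(stderr, "trapped syscall %d (arch 0x%x) at %p, data %d\n",
		info->si_syscall, (unsigned int)info->si_arch,
		info->si_call_addr, info->si_errno);
}

static void install_sigsys_handler(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = sigsys_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSYS, &sa, NULL);
}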
-diff --git a/kernel/smp.c b/kernel/smp.c
-index 9e800b2..1533ba5 100644
---- a/kernel/smp.c
-+++ b/kernel/smp.c
-@@ -75,7 +75,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
-+static struct notifier_block hotplug_cfd_notifier = {
- .notifier_call = hotplug_cfd,
- };
-
-@@ -591,22 +591,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
- }
- EXPORT_SYMBOL(smp_call_function);
-
--void ipi_call_lock(void)
-+void ipi_call_lock(void) __acquires(call_function.lock)
- {
- raw_spin_lock(&call_function.lock);
- }
-
--void ipi_call_unlock(void)
-+void ipi_call_unlock(void) __releases(call_function.lock)
- {
- raw_spin_unlock(&call_function.lock);
- }
-
--void ipi_call_lock_irq(void)
-+void ipi_call_lock_irq(void) __acquires(call_function.lock)
- {
- raw_spin_lock_irq(&call_function.lock);
- }
-
--void ipi_call_unlock_irq(void)
-+void ipi_call_unlock_irq(void) __releases(call_function.lock)
- {
- raw_spin_unlock_irq(&call_function.lock);
- }
-diff --git a/kernel/softirq.c b/kernel/softirq.c
-index 2c71d91..6b690a4 100644
---- a/kernel/softirq.c
-+++ b/kernel/softirq.c
-@@ -52,11 +52,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
- EXPORT_SYMBOL(irq_stat);
- #endif
-
--static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
-+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
-
- DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
-
--char *softirq_to_name[NR_SOFTIRQS] = {
-+const char * const softirq_to_name[NR_SOFTIRQS] = {
- "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
- "TASKLET", "SCHED", "HRTIMER", "RCU"
- };
-@@ -235,7 +235,7 @@ restart:
- kstat_incr_softirqs_this_cpu(vec_nr);
-
- trace_softirq_entry(vec_nr);
-- h->action(h);
-+ h->action();
- trace_softirq_exit(vec_nr);
- if (unlikely(prev_count != preempt_count())) {
- printk(KERN_ERR "huh, entered softirq %u %s %p"
-@@ -385,7 +385,7 @@ void raise_softirq(unsigned int nr)
- local_irq_restore(flags);
- }
-
--void open_softirq(int nr, void (*action)(struct softirq_action *))
-+void __init open_softirq(int nr, void (*action)(void))
- {
- softirq_vec[nr].action = action;
- }
-@@ -441,7 +441,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
-
- EXPORT_SYMBOL(__tasklet_hi_schedule_first);
-
--static void tasklet_action(struct softirq_action *a)
-+static __latent_entropy void tasklet_action(void)
- {
- struct tasklet_struct *list;
-
-@@ -476,7 +476,7 @@ static void tasklet_action(struct softirq_action *a)
- }
- }
-
--static void tasklet_hi_action(struct softirq_action *a)
-+static __latent_entropy void tasklet_hi_action(void)
- {
- struct tasklet_struct *list;
-
-@@ -712,7 +712,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
-+static struct notifier_block remote_softirq_cpu_notifier = {
- .notifier_call = remote_softirq_cpu_notify,
- };
-
-@@ -894,7 +894,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata cpu_nfb = {
-+static struct notifier_block cpu_nfb = {
- .notifier_call = cpu_callback
- };
-
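/*
 * Illustrative sketch, not part of the patch above: after the softirq.c
 * hunk, handlers take no argument, are tagged __latent_entropy, and the
 * vector itself is __read_only, so registration is confined to __init
 * code.  EXAMPLE_SOFTIRQ and the handler below are hypothetical.
 */
static __latent_entropy void example_softirq_action(void)
{
	/* per-CPU deferred work for the hypothetical EXAMPLE_SOFTIRQ */
}

static int __init example_softirq_init(void)
{
	open_softirq(EXAMPLE_SOFTIRQ, example_softirq_action);
	return 0;
}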
-diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
-index 2f194e9..2c05ea9 100644
---- a/kernel/stop_machine.c
-+++ b/kernel/stop_machine.c
-@@ -362,7 +362,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
- * cpu notifiers. It currently shares the same priority as sched
- * migration_notifier.
- */
--static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
-+static struct notifier_block cpu_stop_cpu_notifier = {
- .notifier_call = cpu_stop_cpu_callback,
- .priority = 10,
- };
-diff --git a/kernel/sys.c b/kernel/sys.c
-index 9d557df..7207dae 100644
---- a/kernel/sys.c
-+++ b/kernel/sys.c
-@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
- error = -EACCES;
- goto out;
- }
-+
-+ if (gr_handle_chroot_setpriority(p, niceval)) {
-+ error = -EACCES;
-+ goto out;
-+ }
-+
- no_nice = security_task_setnice(p, niceval);
- if (no_nice) {
- error = no_nice;
-@@ -597,6 +603,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
- goto error;
- }
-
-+ if (gr_check_group_change(new->gid, new->egid, -1))
-+ goto error;
-+
-+ if (new->gid != old->gid) {
-+ /* make sure we generate a learn log for what will
-+ end up being a role transition after a full-learning
-+ policy is generated
-+ CAP_SETGID is required to perform a transition
-+ we may not log a CAP_SETGID check above, e.g.
-+ in the case where new rgid = old egid
-+ */
-+ gr_learn_cap(current, new, CAP_SETGID);
-+ }
-+
- if (rgid != (gid_t) -1 ||
- (egid != (gid_t) -1 && egid != old->gid))
- new->sgid = new->egid;
-@@ -626,6 +646,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
- old = current_cred();
-
- retval = -EPERM;
-+
-+ if (gr_check_group_change(gid, gid, gid))
-+ goto error;
-+
- if (nsown_capable(CAP_SETGID))
- new->gid = new->egid = new->sgid = new->fsgid = gid;
- else if (gid == old->gid || gid == old->sgid)
-@@ -643,7 +667,7 @@ error:
- /*
- * change the user struct in a credentials set to match the new UID
- */
--static int set_user(struct cred *new)
-+int set_user(struct cred *new)
- {
- struct user_struct *new_user;
-
-@@ -713,7 +737,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
- goto error;
- }
-
-+ if (gr_check_user_change(new->uid, new->euid, -1))
-+ goto error;
-+
- if (new->uid != old->uid) {
-+ /* make sure we generate a learn log for what will
-+ end up being a role transition after a full-learning
-+ policy is generated
-+ CAP_SETUID is required to perform a transition
-+ we may not log a CAP_SETUID check above, e.g.
-+ in the case where new ruid = old euid
-+ */
-+ gr_learn_cap(current, new, CAP_SETUID);
- retval = set_user(new);
- if (retval < 0)
- goto error;
-@@ -757,6 +792,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
- old = current_cred();
-
- retval = -EPERM;
-+
-+ if (gr_check_crash_uid(uid))
-+ goto error;
-+ if (gr_check_user_change(uid, uid, uid))
-+ goto error;
-+
- if (nsown_capable(CAP_SETUID)) {
- new->suid = new->uid = uid;
- if (uid != old->uid) {
-@@ -811,6 +852,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
- goto error;
- }
-
-+ if (gr_check_user_change(ruid, euid, -1))
-+ goto error;
-+
- if (ruid != (uid_t) -1) {
- new->uid = ruid;
- if (ruid != old->uid) {
-@@ -875,6 +919,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
- goto error;
- }
-
-+ if (gr_check_group_change(rgid, egid, -1))
-+ goto error;
-+
- if (rgid != (gid_t) -1)
- new->gid = rgid;
- if (egid != (gid_t) -1)
-@@ -925,12 +972,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
- uid == old->suid || uid == old->fsuid ||
- nsown_capable(CAP_SETUID)) {
- if (uid != old_fsuid) {
-+ if (gr_check_user_change(-1, -1, uid))
-+ goto error;
-+
- new->fsuid = uid;
- if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
- goto change_okay;
- }
- }
-
-+error:
- abort_creds(new);
- return old_fsuid;
-
-@@ -957,12 +1008,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
- if (gid == old->gid || gid == old->egid ||
- gid == old->sgid || gid == old->fsgid ||
- nsown_capable(CAP_SETGID)) {
-+ if (gr_check_group_change(-1, -1, gid))
-+ goto error;
-+
- if (gid != old_fsgid) {
- new->fsgid = gid;
- goto change_okay;
- }
- }
-
-+error:
- abort_creds(new);
- return old_fsgid;
-
-@@ -1270,19 +1325,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
- return -EFAULT;
-
- down_read(&uts_sem);
-- error = __copy_to_user(&name->sysname, &utsname()->sysname,
-+ error = __copy_to_user(name->sysname, &utsname()->sysname,
- __OLD_UTS_LEN);
- error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
-- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
-+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
- __OLD_UTS_LEN);
- error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
-- error |= __copy_to_user(&name->release, &utsname()->release,
-+ error |= __copy_to_user(name->release, &utsname()->release,
- __OLD_UTS_LEN);
- error |= __put_user(0, name->release + __OLD_UTS_LEN);
-- error |= __copy_to_user(&name->version, &utsname()->version,
-+ error |= __copy_to_user(name->version, &utsname()->version,
- __OLD_UTS_LEN);
- error |= __put_user(0, name->version + __OLD_UTS_LEN);
-- error |= __copy_to_user(&name->machine, &utsname()->machine,
-+ error |= __copy_to_user(name->machine, &utsname()->machine,
- __OLD_UTS_LEN);
- error |= __put_user(0, name->machine + __OLD_UTS_LEN);
- up_read(&uts_sem);
-@@ -1484,6 +1539,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
- */
- new_rlim->rlim_cur = 1;
- }
-+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
-+ is changed to a lower value. Since tasks can be created by the same
-+ user in between this limit change and an execve by this task, force
-+ a recheck only for this task by setting PF_NPROC_EXCEEDED
-+ */
-+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
-+ tsk->flags |= PF_NPROC_EXCEEDED;
- }
- if (!retval) {
- if (old_rlim)
-@@ -1747,7 +1809,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
- error = get_dumpable(me->mm);
- break;
- case PR_SET_DUMPABLE:
-- if (arg2 < 0 || arg2 > 1) {
-+ if (arg2 > 1) {
- error = -EINVAL;
- break;
- }
-@@ -1808,7 +1870,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
- error = prctl_get_seccomp();
- break;
- case PR_SET_SECCOMP:
-- error = prctl_set_seccomp(arg2);
-+ error = prctl_set_seccomp(arg2, (char __user *)arg3);
- break;
- case PR_GET_TSC:
- error = GET_TSC_CTL(arg2);
-@@ -1868,6 +1930,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
- else
- error = PR_MCE_KILL_DEFAULT;
- break;
-+ case PR_SET_NO_NEW_PRIVS:
-+ if (arg2 != 1 || arg3 || arg4 || arg5)
-+ return -EINVAL;
-+
-+ current->no_new_privs = 1;
-+ break;
-+ case PR_GET_NO_NEW_PRIVS:
-+ if (arg2 || arg3 || arg4 || arg5)
-+ return -EINVAL;
-+ return current->no_new_privs ? 1 : 0;
- default:
- error = -EINVAL;
- break;
-diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index ea7ec7f..da588ba 100644
---- a/kernel/sysctl.c
-+++ b/kernel/sysctl.c
-@@ -86,6 +86,13 @@
-
-
- #if defined(CONFIG_SYSCTL)
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
-+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
-+ const int op);
-+extern int gr_handle_chroot_sysctl(const int op);
-
- /* External variables not in a header file. */
- extern int sysctl_overcommit_memory;
-@@ -109,21 +116,22 @@ extern int sysctl_nr_trim_pages;
- #ifdef CONFIG_BLOCK
- extern int blk_iopoll_enabled;
- #endif
-+extern int sysctl_modify_ldt;
-
- /* Constants used for minimum and maximum */
- #ifdef CONFIG_LOCKUP_DETECTOR
--static int sixty = 60;
--static int neg_one = -1;
-+static int sixty __read_only = 60;
- #endif
-
--static int zero;
--static int __maybe_unused one = 1;
--static int __maybe_unused two = 2;
--static int __maybe_unused three = 3;
--static unsigned long one_ul = 1;
--static int one_hundred = 100;
-+static int neg_one __read_only = -1;
-+static int zero __read_only = 0;
-+static int __maybe_unused one __read_only = 1;
-+static int __maybe_unused two __read_only = 2;
-+static int __maybe_unused three __read_only = 3;
-+static unsigned long one_ul __read_only = 1;
-+static int one_hundred __read_only = 100;
- #ifdef CONFIG_PRINTK
--static int ten_thousand = 10000;
-+static int ten_thousand __read_only = 10000;
- #endif
-
- /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
-@@ -165,10 +173,13 @@ static int proc_taint(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
- #endif
-
--#ifdef CONFIG_PRINTK
--static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
-+static int proc_dointvec_minmax_secure_sysadmin(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
--#endif
-+
-+static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
-+ void __user *buffer, size_t *lenp, loff_t *ppos);
-+static int proc_dostring_coredump(struct ctl_table *table, int write,
-+ void __user *buffer, size_t *lenp, loff_t *ppos);
-
- #ifdef CONFIG_MAGIC_SYSRQ
- /* Note: sysrq code uses it's own private copy */
-@@ -191,6 +202,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
- }
-
- #endif
-+extern struct ctl_table grsecurity_table[];
-
- static struct ctl_table root_table[];
- static struct ctl_table_root sysctl_table_root;
-@@ -220,6 +232,20 @@ extern struct ctl_table epoll_table[];
- int sysctl_legacy_va_layout;
- #endif
-
-+#ifdef CONFIG_PAX_SOFTMODE
-+static ctl_table pax_table[] = {
-+ {
-+ .procname = "softmode",
-+ .data = &pax_softmode,
-+ .maxlen = sizeof(unsigned int),
-+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
-+ },
-+
-+ { }
-+};
-+#endif
-+
- /* The default sysctl tables: */
-
- static struct ctl_table root_table[] = {
-@@ -266,6 +292,22 @@ static int max_extfrag_threshold = 1000;
- #endif
-
- static struct ctl_table kern_table[] = {
-+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
-+ {
-+ .procname = "grsecurity",
-+ .mode = 0500,
-+ .child = grsecurity_table,
-+ },
-+#endif
-+
-+#ifdef CONFIG_PAX_SOFTMODE
-+ {
-+ .procname = "pax",
-+ .mode = 0500,
-+ .child = pax_table,
-+ },
-+#endif
-+
- {
- .procname = "sched_child_runs_first",
- .data = &sysctl_sched_child_runs_first,
-@@ -420,7 +462,7 @@ static struct ctl_table kern_table[] = {
- .data = core_pattern,
- .maxlen = CORENAME_MAX_SIZE,
- .mode = 0644,
-- .proc_handler = proc_dostring,
-+ .proc_handler = proc_dostring_coredump,
- },
- {
- .procname = "core_pipe_limit",
-@@ -550,7 +592,7 @@ static struct ctl_table kern_table[] = {
- .data = &modprobe_path,
- .maxlen = KMOD_PATH_LEN,
- .mode = 0644,
-- .proc_handler = proc_dostring,
-+ .proc_handler = proc_dostring_modpriv,
- },
- {
- .procname = "modules_disabled",
-@@ -558,7 +600,7 @@ static struct ctl_table kern_table[] = {
- .maxlen = sizeof(int),
- .mode = 0644,
- /* only handle a transition from default "0" to "1" */
-- .proc_handler = proc_dointvec_minmax,
-+ .proc_handler = proc_dointvec_minmax_secure,
- .extra1 = &one,
- .extra2 = &one,
- },
-@@ -713,20 +755,24 @@ static struct ctl_table kern_table[] = {
- .data = &dmesg_restrict,
- .maxlen = sizeof(int),
- .mode = 0644,
-- .proc_handler = proc_dointvec_minmax_sysadmin,
-+ .proc_handler = proc_dointvec_minmax_secure_sysadmin,
- .extra1 = &zero,
- .extra2 = &one,
- },
-+#endif
- {
- .procname = "kptr_restrict",
- .data = &kptr_restrict,
- .maxlen = sizeof(int),
- .mode = 0644,
-- .proc_handler = proc_dointvec_minmax_sysadmin,
-+ .proc_handler = proc_dointvec_minmax_secure_sysadmin,
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ .extra1 = &two,
-+#else
- .extra1 = &zero,
-+#endif
- .extra2 = &two,
- },
--#endif
- {
- .procname = "ngroups_max",
- .data = &ngroups_max,
-@@ -831,6 +877,15 @@ static struct ctl_table kern_table[] = {
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
-+ {
-+ .procname = "modify_ldt",
-+ .data = &sysctl_modify_ldt,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec_minmax_secure_sysadmin,
-+ .extra1 = &zero,
-+ .extra2 = &one,
-+ },
- #endif
- #if defined(CONFIG_MMU)
- {
-@@ -957,10 +1012,17 @@ static struct ctl_table kern_table[] = {
- */
- {
- .procname = "perf_event_paranoid",
-- .data = &sysctl_perf_event_paranoid,
-- .maxlen = sizeof(sysctl_perf_event_paranoid),
-+ .data = &sysctl_perf_event_legitimately_concerned,
-+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
- .mode = 0644,
-- .proc_handler = proc_dointvec,
-+ /* go ahead, be a hero */
-+ .proc_handler = proc_dointvec_minmax_secure_sysadmin,
-+ .extra1 = &neg_one,
-+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
-+ .extra2 = &three,
-+#else
-+ .extra2 = &two,
-+#endif
- },
- {
- .procname = "perf_event_mlock_kb",
-@@ -1216,6 +1278,13 @@ static struct ctl_table vm_table[] = {
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- },
-+ {
-+ .procname = "heap_stack_gap",
-+ .data = &sysctl_heap_stack_gap,
-+ .maxlen = sizeof(sysctl_heap_stack_gap),
-+ .mode = 0644,
-+ .proc_handler = proc_doulongvec_minmax,
-+ },
- #else
- {
- .procname = "nr_trim_pages",
-@@ -1499,7 +1568,7 @@ static struct ctl_table fs_table[] = {
- .data = &suid_dumpable,
- .maxlen = sizeof(int),
- .mode = 0644,
-- .proc_handler = proc_dointvec_minmax,
-+ .proc_handler = proc_dointvec_minmax_coredump,
- .extra1 = &zero,
- .extra2 = &two,
- },
-@@ -1720,6 +1789,17 @@ static int test_perm(int mode, int op)
- int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
- {
- int mode;
-+ int error;
-+
-+ if (table->parent != NULL && table->parent->procname != NULL &&
-+ table->procname != NULL &&
-+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
-+ return -EACCES;
-+ if (gr_handle_chroot_sysctl(op))
-+ return -EACCES;
-+ error = gr_handle_sysctl(table, op);
-+ if (error)
-+ return error;
-
- if (root->permissions)
- mode = root->permissions(root, current->nsproxy, table);
-@@ -1732,7 +1812,9 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
- static void sysctl_set_parent(struct ctl_table *parent, struct ctl_table *table)
- {
- for (; table->procname; table++) {
-- table->parent = parent;
-+ pax_open_kernel();
-+ *(void **)&table->parent = (ctl_table_no_const *)parent;
-+ pax_close_kernel();
- if (table->child)
- sysctl_set_parent(table, table->child);
- }
-@@ -1856,7 +1938,8 @@ struct ctl_table_header *__register_sysctl_paths(
- const struct ctl_path *path, struct ctl_table *table)
- {
- struct ctl_table_header *header;
-- struct ctl_table *new, **prevp;
-+ struct ctl_table **prevp;
-+ ctl_table_no_const *new;
- unsigned int n, npath;
- struct ctl_table_set *set;
-
-@@ -1877,7 +1960,7 @@ struct ctl_table_header *__register_sysctl_paths(
- if (!header)
- return NULL;
-
-- new = (struct ctl_table *) (header + 1);
-+ new = (ctl_table_no_const *) (header + 1);
-
- /* Now connect the dots */
- prevp = &header->ctl_table;
-@@ -2124,6 +2207,16 @@ int proc_dostring(struct ctl_table *table, int write,
- buffer, lenp, ppos);
- }
-
-+int proc_dostring_modpriv(struct ctl_table *table, int write,
-+ void __user *buffer, size_t *lenp, loff_t *ppos)
-+{
-+ if (write && !capable(CAP_SYS_MODULE))
-+ return -EPERM;
-+
-+ return _proc_do_string(table->data, table->maxlen, write,
-+ buffer, lenp, ppos);
-+}
-+
- static size_t proc_skip_spaces(char **buf)
- {
- size_t ret;
-@@ -2229,6 +2322,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
- len = strlen(tmp);
- if (len > *size)
- len = *size;
-+ if (len > sizeof(tmp))
-+ len = sizeof(tmp);
- if (copy_to_user(*buf, tmp, len))
- return -EFAULT;
- *size -= len;
-@@ -2386,6 +2481,44 @@ int proc_dointvec(struct ctl_table *table, int write,
- NULL,NULL);
- }
-
-+static int do_proc_dointvec_conv_secure(bool *negp, unsigned long *lvalp,
-+ int *valp,
-+ int write, void *data)
-+{
-+ if (write) {
-+ if (*negp) {
-+ if (*lvalp > (unsigned long) INT_MAX + 1)
-+ return -EINVAL;
-+ pax_open_kernel();
-+ *valp = -*lvalp;
-+ pax_close_kernel();
-+ } else {
-+ if (*lvalp > (unsigned long) INT_MAX)
-+ return -EINVAL;
-+ pax_open_kernel();
-+ *valp = *lvalp;
-+ pax_close_kernel();
-+ }
-+ } else {
-+ int val = *valp;
-+ if (val < 0) {
-+ *negp = true;
-+ *lvalp = (unsigned long)-val;
-+ } else {
-+ *negp = false;
-+ *lvalp = (unsigned long)val;
-+ }
-+ }
-+ return 0;
-+}
-+
-+int proc_dointvec_secure(struct ctl_table *table, int write,
-+ void __user *buffer, size_t *lenp, loff_t *ppos)
-+{
-+ return do_proc_dointvec(table,write,buffer,lenp,ppos,
-+ do_proc_dointvec_conv_secure,NULL);
-+}
-+
- /*
- * Taint values can only be increased
- * This means we can safely use a temporary.
-@@ -2393,7 +2526,7 @@ int proc_dointvec(struct ctl_table *table, int write,
- static int proc_taint(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
- {
-- struct ctl_table t;
-+ ctl_table_no_const t;
- unsigned long tmptaint = get_taint();
- int err;
-
-@@ -2421,16 +2554,14 @@ static int proc_taint(struct ctl_table *table, int write,
- return err;
- }
-
--#ifdef CONFIG_PRINTK
--static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
-+static int proc_dointvec_minmax_secure_sysadmin(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
- {
- if (write && !capable(CAP_SYS_ADMIN))
- return -EPERM;
-
-- return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-+ return proc_dointvec_minmax_secure(table, write, buffer, lenp, ppos);
- }
--#endif
-
- struct do_proc_dointvec_minmax_conv_param {
- int *min;
-@@ -2461,6 +2592,32 @@ static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp,
- return 0;
- }
-
-+static int do_proc_dointvec_minmax_conv_secure(bool *negp, unsigned long *lvalp,
-+ int *valp,
-+ int write, void *data)
-+{
-+ struct do_proc_dointvec_minmax_conv_param *param = data;
-+ if (write) {
-+ int val = *negp ? -*lvalp : *lvalp;
-+ if ((param->min && *param->min > val) ||
-+ (param->max && *param->max < val))
-+ return -EINVAL;
-+ pax_open_kernel();
-+ *valp = val;
-+ pax_close_kernel();
-+ } else {
-+ int val = *valp;
-+ if (val < 0) {
-+ *negp = true;
-+ *lvalp = (unsigned long)-val;
-+ } else {
-+ *negp = false;
-+ *lvalp = (unsigned long)val;
-+ }
-+ }
-+ return 0;
-+}
-+
- /**
- * proc_dointvec_minmax - read a vector of integers with min/max values
- * @table: the sysctl table
-@@ -2488,6 +2645,45 @@ int proc_dointvec_minmax(struct ctl_table *table, int write,
- do_proc_dointvec_minmax_conv, &param);
- }
-
-+int proc_dointvec_minmax_secure(struct ctl_table *table, int write,
-+ void __user *buffer, size_t *lenp, loff_t *ppos)
-+{
-+ struct do_proc_dointvec_minmax_conv_param param = {
-+ .min = (int *) table->extra1,
-+ .max = (int *) table->extra2,
-+ };
-+ return do_proc_dointvec(table, write, buffer, lenp, ppos,
-+ do_proc_dointvec_minmax_conv_secure, &param);
-+}
-+
-+static void validate_coredump_safety(void)
-+{
-+ if (suid_dumpable == SUID_DUMPABLE_SAFE &&
-+ core_pattern[0] != '/' && core_pattern[0] != '|') {
-+ printk(KERN_WARNING "Unsafe core_pattern used with "\
-+ "suid_dumpable=2. Pipe handler or fully qualified "\
-+ "core dump path required.\n");
-+ }
-+}
-+
-+static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
-+ void __user *buffer, size_t *lenp, loff_t *ppos)
-+{
-+ int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-+ if (!error)
-+ validate_coredump_safety();
-+ return error;
-+}
-+
-+static int proc_dostring_coredump(struct ctl_table *table, int write,
-+ void __user *buffer, size_t *lenp, loff_t *ppos)
-+{
-+ int error = proc_dostring(table, write, buffer, lenp, ppos);
-+ if (!error)
-+ validate_coredump_safety();
-+ return error;
-+}
-+
- static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos,
-@@ -2545,8 +2741,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
- *i = val;
- } else {
- val = convdiv * (*i) / convmul;
-- if (!first)
-+ if (!first) {
- err = proc_put_char(&buffer, &left, '\t');
-+ if (err)
-+ break;
-+ }
- err = proc_put_long(&buffer, &left, val, false);
- if (err)
- break;
-@@ -2941,6 +3140,12 @@ int proc_dostring(struct ctl_table *table, int write,
- return -ENOSYS;
- }
-
-+int proc_dostring_modpriv(struct ctl_table *table, int write,
-+ void __user *buffer, size_t *lenp, loff_t *ppos)
-+{
-+ return -ENOSYS;
-+}
-+
- int proc_dointvec(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
- {
-@@ -2997,6 +3202,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
- EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
- EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
- EXPORT_SYMBOL(proc_dostring);
-+EXPORT_SYMBOL(proc_dostring_modpriv);
- EXPORT_SYMBOL(proc_doulongvec_minmax);
- EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
- EXPORT_SYMBOL(register_sysctl_table);
-diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
-index 9f9aa32..d0c4f42 100644
---- a/kernel/sysctl_binary.c
-+++ b/kernel/sysctl_binary.c
-@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
- int i;
-
- set_fs(KERNEL_DS);
-- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
-+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
- set_fs(old_fs);
- if (result < 0)
- goto out_kfree;
-@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
- }
-
- set_fs(KERNEL_DS);
-- result = vfs_write(file, buffer, str - buffer, &pos);
-+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
- set_fs(old_fs);
- if (result < 0)
- goto out_kfree;
-@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
- int i;
-
- set_fs(KERNEL_DS);
-- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
-+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
- set_fs(old_fs);
- if (result < 0)
- goto out_kfree;
-@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
- }
-
- set_fs(KERNEL_DS);
-- result = vfs_write(file, buffer, str - buffer, &pos);
-+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
- set_fs(old_fs);
- if (result < 0)
- goto out_kfree;
-@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
- int i;
-
- set_fs(KERNEL_DS);
-- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
-+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
- set_fs(old_fs);
- if (result < 0)
- goto out;
-@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
- __le16 dnaddr;
-
- set_fs(KERNEL_DS);
-- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
-+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
- set_fs(old_fs);
- if (result < 0)
- goto out;
-@@ -1234,7 +1234,7 @@ static ssize_t bin_dn_node_address(struct file *file,
- le16_to_cpu(dnaddr) & 0x3ff);
-
- set_fs(KERNEL_DS);
-- result = vfs_write(file, buf, len, &pos);
-+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
- set_fs(old_fs);
- if (result < 0)
- goto out;
-diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
-index 362da65..ab8ef8c 100644
---- a/kernel/sysctl_check.c
-+++ b/kernel/sysctl_check.c
-@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
- set_fail(&fail, table, "Directory with extra2");
- } else {
- if ((table->proc_handler == proc_dostring) ||
-+ (table->proc_handler == proc_dostring_modpriv) ||
- (table->proc_handler == proc_dointvec) ||
- (table->proc_handler == proc_dointvec_minmax) ||
- (table->proc_handler == proc_dointvec_jiffies) ||
-diff --git a/kernel/taskstats.c b/kernel/taskstats.c
-index e660464..c8b9e67 100644
---- a/kernel/taskstats.c
-+++ b/kernel/taskstats.c
-@@ -27,9 +27,12 @@
- #include <linux/cgroup.h>
- #include <linux/fs.h>
- #include <linux/file.h>
-+#include <linux/grsecurity.h>
- #include <net/genetlink.h>
- #include <linux/atomic.h>
-
-+extern int gr_is_taskstats_denied(int pid);
-+
- /*
- * Maximum length of a cpumask that can be specified in
- * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
-@@ -556,6 +559,9 @@ err:
-
- static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
- {
-+ if (gr_is_taskstats_denied(current->pid))
-+ return -EACCES;
-+
- if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
- return cmd_attr_register_cpumask(info);
- else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
-diff --git a/kernel/time.c b/kernel/time.c
-index f64e88b..9406590 100644
---- a/kernel/time.c
-+++ b/kernel/time.c
-@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
- return error;
-
- if (tz) {
-+ /* we log in do_settimeofday called below, so don't log twice
-+ */
-+ if (!tv)
-+ gr_log_timechange();
-+
- /* SMP safe, global irq locking makes it work. */
- sys_tz = *tz;
- update_vsyscall_tz();
-diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
-index 7eaf162..e2615e7 100644
---- a/kernel/time/alarmtimer.c
-+++ b/kernel/time/alarmtimer.c
-@@ -807,7 +807,7 @@ static int __init alarmtimer_init(void)
- struct platform_device *pdev;
- int error = 0;
- int i;
-- struct k_clock alarm_clock = {
-+ static struct k_clock alarm_clock = {
- .clock_getres = alarm_clock_getres,
- .clock_get = alarm_clock_get,
- .timer_create = alarm_timer_create,
-diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
-index c3509fb..ebec319 100644
---- a/kernel/time/tick-broadcast.c
-+++ b/kernel/time/tick-broadcast.c
-@@ -120,7 +120,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
- * then clear the broadcast bit.
- */
- if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
-- int cpu = smp_processor_id();
-+ cpu = smp_processor_id();
-
- cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
- tick_broadcast_clear_oneshot(cpu);
-diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
-index 068c092..13b1bff 100644
---- a/kernel/time/timekeeping.c
-+++ b/kernel/time/timekeeping.c
-@@ -14,6 +14,7 @@
- #include <linux/init.h>
- #include <linux/mm.h>
- #include <linux/sched.h>
-+#include <linux/grsecurity.h>
- #include <linux/syscore_ops.h>
- #include <linux/clocksource.h>
- #include <linux/jiffies.h>
-@@ -385,6 +386,8 @@ int do_settimeofday(const struct timespec *tv)
- if (!timespec_valid_strict(tv))
- return -EINVAL;
-
-+ gr_log_timechange();
-+
- write_seqlock_irqsave(&xtime_lock, flags);
-
- timekeeping_forward_now();
-diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
-index 3258455..f35227d 100644
---- a/kernel/time/timer_list.c
-+++ b/kernel/time/timer_list.c
-@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
-
- static void print_name_offset(struct seq_file *m, void *sym)
- {
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ SEQ_printf(m, "<%p>", NULL);
-+#else
- char symname[KSYM_NAME_LEN];
-
- if (lookup_symbol_name((unsigned long)sym, symname) < 0)
- SEQ_printf(m, "<%pK>", sym);
- else
- SEQ_printf(m, "%s", symname);
-+#endif
- }
-
- static void
-@@ -112,7 +116,11 @@ next_one:
- static void
- print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
- {
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ SEQ_printf(m, " .base: %p\n", NULL);
-+#else
- SEQ_printf(m, " .base: %pK\n", base);
-+#endif
- SEQ_printf(m, " .index: %d\n",
- base->index);
- SEQ_printf(m, " .resolution: %Lu nsecs\n",
-@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
- {
- struct proc_dir_entry *pe;
-
-+#ifdef CONFIG_GRKERNSEC_PROC_ADD
-+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
-+#else
- pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
-+#endif
- if (!pe)
- return -ENOMEM;
- return 0;
-diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
-index 0b537f2..40d6c20 100644
---- a/kernel/time/timer_stats.c
-+++ b/kernel/time/timer_stats.c
-@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
- static unsigned long nr_entries;
- static struct entry entries[MAX_ENTRIES];
-
--static atomic_t overflow_count;
-+static atomic_unchecked_t overflow_count;
-
- /*
- * The entries are in a hash-table, for fast lookup:
-@@ -140,7 +140,7 @@ static void reset_entries(void)
- nr_entries = 0;
- memset(entries, 0, sizeof(entries));
- memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
-- atomic_set(&overflow_count, 0);
-+ atomic_set_unchecked(&overflow_count, 0);
- }
-
- static struct entry *alloc_entry(void)
-@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
- if (likely(entry))
- entry->count++;
- else
-- atomic_inc(&overflow_count);
-+ atomic_inc_unchecked(&overflow_count);
-
- out_unlock:
- raw_spin_unlock_irqrestore(lock, flags);
-@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
-
- static void print_name_offset(struct seq_file *m, unsigned long addr)
- {
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ seq_printf(m, "<%p>", NULL);
-+#else
- char symname[KSYM_NAME_LEN];
-
- if (lookup_symbol_name(addr, symname) < 0)
-- seq_printf(m, "<%p>", (void *)addr);
-+ seq_printf(m, "<%pK>", (void *)addr);
- else
- seq_printf(m, "%s", symname);
-+#endif
- }
-
- static int tstats_show(struct seq_file *m, void *v)
-@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
-
- seq_puts(m, "Timer Stats Version: v0.2\n");
- seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
-- if (atomic_read(&overflow_count))
-+ if (atomic_read_unchecked(&overflow_count))
- seq_printf(m, "Overflow: %d entries\n",
-- atomic_read(&overflow_count));
-+ atomic_read_unchecked(&overflow_count));
-
- for (i = 0; i < nr_entries; i++) {
- entry = entries + i;
-@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
- {
- struct proc_dir_entry *pe;
-
-+#ifdef CONFIG_GRKERNSEC_PROC_ADD
-+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
-+#else
- pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
-+#endif
- if (!pe)
- return -ENOMEM;
- return 0;
-diff --git a/kernel/timer.c b/kernel/timer.c
-index 349953e..6262b04 100644
---- a/kernel/timer.c
-+++ b/kernel/timer.c
-@@ -1308,7 +1308,7 @@ void update_process_times(int user_tick)
- /*
- * This function runs timers and the timer-tq in bottom half context.
- */
--static void run_timer_softirq(struct softirq_action *h)
-+static __latent_entropy void run_timer_softirq(void)
- {
- struct tvec_base *base = __this_cpu_read(tvec_bases);
-
-@@ -1435,7 +1435,7 @@ static void process_timeout(unsigned long __data)
- *
- * In all cases the return value is guaranteed to be non-negative.
- */
--signed long __sched schedule_timeout(signed long timeout)
-+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
- {
- struct timer_list timer;
- unsigned long expire;
-@@ -1727,7 +1727,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata timers_nb = {
-+static struct notifier_block timers_nb = {
- .notifier_call = timer_cpu_notify,
- };
-
-diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
-index 92cac05..89f0de9 100644
---- a/kernel/trace/blktrace.c
-+++ b/kernel/trace/blktrace.c
-@@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
- struct blk_trace *bt = filp->private_data;
- char buf[16];
-
-- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
-+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
-
- return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
- }
-@@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
- return 1;
-
- bt = buf->chan->private_data;
-- atomic_inc(&bt->dropped);
-+ atomic_inc_unchecked(&bt->dropped);
- return 0;
- }
-
-@@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
-
- bt->dir = dir;
- bt->dev = dev;
-- atomic_set(&bt->dropped, 0);
-+ atomic_set_unchecked(&bt->dropped, 0);
-
- ret = -EIO;
- bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
-diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index 08e043b..a000091 100644
---- a/kernel/trace/ftrace.c
-+++ b/kernel/trace/ftrace.c
-@@ -1616,12 +1616,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
- if (unlikely(ftrace_disabled))
- return 0;
-
-+ ret = ftrace_arch_code_modify_prepare();
-+ FTRACE_WARN_ON(ret);
-+ if (ret)
-+ return 0;
-+
- ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
-+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
- if (ret) {
- ftrace_bug(ret, ip);
-- return 0;
- }
-- return 1;
-+ return ret ? 0 : 1;
- }
-
- /*
-@@ -2713,7 +2718,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
-
- int
- register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
-- void *data)
-+ void *data)
- {
- struct ftrace_func_probe *entry;
- struct ftrace_page *pg;
-@@ -4062,8 +4067,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
-
- #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
--static struct notifier_block ftrace_suspend_notifier;
--
- int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
- {
- return 0;
-@@ -4105,7 +4108,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
-
- if (t->ret_stack == NULL) {
- atomic_set(&t->tracing_graph_pause, 0);
-- atomic_set(&t->trace_overrun, 0);
-+ atomic_set_unchecked(&t->trace_overrun, 0);
- t->curr_ret_stack = -1;
- /* Make sure the tasks see the -1 first: */
- smp_wmb();
-@@ -4208,6 +4211,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
- return NOTIFY_DONE;
- }
-
-+static struct notifier_block ftrace_suspend_notifier = {
-+ .notifier_call = ftrace_suspend_notifier_call
-+};
-+
- /* Just a place holder for function graph */
- static struct ftrace_ops fgraph_ops __read_mostly = {
- .func = ftrace_stub,
-@@ -4251,7 +4258,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
- goto out;
- }
-
-- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
- register_pm_notifier(&ftrace_suspend_notifier);
-
- ftrace_graph_active++;
-@@ -4305,7 +4311,7 @@ static void
- graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
- {
- atomic_set(&t->tracing_graph_pause, 0);
-- atomic_set(&t->trace_overrun, 0);
-+ atomic_set_unchecked(&t->trace_overrun, 0);
- t->ftrace_timestamp = 0;
- /* make curr_ret_stack visible before we add the ret_stack */
- smp_wmb();
-diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
-index b252661..c3a5113 100644
---- a/kernel/trace/ring_buffer.c
-+++ b/kernel/trace/ring_buffer.c
-@@ -376,9 +376,9 @@ struct buffer_data_page {
- */
- struct buffer_page {
- struct list_head list; /* list of buffer pages */
-- local_t write; /* index for next write */
-+ local_unchecked_t write; /* index for next write */
- unsigned read; /* index for next read */
-- local_t entries; /* entries on this page */
-+ local_unchecked_t entries; /* entries on this page */
- unsigned long real_end; /* real end of data */
- struct buffer_data_page *page; /* Actual data page */
- };
-@@ -489,11 +489,11 @@ struct ring_buffer_per_cpu {
- unsigned long lost_events;
- unsigned long last_overrun;
- local_t entries_bytes;
-- local_t commit_overrun;
-- local_t overrun;
-+ local_unchecked_t commit_overrun;
-+ local_unchecked_t overrun;
- local_t entries;
- local_t committing;
-- local_t commits;
-+ local_unchecked_t commits;
- unsigned long read;
- unsigned long read_bytes;
- u64 write_stamp;
-@@ -884,8 +884,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
- *
- * We add a counter to the write field to denote this.
- */
-- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
-- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
-+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
-+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
-
- /*
- * Just make sure we have seen our old_write and synchronize
-@@ -913,8 +913,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
- * cmpxchg to only update if an interrupt did not already
- * do it for us. If the cmpxchg fails, we don't care.
- */
-- (void)local_cmpxchg(&next_page->write, old_write, val);
-- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
-+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
-+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
-
- /*
- * No need to worry about races with clearing out the commit.
-@@ -1481,7 +1481,7 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
-
- static inline unsigned long rb_page_write(struct buffer_page *bpage)
- {
-- return local_read(&bpage->write) & RB_WRITE_MASK;
-+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
- }
-
- static inline unsigned rb_page_commit(struct buffer_page *bpage)
-@@ -1491,7 +1491,7 @@ static inline unsigned rb_page_commit(struct buffer_page *bpage)
-
- static inline unsigned long rb_page_entries(struct buffer_page *bpage)
- {
-- return local_read(&bpage->entries) & RB_WRITE_MASK;
-+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
- }
-
- /* Size is determined by what has been committed */
-@@ -1709,7 +1709,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
- * it is our responsibility to update
- * the counters.
- */
-- local_add(entries, &cpu_buffer->overrun);
-+ local_add_unchecked(entries, &cpu_buffer->overrun);
- local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
-
- /*
-@@ -1859,7 +1859,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
- if (tail == BUF_PAGE_SIZE)
- tail_page->real_end = 0;
-
-- local_sub(length, &tail_page->write);
-+ local_sub_unchecked(length, &tail_page->write);
- return;
- }
-
-@@ -1894,7 +1894,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
- rb_event_set_padding(event);
-
- /* Set the write back to the previous setting */
-- local_sub(length, &tail_page->write);
-+ local_sub_unchecked(length, &tail_page->write);
- return;
- }
-
-@@ -1906,7 +1906,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
-
- /* Set write to end of buffer */
- length = (tail + length) - BUF_PAGE_SIZE;
-- local_sub(length, &tail_page->write);
-+ local_sub_unchecked(length, &tail_page->write);
- }
-
- /*
-@@ -1932,7 +1932,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
- * about it.
- */
- if (unlikely(next_page == commit_page)) {
-- local_inc(&cpu_buffer->commit_overrun);
-+ local_inc_unchecked(&cpu_buffer->commit_overrun);
- goto out_reset;
- }
-
-@@ -1986,7 +1986,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
- cpu_buffer->tail_page) &&
- (cpu_buffer->commit_page ==
- cpu_buffer->reader_page))) {
-- local_inc(&cpu_buffer->commit_overrun);
-+ local_inc_unchecked(&cpu_buffer->commit_overrun);
- goto out_reset;
- }
- }
-@@ -2034,7 +2034,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
- length += RB_LEN_TIME_EXTEND;
-
- tail_page = cpu_buffer->tail_page;
-- write = local_add_return(length, &tail_page->write);
-+ write = local_add_return_unchecked(length, &tail_page->write);
-
- /* set write to only the index of the write */
- write &= RB_WRITE_MASK;
-@@ -2058,7 +2058,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
- kmemcheck_annotate_bitfield(event, bitfield);
- rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
-
-- local_inc(&tail_page->entries);
-+ local_inc_unchecked(&tail_page->entries);
-
- /*
- * If this is the first commit on the page, then update
-@@ -2091,7 +2091,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
-
- if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
- unsigned long write_mask =
-- local_read(&bpage->write) & ~RB_WRITE_MASK;
-+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
- unsigned long event_length = rb_event_length(event);
- /*
- * This is on the tail page. It is possible that
-@@ -2101,7 +2101,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
- */
- old_index += write_mask;
- new_index += write_mask;
-- index = local_cmpxchg(&bpage->write, old_index, new_index);
-+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
- if (index == old_index) {
- /* update counters */
- local_sub(event_length, &cpu_buffer->entries_bytes);
-@@ -2116,7 +2116,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
- static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
- {
- local_inc(&cpu_buffer->committing);
-- local_inc(&cpu_buffer->commits);
-+ local_inc_unchecked(&cpu_buffer->commits);
- }
-
- static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
-@@ -2128,7 +2128,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
- return;
-
- again:
-- commits = local_read(&cpu_buffer->commits);
-+ commits = local_read_unchecked(&cpu_buffer->commits);
- /* synchronize with interrupts */
- barrier();
- if (local_read(&cpu_buffer->committing) == 1)
-@@ -2144,7 +2144,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
- * updating of the commit page and the clearing of the
- * committing counter.
- */
-- if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
-+ if (unlikely(local_read_unchecked(&cpu_buffer->commits) != commits) &&
- !local_read(&cpu_buffer->committing)) {
- local_inc(&cpu_buffer->committing);
- goto again;
-@@ -2174,7 +2174,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
- barrier();
- if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
- local_dec(&cpu_buffer->committing);
-- local_dec(&cpu_buffer->commits);
-+ local_dec_unchecked(&cpu_buffer->commits);
- return NULL;
- }
- #endif
-@@ -2440,7 +2440,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
-
- /* Do the likely case first */
- if (likely(bpage->page == (void *)addr)) {
-- local_dec(&bpage->entries);
-+ local_dec_unchecked(&bpage->entries);
- return;
- }
-
-@@ -2452,7 +2452,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
- start = bpage;
- do {
- if (bpage->page == (void *)addr) {
-- local_dec(&bpage->entries);
-+ local_dec_unchecked(&bpage->entries);
- return;
- }
- rb_inc_page(cpu_buffer, &bpage);
-@@ -2677,7 +2677,7 @@ static inline unsigned long
- rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
- {
- return local_read(&cpu_buffer->entries) -
-- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
-+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
- }
-
- /**
-@@ -2765,7 +2765,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
- return 0;
-
- cpu_buffer = buffer->buffers[cpu];
-- ret = local_read(&cpu_buffer->overrun);
-+ ret = local_read_unchecked(&cpu_buffer->overrun);
-
- return ret;
- }
-@@ -2786,7 +2786,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
- return 0;
-
- cpu_buffer = buffer->buffers[cpu];
-- ret = local_read(&cpu_buffer->commit_overrun);
-+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
-
- return ret;
- }
-@@ -2831,7 +2831,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
- /* if you care about this being correct, lock the buffer */
- for_each_buffer_cpu(buffer, cpu) {
- cpu_buffer = buffer->buffers[cpu];
-- overruns += local_read(&cpu_buffer->overrun);
-+ overruns += local_read_unchecked(&cpu_buffer->overrun);
- }
-
- return overruns;
-@@ -2998,8 +2998,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
- /*
- * Reset the reader page to size zero.
- */
-- local_set(&cpu_buffer->reader_page->write, 0);
-- local_set(&cpu_buffer->reader_page->entries, 0);
-+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
-+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
- local_set(&cpu_buffer->reader_page->page->commit, 0);
- cpu_buffer->reader_page->real_end = 0;
-
-@@ -3033,7 +3033,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
- * want to compare with the last_overrun.
- */
- smp_mb();
-- overwrite = local_read(&(cpu_buffer->overrun));
-+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
-
- /*
- * Here's the tricky part.
-@@ -3583,8 +3583,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
-
- cpu_buffer->head_page
- = list_entry(cpu_buffer->pages, struct buffer_page, list);
-- local_set(&cpu_buffer->head_page->write, 0);
-- local_set(&cpu_buffer->head_page->entries, 0);
-+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
-+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
- local_set(&cpu_buffer->head_page->page->commit, 0);
-
- cpu_buffer->head_page->read = 0;
-@@ -3593,17 +3593,17 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
- cpu_buffer->commit_page = cpu_buffer->head_page;
-
- INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
-- local_set(&cpu_buffer->reader_page->write, 0);
-- local_set(&cpu_buffer->reader_page->entries, 0);
-+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
-+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
- local_set(&cpu_buffer->reader_page->page->commit, 0);
- cpu_buffer->reader_page->read = 0;
-
-- local_set(&cpu_buffer->commit_overrun, 0);
-+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
- local_set(&cpu_buffer->entries_bytes, 0);
-- local_set(&cpu_buffer->overrun, 0);
-+ local_set_unchecked(&cpu_buffer->overrun, 0);
- local_set(&cpu_buffer->entries, 0);
- local_set(&cpu_buffer->committing, 0);
-- local_set(&cpu_buffer->commits, 0);
-+ local_set_unchecked(&cpu_buffer->commits, 0);
- cpu_buffer->read = 0;
- cpu_buffer->read_bytes = 0;
-
-@@ -3998,8 +3998,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
- rb_init_page(bpage);
- bpage = reader->page;
- reader->page = *data_page;
-- local_set(&reader->write, 0);
-- local_set(&reader->entries, 0);
-+ local_set_unchecked(&reader->write, 0);
-+ local_set_unchecked(&reader->entries, 0);
- reader->read = 0;
- *data_page = bpage;
-
-diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
-index 0c348a6..454324b 100644
---- a/kernel/trace/trace.c
-+++ b/kernel/trace/trace.c
-@@ -2656,7 +2656,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
- return 0;
- }
-
--int set_tracer_flag(unsigned int mask, int enabled)
-+int set_tracer_flag(unsigned long mask, int enabled)
- {
- /* do nothing if flag is already set */
- if (!!(trace_flags & mask) == !!enabled)
-@@ -4246,10 +4246,9 @@ static const struct file_operations tracing_dyn_info_fops = {
- };
- #endif
-
--static struct dentry *d_tracer;
--
- struct dentry *tracing_init_dentry(void)
- {
-+ static struct dentry *d_tracer;
- static int once;
-
- if (d_tracer)
-@@ -4269,10 +4268,9 @@ struct dentry *tracing_init_dentry(void)
- return d_tracer;
- }
-
--static struct dentry *d_percpu;
--
- struct dentry *tracing_dentry_percpu(void)
- {
-+ static struct dentry *d_percpu;
- static int once;
- struct dentry *d_tracer;
-
-diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
-index c3c3f6b..7d8dbdc 100644
---- a/kernel/trace/trace.h
-+++ b/kernel/trace/trace.h
-@@ -820,7 +820,7 @@ extern const char *__start___trace_bprintk_fmt[];
- extern const char *__stop___trace_bprintk_fmt[];
-
- int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
--int set_tracer_flag(unsigned int mask, int enabled);
-+int set_tracer_flag(unsigned long mask, int enabled);
-
- #undef FTRACE_ENTRY
- #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \
-diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
-index 3947835..8c0488b 100644
---- a/kernel/trace/trace_clock.c
-+++ b/kernel/trace/trace_clock.c
-@@ -114,7 +114,7 @@ u64 notrace trace_clock_global(void)
- return now;
- }
-
--static atomic64_t trace_counter;
-+static atomic64_unchecked_t trace_counter;
-
- /*
- * trace_clock_counter(): simply an atomic counter.
-@@ -123,5 +123,5 @@ static atomic64_t trace_counter;
- */
- u64 notrace trace_clock_counter(void)
- {
-- return atomic64_add_return(1, &trace_counter);
-+ return atomic64_add_return_unchecked(1, &trace_counter);
- }
-diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
-index 875fed4..7a76cbb 100644
---- a/kernel/trace/trace_events.c
-+++ b/kernel/trace/trace_events.c
-@@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
- struct ftrace_module_file_ops {
- struct list_head list;
- struct module *mod;
-- struct file_operations id;
-- struct file_operations enable;
-- struct file_operations format;
-- struct file_operations filter;
- };
-
- static struct ftrace_module_file_ops *
-@@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
-
- file_ops->mod = mod;
-
-- file_ops->id = ftrace_event_id_fops;
-- file_ops->id.owner = mod;
--
-- file_ops->enable = ftrace_enable_fops;
-- file_ops->enable.owner = mod;
--
-- file_ops->filter = ftrace_event_filter_fops;
-- file_ops->filter.owner = mod;
--
-- file_ops->format = ftrace_event_format_fops;
-- file_ops->format.owner = mod;
-+ pax_open_kernel();
-+ mod->trace_id.owner = mod;
-+ mod->trace_enable.owner = mod;
-+ mod->trace_filter.owner = mod;
-+ mod->trace_format.owner = mod;
-+ pax_close_kernel();
-
- list_add(&file_ops->list, &ftrace_module_file_list);
-
-@@ -1367,8 +1358,8 @@ static void trace_module_add_events(struct module *mod)
-
- for_each_event(call, start, end) {
- __trace_add_event_call(*call, mod,
-- &file_ops->id, &file_ops->enable,
-- &file_ops->filter, &file_ops->format);
-+ &mod->trace_id, &mod->trace_enable,
-+ &mod->trace_filter, &mod->trace_format);
- }
- }
-
-diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
-index b0996c1..9c39703 100644
---- a/kernel/trace/trace_events_filter.c
-+++ b/kernel/trace/trace_events_filter.c
-@@ -1027,6 +1027,9 @@ static void parse_init(struct filter_parse_state *ps,
-
- static char infix_next(struct filter_parse_state *ps)
- {
-+ if (!ps->infix.cnt)
-+ return 0;
-+
- ps->infix.cnt--;
-
- return ps->infix.string[ps->infix.tail++];
-@@ -1042,6 +1045,9 @@ static char infix_peek(struct filter_parse_state *ps)
-
- static void infix_advance(struct filter_parse_state *ps)
- {
-+ if (!ps->infix.cnt)
-+ return;
-+
- ps->infix.cnt--;
- ps->infix.tail++;
- }
-@@ -1343,19 +1349,27 @@ static int check_preds(struct filter_parse_state *ps)
- {
- int n_normal_preds = 0, n_logical_preds = 0;
- struct postfix_elt *elt;
-+ int cnt = 0;
-
- list_for_each_entry(elt, &ps->postfix, list) {
-- if (elt->op == OP_NONE)
-+ if (elt->op == OP_NONE) {
-+ cnt++;
- continue;
-+ }
-
- if (elt->op == OP_AND || elt->op == OP_OR) {
- n_logical_preds++;
-+ cnt--;
- continue;
- }
-+ // OP_NOT is not supported in this kernel, will get
-+ // a reject here when it's backported
-+ cnt--;
- n_normal_preds++;
-+ WARN_ON_ONCE(cnt < 0);
- }
-
-- if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
-+ if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
- parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
- return -EINVAL;
- }
-diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
-index a7d2a4c..b034c76 100644
---- a/kernel/trace/trace_functions_graph.c
-+++ b/kernel/trace/trace_functions_graph.c
-@@ -108,7 +108,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
-
- /* The return trace stack is full */
- if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-- atomic_inc(&current->trace_overrun);
-+ atomic_inc_unchecked(&current->trace_overrun);
- return -EBUSY;
- }
-
-@@ -171,7 +171,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
- *ret = current->ret_stack[index].ret;
- trace->func = current->ret_stack[index].func;
- trace->calltime = current->ret_stack[index].calltime;
-- trace->overrun = atomic_read(&current->trace_overrun);
-+ trace->overrun = atomic_read_unchecked(&current->trace_overrun);
- trace->depth = index;
- }
-
-diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
-index 00d527c..7c5b1a3 100644
---- a/kernel/trace/trace_kprobe.c
-+++ b/kernel/trace/trace_kprobe.c
-@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
- long ret;
- int maxlen = get_rloc_len(*(u32 *)dest);
- u8 *dst = get_rloc_data(dest);
-- u8 *src = addr;
-+ const u8 __user *src = (const u8 __force_user *)addr;
- mm_segment_t old_fs = get_fs();
- if (!maxlen)
- return;
-@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
- pagefault_disable();
- do
- ret = __copy_from_user_inatomic(dst++, src++, 1);
-- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
-+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
- dst[-1] = '\0';
- pagefault_enable();
- set_fs(old_fs);
-@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
- ((u8 *)get_rloc_data(dest))[0] = '\0';
- *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
- } else
-- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
-+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
- get_rloc_offs(*(u32 *)dest));
- }
- /* Return the length of string -- including null terminal byte */
-@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
- set_fs(KERNEL_DS);
- pagefault_disable();
- do {
-- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
-+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
- len++;
- } while (c && ret == 0 && len < MAX_STRING_SIZE);
- pagefault_enable();
-diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
-index fd3c8aa..5f324a6 100644
---- a/kernel/trace/trace_mmiotrace.c
-+++ b/kernel/trace/trace_mmiotrace.c
-@@ -24,7 +24,7 @@ struct header_iter {
- static struct trace_array *mmio_trace_array;
- static bool overrun_detected;
- static unsigned long prev_overruns;
--static atomic_t dropped_count;
-+static atomic_unchecked_t dropped_count;
-
- static void mmio_reset_data(struct trace_array *tr)
- {
-@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
-
- static unsigned long count_overruns(struct trace_iterator *iter)
- {
-- unsigned long cnt = atomic_xchg(&dropped_count, 0);
-+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
- unsigned long over = ring_buffer_overruns(iter->tr->buffer);
-
- if (over > prev_overruns)
-@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
- event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
- sizeof(*entry), 0, pc);
- if (!event) {
-- atomic_inc(&dropped_count);
-+ atomic_inc_unchecked(&dropped_count);
- return;
- }
- entry = ring_buffer_event_data(event);
-@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
- event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
- sizeof(*entry), 0, pc);
- if (!event) {
-- atomic_inc(&dropped_count);
-+ atomic_inc_unchecked(&dropped_count);
- return;
- }
- entry = ring_buffer_event_data(event);
-diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
-index 1dcf253..e1568b3 100644
---- a/kernel/trace/trace_output.c
-+++ b/kernel/trace/trace_output.c
-@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
-
- p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
- if (!IS_ERR(p)) {
-- p = mangle_path(s->buffer + s->len, p, "\n");
-+ p = mangle_path(s->buffer + s->len, p, "\n\\");
- if (p) {
- s->len = p - s->buffer;
- return 1;
-@@ -810,14 +810,16 @@ int register_ftrace_event(struct trace_event *event)
- goto out;
- }
-
-+ pax_open_kernel();
- if (event->funcs->trace == NULL)
-- event->funcs->trace = trace_nop_print;
-+ *(void **)&event->funcs->trace = trace_nop_print;
- if (event->funcs->raw == NULL)
-- event->funcs->raw = trace_nop_print;
-+ *(void **)&event->funcs->raw = trace_nop_print;
- if (event->funcs->hex == NULL)
-- event->funcs->hex = trace_nop_print;
-+ *(void **)&event->funcs->hex = trace_nop_print;
- if (event->funcs->binary == NULL)
-- event->funcs->binary = trace_nop_print;
-+ *(void **)&event->funcs->binary = trace_nop_print;
-+ pax_close_kernel();
-
- key = event->type & (EVENT_HASHSIZE - 1);
-
-diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
-index c5b20a3..6b38c73 100644
---- a/kernel/trace/trace_stack.c
-+++ b/kernel/trace/trace_stack.c
-@@ -66,7 +66,7 @@ check_stack(unsigned long ip, unsigned long *stack)
- return;
-
- /* we do not handle interrupt stacks yet */
-- if (!object_is_on_stack(stack))
-+ if (!object_starts_on_stack(stack))
- return;
-
- local_irq_save(flags);
-diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
-index 1129062..f32b331 100644
---- a/kernel/trace/trace_syscalls.c
-+++ b/kernel/trace/trace_syscalls.c
-@@ -556,6 +556,8 @@ int perf_sysenter_enable(struct ftrace_event_call *call)
- int num;
-
- num = ((struct syscall_metadata *)call->data)->syscall_nr;
-+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
-+ return -EINVAL;
-
- mutex_lock(&syscall_trace_lock);
- if (!sys_perf_refcount_enter)
-@@ -576,6 +578,8 @@ void perf_sysenter_disable(struct ftrace_event_call *call)
- int num;
-
- num = ((struct syscall_metadata *)call->data)->syscall_nr;
-+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
-+ return;
-
- mutex_lock(&syscall_trace_lock);
- sys_perf_refcount_enter--;
-@@ -634,6 +638,8 @@ int perf_sysexit_enable(struct ftrace_event_call *call)
- int num;
-
- num = ((struct syscall_metadata *)call->data)->syscall_nr;
-+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
-+ return -EINVAL;
-
- mutex_lock(&syscall_trace_lock);
- if (!sys_perf_refcount_exit)
-@@ -654,6 +660,8 @@ void perf_sysexit_disable(struct ftrace_event_call *call)
- int num;
-
- num = ((struct syscall_metadata *)call->data)->syscall_nr;
-+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
-+ return;
-
- mutex_lock(&syscall_trace_lock);
- sys_perf_refcount_exit--;
-diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
-index 209b379..7f76423 100644
---- a/kernel/trace/trace_workqueue.c
-+++ b/kernel/trace/trace_workqueue.c
-@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
- int cpu;
- pid_t pid;
- /* Can be inserted from interrupt or user context, need to be atomic */
-- atomic_t inserted;
-+ atomic_unchecked_t inserted;
- /*
- * Don't need to be atomic, works are serialized in a single workqueue thread
- * on a single CPU.
-@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
- spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
- list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
- if (node->pid == wq_thread->pid) {
-- atomic_inc(&node->inserted);
-+ atomic_inc_unchecked(&node->inserted);
- goto found;
- }
- }
-@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
- tsk = get_pid_task(pid, PIDTYPE_PID);
- if (tsk) {
- seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
-- atomic_read(&cws->inserted), cws->executed,
-+ atomic_read_unchecked(&cws->inserted), cws->executed,
- tsk->comm);
- put_task_struct(tsk);
- }
-diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
-index 63da38c..639904e 100644
---- a/kernel/utsname_sysctl.c
-+++ b/kernel/utsname_sysctl.c
-@@ -46,7 +46,7 @@ static void put_uts(ctl_table *table, int write, void *which)
- static int proc_do_uts_string(ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
- {
-- struct ctl_table uts_table;
-+ ctl_table_no_const uts_table;
- int r;
- memcpy(&uts_table, table, sizeof(uts_table));
- uts_table.data = get_uts(table, write);
-diff --git a/kernel/watchdog.c b/kernel/watchdog.c
-index a8bc4d9..eae8357 100644
---- a/kernel/watchdog.c
-+++ b/kernel/watchdog.c
-@@ -574,7 +574,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata cpu_nfb = {
-+static struct notifier_block cpu_nfb = {
- .notifier_call = cpu_callback
- };
-
-diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index 563820c..1548b9b 100644
---- a/kernel/workqueue.c
-+++ b/kernel/workqueue.c
-@@ -3506,7 +3506,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
- */
- worker_flags |= WORKER_REBIND;
- worker_flags &= ~WORKER_ROGUE;
-- ACCESS_ONCE(worker->flags) = worker_flags;
-+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
-
- /* queue rebind_work, wq doesn't matter, use the default one */
- if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
-diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
-index 82928f5..a3c7bb1 100644
---- a/lib/Kconfig.debug
-+++ b/lib/Kconfig.debug
-@@ -510,7 +510,7 @@ config DEBUG_MUTEXES
-
- config DEBUG_LOCK_ALLOC
- bool "Lock debugging: detect incorrect freeing of live locks"
-- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
-+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
- select DEBUG_SPINLOCK
- select DEBUG_MUTEXES
- select LOCKDEP
-@@ -524,7 +524,7 @@ config DEBUG_LOCK_ALLOC
-
- config PROVE_LOCKING
- bool "Lock debugging: prove locking correctness"
-- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
-+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
- select LOCKDEP
- select DEBUG_SPINLOCK
- select DEBUG_MUTEXES
-@@ -616,7 +616,7 @@ config LOCKDEP
-
- config LOCK_STAT
- bool "Lock usage statistics"
-- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
-+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
- select LOCKDEP
- select DEBUG_SPINLOCK
- select DEBUG_MUTEXES
-@@ -1103,6 +1103,7 @@ config LATENCYTOP
- depends on DEBUG_KERNEL
- depends on STACKTRACE_SUPPORT
- depends on PROC_FS
-+ depends on !GRKERNSEC_HIDESYM
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
- select KALLSYMS
- select KALLSYMS_ALL
-@@ -1126,7 +1127,7 @@ source kernel/trace/Kconfig
-
- config PROVIDE_OHCI1394_DMA_INIT
- bool "Remote debugging over FireWire early on boot"
-- depends on PCI && X86
-+ depends on PCI && X86 && !GRKERNSEC
- help
- If you want to debug problems which hang or crash the kernel early
- on boot and the crashing machine has a FireWire port, you can use
-@@ -1155,7 +1156,7 @@ config PROVIDE_OHCI1394_DMA_INIT
-
- config FIREWIRE_OHCI_REMOTE_DMA
- bool "Remote debugging over FireWire with firewire-ohci"
-- depends on FIREWIRE_OHCI
-+ depends on FIREWIRE_OHCI && !GRKERNSEC
- help
- This option lets you use the FireWire bus for remote debugging
- with help of the firewire-ohci driver. It enables unfiltered
-diff --git a/lib/Makefile b/lib/Makefile
-index c06efca..bcafc28 100644
---- a/lib/Makefile
-+++ b/lib/Makefile
-@@ -46,7 +46,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
-
- obj-$(CONFIG_BTREE) += btree.o
- obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
--obj-$(CONFIG_DEBUG_LIST) += list_debug.o
-+obj-y += list_debug.o
- obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
-
- ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
-diff --git a/lib/bitmap.c b/lib/bitmap.c
-index dbc526f..528d2c2 100644
---- a/lib/bitmap.c
-+++ b/lib/bitmap.c
-@@ -423,7 +423,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
- {
- int c, old_c, totaldigits, ndigits, nchunks, nbits;
- u32 chunk;
-- const char __user __force *ubuf = (const char __user __force *)buf;
-+ const char __user *ubuf = (const char __force_user *)buf;
-
- bitmap_zero(maskp, nmaskbits);
-
-@@ -508,7 +508,7 @@ int bitmap_parse_user(const char __user *ubuf,
- {
- if (!access_ok(VERIFY_READ, ubuf, ulen))
- return -EFAULT;
-- return __bitmap_parse((const char __force *)ubuf,
-+ return __bitmap_parse((const char __force_kernel *)ubuf,
- ulen, 1, maskp, nmaskbits);
-
- }
-@@ -600,7 +600,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
- {
- unsigned a, b;
- int c, old_c, totaldigits;
-- const char __user __force *ubuf = (const char __user __force *)buf;
-+ const char __user *ubuf = (const char __force_user *)buf;
- int exp_digit, in_range;
-
- totaldigits = c = 0;
-@@ -700,7 +700,7 @@ int bitmap_parselist_user(const char __user *ubuf,
- {
- if (!access_ok(VERIFY_READ, ubuf, ulen))
- return -EFAULT;
-- return __bitmap_parselist((const char __force *)ubuf,
-+ return __bitmap_parselist((const char __force_kernel *)ubuf,
- ulen, 1, maskp, nmaskbits);
- }
- EXPORT_SYMBOL(bitmap_parselist_user);
-diff --git a/lib/bug.c b/lib/bug.c
-index 1955209..cbbb2ad 100644
---- a/lib/bug.c
-+++ b/lib/bug.c
-@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
- return BUG_TRAP_TYPE_NONE;
-
- bug = find_bug(bugaddr);
-+ if (!bug)
-+ return BUG_TRAP_TYPE_NONE;
-
- file = NULL;
- line = 0;
-diff --git a/lib/cpu-notifier-error-inject.c b/lib/cpu-notifier-error-inject.c
-index 4dc2032..7a2a1da 100644
---- a/lib/cpu-notifier-error-inject.c
-+++ b/lib/cpu-notifier-error-inject.c
-@@ -45,7 +45,9 @@ static struct notifier_block err_inject_cpu_notifier = {
-
- static int err_inject_init(void)
- {
-- err_inject_cpu_notifier.priority = priority;
-+ pax_open_kernel();
-+ *(int *)&err_inject_cpu_notifier.priority = priority;
-+ pax_close_kernel();
-
- return register_hotcpu_notifier(&err_inject_cpu_notifier);
- }
-diff --git a/lib/debugobjects.c b/lib/debugobjects.c
-index a78b7c6..2c73084 100644
---- a/lib/debugobjects.c
-+++ b/lib/debugobjects.c
-@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
- if (limit > 4)
- return;
-
-- is_on_stack = object_is_on_stack(addr);
-+ is_on_stack = object_starts_on_stack(addr);
- if (is_on_stack == onstack)
- return;
-
-diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
-index 6a110e2..799667a 100644
---- a/lib/decompress_bunzip2.c
-+++ b/lib/decompress_bunzip2.c
-@@ -666,7 +666,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
-
- /* Fourth byte (ascii '1'-'9'), indicates block size in units of 100k of
- uncompressed data. Allocate intermediate buffer for block. */
-- bd->dbufSize = 100000*(i-BZh0);
-+ i -= BZh0;
-+ bd->dbufSize = 100000 * i;
-
- bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
- if (!bd->dbuf)
-diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
-index 476c65a..b4c50e8 100644
---- a/lib/decompress_unlzma.c
-+++ b/lib/decompress_unlzma.c
-@@ -39,10 +39,10 @@
-
- #define MIN(a, b) (((a) < (b)) ? (a) : (b))
-
--static long long INIT read_int(unsigned char *ptr, int size)
-+static unsigned long long INIT read_int(unsigned char *ptr, int size)
- {
- int i;
-- long long ret = 0;
-+ unsigned long long ret = 0;
-
- for (i = 0; i < size; i++)
- ret = (ret << 8) | ptr[size-i-1];
-diff --git a/lib/devres.c b/lib/devres.c
-index 7c0e953..f642b5c 100644
---- a/lib/devres.c
-+++ b/lib/devres.c
-@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
- void devm_iounmap(struct device *dev, void __iomem *addr)
- {
- WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
-- (void *)addr));
-+ (void __force *)addr));
- iounmap(addr);
- }
- EXPORT_SYMBOL(devm_iounmap);
-@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
- {
- ioport_unmap(addr);
- WARN_ON(devres_destroy(dev, devm_ioport_map_release,
-- devm_ioport_map_match, (void *)addr));
-+ devm_ioport_map_match, (void __force *)addr));
- }
- EXPORT_SYMBOL(devm_ioport_unmap);
-
-diff --git a/lib/div64.c b/lib/div64.c
-index 5b49191..d84e6fc 100644
---- a/lib/div64.c
-+++ b/lib/div64.c
-@@ -58,7 +58,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
- EXPORT_SYMBOL(__div64_32);
-
- #ifndef div_s64_rem
--s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
-+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
- {
- u64 quotient;
-
-@@ -89,7 +89,7 @@ EXPORT_SYMBOL(div_s64_rem);
- * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c'
- */
- #ifndef div64_u64
--u64 div64_u64(u64 dividend, u64 divisor)
-+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
- {
- u32 high = divisor >> 32;
- u64 quot;
-diff --git a/lib/dma-debug.c b/lib/dma-debug.c
-index fea790a..3bdd6b4 100644
---- a/lib/dma-debug.c
-+++ b/lib/dma-debug.c
-@@ -760,7 +760,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
-
- void dma_debug_add_bus(struct bus_type *bus)
- {
-- struct notifier_block *nb;
-+ notifier_block_no_const *nb;
-
- if (global_disable)
- return;
-@@ -925,7 +925,7 @@ out:
-
- static void check_for_stack(struct device *dev, void *addr)
- {
-- if (object_is_on_stack(addr))
-+ if (object_starts_on_stack(addr))
- err_printk(dev, NULL, "DMA-API: device driver maps memory from"
- "stack [addr=%p]\n", addr);
- }
-diff --git a/lib/extable.c b/lib/extable.c
-index 4cac81e..63e9b8f 100644
---- a/lib/extable.c
-+++ b/lib/extable.c
-@@ -13,6 +13,7 @@
- #include <linux/init.h>
- #include <linux/sort.h>
- #include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-
- #ifndef ARCH_HAS_SORT_EXTABLE
- /*
-@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
- void sort_extable(struct exception_table_entry *start,
- struct exception_table_entry *finish)
- {
-+ pax_open_kernel();
- sort(start, finish - start, sizeof(struct exception_table_entry),
- cmp_ex, NULL);
-+ pax_close_kernel();
- }
-
- #ifdef CONFIG_MODULES
-diff --git a/lib/inflate.c b/lib/inflate.c
-index 013a761..c28f3fc 100644
---- a/lib/inflate.c
-+++ b/lib/inflate.c
-@@ -269,7 +269,7 @@ static void free(void *where)
- malloc_ptr = free_mem_ptr;
- }
- #else
--#define malloc(a) kmalloc(a, GFP_KERNEL)
-+#define malloc(a) kmalloc((a), GFP_KERNEL)
- #define free(a) kfree(a)
- #endif
-
-diff --git a/lib/ioremap.c b/lib/ioremap.c
-index da4e2ad..6373b5f 100644
---- a/lib/ioremap.c
-+++ b/lib/ioremap.c
-@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
- unsigned long next;
-
- phys_addr -= addr;
-- pmd = pmd_alloc(&init_mm, pud, addr);
-+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
- if (!pmd)
- return -ENOMEM;
- do {
-@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
- unsigned long next;
-
- phys_addr -= addr;
-- pud = pud_alloc(&init_mm, pgd, addr);
-+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
- if (!pud)
- return -ENOMEM;
- do {
-diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
-index bd2bea9..6b3c95e 100644
---- a/lib/is_single_threaded.c
-+++ b/lib/is_single_threaded.c
-@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
- struct task_struct *p, *t;
- bool ret;
-
-+ if (!mm)
-+ return true;
-+
- if (atomic_read(&task->signal->live) != 1)
- return false;
-
-diff --git a/lib/kobject.c b/lib/kobject.c
-index 83bd5b3..757af67 100644
---- a/lib/kobject.c
-+++ b/lib/kobject.c
-@@ -296,8 +296,9 @@ error:
- }
- EXPORT_SYMBOL(kobject_init);
-
--static int kobject_add_varg(struct kobject *kobj, struct kobject *parent,
-- const char *fmt, va_list vargs)
-+static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
-+ struct kobject *parent,
-+ const char *fmt, va_list vargs)
- {
- int retval;
-
-@@ -844,7 +845,7 @@ static struct kset *kset_create(const char *name,
- kset = kzalloc(sizeof(*kset), GFP_KERNEL);
- if (!kset)
- return NULL;
-- retval = kobject_set_name(&kset->kobj, name);
-+ retval = kobject_set_name(&kset->kobj, "%s", name);
- if (retval) {
- kfree(kset);
- return NULL;
-@@ -898,9 +899,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
-
-
- static DEFINE_SPINLOCK(kobj_ns_type_lock);
--static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
-+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
-
--int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
-+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
- {
- enum kobj_ns_type type = ops->type;
- int error;
-diff --git a/lib/kref.c b/lib/kref.c
-index 3efb882..8492f4c 100644
---- a/lib/kref.c
-+++ b/lib/kref.c
-@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
- */
- int kref_put(struct kref *kref, void (*release)(struct kref *kref))
- {
-- WARN_ON(release == NULL);
-+ BUG_ON(release == NULL);
- WARN_ON(release == (void (*)(struct kref *))kfree);
-
- if (atomic_dec_and_test(&kref->refcount)) {
-diff --git a/lib/list_debug.c b/lib/list_debug.c
-index b8029a5..2b120e1 100644
---- a/lib/list_debug.c
-+++ b/lib/list_debug.c
-@@ -8,7 +8,9 @@
-
- #include <linux/module.h>
- #include <linux/list.h>
-+#include <linux/mm.h>
-
-+#ifdef CONFIG_DEBUG_LIST
- /*
- * Insert a new entry between two known consecutive entries.
- *
-@@ -16,18 +18,40 @@
- * the prev/next entries already!
- */
-
-+static bool __list_add_debug(struct list_head *new,
-+ struct list_head *prev,
-+ struct list_head *next)
-+{
-+ if (unlikely(next->prev != prev)) {
-+ printk(KERN_ERR "list_add corruption. next->prev should be "
-+ "prev (%p), but was %p. (next=%p).\n",
-+ prev, next->prev, next);
-+ BUG();
-+ return false;
-+ }
-+ if (unlikely(prev->next != next)) {
-+ printk(KERN_ERR "list_add corruption. prev->next should be "
-+ "next (%p), but was %p. (prev=%p).\n",
-+ next, prev->next, prev);
-+ BUG();
-+ return false;
-+ }
-+ if (unlikely(new == prev || new == next)) {
-+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
-+ new, prev, next);
-+ BUG();
-+ return false;
-+ }
-+ return true;
-+}
-+
- void __list_add(struct list_head *new,
-- struct list_head *prev,
-- struct list_head *next)
-+ struct list_head *prev,
-+ struct list_head *next)
- {
-- WARN(next->prev != prev,
-- "list_add corruption. next->prev should be "
-- "prev (%p), but was %p. (next=%p).\n",
-- prev, next->prev, next);
-- WARN(prev->next != next,
-- "list_add corruption. prev->next should be "
-- "next (%p), but was %p. (prev=%p).\n",
-- next, prev->next, prev);
-+ if (!__list_add_debug(new, prev, next))
-+ return;
-+
- next->prev = new;
- new->next = next;
- new->prev = prev;
-@@ -35,28 +59,46 @@ void __list_add(struct list_head *new,
- }
- EXPORT_SYMBOL(__list_add);
-
--void __list_del_entry(struct list_head *entry)
-+static bool __list_del_entry_debug(struct list_head *entry)
- {
- struct list_head *prev, *next;
-
- prev = entry->prev;
- next = entry->next;
-
-- if (WARN(next == LIST_POISON1,
-- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
-- entry, LIST_POISON1) ||
-- WARN(prev == LIST_POISON2,
-- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
-- entry, LIST_POISON2) ||
-- WARN(prev->next != entry,
-- "list_del corruption. prev->next should be %p, "
-- "but was %p\n", entry, prev->next) ||
-- WARN(next->prev != entry,
-- "list_del corruption. next->prev should be %p, "
-- "but was %p\n", entry, next->prev))
-+ if (unlikely(next == LIST_POISON1)) {
-+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
-+ entry, LIST_POISON1);
-+ BUG();
-+ return false;
-+ }
-+ if (unlikely(prev == LIST_POISON2)) {
-+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
-+ entry, LIST_POISON2);
-+ BUG();
-+ return false;
-+ }
-+ if (unlikely(entry->prev->next != entry)) {
-+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
-+ "but was %p\n", entry, prev->next);
-+ BUG();
-+ return false;
-+ }
-+ if (unlikely(entry->next->prev != entry)) {
-+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
-+ "but was %p\n", entry, next->prev);
-+ BUG();
-+ return false;
-+ }
-+ return true;
-+}
-+
-+void __list_del_entry(struct list_head *entry)
-+{
-+ if (!__list_del_entry_debug(entry))
- return;
-
-- __list_del(prev, next);
-+ __list_del(entry->prev, entry->next);
- }
- EXPORT_SYMBOL(__list_del_entry);
-
-@@ -73,3 +115,76 @@ void list_del(struct list_head *entry)
- entry->prev = LIST_POISON2;
- }
- EXPORT_SYMBOL(list_del);
-+#endif
-+
-+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
-+{
-+#ifdef CONFIG_DEBUG_LIST
-+ if (!__list_add_debug(new, prev, next))
-+ return;
-+#endif
-+
-+ pax_open_kernel();
-+ next->prev = new;
-+ new->next = next;
-+ new->prev = prev;
-+ prev->next = new;
-+ pax_close_kernel();
-+}
-+EXPORT_SYMBOL(__pax_list_add);
-+
-+void pax_list_del(struct list_head *entry)
-+{
-+#ifdef CONFIG_DEBUG_LIST
-+ if (!__list_del_entry_debug(entry))
-+ return;
-+#endif
-+
-+ pax_open_kernel();
-+ __list_del(entry->prev, entry->next);
-+ entry->next = LIST_POISON1;
-+ entry->prev = LIST_POISON2;
-+ pax_close_kernel();
-+}
-+EXPORT_SYMBOL(pax_list_del);
-+
-+void pax_list_del_init(struct list_head *entry)
-+{
-+ pax_open_kernel();
-+ __list_del(entry->prev, entry->next);
-+ INIT_LIST_HEAD(entry);
-+ pax_close_kernel();
-+}
-+EXPORT_SYMBOL(pax_list_del_init);
-+
-+void __pax_list_add_rcu(struct list_head *new,
-+ struct list_head *prev, struct list_head *next)
-+{
-+#ifdef CONFIG_DEBUG_LIST
-+ if (!__list_add_debug(new, prev, next))
-+ return;
-+#endif
-+
-+ pax_open_kernel();
-+ new->next = next;
-+ new->prev = prev;
-+ rcu_assign_pointer(list_next_rcu(prev), new);
-+ next->prev = new;
-+ pax_close_kernel();
-+}
-+EXPORT_SYMBOL(__pax_list_add_rcu);
-+
-+void pax_list_del_rcu(struct list_head *entry)
-+{
-+#ifdef CONFIG_DEBUG_LIST
-+ if (!__list_del_entry_debug(entry))
-+ return;
-+#endif
-+
-+ pax_open_kernel();
-+ __list_del(entry->prev, entry->next);
-+ entry->next = LIST_POISON1;
-+ entry->prev = LIST_POISON2;
-+ pax_close_kernel();
-+}
-+EXPORT_SYMBOL(pax_list_del_rcu);
-diff --git a/lib/nlattr.c b/lib/nlattr.c
-index be25e35..4d920a2 100644
---- a/lib/nlattr.c
-+++ b/lib/nlattr.c
-@@ -272,7 +272,11 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
- {
- int minlen = min_t(int, count, nla_len(src));
-
-+ BUG_ON(minlen < 0);
-+
- memcpy(dest, nla_data(src), minlen);
-+ if (count > minlen)
-+ memset(dest + minlen, 0, count - minlen);
-
- return minlen;
- }
-diff --git a/lib/radix-tree.c b/lib/radix-tree.c
-index d9df745..a541641b 100644
---- a/lib/radix-tree.c
-+++ b/lib/radix-tree.c
-@@ -80,7 +80,7 @@ struct radix_tree_preload {
- int nr;
- struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
- };
--static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
-+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
-
- static inline void *ptr_to_indirect(void *ptr)
- {
-@@ -1273,8 +1273,10 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
-
- node = indirect_to_ptr(node);
- max_index = radix_tree_maxindex(node->height);
-- if (cur_index > max_index)
-+ if (cur_index > max_index) {
-+ rcu_read_unlock();
- break;
-+ }
-
- cur_index = __locate(node, item, cur_index, &found_index);
- rcu_read_unlock();
-diff --git a/lib/random32.c b/lib/random32.c
-index 1f44bdc..009bfe8 100644
---- a/lib/random32.c
-+++ b/lib/random32.c
-@@ -2,19 +2,19 @@
- This is a maximally equidistributed combined Tausworthe generator
- based on code from GNU Scientific Library 1.5 (30 Jun 2004)
-
-- x_n = (s1_n ^ s2_n ^ s3_n)
-+ lfsr113 version:
-
-- s1_{n+1} = (((s1_n & 4294967294) <<12) ^ (((s1_n <<13) ^ s1_n) >>19))
-- s2_{n+1} = (((s2_n & 4294967288) << 4) ^ (((s2_n << 2) ^ s2_n) >>25))
-- s3_{n+1} = (((s3_n & 4294967280) <<17) ^ (((s3_n << 3) ^ s3_n) >>11))
-+ x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n)
-
-- The period of this generator is about 2^88.
-+ s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13))
-+ s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27))
-+ s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21))
-+ s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12))
-+
-+ The period of this generator is about 2^113 (see erratum paper).
-
- From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
-- Generators", Mathematics of Computation, 65, 213 (1996), 203--213.
--
-- This is available on the net from L'Ecuyer's home page,
--
-+ Generators", Mathematics of Computation, 65, 213 (1996), 203--213:
- http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
- ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
-
-@@ -29,61 +29,148 @@
- that paper.)
-
- This affects the seeding procedure by imposing the requirement
-- s1 > 1, s2 > 7, s3 > 15.
-+ s1 > 1, s2 > 7, s3 > 15, s4 > 127.
-
- */
-
- #include <linux/types.h>
- #include <linux/percpu.h>
--#include <linux/module.h>
-+#include <linux/export.h>
- #include <linux/jiffies.h>
- #include <linux/random.h>
-+#include <linux/sched.h>
-
--static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
-+#ifdef CONFIG_RANDOM32_SELFTEST
-+static void __init prandom_state_selftest(void);
-+#endif
-+
-+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
-
- /**
-- * prandom32 - seeded pseudo-random number generator.
-+ * prandom_u32_state - seeded pseudo-random number generator.
- * @state: pointer to state structure holding seeded state.
- *
- * This is used for pseudo-randomness with no outside seeding.
-- * For more random results, use random32().
-+ * For more random results, use prandom_u32().
- */
--u32 prandom32(struct rnd_state *state)
-+u32 prandom_u32_state(struct rnd_state *state)
- {
- #define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
-
-- state->s1 = TAUSWORTHE(state->s1, 13, 19, 4294967294UL, 12);
-- state->s2 = TAUSWORTHE(state->s2, 2, 25, 4294967288UL, 4);
-- state->s3 = TAUSWORTHE(state->s3, 3, 11, 4294967280UL, 17);
-+ state->s1 = TAUSWORTHE(state->s1, 6U, 13U, 4294967294U, 18U);
-+ state->s2 = TAUSWORTHE(state->s2, 2U, 27U, 4294967288U, 2U);
-+ state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U, 7U);
-+ state->s4 = TAUSWORTHE(state->s4, 3U, 12U, 4294967168U, 13U);
-
-- return (state->s1 ^ state->s2 ^ state->s3);
-+ return (state->s1 ^ state->s2 ^ state->s3 ^ state->s4);
- }
--EXPORT_SYMBOL(prandom32);
-+EXPORT_SYMBOL(prandom_u32_state);
-
- /**
-- * random32 - pseudo random number generator
-+ * prandom_u32 - pseudo random number generator
- *
- * A 32 bit pseudo-random number is generated using a fast
- * algorithm suitable for simulation. This algorithm is NOT
- * considered safe for cryptographic use.
- */
--u32 random32(void)
-+u32 prandom_u32(void)
- {
- unsigned long r;
- struct rnd_state *state = &get_cpu_var(net_rand_state);
-- r = prandom32(state);
-+ r = prandom_u32_state(state);
- put_cpu_var(state);
- return r;
- }
--EXPORT_SYMBOL(random32);
-+EXPORT_SYMBOL(prandom_u32);
-+
-+/*
-+ * prandom_bytes_state - get the requested number of pseudo-random bytes
-+ *
-+ * @state: pointer to state structure holding seeded state.
-+ * @buf: where to copy the pseudo-random bytes to
-+ * @bytes: the requested number of bytes
-+ *
-+ * This is used for pseudo-randomness with no outside seeding.
-+ * For more random results, use prandom_bytes().
-+ */
-+void prandom_bytes_state(struct rnd_state *state, void *buf, int bytes)
-+{
-+ unsigned char *p = buf;
-+ int i;
-+
-+ for (i = 0; i < round_down(bytes, sizeof(u32)); i += sizeof(u32)) {
-+ u32 random = prandom_u32_state(state);
-+ int j;
-+
-+ for (j = 0; j < sizeof(u32); j++) {
-+ p[i + j] = random;
-+ random >>= BITS_PER_BYTE;
-+ }
-+ }
-+ if (i < bytes) {
-+ u32 random = prandom_u32_state(state);
-+
-+ for (; i < bytes; i++) {
-+ p[i] = random;
-+ random >>= BITS_PER_BYTE;
-+ }
-+ }
-+}
-+EXPORT_SYMBOL(prandom_bytes_state);
-+
-+/**
-+ * prandom_bytes - get the requested number of pseudo-random bytes
-+ * @buf: where to copy the pseudo-random bytes to
-+ * @bytes: the requested number of bytes
-+ */
-+void prandom_bytes(void *buf, int bytes)
-+{
-+ struct rnd_state *state = &get_cpu_var(net_rand_state);
-+
-+ prandom_bytes_state(state, buf, bytes);
-+ put_cpu_var(state);
-+}
-+EXPORT_SYMBOL(prandom_bytes);
-+
-+static void prandom_warmup(struct rnd_state *state)
-+{
-+ /* Calling RNG ten times to satisfy recurrence condition */
-+ prandom_u32_state(state);
-+ prandom_u32_state(state);
-+ prandom_u32_state(state);
-+ prandom_u32_state(state);
-+ prandom_u32_state(state);
-+ prandom_u32_state(state);
-+ prandom_u32_state(state);
-+ prandom_u32_state(state);
-+ prandom_u32_state(state);
-+ prandom_u32_state(state);
-+}
-+
-+static void prandom_seed_very_weak(struct rnd_state *state, u32 seed)
-+{
-+ /* Note: This sort of seeding is ONLY used in test cases and
-+ * during boot at the time from core_initcall until late_initcall
-+ * as we don't have a stronger entropy source available yet.
-+ * After late_initcall, we reseed entire state, we have to (!),
-+ * otherwise an attacker just needs to search 32 bit space to
-+ * probe for our internal 128 bit state if he knows a couple
-+ * of prandom32 outputs!
-+ */
-+#define LCG(x) ((x) * 69069U) /* super-duper LCG */
-+ state->s1 = __seed(LCG(seed), 2U);
-+ state->s2 = __seed(LCG(state->s1), 8U);
-+ state->s3 = __seed(LCG(state->s2), 16U);
-+ state->s4 = __seed(LCG(state->s3), 128U);
-+}
-
- /**
-- * srandom32 - add entropy to pseudo random number generator
-+ * prandom_seed - add entropy to pseudo random number generator
- * @seed: seed value
- *
-- * Add some additional seeding to the random32() pool.
-+ * Add some additional seeding to the prandom pool.
- */
--void srandom32(u32 entropy)
-+void prandom_seed(u32 entropy)
- {
- int i;
- /*
-@@ -92,59 +179,275 @@ void srandom32(u32 entropy)
- */
- for_each_possible_cpu (i) {
- struct rnd_state *state = &per_cpu(net_rand_state, i);
-- state->s1 = __seed(state->s1 ^ entropy, 2);
-+
-+ state->s1 = __seed(state->s1 ^ entropy, 2U);
-+ prandom_warmup(state);
- }
- }
--EXPORT_SYMBOL(srandom32);
-+EXPORT_SYMBOL(prandom_seed);
-
- /*
- * Generate some initially weak seeding values to allow
-- * to start the random32() engine.
-+ * to start the prandom_u32() engine.
- */
--static int __init random32_init(void)
-+static int __init prandom_init(void)
- {
- int i;
-
-+#ifdef CONFIG_RANDOM32_SELFTEST
-+ prandom_state_selftest();
-+#endif
-+
- for_each_possible_cpu(i) {
- struct rnd_state *state = &per_cpu(net_rand_state,i);
-
--#define LCG(x) ((x) * 69069) /* super-duper LCG */
-- state->s1 = __seed(LCG(i + jiffies), 2);
-- state->s2 = __seed(LCG(state->s1), 8);
-- state->s3 = __seed(LCG(state->s2), 16);
--
-- /* "warm it up" */
-- prandom32(state);
-- prandom32(state);
-- prandom32(state);
-- prandom32(state);
-- prandom32(state);
-- prandom32(state);
-+ prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy());
-+ prandom_warmup(state);
- }
- return 0;
- }
--core_initcall(random32_init);
-+core_initcall(prandom_init);
-+
-+static void __prandom_timer(unsigned long dontcare);
-+static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0);
-+
-+static void __prandom_timer(unsigned long dontcare)
-+{
-+ u32 entropy;
-+ unsigned long expires;
-+
-+ get_random_bytes(&entropy, sizeof(entropy));
-+ prandom_seed(entropy);
-+
-+ /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
-+ expires = 40 + (prandom_u32() % 40);
-+ seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
-+
-+ add_timer(&seed_timer);
-+}
-+
-+static void __init __prandom_start_seed_timer(void)
-+{
-+ set_timer_slack(&seed_timer, HZ);
-+ seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
-+ add_timer(&seed_timer);
-+}
-
- /*
- * Generate better values after random number generator
- * is fully initialized.
- */
--static int __init random32_reseed(void)
-+static void __prandom_reseed(bool late)
- {
- int i;
-+ unsigned long flags;
-+ static bool latch = false;
-+ static DEFINE_SPINLOCK(lock);
-+
-+ /* Asking for random bytes might result in bytes getting
-+ * moved into the nonblocking pool and thus marking it
-+ * as initialized. In this case we would double back into
-+ * this function and attempt to do a late reseed.
-+ * Ignore the pointless attempt to reseed again if we're
-+ * already waiting for bytes when the nonblocking pool
-+ * got initialized.
-+ */
-+
-+ /* only allow initial seeding (late == false) once */
-+ if (!spin_trylock_irqsave(&lock, flags))
-+ return;
-+
-+ if (latch && !late)
-+ goto out;
-+ latch = true;
-
- for_each_possible_cpu(i) {
- struct rnd_state *state = &per_cpu(net_rand_state,i);
-- u32 seeds[3];
-+ u32 seeds[4];
-
- get_random_bytes(&seeds, sizeof(seeds));
-- state->s1 = __seed(seeds[0], 2);
-- state->s2 = __seed(seeds[1], 8);
-- state->s3 = __seed(seeds[2], 16);
-+ state->s1 = __seed(seeds[0], 2U);
-+ state->s2 = __seed(seeds[1], 8U);
-+ state->s3 = __seed(seeds[2], 16U);
-+ state->s4 = __seed(seeds[3], 128U);
-
-- /* mix it in */
-- prandom32(state);
-+ prandom_warmup(state);
- }
-+out:
-+ spin_unlock_irqrestore(&lock, flags);
-+}
-+
-+void prandom_reseed_late(void)
-+{
-+ __prandom_reseed(true);
-+}
-+
-+static int __init prandom_reseed(void)
-+{
-+ __prandom_reseed(false);
-+ __prandom_start_seed_timer();
- return 0;
- }
--late_initcall(random32_reseed);
-+late_initcall(prandom_reseed);
-+
-+#ifdef CONFIG_RANDOM32_SELFTEST
-+static struct prandom_test1 {
-+ u32 seed;
-+ u32 result;
-+} test1[] = {
-+ { 1U, 3484351685U },
-+ { 2U, 2623130059U },
-+ { 3U, 3125133893U },
-+ { 4U, 984847254U },
-+};
-+
-+static struct prandom_test2 {
-+ u32 seed;
-+ u32 iteration;
-+ u32 result;
-+} test2[] = {
-+ /* Test cases against taus113 from GSL library. */
-+ { 931557656U, 959U, 2975593782U },
-+ { 1339693295U, 876U, 3887776532U },
-+ { 1545556285U, 961U, 1615538833U },
-+ { 601730776U, 723U, 1776162651U },
-+ { 1027516047U, 687U, 511983079U },
-+ { 416526298U, 700U, 916156552U },
-+ { 1395522032U, 652U, 2222063676U },
-+ { 366221443U, 617U, 2992857763U },
-+ { 1539836965U, 714U, 3783265725U },
-+ { 556206671U, 994U, 799626459U },
-+ { 684907218U, 799U, 367789491U },
-+ { 2121230701U, 931U, 2115467001U },
-+ { 1668516451U, 644U, 3620590685U },
-+ { 768046066U, 883U, 2034077390U },
-+ { 1989159136U, 833U, 1195767305U },
-+ { 536585145U, 996U, 3577259204U },
-+ { 1008129373U, 642U, 1478080776U },
-+ { 1740775604U, 939U, 1264980372U },
-+ { 1967883163U, 508U, 10734624U },
-+ { 1923019697U, 730U, 3821419629U },
-+ { 442079932U, 560U, 3440032343U },
-+ { 1961302714U, 845U, 841962572U },
-+ { 2030205964U, 962U, 1325144227U },
-+ { 1160407529U, 507U, 240940858U },
-+ { 635482502U, 779U, 4200489746U },
-+ { 1252788931U, 699U, 867195434U },
-+ { 1961817131U, 719U, 668237657U },
-+ { 1071468216U, 983U, 917876630U },
-+ { 1281848367U, 932U, 1003100039U },
-+ { 582537119U, 780U, 1127273778U },
-+ { 1973672777U, 853U, 1071368872U },
-+ { 1896756996U, 762U, 1127851055U },
-+ { 847917054U, 500U, 1717499075U },
-+ { 1240520510U, 951U, 2849576657U },
-+ { 1685071682U, 567U, 1961810396U },
-+ { 1516232129U, 557U, 3173877U },
-+ { 1208118903U, 612U, 1613145022U },
-+ { 1817269927U, 693U, 4279122573U },
-+ { 1510091701U, 717U, 638191229U },
-+ { 365916850U, 807U, 600424314U },
-+ { 399324359U, 702U, 1803598116U },
-+ { 1318480274U, 779U, 2074237022U },
-+ { 697758115U, 840U, 1483639402U },
-+ { 1696507773U, 840U, 577415447U },
-+ { 2081979121U, 981U, 3041486449U },
-+ { 955646687U, 742U, 3846494357U },
-+ { 1250683506U, 749U, 836419859U },
-+ { 595003102U, 534U, 366794109U },
-+ { 47485338U, 558U, 3521120834U },
-+ { 619433479U, 610U, 3991783875U },
-+ { 704096520U, 518U, 4139493852U },
-+ { 1712224984U, 606U, 2393312003U },
-+ { 1318233152U, 922U, 3880361134U },
-+ { 855572992U, 761U, 1472974787U },
-+ { 64721421U, 703U, 683860550U },
-+ { 678931758U, 840U, 380616043U },
-+ { 692711973U, 778U, 1382361947U },
-+ { 677703619U, 530U, 2826914161U },
-+ { 92393223U, 586U, 1522128471U },
-+ { 1222592920U, 743U, 3466726667U },
-+ { 358288986U, 695U, 1091956998U },
-+ { 1935056945U, 958U, 514864477U },
-+ { 735675993U, 990U, 1294239989U },
-+ { 1560089402U, 897U, 2238551287U },
-+ { 70616361U, 829U, 22483098U },
-+ { 368234700U, 731U, 2913875084U },
-+ { 20221190U, 879U, 1564152970U },
-+ { 539444654U, 682U, 1835141259U },
-+ { 1314987297U, 840U, 1801114136U },
-+ { 2019295544U, 645U, 3286438930U },
-+ { 469023838U, 716U, 1637918202U },
-+ { 1843754496U, 653U, 2562092152U },
-+ { 400672036U, 809U, 4264212785U },
-+ { 404722249U, 965U, 2704116999U },
-+ { 600702209U, 758U, 584979986U },
-+ { 519953954U, 667U, 2574436237U },
-+ { 1658071126U, 694U, 2214569490U },
-+ { 420480037U, 749U, 3430010866U },
-+ { 690103647U, 969U, 3700758083U },
-+ { 1029424799U, 937U, 3787746841U },
-+ { 2012608669U, 506U, 3362628973U },
-+ { 1535432887U, 998U, 42610943U },
-+ { 1330635533U, 857U, 3040806504U },
-+ { 1223800550U, 539U, 3954229517U },
-+ { 1322411537U, 680U, 3223250324U },
-+ { 1877847898U, 945U, 2915147143U },
-+ { 1646356099U, 874U, 965988280U },
-+ { 805687536U, 744U, 4032277920U },
-+ { 1948093210U, 633U, 1346597684U },
-+ { 392609744U, 783U, 1636083295U },
-+ { 690241304U, 770U, 1201031298U },
-+ { 1360302965U, 696U, 1665394461U },
-+ { 1220090946U, 780U, 1316922812U },
-+ { 447092251U, 500U, 3438743375U },
-+ { 1613868791U, 592U, 828546883U },
-+ { 523430951U, 548U, 2552392304U },
-+ { 726692899U, 810U, 1656872867U },
-+ { 1364340021U, 836U, 3710513486U },
-+ { 1986257729U, 931U, 935013962U },
-+ { 407983964U, 921U, 728767059U },
-+};
-+
-+static void __init prandom_state_selftest(void)
-+{
-+ int i, j, errors = 0, runs = 0;
-+ bool error = false;
-+
-+ for (i = 0; i < ARRAY_SIZE(test1); i++) {
-+ struct rnd_state state;
-+
-+ prandom_seed_very_weak(&state, test1[i].seed);
-+ prandom_warmup(&state);
-+
-+ if (test1[i].result != prandom_u32_state(&state))
-+ error = true;
-+ }
-+
-+ if (error)
-+ pr_warn("prandom: seed boundary self test failed\n");
-+ else
-+ pr_info("prandom: seed boundary self test passed\n");
-+
-+ for (i = 0; i < ARRAY_SIZE(test2); i++) {
-+ struct rnd_state state;
-+
-+ prandom_seed_very_weak(&state, test2[i].seed);
-+ prandom_warmup(&state);
-+
-+ for (j = 0; j < test2[i].iteration - 1; j++)
-+ prandom_u32_state(&state);
-+
-+ if (test2[i].result != prandom_u32_state(&state))
-+ errors++;
-+
-+ runs++;
-+ cond_resched();
-+ }
-+
-+ if (errors)
-+ pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
-+ else
-+ pr_info("prandom: %d self tests passed\n", runs);
-+}
-+#endif
-diff --git a/lib/vsprintf.c b/lib/vsprintf.c
-index ae02e42..cd72015 100644
---- a/lib/vsprintf.c
-+++ b/lib/vsprintf.c
-@@ -16,6 +16,9 @@
- * - scnprintf and vscnprintf
- */
-
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+#define __INCLUDED_BY_HIDESYM 1
-+#endif
- #include <stdarg.h>
- #include <linux/module.h>
- #include <linux/types.h>
-@@ -414,7 +417,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
- char sym[KSYM_SYMBOL_LEN];
- if (ext == 'B')
- sprint_backtrace(sym, value);
-- else if (ext != 'f' && ext != 's')
-+ else if (ext != 'f' && ext != 's' && ext != 'a')
- sprint_symbol(sym, value);
- else
- kallsyms_lookup(value, NULL, NULL, NULL, sym);
-@@ -778,7 +781,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
- return string(buf, end, uuid, spec);
- }
-
--int kptr_restrict __read_mostly;
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+int kptr_restrict __read_only = 2;
-+#else
-+int kptr_restrict __read_only;
-+#endif
-
- /*
- * Show a '%p' thing. A kernel extension is that the '%p' is followed
-@@ -792,6 +799,8 @@ int kptr_restrict __read_mostly;
- * - 'S' For symbolic direct pointers with offset
- * - 's' For symbolic direct pointers without offset
- * - 'B' For backtraced symbolic direct pointers with offset
-+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
-+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
- * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
- * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
- * - 'M' For a 6-byte MAC address, it prints the address in the
-@@ -836,12 +845,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
- {
- if (!ptr && *fmt != 'K') {
- /*
-- * Print (null) with the same width as a pointer so it makes
-+ * Print (nil) with the same width as a pointer so it makes
- * tabular output look nice.
- */
- if (spec.field_width == -1)
- spec.field_width = 2 * sizeof(void *);
-- return string(buf, end, "(null)", spec);
-+ return string(buf, end, "(nil)", spec);
- }
-
- switch (*fmt) {
-@@ -851,6 +860,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
- /* Fallthrough */
- case 'S':
- case 's':
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ break;
-+#else
-+ return symbol_string(buf, end, ptr, spec, *fmt);
-+#endif
-+ case 'A':
-+ case 'a':
- case 'B':
- return symbol_string(buf, end, ptr, spec, *fmt);
- case 'R':
-@@ -879,9 +895,17 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
- case 'U':
- return uuid_string(buf, end, ptr, spec, fmt);
- case 'V':
-- return buf + vsnprintf(buf, end > buf ? end - buf : 0,
-- ((struct va_format *)ptr)->fmt,
-- *(((struct va_format *)ptr)->va));
-+ {
-+ va_list va;
-+
-+ va_copy(va, *((struct va_format *)ptr)->va);
-+ buf += vsnprintf(buf, end > buf ? end - buf : 0,
-+ ((struct va_format *)ptr)->fmt, va);
-+ va_end(va);
-+ return buf;
-+ }
-+ case 'P':
-+ break;
- case 'K':
- /*
- * %pK cannot be used in IRQ context because its test
-@@ -924,6 +948,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
- }
- break;
- }
-+
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ /* 'P' = approved pointers to copy to userland,
-+ as in the /proc/kallsyms case, as we make it display nothing
-+ for non-root users, and the real contents for root users
-+ Also ignore 'K' pointers, since we force their NULLing for non-root users
-+ above
-+ */
-+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
-+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
-+ dump_stack();
-+ ptr = NULL;
-+ }
-+#endif
-+
- spec.flags |= SMALL;
- if (spec.field_width == -1) {
- spec.field_width = 2 * sizeof(void *);
-@@ -1635,11 +1674,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
- typeof(type) value; \
- if (sizeof(type) == 8) { \
- args = PTR_ALIGN(args, sizeof(u32)); \
-- *(u32 *)&value = *(u32 *)args; \
-- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
-+ *(u32 *)&value = *(const u32 *)args; \
-+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
- } else { \
- args = PTR_ALIGN(args, sizeof(type)); \
-- value = *(typeof(type) *)args; \
-+ value = *(const typeof(type) *)args; \
- } \
- args += sizeof(type); \
- value; \
-@@ -1702,7 +1741,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
- case FORMAT_TYPE_STR: {
- const char *str_arg = args;
- args += strlen(str_arg) + 1;
-- str = string(str, end, (char *)str_arg, spec);
-+ str = string(str, end, str_arg, spec);
- break;
- }
-
-diff --git a/localversion-grsec b/localversion-grsec
-new file mode 100644
-index 0000000..7cd6065
---- /dev/null
-+++ b/localversion-grsec
-@@ -0,0 +1 @@
-+-grsec
-diff --git a/mm/Kconfig b/mm/Kconfig
-index 011b110..05d1b6f 100644
---- a/mm/Kconfig
-+++ b/mm/Kconfig
-@@ -241,10 +241,11 @@ config KSM
- root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
-
- config DEFAULT_MMAP_MIN_ADDR
-- int "Low address space to protect from user allocation"
-+ int "Low address space to protect from user allocation"
- depends on MMU
-- default 4096
-- help
-+ default 32768 if ALPHA || ARM || PARISC || SPARC32
-+ default 65536
-+ help
- This is the portion of low virtual memory which should be protected
- from userspace allocation. Keeping a user from writing to low pages
- can help reduce the impact of kernel NULL pointer bugs.
-@@ -274,7 +275,7 @@ config MEMORY_FAILURE
-
- config HWPOISON_INJECT
- tristate "HWPoison pages injector"
-- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
-+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
- select PROC_PAGE_MONITOR
-
- config NOMMU_INITIAL_TRIM_EXCESS
-diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
-index 8b1a477..f3a339f 100644
---- a/mm/Kconfig.debug
-+++ b/mm/Kconfig.debug
-@@ -1,6 +1,7 @@
- config DEBUG_PAGEALLOC
- bool "Debug page memory allocations"
- depends on DEBUG_KERNEL
-+ depends on !PAX_MEMORY_SANITIZE
- depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
- depends on !KMEMCHECK
- select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
-diff --git a/mm/backing-dev.c b/mm/backing-dev.c
-index 2b49dd2..0527d62 100644
---- a/mm/backing-dev.c
-+++ b/mm/backing-dev.c
-@@ -12,7 +12,7 @@
- #include <linux/device.h>
- #include <trace/events/writeback.h>
-
--static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
-+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
-
- struct backing_dev_info default_backing_dev_info = {
- .name = "default",
-@@ -759,7 +759,6 @@ EXPORT_SYMBOL(bdi_destroy);
- int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
- unsigned int cap)
- {
-- char tmp[32];
- int err;
-
- bdi->name = name;
-@@ -768,8 +767,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
- if (err)
- return err;
-
-- sprintf(tmp, "%.28s%s", name, "-%d");
-- err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
-+ err = bdi_register(bdi, NULL, "%.28s-%ld", name, atomic_long_inc_return_unchecked(&bdi_seq));
- if (err) {
- bdi_destroy(bdi);
- return err;
-diff --git a/mm/filemap.c b/mm/filemap.c
-index 556858c..71a567d 100644
---- a/mm/filemap.c
-+++ b/mm/filemap.c
-@@ -1773,7 +1773,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
- struct address_space *mapping = file->f_mapping;
-
- if (!mapping->a_ops->readpage)
-- return -ENOEXEC;
-+ return -ENODEV;
- file_accessed(file);
- vma->vm_ops = &generic_file_vm_ops;
- vma->vm_flags |= VM_CAN_NONLINEAR;
-@@ -2021,7 +2021,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
-
- while (bytes) {
- char __user *buf = iov->iov_base + base;
-- int copy = min(bytes, iov->iov_len - base);
-+ size_t copy = min(bytes, iov->iov_len - base);
-
- base = 0;
- left = __copy_from_user_inatomic(vaddr, buf, copy);
-@@ -2050,7 +2050,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
- BUG_ON(!in_atomic());
- kaddr = kmap_atomic(page, KM_USER0);
- if (likely(i->nr_segs == 1)) {
-- int left;
-+ size_t left;
- char __user *buf = i->iov->iov_base + i->iov_offset;
- left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
- copied = bytes - left;
-@@ -2078,7 +2078,7 @@ size_t iov_iter_copy_from_user(struct page *page,
-
- kaddr = kmap(page);
- if (likely(i->nr_segs == 1)) {
-- int left;
-+ size_t left;
- char __user *buf = i->iov->iov_base + i->iov_offset;
- left = __copy_from_user(kaddr + offset, buf, bytes);
- copied = bytes - left;
-@@ -2108,7 +2108,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
- * zero-length segments (without overrunning the iovec).
- */
- while (bytes || unlikely(i->count && !iov->iov_len)) {
-- int copy;
-+ size_t copy;
-
- copy = min(bytes, iov->iov_len - base);
- BUG_ON(!i->count || i->count < copy);
-@@ -2179,6 +2179,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
- *pos = i_size_read(inode);
-
- if (limit != RLIM_INFINITY) {
-+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
- if (*pos >= limit) {
- send_sig(SIGXFSZ, current, 0);
- return -EFBIG;
-diff --git a/mm/fremap.c b/mm/fremap.c
-index 9ed4fd4..c42648d 100644
---- a/mm/fremap.c
-+++ b/mm/fremap.c
-@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
- retry:
- vma = find_vma(mm, start);
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
-+ goto out;
-+#endif
-+
- /*
- * Make sure the vma is shared, that it supports prefaulting,
- * and that the remapped range is valid and fully within
-diff --git a/mm/highmem.c b/mm/highmem.c
-index 09fc744..3936897 100644
---- a/mm/highmem.c
-+++ b/mm/highmem.c
-@@ -138,9 +138,10 @@ static void flush_all_zero_pkmaps(void)
- * So no dangers, even with speculative execution.
- */
- page = pte_page(pkmap_page_table[i]);
-+ pax_open_kernel();
- pte_clear(&init_mm, (unsigned long)page_address(page),
- &pkmap_page_table[i]);
--
-+ pax_close_kernel();
- set_page_address(page, NULL);
- need_flush = 1;
- }
-@@ -199,9 +200,11 @@ start:
- }
- }
- vaddr = PKMAP_ADDR(last_pkmap_nr);
-+
-+ pax_open_kernel();
- set_pte_at(&init_mm, vaddr,
- &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
--
-+ pax_close_kernel();
- pkmap_count[last_pkmap_nr] = 1;
- set_page_address(page, (void *)vaddr);
-
-diff --git a/mm/huge_memory.c b/mm/huge_memory.c
-index 79166c2..7ce048f 100644
---- a/mm/huge_memory.c
-+++ b/mm/huge_memory.c
-@@ -704,7 +704,7 @@ out:
- * run pte_offset_map on the pmd, if an huge pmd could
- * materialize from under us from a different thread.
- */
-- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
-+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
- return VM_FAULT_OOM;
- /* if an huge pmd materialized from under us just retry later */
- if (unlikely(pmd_trans_huge(*pmd)))
-@@ -1347,6 +1347,11 @@ static int __split_huge_page_map(struct page *page,
- i++, haddr += PAGE_SIZE) {
- pte_t *pte, entry;
- BUG_ON(PageCompound(page+i));
-+ /*
-+ * Note that pmd_numa is not transferred deliberately
-+ * to avoid any possibility that pte_numa leaks to
-+ * a PROT_NONE VMA by accident.
-+ */
- entry = mk_pte(page + i, vma->vm_page_prot);
- entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- if (!pmd_write(*pmd))
-diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index 26922da..73f8544 100644
---- a/mm/hugetlb.c
-+++ b/mm/hugetlb.c
-@@ -2009,15 +2009,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
- struct hstate *h = &default_hstate;
- unsigned long tmp;
- int ret;
-+ ctl_table_no_const hugetlb_table;
-
- tmp = h->max_huge_pages;
-
- if (write && h->order >= MAX_ORDER)
- return -EINVAL;
-
-- table->data = &tmp;
-- table->maxlen = sizeof(unsigned long);
-- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
-+ hugetlb_table = *table;
-+ hugetlb_table.data = &tmp;
-+ hugetlb_table.maxlen = sizeof(unsigned long);
-+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
- if (ret)
- goto out;
-
-@@ -2074,15 +2076,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
- struct hstate *h = &default_hstate;
- unsigned long tmp;
- int ret;
-+ ctl_table_no_const hugetlb_table;
-
- tmp = h->nr_overcommit_huge_pages;
-
- if (write && h->order >= MAX_ORDER)
- return -EINVAL;
-
-- table->data = &tmp;
-- table->maxlen = sizeof(unsigned long);
-- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
-+ hugetlb_table = *table;
-+ hugetlb_table.data = &tmp;
-+ hugetlb_table.maxlen = sizeof(unsigned long);
-+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
- if (ret)
- goto out;
-
-@@ -2518,6 +2522,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
- return 1;
- }
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
-+{
-+ struct mm_struct *mm = vma->vm_mm;
-+ struct vm_area_struct *vma_m;
-+ unsigned long address_m;
-+ pte_t *ptep_m;
-+
-+ vma_m = pax_find_mirror_vma(vma);
-+ if (!vma_m)
-+ return;
-+
-+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
-+ address_m = address + SEGMEXEC_TASK_SIZE;
-+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
-+ get_page(page_m);
-+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
-+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
-+}
-+#endif
-+
- /*
- * Hugetlb_cow() should be called with page lock of the original hugepage held.
- */
-@@ -2620,6 +2645,11 @@ retry_avoidcopy:
- make_huge_pte(vma, new_page, 1));
- page_remove_rmap(old_page);
- hugepage_add_new_anon_rmap(new_page, vma, address);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ pax_mirror_huge_pte(vma, address, new_page);
-+#endif
-+
- /* Make the old page be freed below */
- new_page = old_page;
- mmu_notifier_invalidate_range_end(mm,
-@@ -2771,6 +2801,10 @@ retry:
- && (vma->vm_flags & VM_SHARED)));
- set_huge_pte_at(mm, address, ptep, new_pte);
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ pax_mirror_huge_pte(vma, address, page);
-+#endif
-+
- if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
- /* Optimization, do the COW without a second fault */
- ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
-@@ -2801,6 +2835,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- struct hstate *h = hstate_vma(vma);
- int need_wait_lock = 0;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ struct vm_area_struct *vma_m;
-+#endif
-+
- ptep = huge_pte_offset(mm, address);
- if (ptep) {
- entry = huge_ptep_get(ptep);
-@@ -2812,6 +2850,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- VM_FAULT_SET_HINDEX(h - hstates);
- }
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ vma_m = pax_find_mirror_vma(vma);
-+ if (vma_m) {
-+ unsigned long address_m;
-+
-+ if (vma->vm_start > vma_m->vm_start) {
-+ address_m = address;
-+ address -= SEGMEXEC_TASK_SIZE;
-+ vma = vma_m;
-+ h = hstate_vma(vma);
-+ } else
-+ address_m = address + SEGMEXEC_TASK_SIZE;
-+
-+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
-+ return VM_FAULT_OOM;
-+ address_m &= HPAGE_MASK;
-+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
-+ }
-+#endif
-+
- ptep = huge_pte_alloc(mm, address, huge_page_size(h));
- if (!ptep)
- return VM_FAULT_OOM;
-diff --git a/mm/internal.h b/mm/internal.h
-index 0c26b5e..1cc340f 100644
---- a/mm/internal.h
-+++ b/mm/internal.h
-@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
- * in mm/page_alloc.c
- */
- extern void __free_pages_bootmem(struct page *page, unsigned int order);
-+extern void free_compound_page(struct page *page);
- extern void prep_compound_page(struct page *page, unsigned long order);
- #ifdef CONFIG_MEMORY_FAILURE
- extern bool is_free_buddy_page(struct page *page);
-diff --git a/mm/kmemleak.c b/mm/kmemleak.c
-index cc8cf1d..677c52d 100644
---- a/mm/kmemleak.c
-+++ b/mm/kmemleak.c
-@@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
-
- for (i = 0; i < object->trace_len; i++) {
- void *ptr = (void *)object->trace[i];
-- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
-+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
- }
- }
-
-@@ -1747,7 +1747,7 @@ static int __init kmemleak_late_init(void)
- return -ENOMEM;
- }
-
-- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
-+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
- &kmemleak_fops);
- if (!dentry)
- pr_warning("Failed to create the debugfs kmemleak file\n");
-diff --git a/mm/maccess.c b/mm/maccess.c
-index d53adf9..03a24bf 100644
---- a/mm/maccess.c
-+++ b/mm/maccess.c
-@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
- set_fs(KERNEL_DS);
- pagefault_disable();
- ret = __copy_from_user_inatomic(dst,
-- (__force const void __user *)src, size);
-+ (const void __force_user *)src, size);
- pagefault_enable();
- set_fs(old_fs);
-
-@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
-
- set_fs(KERNEL_DS);
- pagefault_disable();
-- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
-+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
- pagefault_enable();
- set_fs(old_fs);
-
-diff --git a/mm/madvise.c b/mm/madvise.c
-index 23d3a6b..76f0f839 100644
---- a/mm/madvise.c
-+++ b/mm/madvise.c
-@@ -46,6 +46,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
- pgoff_t pgoff;
- unsigned long new_flags = vma->vm_flags;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ struct vm_area_struct *vma_m;
-+#endif
-+
- switch (behavior) {
- case MADV_NORMAL:
- new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
-@@ -111,6 +115,13 @@ success:
- /*
- * vm_flags is protected by the mmap_sem held in write mode.
- */
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ vma_m = pax_find_mirror_vma(vma);
-+ if (vma_m)
-+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
-+#endif
-+
- vma->vm_flags = new_flags;
-
- out:
-@@ -169,6 +180,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
- struct vm_area_struct ** prev,
- unsigned long start, unsigned long end)
- {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ struct vm_area_struct *vma_m;
-+#endif
-+
- *prev = vma;
- if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
- return -EINVAL;
-@@ -181,6 +197,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
- zap_page_range(vma, start, end - start, &details);
- } else
- zap_page_range(vma, start, end - start, NULL);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ vma_m = pax_find_mirror_vma(vma);
-+ if (vma_m) {
-+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
-+ struct zap_details details = {
-+ .nonlinear_vma = vma_m,
-+ .last_index = ULONG_MAX,
-+ };
-+ zap_page_range(vma_m, start + SEGMEXEC_TASK_SIZE, end - start, &details);
-+ } else
-+ zap_page_range(vma_m, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
-+ }
-+#endif
-+
- return 0;
- }
-
-@@ -386,6 +417,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
- if (end < start)
- goto out;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
-+ if (end > SEGMEXEC_TASK_SIZE)
-+ goto out;
-+ } else
-+#endif
-+
-+ if (end > TASK_SIZE)
-+ goto out;
-+
- error = 0;
- if (end == start)
- goto out;
-diff --git a/mm/memory-failure.c b/mm/memory-failure.c
-index 51901b1..79af2f4 100644
---- a/mm/memory-failure.c
-+++ b/mm/memory-failure.c
-@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
-
- int sysctl_memory_failure_recovery __read_mostly = 1;
-
--atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
-+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
-
- #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
-
-@@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
- si.si_signo = SIGBUS;
- si.si_errno = 0;
- si.si_code = BUS_MCEERR_AO;
-- si.si_addr = (void *)addr;
-+ si.si_addr = (void __user *)addr;
- #ifdef __ARCH_SI_TRAPNO
- si.si_trapno = trapno;
- #endif
-@@ -750,7 +750,7 @@ static struct page_state {
- unsigned long res;
- char *msg;
- int (*action)(struct page *p, unsigned long pfn);
--} error_states[] = {
-+} __do_const error_states[] = {
- { reserved, reserved, "reserved kernel", me_kernel },
- /*
- * free pages are specially detected outside this table:
-@@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
- }
-
- nr_pages = 1 << compound_trans_order(hpage);
-- atomic_long_add(nr_pages, &mce_bad_pages);
-+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
-
- /*
- * We need/can do nothing about count=0 pages.
-@@ -1039,7 +1039,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
- if (PageHWPoison(hpage)) {
- if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
- || (p != hpage && TestSetPageHWPoison(hpage))) {
-- atomic_long_sub(nr_pages, &mce_bad_pages);
-+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
- unlock_page(hpage);
- return 0;
- }
-@@ -1094,14 +1094,14 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
- */
- if (!PageHWPoison(p)) {
- printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
-- atomic_long_sub(nr_pages, &mce_bad_pages);
-+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
- put_page(hpage);
- res = 0;
- goto out;
- }
- if (hwpoison_filter(p)) {
- if (TestClearPageHWPoison(p))
-- atomic_long_sub(nr_pages, &mce_bad_pages);
-+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
- unlock_page(hpage);
- put_page(hpage);
- return 0;
-@@ -1318,7 +1318,7 @@ int unpoison_memory(unsigned long pfn)
- return 0;
- }
- if (TestClearPageHWPoison(p))
-- atomic_long_sub(nr_pages, &mce_bad_pages);
-+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
- pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
- return 0;
- }
-@@ -1332,7 +1332,7 @@ int unpoison_memory(unsigned long pfn)
- */
- if (TestClearPageHWPoison(page)) {
- pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
-- atomic_long_sub(nr_pages, &mce_bad_pages);
-+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
- freeit = 1;
- if (PageHuge(page))
- clear_page_hwpoison_huge_page(page);
-@@ -1447,13 +1447,13 @@ done:
- /* overcommit hugetlb page will be freed to buddy */
- if (PageHuge(hpage)) {
- if (!PageHWPoison(hpage))
-- atomic_long_add(1 << compound_trans_order(hpage),
-+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
- &mce_bad_pages);
- set_page_hwpoison_huge_page(hpage);
- dequeue_hwpoisoned_huge_page(hpage);
- } else {
- SetPageHWPoison(page);
-- atomic_long_inc(&mce_bad_pages);
-+ atomic_long_inc_unchecked(&mce_bad_pages);
- }
-
- /* keep elevated page count for bad page */
-@@ -1592,7 +1592,7 @@ int soft_offline_page(struct page *page, int flags)
- return ret;
-
- done:
-- atomic_long_add(1, &mce_bad_pages);
-+ atomic_long_add_unchecked(1, &mce_bad_pages);
- SetPageHWPoison(page);
- /* keep elevated page count for bad page */
- return ret;
-diff --git a/mm/memory.c b/mm/memory.c
-index 452b8ba..d322be8 100644
---- a/mm/memory.c
-+++ b/mm/memory.c
-@@ -462,8 +462,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
- return;
-
- pmd = pmd_offset(pud, start);
-+
-+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
- pud_clear(pud);
- pmd_free_tlb(tlb, pmd, start);
-+#endif
-+
- }
-
- static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
-@@ -494,9 +498,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
- if (end - 1 > ceiling - 1)
- return;
-
-+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
- pud = pud_offset(pgd, start);
- pgd_clear(pgd);
- pud_free_tlb(tlb, pud, start);
-+#endif
-+
- }
-
- /*
-@@ -1584,12 +1591,6 @@ no_page_table:
- return page;
- }
-
--static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
--{
-- return stack_guard_page_start(vma, addr) ||
-- stack_guard_page_end(vma, addr+PAGE_SIZE);
--}
--
- /**
- * __get_user_pages() - pin user pages in memory
- * @tsk: task_struct of target task
-@@ -1662,10 +1663,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
- i = 0;
-
-- do {
-+ while (nr_pages) {
- struct vm_area_struct *vma;
-
-- vma = find_extend_vma(mm, start);
-+ vma = find_vma(mm, start);
- if (!vma && in_gate_area(mm, start)) {
- unsigned long pg = start & PAGE_MASK;
- pgd_t *pgd;
-@@ -1713,7 +1714,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- goto next_page;
- }
-
-- if (!vma ||
-+ if (!vma || start < vma->vm_start ||
- (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
- !(vm_flags & vma->vm_flags))
- return i ? : -EFAULT;
-@@ -1740,11 +1741,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- int ret;
- unsigned int fault_flags = 0;
-
-- /* For mlock, just skip the stack guard page. */
-- if (foll_flags & FOLL_MLOCK) {
-- if (stack_guard_page(vma, start))
-- goto next_page;
-- }
- if (foll_flags & FOLL_WRITE)
- fault_flags |= FAULT_FLAG_WRITE;
- if (nonblocking)
-@@ -1818,7 +1814,7 @@ next_page:
- start += PAGE_SIZE;
- nr_pages--;
- } while (nr_pages && start < vma->vm_end);
-- } while (nr_pages);
-+ }
- return i;
- }
- EXPORT_SYMBOL(__get_user_pages);
-@@ -2030,6 +2026,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
- page_add_file_rmap(page);
- set_pte_at(mm, addr, pte, mk_pte(page, prot));
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ pax_mirror_file_pte(vma, addr, page, ptl);
-+#endif
-+
- retval = 0;
- pte_unmap_unlock(pte, ptl);
- return retval;
-@@ -2064,10 +2064,22 @@ out:
- int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
- struct page *page)
- {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ struct vm_area_struct *vma_m;
-+#endif
-+
- if (addr < vma->vm_start || addr >= vma->vm_end)
- return -EFAULT;
- if (!page_count(page))
- return -EINVAL;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ vma_m = pax_find_mirror_vma(vma);
-+ if (vma_m)
-+ vma_m->vm_flags |= VM_INSERTPAGE;
-+#endif
-+
- vma->vm_flags |= VM_INSERTPAGE;
- return insert_page(vma, addr, page, vma->vm_page_prot);
- }
-@@ -2153,6 +2165,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
- unsigned long pfn)
- {
- BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
-+ BUG_ON(vma->vm_mirror);
-
- if (addr < vma->vm_start || addr >= vma->vm_end)
- return -EFAULT;
-@@ -2407,7 +2420,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
-
- BUG_ON(pud_huge(*pud));
-
-- pmd = pmd_alloc(mm, pud, addr);
-+ pmd = (mm == &init_mm) ?
-+ pmd_alloc_kernel(mm, pud, addr) :
-+ pmd_alloc(mm, pud, addr);
- if (!pmd)
- return -ENOMEM;
- do {
-@@ -2427,7 +2442,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
- unsigned long next;
- int err;
-
-- pud = pud_alloc(mm, pgd, addr);
-+ pud = (mm == &init_mm) ?
-+ pud_alloc_kernel(mm, pgd, addr) :
-+ pud_alloc(mm, pgd, addr);
- if (!pud)
- return -ENOMEM;
- do {
-@@ -2515,6 +2532,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
- copy_user_highpage(dst, src, va, vma);
- }
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
-+{
-+ struct mm_struct *mm = vma->vm_mm;
-+ spinlock_t *ptl;
-+ pte_t *pte, entry;
-+
-+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-+ entry = *pte;
-+ if (!pte_present(entry)) {
-+ if (!pte_none(entry)) {
-+ BUG_ON(pte_file(entry));
-+ free_swap_and_cache(pte_to_swp_entry(entry));
-+ pte_clear_not_present_full(mm, address, pte, 0);
-+ }
-+ } else {
-+ struct page *page;
-+
-+ flush_cache_page(vma, address, pte_pfn(entry));
-+ entry = ptep_clear_flush(vma, address, pte);
-+ BUG_ON(pte_dirty(entry));
-+ page = vm_normal_page(vma, address, entry);
-+ if (page) {
-+ update_hiwater_rss(mm);
-+ if (PageAnon(page))
-+ dec_mm_counter_fast(mm, MM_ANONPAGES);
-+ else
-+ dec_mm_counter_fast(mm, MM_FILEPAGES);
-+ page_remove_rmap(page);
-+ page_cache_release(page);
-+ }
-+ }
-+ pte_unmap_unlock(pte, ptl);
-+}
-+
-+/* PaX: if vma is mirrored, synchronize the mirror's PTE
-+ *
-+ * the ptl of the lower mapped page is held on entry and is not released on exit
-+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
-+ */
-+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
-+{
-+ struct mm_struct *mm = vma->vm_mm;
-+ unsigned long address_m;
-+ spinlock_t *ptl_m;
-+ struct vm_area_struct *vma_m;
-+ pmd_t *pmd_m;
-+ pte_t *pte_m, entry_m;
-+
-+ BUG_ON(!page_m || !PageAnon(page_m));
-+
-+ vma_m = pax_find_mirror_vma(vma);
-+ if (!vma_m)
-+ return;
-+
-+ BUG_ON(!PageLocked(page_m));
-+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
-+ address_m = address + SEGMEXEC_TASK_SIZE;
-+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
-+ pte_m = pte_offset_map(pmd_m, address_m);
-+ ptl_m = pte_lockptr(mm, pmd_m);
-+ if (ptl != ptl_m) {
-+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
-+ if (!pte_none(*pte_m))
-+ goto out;
-+ }
-+
-+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
-+ page_cache_get(page_m);
-+ page_add_anon_rmap(page_m, vma_m, address_m);
-+ inc_mm_counter_fast(mm, MM_ANONPAGES);
-+ set_pte_at(mm, address_m, pte_m, entry_m);
-+ update_mmu_cache(vma_m, address_m, entry_m);
-+out:
-+ if (ptl != ptl_m)
-+ spin_unlock(ptl_m);
-+ pte_unmap(pte_m);
-+ unlock_page(page_m);
-+}
-+
-+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
-+{
-+ struct mm_struct *mm = vma->vm_mm;
-+ unsigned long address_m;
-+ spinlock_t *ptl_m;
-+ struct vm_area_struct *vma_m;
-+ pmd_t *pmd_m;
-+ pte_t *pte_m, entry_m;
-+
-+ BUG_ON(!page_m || PageAnon(page_m));
-+
-+ vma_m = pax_find_mirror_vma(vma);
-+ if (!vma_m)
-+ return;
-+
-+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
-+ address_m = address + SEGMEXEC_TASK_SIZE;
-+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
-+ pte_m = pte_offset_map(pmd_m, address_m);
-+ ptl_m = pte_lockptr(mm, pmd_m);
-+ if (ptl != ptl_m) {
-+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
-+ if (!pte_none(*pte_m))
-+ goto out;
-+ }
-+
-+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
-+ page_cache_get(page_m);
-+ page_add_file_rmap(page_m);
-+ inc_mm_counter_fast(mm, MM_FILEPAGES);
-+ set_pte_at(mm, address_m, pte_m, entry_m);
-+ update_mmu_cache(vma_m, address_m, entry_m);
-+out:
-+ if (ptl != ptl_m)
-+ spin_unlock(ptl_m);
-+ pte_unmap(pte_m);
-+}
-+
-+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
-+{
-+ struct mm_struct *mm = vma->vm_mm;
-+ unsigned long address_m;
-+ spinlock_t *ptl_m;
-+ struct vm_area_struct *vma_m;
-+ pmd_t *pmd_m;
-+ pte_t *pte_m, entry_m;
-+
-+ vma_m = pax_find_mirror_vma(vma);
-+ if (!vma_m)
-+ return;
-+
-+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
-+ address_m = address + SEGMEXEC_TASK_SIZE;
-+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
-+ pte_m = pte_offset_map(pmd_m, address_m);
-+ ptl_m = pte_lockptr(mm, pmd_m);
-+ if (ptl != ptl_m) {
-+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
-+ if (!pte_none(*pte_m))
-+ goto out;
-+ }
-+
-+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
-+ set_pte_at(mm, address_m, pte_m, entry_m);
-+out:
-+ if (ptl != ptl_m)
-+ spin_unlock(ptl_m);
-+ pte_unmap(pte_m);
-+}
-+
-+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
-+{
-+ struct page *page_m;
-+ pte_t entry;
-+
-+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
-+ goto out;
-+
-+ entry = *pte;
-+ page_m = vm_normal_page(vma, address, entry);
-+ if (!page_m)
-+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
-+ else if (PageAnon(page_m)) {
-+ if (pax_find_mirror_vma(vma)) {
-+ pte_unmap_unlock(pte, ptl);
-+ lock_page(page_m);
-+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
-+ if (pte_same(entry, *pte))
-+ pax_mirror_anon_pte(vma, address, page_m, ptl);
-+ else
-+ unlock_page(page_m);
-+ }
-+ } else
-+ pax_mirror_file_pte(vma, address, page_m, ptl);
-+
-+out:
-+ pte_unmap_unlock(pte, ptl);
-+}
-+#endif
-+
- /*
- * This routine handles present pages, when users try to write
- * to a shared page. It is done by copying the page to a new address
-@@ -2733,6 +2930,12 @@ gotten:
- */
- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
- if (likely(pte_same(*page_table, orig_pte))) {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (pax_find_mirror_vma(vma))
-+ BUG_ON(!trylock_page(new_page));
-+#endif
-+
- if (old_page) {
- if (!PageAnon(old_page)) {
- dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2784,6 +2987,10 @@ gotten:
- page_remove_rmap(old_page);
- }
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ pax_mirror_anon_pte(vma, address, new_page, ptl);
-+#endif
-+
- /* Free the old page.. */
- new_page = old_page;
- ret |= VM_FAULT_WRITE;
-@@ -3063,6 +3270,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
- swap_free(entry);
- if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
- try_to_free_swap(page);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
-+#endif
-+
- unlock_page(page);
- if (swapcache) {
- /*
-@@ -3086,6 +3298,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
-
- /* No need to invalidate - it was non-present before */
- update_mmu_cache(vma, address, page_table);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ pax_mirror_anon_pte(vma, address, page, ptl);
-+#endif
-+
- unlock:
- pte_unmap_unlock(page_table, ptl);
- out:
-@@ -3105,40 +3322,6 @@ out_release:
- }
-
- /*
-- * This is like a special single-page "expand_{down|up}wards()",
-- * except we must first make sure that 'address{-|+}PAGE_SIZE'
-- * doesn't hit another vma.
-- */
--static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
--{
-- address &= PAGE_MASK;
-- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
-- struct vm_area_struct *prev = vma->vm_prev;
--
-- /*
-- * Is there a mapping abutting this one below?
-- *
-- * That's only ok if it's the same stack mapping
-- * that has gotten split..
-- */
-- if (prev && prev->vm_end == address)
-- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
--
-- return expand_downwards(vma, address - PAGE_SIZE);
-- }
-- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
-- struct vm_area_struct *next = vma->vm_next;
--
-- /* As VM_GROWSDOWN but s/below/above/ */
-- if (next && next->vm_start == address + PAGE_SIZE)
-- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
--
-- return expand_upwards(vma, address + PAGE_SIZE);
-- }
-- return 0;
--}
--
--/*
- * We enter with non-exclusive mmap_sem (to exclude vma changes,
- * but allow concurrent faults), and pte mapped but not yet locked.
- * We return with mmap_sem still held, but pte unmapped and unlocked.
-@@ -3147,27 +3330,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, pte_t *page_table, pmd_t *pmd,
- unsigned int flags)
- {
-- struct page *page;
-+ struct page *page = NULL;
- spinlock_t *ptl;
- pte_t entry;
-
-- pte_unmap(page_table);
--
-- /* Check if we need to add a guard page to the stack */
-- if (check_stack_guard_page(vma, address) < 0)
-- return VM_FAULT_SIGSEGV;
--
-- /* Use the zero-page for reads */
- if (!(flags & FAULT_FLAG_WRITE)) {
- entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
- vma->vm_page_prot));
-- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
-+ ptl = pte_lockptr(mm, pmd);
-+ spin_lock(ptl);
- if (!pte_none(*page_table))
- goto unlock;
- goto setpte;
- }
-
- /* Allocate our own private page. */
-+ pte_unmap(page_table);
-+
- if (unlikely(anon_vma_prepare(vma)))
- goto oom;
- page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -3186,6 +3365,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
- if (!pte_none(*page_table))
- goto release;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (pax_find_mirror_vma(vma))
-+ BUG_ON(!trylock_page(page));
-+#endif
-+
- inc_mm_counter_fast(mm, MM_ANONPAGES);
- page_add_new_anon_rmap(page, vma, address);
- setpte:
-@@ -3193,6 +3377,12 @@ setpte:
-
- /* No need to invalidate - it was non-present before */
- update_mmu_cache(vma, address, page_table);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (page)
-+ pax_mirror_anon_pte(vma, address, page, ptl);
-+#endif
-+
- unlock:
- pte_unmap_unlock(page_table, ptl);
- return 0;
-@@ -3336,6 +3526,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- */
- /* Only go through if we didn't race with anybody else... */
- if (likely(pte_same(*page_table, orig_pte))) {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (anon && pax_find_mirror_vma(vma))
-+ BUG_ON(!trylock_page(page));
-+#endif
-+
- flush_icache_page(vma, page);
- entry = mk_pte(page, vma->vm_page_prot);
- if (flags & FAULT_FLAG_WRITE)
-@@ -3355,6 +3551,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-
- /* no need to invalidate: a not-present page won't be cached */
- update_mmu_cache(vma, address, page_table);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (anon)
-+ pax_mirror_anon_pte(vma, address, page, ptl);
-+ else
-+ pax_mirror_file_pte(vma, address, page, ptl);
-+#endif
-+
- } else {
- if (cow_page)
- mem_cgroup_uncharge_page(cow_page);
-@@ -3508,6 +3712,12 @@ int handle_pte_fault(struct mm_struct *mm,
- if (flags & FAULT_FLAG_WRITE)
- flush_tlb_fix_spurious_fault(vma, address);
- }
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ pax_mirror_pte(vma, address, pte, pmd, ptl);
-+ return 0;
-+#endif
-+
- unlock:
- pte_unmap_unlock(pte, ptl);
- return 0;
-@@ -3524,6 +3734,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- pmd_t *pmd;
- pte_t *pte;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ struct vm_area_struct *vma_m;
-+#endif
-+
- __set_current_state(TASK_RUNNING);
-
- count_vm_event(PGFAULT);
-@@ -3535,6 +3749,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- if (unlikely(is_vm_hugetlb_page(vma)))
- return hugetlb_fault(mm, vma, address, flags);
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ vma_m = pax_find_mirror_vma(vma);
-+ if (vma_m) {
-+ unsigned long address_m;
-+ pgd_t *pgd_m;
-+ pud_t *pud_m;
-+ pmd_t *pmd_m;
-+
-+ if (vma->vm_start > vma_m->vm_start) {
-+ address_m = address;
-+ address -= SEGMEXEC_TASK_SIZE;
-+ vma = vma_m;
-+ } else
-+ address_m = address + SEGMEXEC_TASK_SIZE;
-+
-+ pgd_m = pgd_offset(mm, address_m);
-+ pud_m = pud_alloc(mm, pgd_m, address_m);
-+ if (!pud_m)
-+ return VM_FAULT_OOM;
-+ pmd_m = pmd_alloc(mm, pud_m, address_m);
-+ if (!pmd_m)
-+ return VM_FAULT_OOM;
-+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
-+ return VM_FAULT_OOM;
-+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
-+ }
-+#endif
-+
- retry:
- pgd = pgd_offset(mm, address);
- pud = pud_alloc(mm, pgd, address);
-@@ -3576,7 +3818,7 @@ retry:
- * run pte_offset_map on the pmd, if an huge pmd could
- * materialize from under us from a different thread.
- */
-- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
-+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
- return VM_FAULT_OOM;
- /* if an huge pmd materialized from under us just retry later */
- if (unlikely(pmd_trans_huge(*pmd)))
-@@ -3613,6 +3855,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
- spin_unlock(&mm->page_table_lock);
- return 0;
- }
-+
-+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
-+{
-+ pud_t *new = pud_alloc_one(mm, address);
-+ if (!new)
-+ return -ENOMEM;
-+
-+ smp_wmb(); /* See comment in __pte_alloc */
-+
-+ spin_lock(&mm->page_table_lock);
-+ if (pgd_present(*pgd)) /* Another has populated it */
-+ pud_free(mm, new);
-+ else
-+ pgd_populate_kernel(mm, pgd, new);
-+ spin_unlock(&mm->page_table_lock);
-+ return 0;
-+}
- #endif /* __PAGETABLE_PUD_FOLDED */
-
- #ifndef __PAGETABLE_PMD_FOLDED
-@@ -3643,11 +3902,35 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
- spin_unlock(&mm->page_table_lock);
- return 0;
- }
-+
-+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
-+{
-+ pmd_t *new = pmd_alloc_one(mm, address);
-+ if (!new)
-+ return -ENOMEM;
-+
-+ smp_wmb(); /* See comment in __pte_alloc */
-+
-+ spin_lock(&mm->page_table_lock);
-+#ifndef __ARCH_HAS_4LEVEL_HACK
-+ if (pud_present(*pud)) /* Another has populated it */
-+ pmd_free(mm, new);
-+ else
-+ pud_populate_kernel(mm, pud, new);
-+#else
-+ if (pgd_present(*pud)) /* Another has populated it */
-+ pmd_free(mm, new);
-+ else
-+ pgd_populate_kernel(mm, pud, new);
-+#endif /* __ARCH_HAS_4LEVEL_HACK */
-+ spin_unlock(&mm->page_table_lock);
-+ return 0;
-+}
- #endif /* __PAGETABLE_PMD_FOLDED */
-
--int make_pages_present(unsigned long addr, unsigned long end)
-+ssize_t make_pages_present(unsigned long addr, unsigned long end)
- {
-- int ret, len, write;
-+ ssize_t ret, len, write;
- struct vm_area_struct * vma;
-
- vma = find_vma(current->mm, addr);
-@@ -3680,7 +3963,7 @@ static int __init gate_vma_init(void)
- gate_vma.vm_start = FIXADDR_USER_START;
- gate_vma.vm_end = FIXADDR_USER_END;
- gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
-- gate_vma.vm_page_prot = __P101;
-+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
- /*
- * Make sure the vDSO gets into every core dump.
- * Dumping its contents makes post-mortem fully interpretable later
-@@ -3820,8 +4103,8 @@ out:
- return ret;
- }
-
--int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
-- void *buf, int len, int write)
-+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
-+ void *buf, size_t len, int write)
- {
- resource_size_t phys_addr;
- unsigned long prot = 0;
-@@ -3846,8 +4129,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
- * Access another process' address space as given in mm. If non-NULL, use the
- * given task for page fault accounting.
- */
--static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
-- unsigned long addr, void *buf, int len, int write)
-+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
-+ unsigned long addr, void *buf, size_t len, int write)
- {
- struct vm_area_struct *vma;
- void *old_buf = buf;
-@@ -3855,7 +4138,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
- down_read(&mm->mmap_sem);
- /* ignore errors, just check how much was successfully transferred */
- while (len) {
-- int bytes, ret, offset;
-+ ssize_t bytes, ret, offset;
- void *maddr;
- struct page *page = NULL;
-
-@@ -3914,8 +4197,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
- *
- * The caller must hold a reference on @mm.
- */
--int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-- void *buf, int len, int write)
-+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
-+ void *buf, size_t len, int write)
- {
- return __access_remote_vm(NULL, mm, addr, buf, len, write);
- }
-@@ -3925,11 +4208,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
- * Source/target buffer must be kernel space,
- * Do not walk the page table directly, use get_user_pages
- */
--int access_process_vm(struct task_struct *tsk, unsigned long addr,
-- void *buf, int len, int write)
-+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
-+ void *buf, size_t len, int write)
- {
- struct mm_struct *mm;
-- int ret;
-+ ssize_t ret;
-
- mm = get_task_mm(tsk);
- if (!mm)
-diff --git a/mm/mempolicy.c b/mm/mempolicy.c
-index a72fa33..0b12a09 100644
---- a/mm/mempolicy.c
-+++ b/mm/mempolicy.c
-@@ -652,6 +652,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
- unsigned long vmstart;
- unsigned long vmend;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ struct vm_area_struct *vma_m;
-+#endif
-+
- vma = find_vma_prev(mm, start, &prev);
- if (!vma || vma->vm_start > start)
- return -EFAULT;
-@@ -690,6 +694,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
- err = vma_replace_policy(vma, new_pol);
- if (err)
- goto out;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ vma_m = pax_find_mirror_vma(vma);
-+ if (vma_m) {
-+ err = vma_replace_policy(vma_m, new_pol);
-+ if (err)
-+ goto out;
-+ }
-+#endif
-+
- }
-
- out:
-@@ -1126,6 +1140,17 @@ static long do_mbind(unsigned long start, unsigned long len,
-
- if (end < start)
- return -EINVAL;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
-+ if (end > SEGMEXEC_TASK_SIZE)
-+ return -EINVAL;
-+ } else
-+#endif
-+
-+ if (end > TASK_SIZE)
-+ return -EINVAL;
-+
- if (end == start)
- return 0;
-
-@@ -1341,6 +1366,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
- if (!mm)
- goto out;
-
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ if (mm != current->mm &&
-+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
-+ err = -EPERM;
-+ goto out;
-+ }
-+#endif
-+
- /*
- * Check if this process has the right to modify the specified
- * process. The right exists if the process has administrative
-@@ -1350,8 +1383,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
- rcu_read_lock();
- tcred = __task_cred(task);
- if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
-- cred->uid != tcred->suid && cred->uid != tcred->uid &&
-- !capable(CAP_SYS_NICE)) {
-+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
- rcu_read_unlock();
- err = -EPERM;
- goto out;
-diff --git a/mm/migrate.c b/mm/migrate.c
-index 7d26ea5..e2941874 100644
---- a/mm/migrate.c
-+++ b/mm/migrate.c
-@@ -1392,6 +1392,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
- if (!mm)
- return -EINVAL;
-
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ if (mm != current->mm &&
-+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
-+ err = -EPERM;
-+ goto out;
-+ }
-+#endif
-+
- /*
- * Check if this process has the right to modify the specified
- * process. The right exists if the process has administrative
-@@ -1401,8 +1409,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
- rcu_read_lock();
- tcred = __task_cred(task);
- if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
-- cred->uid != tcred->suid && cred->uid != tcred->uid &&
-- !capable(CAP_SYS_NICE)) {
-+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
- rcu_read_unlock();
- err = -EPERM;
- goto out;
-diff --git a/mm/mlock.c b/mm/mlock.c
-index 39b3a7d..660592a 100644
---- a/mm/mlock.c
-+++ b/mm/mlock.c
-@@ -13,6 +13,7 @@
- #include <linux/pagemap.h>
- #include <linux/mempolicy.h>
- #include <linux/syscalls.h>
-+#include <linux/security.h>
- #include <linux/sched.h>
- #include <linux/export.h>
- #include <linux/rmap.h>
-@@ -378,7 +379,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
- {
- unsigned long nstart, end, tmp;
- struct vm_area_struct * vma, * prev;
-- int error;
-+ int error = 0;
-
- VM_BUG_ON(start & ~PAGE_MASK);
- VM_BUG_ON(len != PAGE_ALIGN(len));
-@@ -387,6 +388,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
- return -EINVAL;
- if (end == start)
- return 0;
-+ if (end > TASK_SIZE)
-+ return -EINVAL;
-+
- vma = find_vma_prev(current->mm, start, &prev);
- if (!vma || vma->vm_start > start)
- return -ENOMEM;
-@@ -397,6 +401,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
- for (nstart = start ; ; ) {
- vm_flags_t newflags;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
-+ break;
-+#endif
-+
- /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
-
- newflags = vma->vm_flags | VM_LOCKED;
-@@ -502,6 +511,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
- lock_limit >>= PAGE_SHIFT;
-
- /* check against resource limits */
-+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
- if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
- error = do_mlock(start, len, 1);
- up_write(&current->mm->mmap_sem);
-@@ -525,23 +535,29 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
- static int do_mlockall(int flags)
- {
- struct vm_area_struct * vma, * prev = NULL;
-- unsigned int def_flags = 0;
-
- if (flags & MCL_FUTURE)
-- def_flags = VM_LOCKED;
-- current->mm->def_flags = def_flags;
-+ current->mm->def_flags |= VM_LOCKED;
-+ else
-+ current->mm->def_flags &= ~VM_LOCKED;
- if (flags == MCL_FUTURE)
- goto out;
-
- for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
- vm_flags_t newflags;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
-+ break;
-+#endif
-+
- newflags = vma->vm_flags | VM_LOCKED;
- if (!(flags & MCL_CURRENT))
- newflags &= ~VM_LOCKED;
-
- /* Ignore errors */
- mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
-+ cond_resched();
- }
- out:
- return 0;
-@@ -568,6 +584,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
- lock_limit >>= PAGE_SHIFT;
-
- ret = -ENOMEM;
-+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
- if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
- capable(CAP_IPC_LOCK))
- ret = do_mlockall(flags);
-diff --git a/mm/mm_init.c b/mm/mm_init.c
-index 1ffd97a..ed75674 100644
---- a/mm/mm_init.c
-+++ b/mm/mm_init.c
-@@ -9,8 +9,33 @@
- #include <linux/init.h>
- #include <linux/kobject.h>
- #include <linux/export.h>
-+#include <linux/slab.h>
- #include "internal.h"
-
-+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
-+static int __init pax_sanitize_slab_setup(char *str)
-+{
-+ if (!str)
-+ return 0;
-+
-+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
-+ pr_info("PaX slab sanitization: %s\n", "disabled");
-+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
-+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
-+ pr_info("PaX slab sanitization: %s\n", "fast");
-+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
-+ } else if (!strcmp(str, "full")) {
-+ pr_info("PaX slab sanitization: %s\n", "full");
-+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
-+ } else
-+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
-+
-+ return 0;
-+}
-+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
-+#endif
-+
- #ifdef CONFIG_DEBUG_MEMORY_INIT
- int mminit_loglevel;
-
-diff --git a/mm/mmap.c b/mm/mmap.c
-index 94f4e34..294492d 100644
---- a/mm/mmap.c
-+++ b/mm/mmap.c
-@@ -30,6 +30,7 @@
- #include <linux/perf_event.h>
- #include <linux/audit.h>
- #include <linux/khugepaged.h>
-+#include <linux/random.h>
-
- #include <asm/uaccess.h>
- #include <asm/cacheflush.h>
-@@ -46,6 +47,16 @@
- #define arch_rebalance_pgtables(addr, len) (addr)
- #endif
-
-+static inline void verify_mm_writelocked(struct mm_struct *mm)
-+{
-+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
-+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
-+ up_read(&mm->mmap_sem);
-+ BUG();
-+ }
-+#endif
-+}
-+
- static void unmap_region(struct mm_struct *mm,
- struct vm_area_struct *vma, struct vm_area_struct *prev,
- unsigned long start, unsigned long end);
-@@ -71,22 +82,32 @@ static void unmap_region(struct mm_struct *mm,
- * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
- *
- */
--pgprot_t protection_map[16] = {
-+pgprot_t protection_map[16] __read_only = {
- __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
- __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
- };
-
--pgprot_t vm_get_page_prot(unsigned long vm_flags)
-+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
- {
-- return __pgprot(pgprot_val(protection_map[vm_flags &
-+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
- (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
- pgprot_val(arch_vm_get_page_prot(vm_flags)));
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
-+ if (!(__supported_pte_mask & _PAGE_NX) &&
-+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
-+ (vm_flags & (VM_READ | VM_WRITE)))
-+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
-+#endif
-+
-+ return prot;
- }
- EXPORT_SYMBOL(vm_get_page_prot);
-
- int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
- int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
- int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
-+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
- /*
- * Make sure vm_committed_as in one cacheline and not cacheline shared with
- * other variables. It can be updated by several CPUs frequently.
-@@ -228,6 +249,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
- struct vm_area_struct *next = vma->vm_next;
-
- might_sleep();
-+ BUG_ON(vma->vm_mirror);
- if (vma->vm_ops && vma->vm_ops->close)
- vma->vm_ops->close(vma);
- if (vma->vm_file) {
-@@ -272,6 +294,12 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
- * not page aligned -Ram Gupta
- */
- rlim = rlimit(RLIMIT_DATA);
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
-+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMPABLE_ENABLED) && current_uid())
-+ rlim = 4096 * PAGE_SIZE;
-+#endif
-+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
- if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
- (mm->end_data - mm->start_data) > rlim)
- goto out;
-@@ -692,6 +720,12 @@ static int
- can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
- struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
- {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
-+ return 0;
-+#endif
-+
- if (is_mergeable_vma(vma, file, vm_flags) &&
- is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
- if (vma->vm_pgoff == vm_pgoff)
-@@ -711,6 +745,12 @@ static int
- can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
- struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
- {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
-+ return 0;
-+#endif
-+
- if (is_mergeable_vma(vma, file, vm_flags) &&
- is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
- pgoff_t vm_pglen;
-@@ -753,13 +793,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
- struct vm_area_struct *vma_merge(struct mm_struct *mm,
- struct vm_area_struct *prev, unsigned long addr,
- unsigned long end, unsigned long vm_flags,
-- struct anon_vma *anon_vma, struct file *file,
-+ struct anon_vma *anon_vma, struct file *file,
- pgoff_t pgoff, struct mempolicy *policy)
- {
- pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
- struct vm_area_struct *area, *next;
- int err;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
-+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
-+
-+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
-+#endif
-+
- /*
- * We later require that vma->vm_flags == vm_flags,
- * so this tests vma->vm_flags & VM_SPECIAL, too.
-@@ -775,6 +822,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
- if (next && next->vm_end == end) /* cases 6, 7, 8 */
- next = next->vm_next;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (prev)
-+ prev_m = pax_find_mirror_vma(prev);
-+ if (area)
-+ area_m = pax_find_mirror_vma(area);
-+ if (next)
-+ next_m = pax_find_mirror_vma(next);
-+#endif
-+
- /*
- * Can it merge with the predecessor?
- */
-@@ -794,9 +850,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
- /* cases 1, 6 */
- err = vma_adjust(prev, prev->vm_start,
- next->vm_end, prev->vm_pgoff, NULL);
-- } else /* cases 2, 5, 7 */
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (!err && prev_m)
-+ err = vma_adjust(prev_m, prev_m->vm_start,
-+ next_m->vm_end, prev_m->vm_pgoff, NULL);
-+#endif
-+
-+ } else { /* cases 2, 5, 7 */
- err = vma_adjust(prev, prev->vm_start,
- end, prev->vm_pgoff, NULL);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (!err && prev_m)
-+ err = vma_adjust(prev_m, prev_m->vm_start,
-+ end_m, prev_m->vm_pgoff, NULL);
-+#endif
-+
-+ }
- if (err)
- return NULL;
- khugepaged_enter_vma_merge(prev, vm_flags);
-@@ -810,12 +881,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
- mpol_equal(policy, vma_policy(next)) &&
- can_vma_merge_before(next, vm_flags,
- anon_vma, file, pgoff+pglen)) {
-- if (prev && addr < prev->vm_end) /* case 4 */
-+ if (prev && addr < prev->vm_end) { /* case 4 */
- err = vma_adjust(prev, prev->vm_start,
- addr, prev->vm_pgoff, NULL);
-- else /* cases 3, 8 */
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (!err && prev_m)
-+ err = vma_adjust(prev_m, prev_m->vm_start,
-+ addr_m, prev_m->vm_pgoff, NULL);
-+#endif
-+
-+ } else { /* cases 3, 8 */
- err = vma_adjust(area, addr, next->vm_end,
- next->vm_pgoff - pglen, NULL);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (!err && area_m)
-+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
-+ next_m->vm_pgoff - pglen, NULL);
-+#endif
-+
-+ }
- if (err)
- return NULL;
- khugepaged_enter_vma_merge(area, vm_flags);
-@@ -924,15 +1010,22 @@ none:
- void vm_stat_account(struct mm_struct *mm, unsigned long flags,
- struct file *file, long pages)
- {
-- const unsigned long stack_flags
-- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
-+#endif
-+
-+ mm->total_vm += pages;
-
- if (file) {
- mm->shared_vm += pages;
- if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
- mm->exec_vm += pages;
-- } else if (flags & stack_flags)
-+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
- mm->stack_vm += pages;
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
-+#endif
- if (flags & (VM_RESERVED|VM_IO))
- mm->reserved_vm += pages;
- }
-@@ -958,7 +1051,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
- * (the exception is when the underlying filesystem is noexec
- * mounted, in which case we dont add PROT_EXEC.)
- */
-- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
-+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
- if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
- prot |= PROT_EXEC;
-
-@@ -984,7 +1077,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
- /* Obtain the address to map to. we verify (or select) it and ensure
- * that it represents a valid section of the address space.
- */
-- addr = get_unmapped_area(file, addr, len, pgoff, flags);
-+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
- if (addr & ~PAGE_MASK)
- return addr;
-
-@@ -995,6 +1088,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
- vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
- mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
-
-+#ifdef CONFIG_PAX_MPROTECT
-+ if (mm->pax_flags & MF_PAX_MPROTECT) {
-+
-+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
-+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
-+ mm->binfmt->handle_mmap)
-+ mm->binfmt->handle_mmap(file);
-+#endif
-+
-+#ifndef CONFIG_PAX_MPROTECT_COMPAT
-+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
-+ gr_log_rwxmmap(file);
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+ vm_flags &= ~VM_EXEC;
-+#else
-+ return -EPERM;
-+#endif
-+
-+ }
-+
-+ if (!(vm_flags & VM_EXEC))
-+ vm_flags &= ~VM_MAYEXEC;
-+#else
-+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
-+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
-+#endif
-+ else
-+ vm_flags &= ~VM_MAYWRITE;
-+ }
-+#endif
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
-+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
-+ vm_flags &= ~VM_PAGEEXEC;
-+#endif
-+
- if (flags & MAP_LOCKED)
- if (!can_do_mlock())
- return -EPERM;
-@@ -1006,6 +1136,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
- locked += mm->locked_vm;
- lock_limit = rlimit(RLIMIT_MEMLOCK);
- lock_limit >>= PAGE_SHIFT;
-+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
- if (locked > lock_limit && !capable(CAP_IPC_LOCK))
- return -EAGAIN;
- }
-@@ -1076,6 +1207,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
- if (error)
- return error;
-
-+ if (!gr_acl_handle_mmap(file, prot))
-+ return -EACCES;
-+
- return mmap_region(file, addr, len, flags, vm_flags, pgoff);
- }
- EXPORT_SYMBOL(do_mmap_pgoff);
-@@ -1156,7 +1290,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
- vm_flags_t vm_flags = vma->vm_flags;
-
- /* If it was private or non-writable, the write bit is already clear */
-- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
-+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
- return 0;
-
- /* The backer wishes to know when pages are first written to? */
-@@ -1205,17 +1339,32 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
- unsigned long charged = 0;
- struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ struct vm_area_struct *vma_m = NULL;
-+#endif
-+
-+ /*
-+ * mm->mmap_sem is required to protect against another thread
-+ * changing the mappings in case we sleep.
-+ */
-+ verify_mm_writelocked(mm);
-+
- /* Clear old maps */
- error = -ENOMEM;
--munmap_back:
- vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
- if (vma && vma->vm_start < addr + len) {
- if (do_munmap(mm, addr, len))
- return -ENOMEM;
-- goto munmap_back;
-+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
-+ BUG_ON(vma && vma->vm_start < addr + len);
- }
-
- /* Check against address space limit. */
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
-+#endif
-+
- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
- return -ENOMEM;
-
-@@ -1261,6 +1410,16 @@ munmap_back:
- goto unacct_error;
- }
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
-+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-+ if (!vma_m) {
-+ error = -ENOMEM;
-+ goto free_vma;
-+ }
-+ }
-+#endif
-+
- vma->vm_mm = mm;
- vma->vm_start = addr;
- vma->vm_end = addr + len;
-@@ -1269,8 +1428,9 @@ munmap_back:
- vma->vm_pgoff = pgoff;
- INIT_LIST_HEAD(&vma->anon_vma_chain);
-
-+ error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
-+
- if (file) {
-- error = -EINVAL;
- if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
- goto free_vma;
- if (vm_flags & VM_DENYWRITE) {
-@@ -1284,6 +1444,19 @@ munmap_back:
- error = file->f_op->mmap(file, vma);
- if (error)
- goto unmap_and_free_vma;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (vma_m && (vm_flags & VM_EXECUTABLE))
-+ added_exe_file_vma(mm);
-+#endif
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
-+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
-+ vma->vm_flags |= VM_PAGEEXEC;
-+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-+ }
-+#endif
-+
- if (vm_flags & VM_EXECUTABLE)
- added_exe_file_vma(mm);
-
-@@ -1296,6 +1469,8 @@ munmap_back:
- pgoff = vma->vm_pgoff;
- vm_flags = vma->vm_flags;
- } else if (vm_flags & VM_SHARED) {
-+ if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
-+ goto free_vma;
- error = shmem_zero_setup(vma);
- if (error)
- goto free_vma;
-@@ -1319,14 +1494,19 @@ munmap_back:
- vma_link(mm, vma, prev, rb_link, rb_parent);
- file = vma->vm_file;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (vma_m)
-+ BUG_ON(pax_mirror_vma(vma_m, vma));
-+#endif
-+
- /* Once vma denies write, undo our temporary denial count */
- if (correct_wcount)
- atomic_inc(&inode->i_writecount);
- out:
- perf_event_mmap(vma);
-
-- mm->total_vm += len >> PAGE_SHIFT;
- vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
-+ track_exec_limit(mm, addr, addr + len, vm_flags);
- if (vm_flags & VM_LOCKED) {
- if (!mlock_vma_pages_range(vma, addr, addr + len))
- mm->locked_vm += (len >> PAGE_SHIFT);
-@@ -1344,6 +1524,12 @@ unmap_and_free_vma:
- unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
- charged = 0;
- free_vma:
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (vma_m)
-+ kmem_cache_free(vm_area_cachep, vma_m);
-+#endif
-+
- kmem_cache_free(vm_area_cachep, vma);
- unacct_error:
- if (charged)
-@@ -1351,6 +1537,73 @@ unacct_error:
- return error;
- }
-
-+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
-+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
-+{
-+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
-+ return ((random32() & 0xFF) + 1) << PAGE_SHIFT;
-+
-+ return 0;
-+}
-+#endif
-+
-+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long *addr, unsigned long len, unsigned long offset)
-+{
-+ if (!vma) {
-+#ifdef CONFIG_STACK_GROWSUP
-+ if (*addr > sysctl_heap_stack_gap)
-+ vma = find_vma(current->mm, *addr - sysctl_heap_stack_gap);
-+ else
-+ vma = find_vma(current->mm, 0);
-+ if (vma && (vma->vm_flags & VM_GROWSUP))
-+ return false;
-+#endif
-+ return true;
-+ }
-+
-+ if (*addr + len > vma->vm_start)
-+ return false;
-+
-+ if (offset) {
-+ if (vma->vm_prev && *addr == vma->vm_prev->vm_end && (vma->vm_start - len - vma->vm_prev->vm_end >= offset)) {
-+ *addr = vma->vm_prev->vm_end + offset;
-+ return true;
-+ }
-+ return offset <= vma->vm_start - *addr - len;
-+ } else if (vma->vm_flags & VM_GROWSDOWN)
-+ return sysctl_heap_stack_gap <= vma->vm_start - *addr - len;
-+#ifdef CONFIG_STACK_GROWSUP
-+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
-+ if (*addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap)
-+ return true;
-+ if (vma->vm_start - len - vma->vm_prev->vm_end >= sysctl_heap_stack_gap) {
-+ *addr = vma->vm_start - len;
-+ return true;
-+ }
-+ return false;
-+ }
-+#endif
-+
-+ return true;
-+}
-+
-+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
-+{
-+ if (vma->vm_start < len)
-+ return -ENOMEM;
-+
-+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
-+ if (offset <= vma->vm_start - len)
-+ return vma->vm_start - len - offset;
-+ else
-+ return -ENOMEM;
-+ }
-+
-+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
-+ return vma->vm_start - len - sysctl_heap_stack_gap;
-+ return -ENOMEM;
-+}
-+
- /* Get an address range which is currently unmapped.
- * For shmat() with addr=0.
- *
-@@ -1370,6 +1623,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long start_addr;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
-
- if (len > TASK_SIZE - mmap_min_addr)
- return -ENOMEM;
-@@ -1377,18 +1631,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
- if (flags & MAP_FIXED)
- return addr;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (addr) {
- addr = PAGE_ALIGN(addr);
-- vma = find_vma(mm, addr);
-- if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-- (!vma || addr + len <= vma->vm_start))
-- return addr;
-+ if (TASK_SIZE - len >= addr) {
-+ vma = find_vma(mm, addr);
-+ if (addr >= mmap_min_addr && check_heap_stack_gap(vma, &addr, len, offset))
-+ return addr;
-+ }
- }
- if (len > mm->cached_hole_size) {
-- start_addr = addr = mm->free_area_cache;
-+ start_addr = addr = mm->free_area_cache;
- } else {
-- start_addr = addr = TASK_UNMAPPED_BASE;
-- mm->cached_hole_size = 0;
-+ start_addr = addr = mm->mmap_base;
-+ mm->cached_hole_size = 0;
- }
-
- full_search:
-@@ -1399,34 +1658,40 @@ full_search:
- * Start a new search - just in case we missed
- * some holes.
- */
-- if (start_addr != TASK_UNMAPPED_BASE) {
-- addr = TASK_UNMAPPED_BASE;
-- start_addr = addr;
-+ if (start_addr != mm->mmap_base) {
-+ start_addr = addr = mm->mmap_base;
- mm->cached_hole_size = 0;
- goto full_search;
- }
- return -ENOMEM;
- }
-- if (!vma || addr + len <= vma->vm_start) {
-- /*
-- * Remember the place where we stopped the search:
-- */
-- mm->free_area_cache = addr + len;
-- return addr;
-- }
-+ if (check_heap_stack_gap(vma, &addr, len, offset))
-+ break;
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
- addr = vma->vm_end;
- }
-+
-+ /*
-+ * Remember the place where we stopped the search:
-+ */
-+ mm->free_area_cache = addr + len;
-+ return addr;
- }
- #endif
-
- void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
- {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
-+ return;
-+#endif
-+
- /*
- * Is this a new hole at the lowest possible address?
- */
-- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
-+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
- mm->free_area_cache = addr;
- mm->cached_hole_size = ~0UL;
- }
-@@ -1444,7 +1709,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- {
- struct vm_area_struct *vma;
- struct mm_struct *mm = current->mm;
-- unsigned long addr = addr0;
-+ unsigned long base = mm->mmap_base, addr = addr0;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
- unsigned long low_limit = max(PAGE_SIZE, mmap_min_addr);
-
- /* requested length too big for entire address space */
-@@ -1454,13 +1720,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- if (flags & MAP_FIXED)
- return addr;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- /* requesting a specific address */
- if (addr) {
- addr = PAGE_ALIGN(addr);
-- vma = find_vma(mm, addr);
-- if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-- (!vma || addr + len <= vma->vm_start))
-- return addr;
-+ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr) {
-+ vma = find_vma(mm, addr);
-+ if (check_heap_stack_gap(vma, &addr, len, offset))
-+ return addr;
-+ }
- }
-
- /* check if free_area_cache is useful for us */
-@@ -1474,10 +1745,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
-
- /* make sure it can fit in the remaining address space */
- if (addr >= low_limit + len) {
-- vma = find_vma(mm, addr-len);
-- if (!vma || addr <= vma->vm_start)
-+ addr -= len;
-+ vma = find_vma(mm, addr);
-+ if (check_heap_stack_gap(vma, &addr, len, offset))
- /* remember the address as a hint for next time */
-- return (mm->free_area_cache = addr-len);
-+ return (mm->free_area_cache = addr);
- }
-
- if (mm->mmap_base < low_limit + len)
-@@ -1492,7 +1764,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- * return with success:
- */
- vma = find_vma(mm, addr);
-- if (!vma || addr+len <= vma->vm_start)
-+ if (check_heap_stack_gap(vma, &addr, len, offset))
- /* remember the address as a hint for next time */
- return (mm->free_area_cache = addr);
-
-@@ -1501,8 +1773,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- mm->cached_hole_size = vma->vm_start - addr;
-
- /* try just below the current vma->vm_start */
-- addr = vma->vm_start-len;
-- } while (vma->vm_start >= low_limit + len);
-+ addr = skip_heap_stack_gap(vma, len, offset);
-+ } while (!IS_ERR_VALUE(addr) && addr >= low_limit);
-
- bottomup:
- /*
-@@ -1511,13 +1783,21 @@ bottomup:
- * can happen with large stack limits and large mmap()
- * allocations.
- */
-+ mm->mmap_base = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
-+ mm->free_area_cache = mm->mmap_base;
- mm->cached_hole_size = ~0UL;
-- mm->free_area_cache = TASK_UNMAPPED_BASE;
- addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
- /*
- * Restore the topdown base:
- */
-- mm->free_area_cache = mm->mmap_base;
-+ mm->mmap_base = base;
-+ mm->free_area_cache = base;
- mm->cached_hole_size = ~0UL;
-
- return addr;
-@@ -1526,6 +1806,12 @@ bottomup:
-
- void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
- {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
-+ return;
-+#endif
-+
- /*
- * Is this a new hole at the highest possible address?
- */
-@@ -1533,8 +1819,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
- mm->free_area_cache = addr;
-
- /* dont allow allocations above current base */
-- if (mm->free_area_cache > mm->mmap_base)
-+ if (mm->free_area_cache > mm->mmap_base) {
- mm->free_area_cache = mm->mmap_base;
-+ mm->cached_hole_size = ~0UL;
-+ }
- }
-
- unsigned long
-@@ -1607,40 +1895,50 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
-
- EXPORT_SYMBOL(find_vma);
-
--/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
-+/*
-+ * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
-+ */
- struct vm_area_struct *
- find_vma_prev(struct mm_struct *mm, unsigned long addr,
- struct vm_area_struct **pprev)
- {
-- struct vm_area_struct *vma = NULL, *prev = NULL;
-- struct rb_node *rb_node;
-- if (!mm)
-- goto out;
--
-- /* Guard against addr being lower than the first VMA */
-- vma = mm->mmap;
--
-- /* Go through the RB tree quickly. */
-- rb_node = mm->mm_rb.rb_node;
--
-- while (rb_node) {
-- struct vm_area_struct *vma_tmp;
-- vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
--
-- if (addr < vma_tmp->vm_end) {
-- rb_node = rb_node->rb_left;
-- } else {
-- prev = vma_tmp;
-- if (!prev->vm_next || (addr < prev->vm_next->vm_end))
-- break;
-+ struct vm_area_struct *vma;
-+
-+ vma = find_vma(mm, addr);
-+ if (vma) {
-+ *pprev = vma->vm_prev;
-+ } else {
-+ struct rb_node *rb_node = mm->mm_rb.rb_node;
-+ *pprev = NULL;
-+ while (rb_node) {
-+ *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
- rb_node = rb_node->rb_right;
- }
- }
-+ return vma;
-+}
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
-+{
-+ struct vm_area_struct *vma_m;
-
--out:
-- *pprev = prev;
-- return prev ? prev->vm_next : vma;
-+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
-+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
-+ BUG_ON(vma->vm_mirror);
-+ return NULL;
-+ }
-+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
-+ vma_m = vma->vm_mirror;
-+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
-+ BUG_ON(vma->vm_file != vma_m->vm_file);
-+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
-+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
-+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
-+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
-+ return vma_m;
- }
-+#endif
-
- /*
- * Verify that the stack growth is acceptable and
-@@ -1651,17 +1949,15 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
- {
- struct mm_struct *mm = vma->vm_mm;
- struct rlimit *rlim = current->signal->rlim;
-- unsigned long new_start, actual_size;
-+ unsigned long new_start;
-
- /* address space limit tests */
- if (!may_expand_vm(mm, grow))
- return -ENOMEM;
-
- /* Stack limit test */
-- actual_size = size;
-- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
-- actual_size -= PAGE_SIZE;
-- if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
-+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
-+ if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
- return -ENOMEM;
-
- /* mlock limit tests */
-@@ -1671,6 +1967,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
- locked = mm->locked_vm + grow;
- limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
- limit >>= PAGE_SHIFT;
-+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
- if (locked > limit && !capable(CAP_IPC_LOCK))
- return -ENOMEM;
- }
-@@ -1689,7 +1986,6 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
- return -ENOMEM;
-
- /* Ok, everything looks good - let it rip */
-- mm->total_vm += grow;
- if (vma->vm_flags & VM_LOCKED)
- mm->locked_vm += grow;
- vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
-@@ -1701,37 +1997,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
- * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
- * vma is the last one with address > vma->vm_end. Have to extend vma.
- */
-+#ifndef CONFIG_IA64
-+static
-+#endif
- int expand_upwards(struct vm_area_struct *vma, unsigned long address)
- {
- int error;
-+ bool locknext;
-
- if (!(vma->vm_flags & VM_GROWSUP))
- return -EFAULT;
-
-+ /* Also guard against wrapping around to address 0. */
-+ if (address < PAGE_ALIGN(address+1))
-+ address = PAGE_ALIGN(address+1);
-+ else
-+ return -ENOMEM;
-+
- /*
- * We must make sure the anon_vma is allocated
- * so that the anon_vma locking is not a noop.
- */
- if (unlikely(anon_vma_prepare(vma)))
- return -ENOMEM;
-+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
-+ if (locknext && anon_vma_prepare(vma->vm_next))
-+ return -ENOMEM;
- vma_lock_anon_vma(vma);
-+ if (locknext)
-+ vma_lock_anon_vma(vma->vm_next);
-
- /*
- * vma->vm_start/vm_end cannot change under us because the caller
- * is required to hold the mmap_sem in read mode. We need the
-- * anon_vma lock to serialize against concurrent expand_stacks.
-- * Also guard against wrapping around to address 0.
-+ * anon_vma locks to serialize against concurrent expand_stacks
-+ * and expand_upwards.
- */
-- if (address < PAGE_ALIGN(address+4))
-- address = PAGE_ALIGN(address+4);
-- else {
-- vma_unlock_anon_vma(vma);
-- return -ENOMEM;
-- }
- error = 0;
-
- /* Somebody else might have raced and expanded it already */
-- if (address > vma->vm_end) {
-+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
-+ error = -ENOMEM;
-+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
- unsigned long size, grow;
-
- size = address - vma->vm_start;
-@@ -1746,6 +2053,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
- }
- }
- }
-+ if (locknext)
-+ vma_unlock_anon_vma(vma->vm_next);
- vma_unlock_anon_vma(vma);
- khugepaged_enter_vma_merge(vma, vma->vm_flags);
- return error;
-@@ -1759,6 +2068,8 @@ int expand_downwards(struct vm_area_struct *vma,
- unsigned long address)
- {
- int error;
-+ bool lockprev = false;
-+ struct vm_area_struct *prev;
-
- /*
- * We must make sure the anon_vma is allocated
-@@ -1772,6 +2083,15 @@ int expand_downwards(struct vm_area_struct *vma,
- if (error)
- return error;
-
-+ prev = vma->vm_prev;
-+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
-+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
-+#endif
-+ if (lockprev && anon_vma_prepare(prev))
-+ return -ENOMEM;
-+ if (lockprev)
-+ vma_lock_anon_vma(prev);
-+
- vma_lock_anon_vma(vma);
-
- /*
-@@ -1781,9 +2101,17 @@ int expand_downwards(struct vm_area_struct *vma,
- */
-
- /* Somebody else might have raced and expanded it already */
-- if (address < vma->vm_start) {
-+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
-+ error = -ENOMEM;
-+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
- unsigned long size, grow;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ struct vm_area_struct *vma_m;
-+
-+ vma_m = pax_find_mirror_vma(vma);
-+#endif
-+
- size = vma->vm_end - address;
- grow = (vma->vm_start - address) >> PAGE_SHIFT;
-
-@@ -1793,18 +2121,48 @@ int expand_downwards(struct vm_area_struct *vma,
- if (!error) {
- vma->vm_start = address;
- vma->vm_pgoff -= grow;
-+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (vma_m) {
-+ vma_m->vm_start -= grow << PAGE_SHIFT;
-+ vma_m->vm_pgoff -= grow;
-+ }
-+#endif
-+
- perf_event_mmap(vma);
- }
- }
- }
- vma_unlock_anon_vma(vma);
-+ if (lockprev)
-+ vma_unlock_anon_vma(prev);
- khugepaged_enter_vma_merge(vma, vma->vm_flags);
- return error;
- }
-
-+/*
-+ * Note how expand_stack() refuses to expand the stack all the way to
-+ * abut the next virtual mapping, *unless* that mapping itself is also
-+ * a stack mapping. We want to leave room for a guard page, after all
-+ * (the guard page itself is not added here, that is done by the
-+ * actual page faulting logic)
-+ *
-+ * This matches the behavior of the guard page logic (see mm/memory.c:
-+ * check_stack_guard_page()), which only allows the guard page to be
-+ * removed under these circumstances.
-+ */
- #ifdef CONFIG_STACK_GROWSUP
- int expand_stack(struct vm_area_struct *vma, unsigned long address)
- {
-+ struct vm_area_struct *next;
-+
-+ address &= PAGE_MASK;
-+ next = vma->vm_next;
-+ if (next && next->vm_start == address + PAGE_SIZE) {
-+ if (!(next->vm_flags & VM_GROWSUP))
-+ return -ENOMEM;
-+ }
- return expand_upwards(vma, address);
- }
-
-@@ -1827,6 +2185,14 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
- #else
- int expand_stack(struct vm_area_struct *vma, unsigned long address)
- {
-+ struct vm_area_struct *prev;
-+
-+ address &= PAGE_MASK;
-+ prev = vma->vm_prev;
-+ if (prev && prev->vm_end == address) {
-+ if (!(prev->vm_flags & VM_GROWSDOWN))
-+ return -ENOMEM;
-+ }
- return expand_downwards(vma, address);
- }
-
-@@ -1867,7 +2233,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
- do {
- long nrpages = vma_pages(vma);
-
-- mm->total_vm -= nrpages;
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
-+ vma = remove_vma(vma);
-+ continue;
-+ }
-+#endif
-+
- vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
- vma = remove_vma(vma);
- } while (vma);
-@@ -1912,6 +2284,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
- insertion_point = (prev ? &prev->vm_next : &mm->mmap);
- vma->vm_prev = NULL;
- do {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (vma->vm_mirror) {
-+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
-+ vma->vm_mirror->vm_mirror = NULL;
-+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
-+ vma->vm_mirror = NULL;
-+ }
-+#endif
-+
- rb_erase(&vma->vm_rb, &mm->mm_rb);
- mm->map_count--;
- tail_vma = vma;
-@@ -1940,14 +2322,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
- struct vm_area_struct *new;
- int err = -ENOMEM;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ struct vm_area_struct *vma_m, *new_m = NULL;
-+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
-+#endif
-+
- if (is_vm_hugetlb_page(vma) && (addr &
- ~(huge_page_mask(hstate_vma(vma)))))
- return -EINVAL;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ vma_m = pax_find_mirror_vma(vma);
-+#endif
-+
- new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
- if (!new)
- goto out_err;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (vma_m) {
-+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
-+ if (!new_m) {
-+ kmem_cache_free(vm_area_cachep, new);
-+ goto out_err;
-+ }
-+ }
-+#endif
-+
- /* most fields are the same, copy all, and then fixup */
- *new = *vma;
-
-@@ -1960,6 +2361,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
- new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
- }
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (vma_m) {
-+ *new_m = *vma_m;
-+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
-+ new_m->vm_mirror = new;
-+ new->vm_mirror = new_m;
-+
-+ if (new_below)
-+ new_m->vm_end = addr_m;
-+ else {
-+ new_m->vm_start = addr_m;
-+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
-+ }
-+ }
-+#endif
-+
- pol = mpol_dup(vma_policy(vma));
- if (IS_ERR(pol)) {
- err = PTR_ERR(pol);
-@@ -1985,6 +2402,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
- else
- err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (!err && vma_m) {
-+ if (anon_vma_clone(new_m, vma_m))
-+ goto out_free_mpol;
-+
-+ mpol_get(pol);
-+ vma_set_policy(new_m, pol);
-+
-+ if (new_m->vm_file) {
-+ get_file(new_m->vm_file);
-+ if (vma_m->vm_flags & VM_EXECUTABLE)
-+ added_exe_file_vma(mm);
-+ }
-+
-+ if (new_m->vm_ops && new_m->vm_ops->open)
-+ new_m->vm_ops->open(new_m);
-+
-+ if (new_below)
-+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
-+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
-+ else
-+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
-+
-+ if (err) {
-+ if (new_m->vm_ops && new_m->vm_ops->close)
-+ new_m->vm_ops->close(new_m);
-+ if (new_m->vm_file) {
-+ if (vma_m->vm_flags & VM_EXECUTABLE)
-+ removed_exe_file_vma(mm);
-+ fput(new_m->vm_file);
-+ }
-+ mpol_put(pol);
-+ }
-+ }
-+#endif
-+
- /* Success. */
- if (!err)
- return 0;
-@@ -1997,10 +2450,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
- removed_exe_file_vma(mm);
- fput(new->vm_file);
- }
-- unlink_anon_vmas(new);
- out_free_mpol:
- mpol_put(pol);
- out_free_vma:
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (new_m) {
-+ unlink_anon_vmas(new_m);
-+ kmem_cache_free(vm_area_cachep, new_m);
-+ }
-+#endif
-+
-+ unlink_anon_vmas(new);
- kmem_cache_free(vm_area_cachep, new);
- out_err:
- return err;
-@@ -2013,6 +2474,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
- int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr, int new_below)
- {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
-+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
-+ if (mm->map_count >= sysctl_max_map_count-1)
-+ return -ENOMEM;
-+ } else
-+#endif
-+
- if (mm->map_count >= sysctl_max_map_count)
- return -ENOMEM;
-
-@@ -2024,11 +2494,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
- * work. This now handles partial unmappings.
- * Jeremy Fitzhardinge <jeremy@goop.org>
- */
-+#ifdef CONFIG_PAX_SEGMEXEC
- int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
- {
-+ int ret = __do_munmap(mm, start, len);
-+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
-+ return ret;
-+
-+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
-+}
-+
-+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
-+#else
-+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
-+#endif
-+{
- unsigned long end;
- struct vm_area_struct *vma, *prev, *last;
-
-+ /*
-+ * mm->mmap_sem is required to protect against another thread
-+ * changing the mappings in case we sleep.
-+ */
-+ verify_mm_writelocked(mm);
-+
- if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
- return -EINVAL;
-
-@@ -2103,6 +2592,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
- /* Fix up all other VM information */
- remove_vma_list(mm, vma);
-
-+ track_exec_limit(mm, start, end, 0UL);
-+
- return 0;
- }
-
-@@ -2115,22 +2606,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
-
- profile_munmap(addr);
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
-+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
-+ return -EINVAL;
-+#endif
-+
- down_write(&mm->mmap_sem);
- ret = do_munmap(mm, addr, len);
- up_write(&mm->mmap_sem);
- return ret;
- }
-
--static inline void verify_mm_writelocked(struct mm_struct *mm)
--{
--#ifdef CONFIG_DEBUG_VM
-- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
-- WARN_ON(1);
-- up_read(&mm->mmap_sem);
-- }
--#endif
--}
--
- /*
- * this is really a simplified "do_mmap". it only handles
- * anonymous maps. eventually we may be able to do some
-@@ -2144,6 +2631,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
- struct rb_node ** rb_link, * rb_parent;
- pgoff_t pgoff = addr >> PAGE_SHIFT;
- int error;
-+ unsigned long charged;
-
- len = PAGE_ALIGN(len);
- if (!len)
-@@ -2155,16 +2643,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
-
- flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
-
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+ flags &= ~VM_EXEC;
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+ if (mm->pax_flags & MF_PAX_MPROTECT)
-+ flags &= ~VM_MAYEXEC;
-+#endif
-+
-+ }
-+#endif
-+
- error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
- if (error & ~PAGE_MASK)
- return error;
-
-+ charged = len >> PAGE_SHIFT;
-+
- /*
- * mlock MCL_FUTURE?
- */
- if (mm->def_flags & VM_LOCKED) {
- unsigned long locked, lock_limit;
-- locked = len >> PAGE_SHIFT;
-+ locked = charged;
- locked += mm->locked_vm;
- lock_limit = rlimit(RLIMIT_MEMLOCK);
- lock_limit >>= PAGE_SHIFT;
-@@ -2181,22 +2683,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
- /*
- * Clear old maps. this also does some error checking for us
- */
-- munmap_back:
- vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
- if (vma && vma->vm_start < addr + len) {
- if (do_munmap(mm, addr, len))
- return -ENOMEM;
-- goto munmap_back;
-+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
-+ BUG_ON(vma && vma->vm_start < addr + len);
- }
-
- /* Check against address space limits *after* clearing old maps... */
-- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
-+ if (!may_expand_vm(mm, charged))
- return -ENOMEM;
-
- if (mm->map_count > sysctl_max_map_count)
- return -ENOMEM;
-
-- if (security_vm_enough_memory(len >> PAGE_SHIFT))
-+ if (security_vm_enough_memory(charged))
- return -ENOMEM;
-
- /* Can we just expand an old private anonymous mapping? */
-@@ -2210,7 +2712,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
- */
- vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
- if (!vma) {
-- vm_unacct_memory(len >> PAGE_SHIFT);
-+ vm_unacct_memory(charged);
- return -ENOMEM;
- }
-
-@@ -2224,11 +2726,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
- vma_link(mm, vma, prev, rb_link, rb_parent);
- out:
- perf_event_mmap(vma);
-- mm->total_vm += len >> PAGE_SHIFT;
-+ mm->total_vm += charged;
- if (flags & VM_LOCKED) {
- if (!mlock_vma_pages_range(vma, addr, addr + len))
-- mm->locked_vm += (len >> PAGE_SHIFT);
-+ mm->locked_vm += charged;
- }
-+ track_exec_limit(mm, addr, addr + len, flags);
- return addr;
- }
-
-@@ -2275,8 +2778,10 @@ void exit_mmap(struct mm_struct *mm)
- * Walk the list again, actually closing and freeing it,
- * with preemption enabled, without holding any MM locks.
- */
-- while (vma)
-+ while (vma) {
-+ vma->vm_mirror = NULL;
- vma = remove_vma(vma);
-+ }
-
- BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
- }
-@@ -2290,6 +2795,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
- struct vm_area_struct * __vma, * prev;
- struct rb_node ** rb_link, * rb_parent;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ struct vm_area_struct *vma_m = NULL;
-+#endif
-+
-+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
-+ return -EPERM;
-+
- /*
- * The vm_pgoff of a purely anonymous vma should be irrelevant
- * until its first write fault, when page's anon_vma and index
-@@ -2312,7 +2824,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
- if ((vma->vm_flags & VM_ACCOUNT) &&
- security_vm_enough_memory_mm(mm, vma_pages(vma)))
- return -ENOMEM;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
-+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-+ if (!vma_m)
-+ return -ENOMEM;
-+ }
-+#endif
-+
- vma_link(mm, vma, prev, rb_link, rb_parent);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (vma_m)
-+ BUG_ON(pax_mirror_vma(vma_m, vma));
-+#endif
-+
- return 0;
- }
-
-@@ -2330,6 +2857,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
- struct rb_node **rb_link, *rb_parent;
- struct mempolicy *pol;
-
-+ BUG_ON(vma->vm_mirror);
-+
- /*
- * If anonymous vma has not yet been faulted, update new pgoff
- * to match new location, to increase its chance of merging.
-@@ -2380,6 +2909,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
- return NULL;
- }
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
-+{
-+ struct vm_area_struct *prev_m;
-+ struct rb_node **rb_link_m, *rb_parent_m;
-+ struct mempolicy *pol_m;
-+
-+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
-+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
-+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
-+ *vma_m = *vma;
-+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
-+ if (anon_vma_clone(vma_m, vma))
-+ return -ENOMEM;
-+ pol_m = vma_policy(vma_m);
-+ mpol_get(pol_m);
-+ vma_set_policy(vma_m, pol_m);
-+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
-+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
-+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
-+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
-+ if (vma_m->vm_file)
-+ get_file(vma_m->vm_file);
-+ if (vma_m->vm_ops && vma_m->vm_ops->open)
-+ vma_m->vm_ops->open(vma_m);
-+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
-+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
-+ vma_m->vm_mirror = vma;
-+ vma->vm_mirror = vma_m;
-+ return 0;
-+}
-+#endif
-+
- /*
- * Return true if the calling process may expand its vm space by the passed
- * number of pages
-@@ -2391,6 +2953,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
-
- lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
-
-+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
- if (cur + npages > lim)
- return 0;
- return 1;
-@@ -2461,6 +3024,22 @@ int install_special_mapping(struct mm_struct *mm,
- vma->vm_start = addr;
- vma->vm_end = addr + len;
-
-+#ifdef CONFIG_PAX_MPROTECT
-+ if (mm->pax_flags & MF_PAX_MPROTECT) {
-+#ifndef CONFIG_PAX_MPROTECT_COMPAT
-+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
-+ return -EPERM;
-+ if (!(vm_flags & VM_EXEC))
-+ vm_flags &= ~VM_MAYEXEC;
-+#else
-+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
-+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
-+#endif
-+ else
-+ vm_flags &= ~VM_MAYWRITE;
-+ }
-+#endif
-+
- vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-
-diff --git a/mm/mmu_context.c b/mm/mmu_context.c
-index cf332bc..add7e3a 100644
---- a/mm/mmu_context.c
-+++ b/mm/mmu_context.c
-@@ -33,6 +33,7 @@ void use_mm(struct mm_struct *mm)
- }
- tsk->mm = mm;
- switch_mm(active_mm, mm, tsk);
-+ populate_stack();
- task_unlock(tsk);
-
- if (active_mm != mm)
-diff --git a/mm/mprotect.c b/mm/mprotect.c
-index 5a688a2..fa006d9 100644
---- a/mm/mprotect.c
-+++ b/mm/mprotect.c
-@@ -23,10 +23,16 @@
- #include <linux/mmu_notifier.h>
- #include <linux/migrate.h>
- #include <linux/perf_event.h>
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+#include <linux/elf.h>
-+#endif
-+
- #include <asm/uaccess.h>
- #include <asm/pgtable.h>
- #include <asm/cacheflush.h>
- #include <asm/tlbflush.h>
-+#include <asm/mmu_context.h>
-
- #ifndef pgprot_modify
- static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
-@@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
- flush_tlb_range(vma, start, end);
- }
-
-+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
-+/* called while holding the mmap semaphore for writing except stack expansion */
-+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
-+{
-+ unsigned long oldlimit, newlimit = 0UL;
-+
-+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
-+ return;
-+
-+ spin_lock(&mm->page_table_lock);
-+ oldlimit = mm->context.user_cs_limit;
-+ if ((prot & VM_EXEC) && oldlimit < end)
-+ /* USER_CS limit moved up */
-+ newlimit = end;
-+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
-+ /* USER_CS limit moved down */
-+ newlimit = start;
-+
-+ if (newlimit) {
-+ mm->context.user_cs_limit = newlimit;
-+
-+#ifdef CONFIG_SMP
-+ wmb();
-+ cpumask_clear(&mm->context.cpu_user_cs_mask);
-+ cpumask_set_cpu(smp_processor_id(), &mm->context.cpu_user_cs_mask);
-+#endif
-+
-+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
-+ }
-+ spin_unlock(&mm->page_table_lock);
-+ if (newlimit == end) {
-+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
-+
-+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
-+ if (is_vm_hugetlb_page(vma))
-+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
-+ else
-+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
-+ }
-+}
-+#endif
-+
- int
- mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
- unsigned long start, unsigned long end, unsigned long newflags)
-@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
- int error;
- int dirty_accountable = 0;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ struct vm_area_struct *vma_m = NULL;
-+ unsigned long start_m, end_m;
-+
-+ start_m = start + SEGMEXEC_TASK_SIZE;
-+ end_m = end + SEGMEXEC_TASK_SIZE;
-+#endif
-+
- if (newflags == oldflags) {
- *pprev = vma;
- return 0;
- }
-
-+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
-+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
-+
-+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
-+ return -ENOMEM;
-+
-+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
-+ return -ENOMEM;
-+ }
-+
- /*
- * If we make a private mapping writable we increase our commit;
- * but (without finer accounting) cannot reduce our commit if we
-@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
- }
- }
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
-+ if (start != vma->vm_start) {
-+ error = split_vma(mm, vma, start, 1);
-+ if (error)
-+ goto fail;
-+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
-+ *pprev = (*pprev)->vm_next;
-+ }
-+
-+ if (end != vma->vm_end) {
-+ error = split_vma(mm, vma, end, 0);
-+ if (error)
-+ goto fail;
-+ }
-+
-+ if (pax_find_mirror_vma(vma)) {
-+ error = __do_munmap(mm, start_m, end_m - start_m);
-+ if (error)
-+ goto fail;
-+ } else {
-+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-+ if (!vma_m) {
-+ error = -ENOMEM;
-+ goto fail;
-+ }
-+ vma->vm_flags = newflags;
-+ error = pax_mirror_vma(vma_m, vma);
-+ if (error) {
-+ vma->vm_flags = oldflags;
-+ goto fail;
-+ }
-+ }
-+ }
-+#endif
-+
- /*
- * First try to merge with previous and/or next vma.
- */
-@@ -204,9 +306,21 @@ success:
- * vm_flags and vm_page_prot are protected by the mmap_sem
- * held in write mode.
- */
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
-+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
-+#endif
-+
- vma->vm_flags = newflags;
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+ if (mm->binfmt && mm->binfmt->handle_mprotect)
-+ mm->binfmt->handle_mprotect(vma, newflags);
-+#endif
-+
- vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
-- vm_get_page_prot(newflags));
-+ vm_get_page_prot(vma->vm_flags));
-
- if (vma_wants_writenotify(vma)) {
- vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
-@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
- end = start + len;
- if (end <= start)
- return -ENOMEM;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
-+ if (end > SEGMEXEC_TASK_SIZE)
-+ return -EINVAL;
-+ } else
-+#endif
-+
-+ if (end > TASK_SIZE)
-+ return -EINVAL;
-+
- if (!arch_validate_prot(prot))
- return -EINVAL;
-
-@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
- /*
- * Does the application expect PROT_READ to imply PROT_EXEC:
- */
-- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
-+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
- prot |= PROT_EXEC;
-
- vm_flags = calc_vm_prot_bits(prot);
-@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
- if (start > vma->vm_start)
- prev = vma;
-
-+#ifdef CONFIG_PAX_MPROTECT
-+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
-+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
-+#endif
-+
- for (nstart = start ; ; ) {
- unsigned long newflags;
-
-@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
-
- /* newflags >> 4 shift VM_MAY% in place of VM_% */
- if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
-+ if (prot & (PROT_WRITE | PROT_EXEC))
-+ gr_log_rwxmprotect(vma);
-+
-+ error = -EACCES;
-+ goto out;
-+ }
-+
-+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
- error = -EACCES;
- goto out;
- }
-@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
- error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
- if (error)
- goto out;
-+
-+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
-+
- nstart = tmp;
-
- if (nstart < prev->vm_end)
-diff --git a/mm/mremap.c b/mm/mremap.c
-index d6959cb..c9e1e45 100644
---- a/mm/mremap.c
-+++ b/mm/mremap.c
-@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
- continue;
- pte = ptep_get_and_clear(mm, old_addr, old_pte);
- pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
-+
-+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
-+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
-+ pte = pte_exprotect(pte);
-+#endif
-+
- set_pte_at(mm, new_addr, new_pte, pte);
- }
-
-@@ -251,7 +257,6 @@ static unsigned long move_vma(struct vm_area_struct *vma,
- * If this were a serious issue, we'd add a flag to do_munmap().
- */
- hiwater_vm = mm->hiwater_vm;
-- mm->total_vm += new_len >> PAGE_SHIFT;
- vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
-
- if (do_munmap(mm, old_addr, old_len) < 0) {
-@@ -290,6 +295,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
- if (is_vm_hugetlb_page(vma))
- goto Einval;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (pax_find_mirror_vma(vma))
-+ goto Einval;
-+#endif
-+
- /* We can't remap across vm area boundaries */
- if (old_len > vma->vm_end - addr)
- goto Efault;
-@@ -346,20 +356,25 @@ static unsigned long mremap_to(unsigned long addr,
- unsigned long ret = -EINVAL;
- unsigned long charged = 0;
- unsigned long map_flags;
-+ unsigned long pax_task_size = TASK_SIZE;
-
- if (new_addr & ~PAGE_MASK)
- goto out;
-
-- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
-+ pax_task_size = SEGMEXEC_TASK_SIZE;
-+#endif
-+
-+ pax_task_size -= PAGE_SIZE;
-+
-+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
- goto out;
-
- /* Check if the location we're moving into overlaps the
- * old location at all, and fail if it does.
- */
-- if ((new_addr <= addr) && (new_addr+new_len) > addr)
-- goto out;
--
-- if ((addr <= new_addr) && (addr+old_len) > new_addr)
-+ if (addr + old_len > new_addr && new_addr + new_len > addr)
- goto out;
-
- ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
-@@ -431,6 +446,7 @@ unsigned long do_mremap(unsigned long addr,
- struct vm_area_struct *vma;
- unsigned long ret = -EINVAL;
- unsigned long charged = 0;
-+ unsigned long pax_task_size = TASK_SIZE;
-
- if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
- goto out;
-@@ -449,6 +465,17 @@ unsigned long do_mremap(unsigned long addr,
- if (!new_len)
- goto out;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
-+ pax_task_size = SEGMEXEC_TASK_SIZE;
-+#endif
-+
-+ pax_task_size -= PAGE_SIZE;
-+
-+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
-+ old_len > pax_task_size || addr > pax_task_size-old_len)
-+ goto out;
-+
- if (flags & MREMAP_FIXED) {
- if (flags & MREMAP_MAYMOVE)
- ret = mremap_to(addr, old_len, new_addr, new_len);
-@@ -490,7 +517,6 @@ unsigned long do_mremap(unsigned long addr,
- goto out;
- }
-
-- mm->total_vm += pages;
- vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
- if (vma->vm_flags & VM_LOCKED) {
- mm->locked_vm += pages;
-@@ -498,6 +524,7 @@ unsigned long do_mremap(unsigned long addr,
- addr + new_len);
- }
- ret = addr;
-+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
- goto out;
- }
- }
-@@ -524,7 +551,13 @@ unsigned long do_mremap(unsigned long addr,
- ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
- if (ret)
- goto out;
-+
-+ map_flags = vma->vm_flags;
- ret = move_vma(vma, addr, old_len, new_len, new_addr);
-+ if (!(ret & ~PAGE_MASK)) {
-+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
-+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
-+ }
- }
- out:
- if (ret & ~PAGE_MASK)
-diff --git a/mm/nobootmem.c b/mm/nobootmem.c
-index 07c08c4..8d4ad26 100644
---- a/mm/nobootmem.c
-+++ b/mm/nobootmem.c
-@@ -109,19 +109,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
- unsigned long __init free_all_memory_core_early(int nodeid)
- {
- int i;
-- u64 start, end;
-+ u64 start, end, startrange, endrange;
- unsigned long count = 0;
-- struct range *range = NULL;
-+ struct range *range = NULL, rangerange = { 0, 0 };
- int nr_range;
-
- nr_range = get_free_all_memory_range(&range, nodeid);
-+ startrange = __pa(range) >> PAGE_SHIFT;
-+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
-
- for (i = 0; i < nr_range; i++) {
- start = range[i].start;
- end = range[i].end;
-+ if (start <= endrange && startrange < end) {
-+ BUG_ON(rangerange.start | rangerange.end);
-+ rangerange = range[i];
-+ continue;
-+ }
- count += end - start;
- __free_pages_memory(start, end);
- }
-+ start = rangerange.start;
-+ end = rangerange.end;
-+ count += end - start;
-+ __free_pages_memory(start, end);
-
- return count;
- }
-diff --git a/mm/nommu.c b/mm/nommu.c
-index d0cb11f..e0a7fea 100644
---- a/mm/nommu.c
-+++ b/mm/nommu.c
-@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
- int sysctl_overcommit_ratio = 50; /* default is 50% */
- int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
- int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
--int heap_stack_gap = 0;
-
- atomic_long_t mmap_pages_allocated;
-
-@@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
- EXPORT_SYMBOL(find_vma);
-
- /*
-- * find a VMA
-- * - we don't extend stack VMAs under NOMMU conditions
-- */
--struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
--{
-- return find_vma(mm, addr);
--}
--
--/*
- * expand a stack to a given address
- * - not supported under NOMMU conditions
- */
-@@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
-
- /* most fields are the same, copy all, and then fixup */
- *new = *vma;
-+ INIT_LIST_HEAD(&new->anon_vma_chain);
- *region = *vma->vm_region;
- new->vm_region = region;
-
-@@ -1971,8 +1962,8 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
- }
- EXPORT_SYMBOL(filemap_fault);
-
--static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
-- unsigned long addr, void *buf, int len, int write)
-+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
-+ unsigned long addr, void *buf, size_t len, int write)
- {
- struct vm_area_struct *vma;
-
-@@ -2013,8 +2004,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
- *
- * The caller must hold a reference on @mm.
- */
--int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-- void *buf, int len, int write)
-+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
-+ void *buf, size_t len, int write)
- {
- return __access_remote_vm(NULL, mm, addr, buf, len, write);
- }
-@@ -2023,7 +2014,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
- * Access another process' address space.
- * - source/target buffer must be kernel space
- */
--int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
-+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
- {
- struct mm_struct *mm;
-
-diff --git a/mm/page-writeback.c b/mm/page-writeback.c
-index 1bf1f74..5e27559 100644
---- a/mm/page-writeback.c
-+++ b/mm/page-writeback.c
-@@ -522,7 +522,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
- * card's bdi_dirty may rush to many times higher than bdi_setpoint.
- * - the bdi dirty thresh drops quickly due to change of JBOD workload
- */
--static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
-+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
- unsigned long thresh,
- unsigned long bg_thresh,
- unsigned long dirty,
-@@ -1373,7 +1373,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
- return NOTIFY_DONE;
- }
-
--static struct notifier_block __cpuinitdata ratelimit_nb = {
-+static struct notifier_block ratelimit_nb = {
- .notifier_call = ratelimit_handler,
- .next = NULL,
- };
-diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 62a7fa23..aaa6823 100644
---- a/mm/page_alloc.c
-+++ b/mm/page_alloc.c
-@@ -57,6 +57,7 @@
- #include <linux/ftrace_event.h>
- #include <linux/memcontrol.h>
- #include <linux/prefetch.h>
-+#include <linux/random.h>
-
- #include <asm/tlbflush.h>
- #include <asm/div64.h>
-@@ -341,7 +342,7 @@ out:
- * This usage means that zero-order pages may not be compound.
- */
-
--static void free_compound_page(struct page *page)
-+void free_compound_page(struct page *page)
- {
- __free_pages_ok(page, compound_order(page));
- }
-@@ -654,6 +655,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
- int i;
- int bad = 0;
-
-+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ unsigned long index = 1UL << order;
-+#endif
-+
- trace_mm_page_free_direct(page, order);
- kmemcheck_free_shadow(page, order);
-
-@@ -669,6 +674,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
- debug_check_no_obj_freed(page_address(page),
- PAGE_SIZE << order);
- }
-+
-+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ for (; index; --index)
-+ sanitize_highpage(page + index - 1);
-+#endif
-+
- arch_free_page(page, order);
- kernel_map_pages(page, 1 << order, 0);
-
-@@ -692,6 +703,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
- local_irq_restore(flags);
- }
-
-+#ifdef CONFIG_PAX_LATENT_ENTROPY
-+bool __meminitdata extra_latent_entropy;
-+
-+static int __init setup_pax_extra_latent_entropy(char *str)
-+{
-+ extra_latent_entropy = true;
-+ return 0;
-+}
-+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
-+
-+volatile u64 latent_entropy __latent_entropy;
-+EXPORT_SYMBOL(latent_entropy);
-+#endif
-+
- /*
- * permit the bootmem allocator to evade page validation on high-order frees
- */
-@@ -715,6 +740,19 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
- set_page_count(p, 0);
- }
-
-+#ifdef CONFIG_PAX_LATENT_ENTROPY
-+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
-+ u64 hash = 0;
-+ size_t index, end = PAGE_SIZE * (1UL << order) / sizeof hash;
-+ const u64 *data = lowmem_page_address(page);
-+
-+ for (index = 0; index < end; index++)
-+ hash ^= hash + data[index];
-+ latent_entropy ^= hash;
-+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
-+ }
-+#endif
-+
- set_page_refcounted(page);
- __free_pages(page, order);
- }
-@@ -784,8 +822,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
- arch_alloc_page(page, order);
- kernel_map_pages(page, 1 << order, 1);
-
-+#ifndef CONFIG_PAX_MEMORY_SANITIZE
- if (gfp_flags & __GFP_ZERO)
- prep_zero_page(page, order, gfp_flags);
-+#endif
-
- if (order && (gfp_flags & __GFP_COMP))
- prep_compound_page(page, order);
-diff --git a/mm/percpu.c b/mm/percpu.c
-index e29a1c4..e7f90f0 100644
---- a/mm/percpu.c
-+++ b/mm/percpu.c
-@@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
- static unsigned int pcpu_high_unit_cpu __read_mostly;
-
- /* the address of the first chunk which starts with the kernel static area */
--void *pcpu_base_addr __read_mostly;
-+void *pcpu_base_addr __read_only;
- EXPORT_SYMBOL_GPL(pcpu_base_addr);
-
- static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
-diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
-index 70e814a..38e1f43 100644
---- a/mm/process_vm_access.c
-+++ b/mm/process_vm_access.c
-@@ -13,6 +13,7 @@
- #include <linux/uio.h>
- #include <linux/sched.h>
- #include <linux/highmem.h>
-+#include <linux/security.h>
- #include <linux/ptrace.h>
- #include <linux/slab.h>
- #include <linux/syscalls.h>
-@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
- size_t iov_l_curr_offset = 0;
- ssize_t iov_len;
-
-+ return -ENOSYS; // PaX: until properly audited
-+
- /*
- * Work out how many pages of struct pages we're going to need
- * when eventually calling get_user_pages
- */
- for (i = 0; i < riovcnt; i++) {
- iov_len = rvec[i].iov_len;
-- if (iov_len > 0) {
-- nr_pages_iov = ((unsigned long)rvec[i].iov_base
-- + iov_len)
-- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
-- / PAGE_SIZE + 1;
-- nr_pages = max(nr_pages, nr_pages_iov);
-- }
-+ if (iov_len <= 0)
-+ continue;
-+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
-+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
-+ nr_pages = max(nr_pages, nr_pages_iov);
- }
-
- if (nr_pages == 0)
-@@ -298,23 +299,23 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
- goto free_proc_pages;
- }
-
-- task_lock(task);
-- if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
-- task_unlock(task);
-+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
- rc = -EPERM;
- goto put_task_struct;
- }
-- mm = task->mm;
-
-- if (!mm || (task->flags & PF_KTHREAD)) {
-- task_unlock(task);
-- rc = -EINVAL;
-+ mm = mm_access(task, PTRACE_MODE_ATTACH);
-+ if (!mm || IS_ERR(mm)) {
-+ rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
-+ /*
-+	 * Explicitly map EACCES to EPERM as EPERM is a more
-+	 * appropriate error code for process_vw_readv/writev
-+ */
-+ if (rc == -EACCES)
-+ rc = -EPERM;
- goto put_task_struct;
- }
-
-- atomic_inc(&mm->mm_users);
-- task_unlock(task);
--
- for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
- rc = process_vm_rw_single_vec(
- (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
-diff --git a/mm/readahead.c b/mm/readahead.c
-index cbcbb02..dfdc1de 100644
---- a/mm/readahead.c
-+++ b/mm/readahead.c
-@@ -342,7 +342,7 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
- * - length of the sequential read sequence, or
- * - thrashing threshold in memory tight systems
- */
--static pgoff_t count_history_pages(struct address_space *mapping,
-+static pgoff_t __intentional_overflow(-1) count_history_pages(struct address_space *mapping,
- struct file_ra_state *ra,
- pgoff_t offset, unsigned long max)
- {
-diff --git a/mm/rmap.c b/mm/rmap.c
-index 98f0bf7f..b7d8cdc 100644
---- a/mm/rmap.c
-+++ b/mm/rmap.c
-@@ -156,6 +156,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
- struct anon_vma *anon_vma = vma->anon_vma;
- struct anon_vma_chain *avc;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ struct anon_vma_chain *avc_m = NULL;
-+#endif
-+
- might_sleep();
- if (unlikely(!anon_vma)) {
- struct mm_struct *mm = vma->vm_mm;
-@@ -165,6 +169,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
- if (!avc)
- goto out_enomem;
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
-+ if (!avc_m)
-+ goto out_enomem_free_avc;
-+#endif
-+
- anon_vma = find_mergeable_anon_vma(vma);
- allocated = NULL;
- if (!anon_vma) {
-@@ -178,6 +188,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
- /* page_table_lock to protect against threads */
- spin_lock(&mm->page_table_lock);
- if (likely(!vma->anon_vma)) {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
-+
-+ if (vma_m) {
-+ BUG_ON(vma_m->anon_vma);
-+ vma_m->anon_vma = anon_vma;
-+ avc_m->anon_vma = anon_vma;
-+ avc_m->vma = vma;
-+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
-+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
-+ avc_m = NULL;
-+ }
-+#endif
-+
- vma->anon_vma = anon_vma;
- avc->anon_vma = anon_vma;
- avc->vma = vma;
-@@ -193,12 +218,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
-
- if (unlikely(allocated))
- put_anon_vma(allocated);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (unlikely(avc_m))
-+ anon_vma_chain_free(avc_m);
-+#endif
-+
- if (unlikely(avc))
- anon_vma_chain_free(avc);
- }
- return 0;
-
- out_enomem_free_avc:
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+ if (avc_m)
-+ anon_vma_chain_free(avc_m);
-+#endif
-+
- anon_vma_chain_free(avc);
- out_enomem:
- return -ENOMEM;
-@@ -257,7 +294,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
- * good chance of avoiding scanning the whole hierarchy when it searches where
- * page is mapped.
- */
--int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
-+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
- {
- struct anon_vma_chain *avc, *pavc;
- struct anon_vma *root = NULL;
-@@ -311,7 +348,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
- * the corresponding VMA in the parent process is attached to.
- * Returns 0 on success, non-zero on failure.
- */
--int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
-+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
- {
- struct anon_vma_chain *avc;
- struct anon_vma *anon_vma;
-@@ -429,8 +466,10 @@ static void anon_vma_ctor(void *data)
- void __init anon_vma_init(void)
- {
- anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
-- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
-- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
-+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
-+ anon_vma_ctor);
-+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
-+ SLAB_PANIC|SLAB_NO_SANITIZE);
- }
-
- /*
-diff --git a/mm/shmem.c b/mm/shmem.c
-index 83efac6..7104960 100644
---- a/mm/shmem.c
-+++ b/mm/shmem.c
-@@ -31,7 +31,7 @@
- #include <linux/export.h>
- #include <linux/swap.h>
-
--static struct vfsmount *shm_mnt;
-+struct vfsmount *shm_mnt;
-
- #ifdef CONFIG_SHMEM
- /*
-@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
- #define BOGO_DIRENT_SIZE 20
-
- /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
--#define SHORT_SYMLINK_LEN 128
-+#define SHORT_SYMLINK_LEN 64
-
- /*
- * vmtruncate_range() communicates with shmem_fault via
-@@ -1926,6 +1926,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
- static int shmem_xattr_validate(const char *name)
- {
- struct { const char *prefix; size_t len; } arr[] = {
-+
-+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
-+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
-+#endif
-+
- { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
- { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
- };
-@@ -1979,6 +1984,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
- if (err)
- return err;
-
-+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
-+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
-+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
-+ return -EOPNOTSUPP;
-+ if (size > 8)
-+ return -EINVAL;
-+ }
-+#endif
-+
- if (size == 0)
- value = ""; /* empty EA, do not remove */
-
-@@ -2312,8 +2326,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
- int err = -ENOMEM;
-
- /* Round up to L1_CACHE_BYTES to resist false sharing */
-- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
-- L1_CACHE_BYTES), GFP_KERNEL);
-+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
- if (!sbinfo)
- return -ENOMEM;
-
-diff --git a/mm/slab.c b/mm/slab.c
-index aea5e42..98a44a8 100644
---- a/mm/slab.c
-+++ b/mm/slab.c
-@@ -151,7 +151,7 @@
-
- /* Legal flag mask for kmem_cache_create(). */
- #if DEBUG
--# define CREATE_MASK (SLAB_RED_ZONE | \
-+# define CREATE_MASK (SLAB_USERCOPY | SLAB_NO_SANITIZE | SLAB_RED_ZONE | \
- SLAB_POISON | SLAB_HWCACHE_ALIGN | \
- SLAB_CACHE_DMA | \
- SLAB_STORE_USER | \
-@@ -159,8 +159,8 @@
- SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
- SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
- #else
--# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
-- SLAB_CACHE_DMA | \
-+# define CREATE_MASK (SLAB_USERCOPY | SLAB_NO_SANITIZE | \
-+ SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
- SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
- SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
- SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
-@@ -288,7 +288,7 @@ struct kmem_list3 {
- * Need this for bootstrapping a per node allocator.
- */
- #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
--static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
-+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
- #define CACHE_CACHE 0
- #define SIZE_AC MAX_NUMNODES
- #define SIZE_L3 (2 * MAX_NUMNODES)
-@@ -389,10 +389,12 @@ static void kmem_list3_init(struct kmem_list3 *parent)
- if ((x)->max_freeable < i) \
- (x)->max_freeable = i; \
- } while (0)
--#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
--#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
--#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
--#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
-+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
-+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
-+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
-+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
-+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
-+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
- #else
- #define STATS_INC_ACTIVE(x) do { } while (0)
- #define STATS_DEC_ACTIVE(x) do { } while (0)
-@@ -409,6 +411,8 @@ static void kmem_list3_init(struct kmem_list3 *parent)
- #define STATS_INC_ALLOCMISS(x) do { } while (0)
- #define STATS_INC_FREEHIT(x) do { } while (0)
- #define STATS_INC_FREEMISS(x) do { } while (0)
-+#define STATS_INC_SANITIZED(x) do { } while (0)
-+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
- #endif
-
- #if DEBUG
-@@ -538,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
- * reciprocal_divide(offset, cache->reciprocal_buffer_size)
- */
- static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-- const struct slab *slab, void *obj)
-+ const struct slab *slab, const void *obj)
- {
- u32 offset = (obj - slab->s_mem);
- return reciprocal_divide(offset, cache->reciprocal_buffer_size);
-@@ -559,12 +563,13 @@ EXPORT_SYMBOL(malloc_sizes);
- struct cache_names {
- char *name;
- char *name_dma;
-+ char *name_usercopy;
- };
-
- static struct cache_names __initdata cache_names[] = {
--#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
-+#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
- #include <linux/kmalloc_sizes.h>
-- {NULL,}
-+ {NULL}
- #undef CACHE
- };
-
-@@ -752,6 +757,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
- if (unlikely(gfpflags & GFP_DMA))
- return csizep->cs_dmacachep;
- #endif
-+
-+#ifdef CONFIG_PAX_USERCOPY_SLABS
-+ if (unlikely(gfpflags & GFP_USERCOPY))
-+ return csizep->cs_usercopycachep;
-+#endif
-+
- return csizep->cs_cachep;
- }
-
-@@ -1370,7 +1381,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
- return notifier_from_errno(err);
- }
-
--static struct notifier_block __cpuinitdata cpucache_notifier = {
-+static struct notifier_block cpucache_notifier = {
- &cpuup_callback, NULL, 0
- };
-
-@@ -1572,7 +1583,7 @@ void __init kmem_cache_init(void)
- sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
- sizes[INDEX_AC].cs_size,
- ARCH_KMALLOC_MINALIGN,
-- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
- NULL);
-
- if (INDEX_AC != INDEX_L3) {
-@@ -1580,7 +1591,7 @@ void __init kmem_cache_init(void)
- kmem_cache_create(names[INDEX_L3].name,
- sizes[INDEX_L3].cs_size,
- ARCH_KMALLOC_MINALIGN,
-- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
- NULL);
- }
-
-@@ -1598,7 +1609,7 @@ void __init kmem_cache_init(void)
- sizes->cs_cachep = kmem_cache_create(names->name,
- sizes->cs_size,
- ARCH_KMALLOC_MINALIGN,
-- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
- NULL);
- }
- #ifdef CONFIG_ZONE_DMA
-@@ -1610,6 +1621,16 @@ void __init kmem_cache_init(void)
- SLAB_PANIC,
- NULL);
- #endif
-+
-+#ifdef CONFIG_PAX_USERCOPY_SLABS
-+ sizes->cs_usercopycachep = kmem_cache_create(
-+ names->name_usercopy,
-+ sizes->cs_size,
-+ ARCH_KMALLOC_MINALIGN,
-+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
-+ NULL);
-+#endif
-+
- sizes++;
- names++;
- }
-@@ -2284,6 +2305,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
- */
- BUG_ON(flags & ~CREATE_MASK);
-
-+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
-+ flags |= SLAB_NO_SANITIZE;
-+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
-+ flags &= ~SLAB_NO_SANITIZE;
-+#endif
-+
- /*
- * Check that size is in terms of words. This is needed to avoid
- * unaligned accesses for some archs when redzoning is used, and makes
-@@ -3662,6 +3690,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
- struct array_cache *ac = cpu_cache_get(cachep);
-
- check_irq_off();
-+
-+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
-+ STATS_INC_NOT_SANITIZED(cachep);
-+ else {
-+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, obj_size(cachep));
-+
-+ if (cachep->ctor)
-+ cachep->ctor(objp);
-+
-+ STATS_INC_SANITIZED(cachep);
-+ }
-+#endif
-+
- kmemleak_free_recursive(objp, cachep->flags);
- objp = cache_free_debugcheck(cachep, objp, caller);
-
-@@ -3879,6 +3921,7 @@ void kfree(const void *objp)
-
- if (unlikely(ZERO_OR_NULL_PTR(objp)))
- return;
-+ VM_BUG_ON(!virt_addr_valid(objp));
- local_irq_save(flags);
- kfree_debugcheck(objp);
- c = virt_to_cache(objp);
-@@ -4216,6 +4259,9 @@ static void print_slabinfo_header(struct seq_file *m)
- seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
- "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
- seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
-+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
-+#endif
- #endif
- seq_putc(m, '\n');
- }
-@@ -4325,14 +4371,22 @@ static int s_show(struct seq_file *m, void *p)
- }
- /* cpu stats */
- {
-- unsigned long allochit = atomic_read(&cachep->allochit);
-- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
-- unsigned long freehit = atomic_read(&cachep->freehit);
-- unsigned long freemiss = atomic_read(&cachep->freemiss);
-+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
-+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
-+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
-+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
-
- seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
- allochit, allocmiss, freehit, freemiss);
- }
-+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ {
-+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
-+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
-+
-+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
-+ }
-+#endif
- #endif
- seq_putc(m, '\n');
- return 0;
-@@ -4587,13 +4641,71 @@ static int __init slab_proc_init(void)
- {
- proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
- #ifdef CONFIG_DEBUG_SLAB_LEAK
-- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
-+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
- #endif
- return 0;
- }
- module_init(slab_proc_init);
- #endif
-
-+bool is_usercopy_object(const void *ptr)
-+{
-+ struct page *page;
-+ struct kmem_cache *cachep;
-+
-+ if (ZERO_OR_NULL_PTR(ptr))
-+ return false;
-+
-+ if (!slab_is_available())
-+ return false;
-+
-+ if (!virt_addr_valid(ptr))
-+ return false;
-+
-+ page = virt_to_head_page(ptr);
-+
-+ if (!PageSlab(page))
-+ return false;
-+
-+ cachep = page_get_cache(page);
-+ return cachep->flags & SLAB_USERCOPY;
-+}
-+
-+#ifdef CONFIG_PAX_USERCOPY
-+const char *check_heap_object(const void *ptr, unsigned long n)
-+{
-+ struct page *page;
-+ struct kmem_cache *cachep;
-+ struct slab *slabp;
-+ unsigned int objnr;
-+ unsigned long offset;
-+
-+ if (ZERO_OR_NULL_PTR(ptr))
-+ return "<null>";
-+
-+ if (!virt_addr_valid(ptr))
-+ return NULL;
-+
-+ page = virt_to_head_page(ptr);
-+
-+ if (!PageSlab(page))
-+ return NULL;
-+
-+ cachep = page_get_cache(page);
-+ if (!(cachep->flags & SLAB_USERCOPY))
-+ return cachep->name;
-+
-+ slabp = page_get_slab(page);
-+ objnr = obj_to_index(cachep, slabp, ptr);
-+ BUG_ON(objnr >= cachep->num);
-+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
-+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
-+ return NULL;
-+
-+ return cachep->name;
-+}
-+#endif
-+
- /**
- * ksize - get the actual amount of memory allocated for a given object
- * @objp: Pointer to the object
-diff --git a/mm/slob.c b/mm/slob.c
-index 8105be4..93fb21c 100644
---- a/mm/slob.c
-+++ b/mm/slob.c
-@@ -29,7 +29,7 @@
- * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
- * alloc_pages() directly, allocating compound pages so the page order
- * does not have to be separately tracked, and also stores the exact
-- * allocation size in page->private so that it can be used to accurately
-+ * allocation size in slob_page->size so that it can be used to accurately
- * provide ksize(). These objects are detected in kfree() because slob_page()
- * is false for them.
- *
-@@ -58,6 +58,7 @@
- */
-
- #include <linux/kernel.h>
-+#include <linux/sched.h>
- #include <linux/slab.h>
- #include <linux/mm.h>
- #include <linux/swap.h> /* struct reclaim_state */
-@@ -91,6 +92,13 @@ struct slob_block {
- };
- typedef struct slob_block slob_t;
-
-+struct kmem_cache {
-+ unsigned int size, align;
-+ unsigned long flags;
-+ const char *name;
-+ void (*ctor)(void *);
-+};
-+
- /*
- * We use struct page fields to manage some slob allocation aspects,
- * however to avoid the horrible mess in include/linux/mm_types.h, we'll
-@@ -100,9 +108,8 @@ struct slob_page {
- union {
- struct {
- unsigned long flags; /* mandatory */
-- atomic_t _count; /* mandatory */
- slobidx_t units; /* free units left in page */
-- unsigned long pad[2];
-+ unsigned long size; /* size when >=PAGE_SIZE */
- slob_t *free; /* first free slob_t in page */
- struct list_head list; /* linked list of free pages */
- };
-@@ -135,7 +142,7 @@ static LIST_HEAD(free_slob_large);
- */
- static inline int is_slob_page(struct slob_page *sp)
- {
-- return PageSlab((struct page *)sp);
-+ return PageSlab((struct page *)sp) && !sp->size;
- }
-
- static inline void set_slob_page(struct slob_page *sp)
-@@ -150,7 +157,7 @@ static inline void clear_slob_page(struct slob_page *sp)
-
- static inline struct slob_page *slob_page(const void *addr)
- {
-- return (struct slob_page *)virt_to_page(addr);
-+ return (struct slob_page *)virt_to_head_page(addr);
- }
-
- /*
-@@ -210,7 +217,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
- /*
- * Return the size of a slob block.
- */
--static slobidx_t slob_units(slob_t *s)
-+static slobidx_t slob_units(const slob_t *s)
- {
- if (s->units > 0)
- return s->units;
-@@ -220,7 +227,7 @@ static slobidx_t slob_units(slob_t *s)
- /*
- * Return the next free slob block pointer after this one.
- */
--static slob_t *slob_next(slob_t *s)
-+static slob_t *slob_next(const slob_t *s)
- {
- slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
- slobidx_t next;
-@@ -235,7 +242,7 @@ static slob_t *slob_next(slob_t *s)
- /*
- * Returns true if s is the last free block in its page.
- */
--static int slob_last(slob_t *s)
-+static int slob_last(const slob_t *s)
- {
- return !((unsigned long)slob_next(s) & ~PAGE_MASK);
- }
-@@ -254,6 +261,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
- if (!page)
- return NULL;
-
-+ set_slob_page(page);
- return page_address(page);
- }
-
-@@ -370,11 +378,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
- if (!b)
- return NULL;
- sp = slob_page(b);
-- set_slob_page(sp);
-
- spin_lock_irqsave(&slob_lock, flags);
- sp->units = SLOB_UNITS(PAGE_SIZE);
- sp->free = b;
-+ sp->size = 0;
- INIT_LIST_HEAD(&sp->list);
- set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
- set_slob_page_free(sp, slob_list);
-@@ -390,7 +398,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
- /*
- * slob_free: entry point into the slob allocator.
- */
--static void slob_free(void *block, int size)
-+static void slob_free(struct kmem_cache *c, void *block, int size)
- {
- struct slob_page *sp;
- slob_t *prev, *next, *b = (slob_t *)block;
-@@ -418,6 +426,11 @@ static void slob_free(void *block, int size)
- return;
- }
-
-+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
-+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
-+#endif
-+
- if (!slob_page_free(sp)) {
- /* This slob page is about to become partially free. Easy! */
- sp->units = units;
-@@ -476,10 +489,9 @@ out:
- * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
- */
-
--void *__kmalloc_node(size_t size, gfp_t gfp, int node)
-+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
- {
-- unsigned int *m;
-- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
-+ slob_t *m;
- void *ret;
-
- gfp &= gfp_allowed_mask;
-@@ -494,7 +506,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
-
- if (!m)
- return NULL;
-- *m = size;
-+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
-+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
-+ m[0].units = size;
-+ m[1].units = align;
- ret = (void *)m + align;
-
- trace_kmalloc_node(_RET_IP_, ret,
-@@ -506,16 +521,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
- gfp |= __GFP_COMP;
- ret = slob_new_pages(gfp, order, node);
- if (ret) {
-- struct page *page;
-- page = virt_to_page(ret);
-- page->private = size;
-+ struct slob_page *sp;
-+ sp = slob_page(ret);
-+ sp->size = size;
- }
-
- trace_kmalloc_node(_RET_IP_, ret,
- size, PAGE_SIZE << order, gfp, node);
- }
-
-- kmemleak_alloc(ret, size, 1, gfp);
-+ return ret;
-+}
-+
-+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
-+{
-+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
-+ void *ret = __kmalloc_node_align(size, gfp, node, align);
-+
-+ if (!ZERO_OR_NULL_PTR(ret))
-+ kmemleak_alloc(ret, size, 1, gfp);
- return ret;
- }
- EXPORT_SYMBOL(__kmalloc_node);
-@@ -530,16 +554,92 @@ void kfree(const void *block)
- return;
- kmemleak_free(block);
-
-+ VM_BUG_ON(!virt_addr_valid(block));
- sp = slob_page(block);
- if (is_slob_page(sp)) {
- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
-- unsigned int *m = (unsigned int *)(block - align);
-- slob_free(m, *m + align);
-- } else
-+ slob_t *m = (slob_t *)(block - align);
-+ slob_free(NULL, m, m[0].units + align);
-+ } else {
-+ clear_slob_page(sp);
-+ free_slob_page(sp);
-+ sp->size = 0;
- put_page(&sp->page);
-+ }
- }
- EXPORT_SYMBOL(kfree);
-
-+bool is_usercopy_object(const void *ptr)
-+{
-+ if (!slab_is_available())
-+ return false;
-+
-+ // PAX: TODO
-+
-+ return false;
-+}
-+
-+#ifdef CONFIG_PAX_USERCOPY
-+const char *check_heap_object(const void *ptr, unsigned long n)
-+{
-+ struct slob_page *sp;
-+ const slob_t *free;
-+ const void *base;
-+ unsigned long flags;
-+
-+ if (ZERO_OR_NULL_PTR(ptr))
-+ return "<null>";
-+
-+ if (!virt_addr_valid(ptr))
-+ return NULL;
-+
-+ sp = slob_page(ptr);
-+ if (!PageSlab((struct page *)sp))
-+ return NULL;
-+
-+ if (sp->size) {
-+ base = page_address(&sp->page);
-+ if (base <= ptr && n <= sp->size - (ptr - base))
-+ return NULL;
-+ return "<slob>";
-+ }
-+
-+ /* some tricky double walking to find the chunk */
-+ spin_lock_irqsave(&slob_lock, flags);
-+ base = (void *)((unsigned long)ptr & PAGE_MASK);
-+ free = sp->free;
-+
-+ while ((void *)free <= ptr) {
-+ base = free + slob_units(free);
-+ free = slob_next(free);
-+ }
-+
-+ while (base < (void *)free) {
-+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
-+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
-+ int offset;
-+
-+ if (ptr < base + align)
-+ break;
-+
-+ offset = ptr - base - align;
-+ if (offset >= m) {
-+ base += size;
-+ continue;
-+ }
-+
-+ if (n > m - offset)
-+ break;
-+
-+ spin_unlock_irqrestore(&slob_lock, flags);
-+ return NULL;
-+ }
-+
-+ spin_unlock_irqrestore(&slob_lock, flags);
-+ return "<slob>";
-+}
-+#endif
-+
- /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
- size_t ksize(const void *block)
- {
-@@ -552,27 +652,32 @@ size_t ksize(const void *block)
- sp = slob_page(block);
- if (is_slob_page(sp)) {
- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
-- unsigned int *m = (unsigned int *)(block - align);
-- return SLOB_UNITS(*m) * SLOB_UNIT;
-+ slob_t *m = (slob_t *)(block - align);
-+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
- } else
-- return sp->page.private;
-+ return sp->size;
- }
- EXPORT_SYMBOL(ksize);
-
--struct kmem_cache {
-- unsigned int size, align;
-- unsigned long flags;
-- const char *name;
-- void (*ctor)(void *);
--};
--
- struct kmem_cache *kmem_cache_create(const char *name, size_t size,
- size_t align, unsigned long flags, void (*ctor)(void *))
- {
- struct kmem_cache *c;
-
-+#ifdef CONFIG_PAX_USERCOPY_SLABS
-+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
-+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
-+#else
- c = slob_alloc(sizeof(struct kmem_cache),
- GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
-+#endif
-+
-+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
-+ flags |= SLAB_NO_SANITIZE;
-+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
-+ flags &= ~SLAB_NO_SANITIZE;
-+#endif
-
- if (c) {
- c->name = name;
-@@ -602,7 +707,7 @@ void kmem_cache_destroy(struct kmem_cache *c)
- kmemleak_free(c);
- if (c->flags & SLAB_DESTROY_BY_RCU)
- rcu_barrier();
-- slob_free(c, sizeof(struct kmem_cache));
-+ slob_free(NULL, c, sizeof(struct kmem_cache));
- }
- EXPORT_SYMBOL(kmem_cache_destroy);
-
-@@ -614,17 +719,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
-
- lockdep_trace_alloc(flags);
-
-+#ifdef CONFIG_PAX_USERCOPY_SLABS
-+ b = __kmalloc_node_align(c->size, flags, node, c->align);
-+#else
- if (c->size < PAGE_SIZE) {
- b = slob_alloc(c->size, flags, c->align, node);
- trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
- SLOB_UNITS(c->size) * SLOB_UNIT,
- flags, node);
- } else {
-+ struct slob_page *sp;
-+
- b = slob_new_pages(flags, get_order(c->size), node);
-+ sp = slob_page(b);
-+ sp->size = c->size;
- trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
- PAGE_SIZE << get_order(c->size),
- flags, node);
- }
-+#endif
-
- if (c->ctor)
- c->ctor(b);
-@@ -634,12 +747,18 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
- }
- EXPORT_SYMBOL(kmem_cache_alloc_node);
-
--static void __kmem_cache_free(void *b, int size)
-+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
- {
-- if (size < PAGE_SIZE)
-- slob_free(b, size);
-- else
-+ struct slob_page *sp = slob_page(b);
-+
-+ if (is_slob_page(sp))
-+ slob_free(c, b, size);
-+ else {
-+ clear_slob_page(sp);
-+ free_slob_page(sp);
-+ sp->size = 0;
- slob_free_pages(b, get_order(size));
-+ }
- }
-
- static void kmem_rcu_free(struct rcu_head *head)
-@@ -647,22 +766,36 @@ static void kmem_rcu_free(struct rcu_head *head)
- struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
- void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
-
-- __kmem_cache_free(b, slob_rcu->size);
-+ __kmem_cache_free(NULL, b, slob_rcu->size);
- }
-
- void kmem_cache_free(struct kmem_cache *c, void *b)
- {
-+ int size = c->size;
-+
-+#ifdef CONFIG_PAX_USERCOPY_SLABS
-+ if (size + c->align < PAGE_SIZE) {
-+ size += c->align;
-+ b -= c->align;
-+ }
-+#endif
-+
- kmemleak_free_recursive(b, c->flags);
- if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
- struct slob_rcu *slob_rcu;
-- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
-- slob_rcu->size = c->size;
-+ slob_rcu = b + (size - sizeof(struct slob_rcu));
-+ slob_rcu->size = size;
- call_rcu(&slob_rcu->head, kmem_rcu_free);
- } else {
-- __kmem_cache_free(b, c->size);
-+ __kmem_cache_free(c, b, size);
- }
-
-+#ifdef CONFIG_PAX_USERCOPY_SLABS
-+ trace_kfree(_RET_IP_, b);
-+#else
- trace_kmem_cache_free(_RET_IP_, b);
-+#endif
-+
- }
- EXPORT_SYMBOL(kmem_cache_free);
-
-diff --git a/mm/slub.c b/mm/slub.c
-index 6a4c2fb..18d36e8 100644
---- a/mm/slub.c
-+++ b/mm/slub.c
-@@ -186,7 +186,7 @@ static enum {
- PARTIAL, /* Kmem_cache_node works */
- UP, /* Everything works but does not show up in sysfs */
- SYSFS /* Sysfs up */
--} slab_state = DOWN;
-+} slab_state __read_only = DOWN;
-
- /* A list of all slab caches on the system */
- static DECLARE_RWSEM(slub_lock);
-@@ -208,7 +208,7 @@ struct track {
-
- enum track_item { TRACK_ALLOC, TRACK_FREE };
-
--#ifdef CONFIG_SYSFS
-+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
- static int sysfs_slab_add(struct kmem_cache *);
- static int sysfs_slab_alias(struct kmem_cache *, const char *);
- static void sysfs_slab_remove(struct kmem_cache *);
-@@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
- if (!t->addr)
- return;
-
-- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
-+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
- s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
- #ifdef CONFIG_STACKTRACE
- {
-@@ -2537,6 +2537,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
-
- slab_free_hook(s, x);
-
-+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ if (!(s->flags & SLAB_NO_SANITIZE)) {
-+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->objsize);
-+ if (s->ctor)
-+ s->ctor(x);
-+ }
-+#endif
-+
- redo:
- /*
- * Determine the currently cpus per cpu slab.
-@@ -2572,6 +2580,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
-
- page = virt_to_head_page(x);
-
-+ BUG_ON(!PageSlab(page));
-+
- slab_free(s, page, x, _RET_IP_);
-
- trace_kmem_cache_free(_RET_IP_, x);
-@@ -2605,7 +2615,7 @@ static int slub_min_objects;
- * Merge control. If this is set then no merging of slab caches will occur.
- * (Could be removed. This was introduced to pacify the merge skeptics.)
- */
--static int slub_nomerge;
-+static int slub_nomerge = 1;
-
- /*
- * Calculate the order of allocation given an slab object size.
-@@ -2909,6 +2919,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
- s->inuse = size;
-
- if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
-+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ (!(flags & SLAB_NO_SANITIZE)) ||
-+#endif
- s->ctor)) {
- /*
- * Relocate free pointer after the object if it is not
-@@ -3055,7 +3068,7 @@ static int kmem_cache_open(struct kmem_cache *s,
- else
- s->cpu_partial = 30;
-
-- s->refcount = 1;
-+ atomic_set(&s->refcount, 1);
- #ifdef CONFIG_NUMA
- s->remote_node_defrag_ratio = 1000;
- #endif
-@@ -3159,8 +3172,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
- void kmem_cache_destroy(struct kmem_cache *s)
- {
- down_write(&slub_lock);
-- s->refcount--;
-- if (!s->refcount) {
-+ if (atomic_dec_and_test(&s->refcount)) {
- list_del(&s->list);
- up_write(&slub_lock);
- if (kmem_cache_close(s)) {
-@@ -3189,6 +3201,10 @@ static struct kmem_cache *kmem_cache;
- static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
- #endif
-
-+#ifdef CONFIG_PAX_USERCOPY_SLABS
-+static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
-+#endif
-+
- static int __init setup_slub_min_order(char *str)
- {
- get_option(&str, &slub_min_order);
-@@ -3303,6 +3319,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
- return kmalloc_dma_caches[index];
-
- #endif
-+
-+#ifdef CONFIG_PAX_USERCOPY_SLABS
-+ if (flags & SLAB_USERCOPY)
-+ return kmalloc_usercopy_caches[index];
-+
-+#endif
-+
- return kmalloc_caches[index];
- }
-
-@@ -3371,6 +3394,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
- EXPORT_SYMBOL(__kmalloc_node);
- #endif
-
-+bool is_usercopy_object(const void *ptr)
-+{
-+ struct page *page;
-+ struct kmem_cache *s;
-+
-+ if (ZERO_OR_NULL_PTR(ptr))
-+ return false;
-+
-+ if (!slab_is_available())
-+ return false;
-+
-+ if (!virt_addr_valid(ptr))
-+ return false;
-+
-+ page = virt_to_head_page(ptr);
-+
-+ if (!PageSlab(page))
-+ return false;
-+
-+ s = page->slab;
-+ return s->flags & SLAB_USERCOPY;
-+}
-+
-+#ifdef CONFIG_PAX_USERCOPY
-+const char *check_heap_object(const void *ptr, unsigned long n)
-+{
-+ struct page *page;
-+ struct kmem_cache *s;
-+ unsigned long offset;
-+
-+ if (ZERO_OR_NULL_PTR(ptr))
-+ return "<null>";
-+
-+ if (!virt_addr_valid(ptr))
-+ return NULL;
-+
-+ page = virt_to_head_page(ptr);
-+
-+ if (!PageSlab(page))
-+ return NULL;
-+
-+ s = page->slab;
-+ if (!(s->flags & SLAB_USERCOPY))
-+ return s->name;
-+
-+ offset = (ptr - page_address(page)) % s->size;
-+ if (offset <= s->objsize && n <= s->objsize - offset)
-+ return NULL;
-+
-+ return s->name;
-+}
-+#endif
-+
- size_t ksize(const void *object)
- {
- struct page *page;
-@@ -3435,6 +3511,7 @@ void kfree(const void *x)
- if (unlikely(ZERO_OR_NULL_PTR(x)))
- return;
-
-+ VM_BUG_ON(!virt_addr_valid(x));
- page = virt_to_head_page(x);
- if (unlikely(!PageSlab(page))) {
- BUG_ON(!PageCompound(page));
-@@ -3645,7 +3722,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
- int node;
-
- list_add(&s->list, &slab_caches);
-- s->refcount = -1;
-+ atomic_set(&s->refcount, -1);
-
- for_each_node_state(node, N_NORMAL_MEMORY) {
- struct kmem_cache_node *n = get_node(s, node);
-@@ -3762,17 +3839,17 @@ void __init kmem_cache_init(void)
-
- /* Caches that are not of the two-to-the-power-of size */
- if (KMALLOC_MIN_SIZE <= 32) {
-- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
-+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
- caches++;
- }
-
- if (KMALLOC_MIN_SIZE <= 64) {
-- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
-+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
- caches++;
- }
-
- for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
-- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
-+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
- caches++;
- }
-
-@@ -3814,6 +3891,22 @@ void __init kmem_cache_init(void)
- }
- }
- #endif
-+
-+#ifdef CONFIG_PAX_USERCOPY_SLABS
-+ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
-+ struct kmem_cache *s = kmalloc_caches[i];
-+
-+ if (s && s->size) {
-+ char *name = kasprintf(GFP_NOWAIT,
-+ "usercopy-kmalloc-%d", s->objsize);
-+
-+ BUG_ON(!name);
-+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
-+ s->objsize, SLAB_USERCOPY);
-+ }
-+ }
-+#endif
-+
- printk(KERN_INFO
- "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
- " CPUs=%d, Nodes=%d\n",
-@@ -3840,7 +3933,7 @@ static int slab_unmergeable(struct kmem_cache *s)
- /*
- * We may have set a slab to be unmergeable during bootstrap.
- */
-- if (s->refcount < 0)
-+ if (atomic_read(&s->refcount) < 0)
- return 1;
-
- return 0;
-@@ -3897,9 +3990,17 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
- return NULL;
-
- down_write(&slub_lock);
-+
-+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
-+ flags |= SLAB_NO_SANITIZE;
-+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
-+ flags &= ~SLAB_NO_SANITIZE;
-+#endif
-+
- s = find_mergeable(size, align, flags, name, ctor);
- if (s) {
-- s->refcount++;
-+ atomic_inc(&s->refcount);
- /*
- * Adjust the object sizes so that we clear
- * the complete object on kzalloc.
-@@ -3908,7 +4009,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
- s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
-
- if (sysfs_slab_alias(s, name)) {
-- s->refcount--;
-+ atomic_dec(&s->refcount);
- goto err;
- }
- up_write(&slub_lock);
-@@ -3979,7 +4080,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata slab_notifier = {
-+static struct notifier_block slab_notifier = {
- .notifier_call = slab_cpuup_callback
- };
-
-@@ -4037,7 +4138,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
- }
- #endif
-
--#ifdef CONFIG_SYSFS
-+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
- static int count_inuse(struct page *page)
- {
- return page->inuse;
-@@ -4424,12 +4525,12 @@ static void resiliency_test(void)
- validate_slab_cache(kmalloc_caches[9]);
- }
- #else
--#ifdef CONFIG_SYSFS
-+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
- static void resiliency_test(void) {};
- #endif
- #endif
-
--#ifdef CONFIG_SYSFS
-+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
- enum slab_stat_type {
- SL_ALL, /* All slabs */
- SL_PARTIAL, /* Only partially allocated slabs */
-@@ -4676,7 +4777,7 @@ SLAB_ATTR_RO(ctor);
-
- static ssize_t aliases_show(struct kmem_cache *s, char *buf)
- {
-- return sprintf(buf, "%d\n", s->refcount - 1);
-+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
- }
- SLAB_ATTR_RO(aliases);
-
-@@ -5243,6 +5344,7 @@ static char *create_unique_id(struct kmem_cache *s)
- return name;
- }
-
-+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
- static int sysfs_slab_add(struct kmem_cache *s)
- {
- int err;
-@@ -5271,7 +5373,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
- }
-
- s->kobj.kset = slab_kset;
-- err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
-+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
- if (err) {
- kobject_put(&s->kobj);
- return err;
-@@ -5305,6 +5407,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
- kobject_del(&s->kobj);
- kobject_put(&s->kobj);
- }
-+#endif
-
- /*
- * Need to buffer aliases during bootup until sysfs becomes
-@@ -5318,6 +5421,7 @@ struct saved_alias {
-
- static struct saved_alias *alias_list;
-
-+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
- static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
- {
- struct saved_alias *al;
-@@ -5340,6 +5444,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
- alias_list = al;
- return 0;
- }
-+#endif
-
- static int __init slab_sysfs_init(void)
- {
-diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
-index 1b7e22a..3fcd4f3 100644
---- a/mm/sparse-vmemmap.c
-+++ b/mm/sparse-vmemmap.c
-@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
- void *p = vmemmap_alloc_block(PAGE_SIZE, node);
- if (!p)
- return NULL;
-- pud_populate(&init_mm, pud, p);
-+ pud_populate_kernel(&init_mm, pud, p);
- }
- return pud;
- }
-@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
- void *p = vmemmap_alloc_block(PAGE_SIZE, node);
- if (!p)
- return NULL;
-- pgd_populate(&init_mm, pgd, p);
-+ pgd_populate_kernel(&init_mm, pgd, p);
- }
- return pgd;
- }
-diff --git a/mm/swap.c b/mm/swap.c
-index a4b9016..d1a1b68 100644
---- a/mm/swap.c
-+++ b/mm/swap.c
-@@ -70,9 +70,11 @@ static void __put_compound_page(struct page *page)
- {
- compound_page_dtor *dtor;
-
-- if (!PageHuge(page))
-- __page_cache_release(page);
- dtor = get_compound_page_dtor(page);
-+ if (!PageHuge(page)) {
-+ BUG_ON(dtor != free_compound_page);
-+ __page_cache_release(page);
-+ }
- (*dtor)(page);
- }
-
-diff --git a/mm/swapfile.c b/mm/swapfile.c
-index dbd2b67..a1eac8c 100644
---- a/mm/swapfile.c
-+++ b/mm/swapfile.c
-@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
-
- static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
- /* Activity counter to indicate that a swapon or swapoff has occurred */
--static atomic_t proc_poll_event = ATOMIC_INIT(0);
-+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
-
- static inline unsigned char swap_count(unsigned char ent)
- {
-@@ -1677,7 +1677,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
- spin_unlock(&swap_lock);
-
- err = 0;
-- atomic_inc(&proc_poll_event);
-+ atomic_inc_unchecked(&proc_poll_event);
- wake_up_interruptible(&proc_poll_wait);
-
- out_dput:
-@@ -1693,8 +1693,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
-
- poll_wait(file, &proc_poll_wait, wait);
-
-- if (seq->poll_event != atomic_read(&proc_poll_event)) {
-- seq->poll_event = atomic_read(&proc_poll_event);
-+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
-+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
- return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
- }
-
-@@ -1792,7 +1792,7 @@ static int swaps_open(struct inode *inode, struct file *file)
- return ret;
-
- seq = file->private_data;
-- seq->poll_event = atomic_read(&proc_poll_event);
-+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
- return 0;
- }
-
-@@ -2126,7 +2126,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
- (p->flags & SWP_DISCARDABLE) ? "D" : "");
-
- mutex_unlock(&swapon_mutex);
-- atomic_inc(&proc_poll_event);
-+ atomic_inc_unchecked(&proc_poll_event);
- wake_up_interruptible(&proc_poll_wait);
-
- if (S_ISREG(inode->i_mode))
-diff --git a/mm/util.c b/mm/util.c
-index 136ac4f..f917fa9 100644
---- a/mm/util.c
-+++ b/mm/util.c
-@@ -243,6 +243,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
- void arch_pick_mmap_layout(struct mm_struct *mm)
- {
- mm->mmap_base = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
- }
-diff --git a/mm/vmalloc.c b/mm/vmalloc.c
-index 1431458..3eef1a6 100644
---- a/mm/vmalloc.c
-+++ b/mm/vmalloc.c
-@@ -27,10 +27,67 @@
- #include <linux/pfn.h>
- #include <linux/kmemleak.h>
- #include <linux/atomic.h>
-+#include <linux/llist.h>
- #include <asm/uaccess.h>
- #include <asm/tlbflush.h>
- #include <asm/shmparam.h>
-
-+struct vfree_deferred {
-+ struct llist_head list;
-+ struct work_struct wq;
-+};
-+static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
-+
-+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
-+struct stack_deferred_llist {
-+ struct llist_head list;
-+ void *stack;
-+ void *lowmem_stack;
-+};
-+
-+struct stack_deferred {
-+ struct stack_deferred_llist list;
-+ struct work_struct wq;
-+};
-+
-+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
-+#endif
-+
-+static void __vunmap(const void *, int);
-+
-+static void free_work(struct work_struct *w)
-+{
-+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
-+ struct llist_node *llnode = llist_del_all(&p->list);
-+ while (llnode) {
-+ void *x = llnode;
-+ llnode = llist_next(llnode);
-+ __vunmap(x, 1);
-+ }
-+}
-+
-+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
-+static void unmap_work(struct work_struct *w)
-+{
-+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
-+ struct llist_node *llnode = llist_del_all(&p->list.list);
-+ while (llnode) {
-+ struct stack_deferred_llist *x =
-+ llist_entry((struct llist_head *)llnode,
-+ struct stack_deferred_llist, list);
-+ void *stack = ACCESS_ONCE(x->stack);
-+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
-+ llnode = llist_next(llnode);
-+ __vunmap(stack, 0);
-+#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
-+ free_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
-+#else
-+ free_thread_info(lowmem_stack);
-+#endif
-+ }
-+}
-+#endif
-+
- /*** Page table manipulation functions ***/
-
- static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
-@@ -39,8 +96,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
-
- pte = pte_offset_kernel(pmd, addr);
- do {
-- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
-- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
-+ BUG_ON(!pte_exec(*pte));
-+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
-+ continue;
-+ }
-+#endif
-+
-+ {
-+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
-+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
-+ }
- } while (pte++, addr += PAGE_SIZE, addr != end);
- }
-
-@@ -100,16 +168,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
- pte = pte_alloc_kernel(pmd, addr);
- if (!pte)
- return -ENOMEM;
-+
-+ pax_open_kernel();
- do {
- struct page *page = pages[*nr];
-
-- if (WARN_ON(!pte_none(*pte)))
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ if (pgprot_val(prot) & _PAGE_NX)
-+#endif
-+
-+ if (!pte_none(*pte)) {
-+ pax_close_kernel();
-+ WARN_ON(1);
- return -EBUSY;
-- if (WARN_ON(!page))
-+ }
-+ if (!page) {
-+ pax_close_kernel();
-+ WARN_ON(1);
- return -ENOMEM;
-+ }
- set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
- (*nr)++;
- } while (pte++, addr += PAGE_SIZE, addr != end);
-+ pax_close_kernel();
- return 0;
- }
-
-@@ -119,7 +200,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
- pmd_t *pmd;
- unsigned long next;
-
-- pmd = pmd_alloc(&init_mm, pud, addr);
-+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
- if (!pmd)
- return -ENOMEM;
- do {
-@@ -136,7 +217,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
- pud_t *pud;
- unsigned long next;
-
-- pud = pud_alloc(&init_mm, pgd, addr);
-+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
- if (!pud)
- return -ENOMEM;
- do {
-@@ -196,6 +277,12 @@ int is_vmalloc_or_module_addr(const void *x)
- if (addr >= MODULES_VADDR && addr < MODULES_END)
- return 1;
- #endif
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
-+ return 1;
-+#endif
-+
- return is_vmalloc_addr(x);
- }
-
-@@ -216,8 +303,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
-
- if (!pgd_none(*pgd)) {
- pud_t *pud = pud_offset(pgd, addr);
-+#ifdef CONFIG_X86
-+ if (!pud_large(*pud))
-+#endif
- if (!pud_none(*pud)) {
- pmd_t *pmd = pmd_offset(pud, addr);
-+#ifdef CONFIG_X86
-+ if (!pmd_large(*pmd))
-+#endif
- if (!pmd_none(*pmd)) {
- pte_t *ptep, pte;
-
-@@ -1157,10 +1250,24 @@ void __init vmalloc_init(void)
-
- for_each_possible_cpu(i) {
- struct vmap_block_queue *vbq;
-+ struct vfree_deferred *p;
-+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
-+ struct stack_deferred *p2;
-+#endif
-
- vbq = &per_cpu(vmap_block_queue, i);
- spin_lock_init(&vbq->lock);
- INIT_LIST_HEAD(&vbq->free);
-+
-+ p = &per_cpu(vfree_deferred, i);
-+ init_llist_head(&p->list);
-+ INIT_WORK(&p->wq, free_work);
-+
-+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
-+ p2 = &per_cpu(stack_deferred, i);
-+ init_llist_head(&p2->list.list);
-+ INIT_WORK(&p2->wq, unmap_work);
-+#endif
- }
-
- /* Import existing vmlist entries. */
-@@ -1301,6 +1408,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
- struct vm_struct *area;
-
- BUG_ON(in_interrupt());
-+
-+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
-+ if (flags & VM_KERNEXEC) {
-+ if (start != VMALLOC_START || end != VMALLOC_END)
-+ return NULL;
-+ start = (unsigned long)MODULES_EXEC_VADDR;
-+ end = (unsigned long)MODULES_EXEC_END;
-+ }
-+#endif
-+
- if (flags & VM_IOREMAP) {
- int bit = fls(size);
-
-@@ -1475,7 +1592,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
- kfree(area);
- return;
- }
--
-+
- /**
- * vfree - release memory allocated by vmalloc()
- * @addr: memory base address
-@@ -1484,15 +1601,26 @@ static void __vunmap(const void *addr, int deallocate_pages)
- * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
- * NULL, no operation is performed.
- *
-- * Must not be called in interrupt context.
-+ * Must not be called in NMI context (strictly speaking, only if we don't
-+ * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
-+ * conventions for vfree() arch-dependent would be a really bad idea)
-+ *
-+ * NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
- */
- void vfree(const void *addr)
- {
-- BUG_ON(in_interrupt());
-+ BUG_ON(in_nmi());
-
- kmemleak_free(addr);
-
-- __vunmap(addr, 1);
-+ if (!addr)
-+ return;
-+ if (unlikely(in_interrupt())) {
-+ struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
-+ if (llist_add((struct llist_node *)addr, &p->list))
-+ schedule_work(&p->wq);
-+ } else
-+ __vunmap(addr, 1);
- }
- EXPORT_SYMBOL(vfree);
-
-@@ -1509,10 +1637,28 @@ void vunmap(const void *addr)
- {
- BUG_ON(in_interrupt());
- might_sleep();
-- __vunmap(addr, 0);
-+ if (addr)
-+ __vunmap(addr, 0);
- }
- EXPORT_SYMBOL(vunmap);
-
-+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
-+void unmap_process_stacks(struct task_struct *task)
-+{
-+ if (unlikely(in_interrupt())) {
-+ struct stack_deferred *p = &__get_cpu_var(stack_deferred);
-+ struct stack_deferred_llist *list = task->stack;
-+ list->stack = task->stack;
-+ list->lowmem_stack = task->lowmem_stack;
-+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
-+ schedule_work(&p->wq);
-+ } else {
-+ __vunmap(task->stack, 0);
-+ free_pages((unsigned long)task->lowmem_stack, THREAD_ORDER);
-+ }
-+}
-+#endif
-+
- /**
- * vmap - map an array of pages into virtually contiguous space
- * @pages: array of page pointers
-@@ -1533,6 +1679,11 @@ void *vmap(struct page **pages, unsigned int count,
- if (count > totalram_pages)
- return NULL;
-
-+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
-+ if (!(pgprot_val(prot) & _PAGE_NX))
-+ flags |= VM_KERNEXEC;
-+#endif
-+
- area = get_vm_area_caller((count << PAGE_SHIFT), flags,
- __builtin_return_address(0));
- if (!area)
-@@ -1634,6 +1785,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
- if (!size || (size >> PAGE_SHIFT) > totalram_pages)
- goto fail;
-
-+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
-+ if (!(pgprot_val(prot) & _PAGE_NX))
-+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
-+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
-+ else
-+#endif
-+
- area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
- start, end, node, gfp_mask, caller);
- if (!area)
-@@ -1807,10 +1965,9 @@ EXPORT_SYMBOL(vzalloc_node);
- * For tight control over page level allocator and protection flags
- * use __vmalloc() instead.
- */
--
- void *vmalloc_exec(unsigned long size)
- {
-- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
-+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
- -1, __builtin_return_address(0));
- }
-
-@@ -2105,6 +2262,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
- unsigned long uaddr = vma->vm_start;
- unsigned long usize = vma->vm_end - vma->vm_start;
-
-+ BUG_ON(vma->vm_mirror);
-+
- if ((PAGE_SIZE-1) & (unsigned long)addr)
- return -EINVAL;
-
-@@ -2357,8 +2516,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
- return NULL;
- }
-
-- vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
-- vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
-+ vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
-+ vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
- if (!vas || !vms)
- goto err_free;
-
-@@ -2542,11 +2701,15 @@ static int s_show(struct seq_file *m, void *p)
- {
- struct vm_struct *v = p;
-
-- seq_printf(m, "0x%p-0x%p %7ld",
-+ seq_printf(m, "0x%pP-0x%pP %7ld",
- v->addr, v->addr + v->size, v->size);
-
- if (v->caller)
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ seq_printf(m, " %pK", v->caller);
-+#else
- seq_printf(m, " %pS", v->caller);
-+#endif
-
- if (v->nr_pages)
- seq_printf(m, " pages=%d", v->nr_pages);
-diff --git a/mm/vmstat.c b/mm/vmstat.c
-index 8fd603b..495a5a1 100644
---- a/mm/vmstat.c
-+++ b/mm/vmstat.c
-@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
- *
- * vm_stat contains the global counters
- */
--atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
-+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
- EXPORT_SYMBOL(vm_stat);
-
- #ifdef CONFIG_SMP
-@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
- v = p->vm_stat_diff[i];
- p->vm_stat_diff[i] = 0;
- local_irq_restore(flags);
-- atomic_long_add(v, &zone->vm_stat[i]);
-+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
- global_diff[i] += v;
- #ifdef CONFIG_NUMA
- /* 3 seconds idle till flush */
-@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
-
- for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
- if (global_diff[i])
-- atomic_long_add(global_diff[i], &vm_stat[i]);
-+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
- }
-
- #endif
-@@ -1193,7 +1193,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __cpuinitdata vmstat_notifier =
-+static struct notifier_block vmstat_notifier =
- { &vmstat_cpuup_callback, NULL, 0 };
- #endif
-
-@@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
- start_cpu_timer(cpu);
- #endif
- #ifdef CONFIG_PROC_FS
-- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
-- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
-- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
-- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
-+ {
-+ mode_t gr_mode = S_IRUGO;
-+#ifdef CONFIG_GRKERNSEC_PROC_ADD
-+ gr_mode = S_IRUSR;
-+#endif
-+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
-+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
-+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
-+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
-+#else
-+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
-+#endif
-+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
-+ }
- #endif
- return 0;
- }
-diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
-index 963f285..3e3874d 100644
---- a/net/8021q/vlan.c
-+++ b/net/8021q/vlan.c
-@@ -513,7 +513,7 @@ out:
- return NOTIFY_DONE;
- }
-
--static struct notifier_block vlan_notifier_block __read_mostly = {
-+static struct notifier_block vlan_notifier_block = {
- .notifier_call = vlan_device_event,
- };
-
-@@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
- err = -EPERM;
- if (!capable(CAP_NET_ADMIN))
- break;
-- if ((args.u.name_type >= 0) &&
-- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
-+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
- struct vlan_net *vn;
-
- vn = net_generic(net, vlan_net_id);
-diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
-index c705612..8f2e391 100644
---- a/net/8021q/vlan_netlink.c
-+++ b/net/8021q/vlan_netlink.c
-@@ -214,7 +214,7 @@ nla_put_failure:
- return -EMSGSIZE;
- }
-
--struct rtnl_link_ops vlan_link_ops __read_mostly = {
-+struct rtnl_link_ops vlan_link_ops = {
- .kind = "vlan",
- .maxtype = IFLA_VLAN_MAX,
- .policy = vlan_policy,
-diff --git a/net/9p/client.c b/net/9p/client.c
-index 854ca7a..fc1bfc8 100644
---- a/net/9p/client.c
-+++ b/net/9p/client.c
-@@ -582,7 +582,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
- len - inline_len);
- } else {
- err = copy_from_user(ename + inline_len,
-- uidata, len - inline_len);
-+ (char __force_user *)uidata, len - inline_len);
- if (err) {
- err = -EFAULT;
- goto out_free;
-@@ -1528,7 +1528,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
- kernel_buf = 1;
- indata = data;
- } else
-- indata = (char *)udata;
-+ indata = (__force_kernel char *)udata;
- /*
- * response header len is 11
- * PDU Header(7) + IO Size (4)
-@@ -1603,7 +1603,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
- kernel_buf = 1;
- odata = data;
- } else
-- odata = (char *)udata;
-+ odata = (char __force_kernel *)udata;
- req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
- P9_ZC_HDR_SZ, kernel_buf, "dqd",
- fid->fid, offset, rsize);
-diff --git a/net/9p/mod.c b/net/9p/mod.c
-index 2664d12..b2803fe 100644
---- a/net/9p/mod.c
-+++ b/net/9p/mod.c
-@@ -57,7 +57,7 @@ static LIST_HEAD(v9fs_trans_list);
- void v9fs_register_trans(struct p9_trans_module *m)
- {
- spin_lock(&v9fs_trans_lock);
-- list_add_tail(&m->list, &v9fs_trans_list);
-+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
- spin_unlock(&v9fs_trans_lock);
- }
- EXPORT_SYMBOL(v9fs_register_trans);
-@@ -70,7 +70,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
- void v9fs_unregister_trans(struct p9_trans_module *m)
- {
- spin_lock(&v9fs_trans_lock);
-- list_del_init(&m->list);
-+ pax_list_del_init((struct list_head *)&m->list);
- spin_unlock(&v9fs_trans_lock);
- }
- EXPORT_SYMBOL(v9fs_unregister_trans);
-diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
-index fdfdb57..38d368c 100644
---- a/net/9p/trans_fd.c
-+++ b/net/9p/trans_fd.c
-@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
- oldfs = get_fs();
- set_fs(get_ds());
- /* The cast to a user pointer is valid due to the set_fs() */
-- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
-+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
- set_fs(oldfs);
-
- if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
-diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
-index 55f0c09..d5bf348 100644
---- a/net/9p/trans_virtio.c
-+++ b/net/9p/trans_virtio.c
-@@ -324,7 +324,10 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
- int count = nr_pages;
- while (nr_pages) {
- s = rest_of_page(data);
-- pages[index++] = kmap_to_page(data);
-+ if (is_vmalloc_addr(data))
-+ pages[index++] = vmalloc_to_page(data);
-+ else
-+ pages[index++] = kmap_to_page(data);
- data += s;
- nr_pages--;
- }
-diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
-index b5b1a22..700277b 100644
---- a/net/appletalk/atalk_proc.c
-+++ b/net/appletalk/atalk_proc.c
-@@ -255,7 +255,7 @@ int __init atalk_proc_init(void)
- struct proc_dir_entry *p;
- int rc = -ENOMEM;
-
-- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
-+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
- if (!atalk_proc_dir)
- goto out;
-
-diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
-index f41f026..fe76ea8 100644
---- a/net/atm/atm_misc.c
-+++ b/net/atm/atm_misc.c
-@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
- if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
- return 1;
- atm_return(vcc, truesize);
-- atomic_inc(&vcc->stats->rx_drop);
-+ atomic_inc_unchecked(&vcc->stats->rx_drop);
- return 0;
- }
- EXPORT_SYMBOL(atm_charge);
-@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
- }
- }
- atm_return(vcc, guess);
-- atomic_inc(&vcc->stats->rx_drop);
-+ atomic_inc_unchecked(&vcc->stats->rx_drop);
- return NULL;
- }
- EXPORT_SYMBOL(atm_alloc_charge);
-@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
-
- void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
- {
--#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
-+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
- __SONET_ITEMS
- #undef __HANDLE_ITEM
- }
-@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
-
- void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
- {
--#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
-+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
- __SONET_ITEMS
- #undef __HANDLE_ITEM
- }
-diff --git a/net/atm/lec.c b/net/atm/lec.c
-index f1964ca..f309d61 100644
---- a/net/atm/lec.c
-+++ b/net/atm/lec.c
-@@ -116,9 +116,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
- }
-
- static struct lane2_ops lane2_ops = {
-- lane2_resolve, /* resolve, spec 3.1.3 */
-- lane2_associate_req, /* associate_req, spec 3.1.4 */
-- NULL /* associate indicator, spec 3.1.5 */
-+ .resolve = lane2_resolve,
-+ .associate_req = lane2_associate_req,
-+ .associate_indicator = NULL
- };
-
- static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-diff --git a/net/atm/lec.h b/net/atm/lec.h
-index dfc0719..47c5322 100644
---- a/net/atm/lec.h
-+++ b/net/atm/lec.h
-@@ -48,7 +48,7 @@ struct lane2_ops {
- const u8 *tlvs, u32 sizeoftlvs);
- void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
- const u8 *tlvs, u32 sizeoftlvs);
--};
-+} __no_const;
-
- /*
- * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
-diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
-index d1b2d9a..d549f7f 100644
---- a/net/atm/mpoa_caches.c
-+++ b/net/atm/mpoa_caches.c
-@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
-
-
- static struct in_cache_ops ingress_ops = {
-- in_cache_add_entry, /* add_entry */
-- in_cache_get, /* get */
-- in_cache_get_with_mask, /* get_with_mask */
-- in_cache_get_by_vcc, /* get_by_vcc */
-- in_cache_put, /* put */
-- in_cache_remove_entry, /* remove_entry */
-- cache_hit, /* cache_hit */
-- clear_count_and_expired, /* clear_count */
-- check_resolving_entries, /* check_resolving */
-- refresh_entries, /* refresh */
-- in_destroy_cache /* destroy_cache */
-+ .add_entry = in_cache_add_entry,
-+ .get = in_cache_get,
-+ .get_with_mask = in_cache_get_with_mask,
-+ .get_by_vcc = in_cache_get_by_vcc,
-+ .put = in_cache_put,
-+ .remove_entry = in_cache_remove_entry,
-+ .cache_hit = cache_hit,
-+ .clear_count = clear_count_and_expired,
-+ .check_resolving = check_resolving_entries,
-+ .refresh = refresh_entries,
-+ .destroy_cache = in_destroy_cache
- };
-
- static struct eg_cache_ops egress_ops = {
-- eg_cache_add_entry, /* add_entry */
-- eg_cache_get_by_cache_id, /* get_by_cache_id */
-- eg_cache_get_by_tag, /* get_by_tag */
-- eg_cache_get_by_vcc, /* get_by_vcc */
-- eg_cache_get_by_src_ip, /* get_by_src_ip */
-- eg_cache_put, /* put */
-- eg_cache_remove_entry, /* remove_entry */
-- update_eg_cache_entry, /* update */
-- clear_expired, /* clear_expired */
-- eg_destroy_cache /* destroy_cache */
-+ .add_entry = eg_cache_add_entry,
-+ .get_by_cache_id = eg_cache_get_by_cache_id,
-+ .get_by_tag = eg_cache_get_by_tag,
-+ .get_by_vcc = eg_cache_get_by_vcc,
-+ .get_by_src_ip = eg_cache_get_by_src_ip,
-+ .put = eg_cache_put,
-+ .remove_entry = eg_cache_remove_entry,
-+ .update = update_eg_cache_entry,
-+ .clear_expired = clear_expired,
-+ .destroy_cache = eg_destroy_cache
- };
-
-
-diff --git a/net/atm/proc.c b/net/atm/proc.c
-index 0d020de..011c7bb 100644
---- a/net/atm/proc.c
-+++ b/net/atm/proc.c
-@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
- const struct k_atm_aal_stats *stats)
- {
- seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
-- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
-- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
-- atomic_read(&stats->rx_drop));
-+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
-+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
-+ atomic_read_unchecked(&stats->rx_drop));
- }
-
- static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
-diff --git a/net/atm/resources.c b/net/atm/resources.c
-index 23f45ce..c748f1a 100644
---- a/net/atm/resources.c
-+++ b/net/atm/resources.c
-@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
- static void copy_aal_stats(struct k_atm_aal_stats *from,
- struct atm_aal_stats *to)
- {
--#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
-+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
- __AAL_STAT_ITEMS
- #undef __HANDLE_ITEM
- }
-@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
- static void subtract_aal_stats(struct k_atm_aal_stats *from,
- struct atm_aal_stats *to)
- {
--#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
-+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
- __AAL_STAT_ITEMS
- #undef __HANDLE_ITEM
- }
-diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
-index c6715ee..69745c0 100644
---- a/net/ax25/ax25_subr.c
-+++ b/net/ax25/ax25_subr.c
-@@ -265,6 +265,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
- {
- ax25_clear_queues(ax25);
-
-+ ax25_stop_heartbeat(ax25);
- ax25_stop_t1timer(ax25);
- ax25_stop_t2timer(ax25);
- ax25_stop_t3timer(ax25);
-diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
-index ebe0ef3..d5b0a8e 100644
---- a/net/ax25/sysctl_net_ax25.c
-+++ b/net/ax25/sysctl_net_ax25.c
-@@ -31,7 +31,7 @@ static int min_ds_timeout[1], max_ds_timeout[] = {65535000};
-
- static struct ctl_table_header *ax25_table_header;
-
--static ctl_table *ax25_table;
-+static ctl_table_no_const *ax25_table;
- static int ax25_table_size;
-
- static struct ctl_path ax25_path[] = {
-@@ -174,7 +174,7 @@ void ax25_register_sysctl(void)
- }
-
- for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) {
-- struct ctl_table *child = kmemdup(ax25_param_table,
-+ ctl_table_no_const *child = kmemdup(ax25_param_table,
- sizeof(ax25_param_table),
- GFP_ATOMIC);
- if (!child) {
-diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
-index 3512e25..2b33401 100644
---- a/net/batman-adv/bat_iv_ogm.c
-+++ b/net/batman-adv/bat_iv_ogm.c
-@@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
-
- /* change sequence number to network order */
- batman_ogm_packet->seqno =
-- htonl((uint32_t)atomic_read(&hard_iface->seqno));
-+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
-
- batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
- batman_ogm_packet->tt_crc = htons((uint16_t)
-@@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
- else
- batman_ogm_packet->gw_flags = NO_FLAGS;
-
-- atomic_inc(&hard_iface->seqno);
-+ atomic_inc_unchecked(&hard_iface->seqno);
-
- slide_own_bcast_window(hard_iface);
- bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
-@@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
- return;
-
- /* could be changed by schedule_own_packet() */
-- if_incoming_seqno = atomic_read(&if_incoming->seqno);
-+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
-
- has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
-
-diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
-index 7704df4..beb4e16 100644
---- a/net/batman-adv/hard-interface.c
-+++ b/net/batman-adv/hard-interface.c
-@@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
- hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
- dev_add_pack(&hard_iface->batman_adv_ptype);
-
-- atomic_set(&hard_iface->seqno, 1);
-- atomic_set(&hard_iface->frag_seqno, 1);
-+ atomic_set_unchecked(&hard_iface->seqno, 1);
-+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
- bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
- hard_iface->net_dev->name);
-
-diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
-index f9cc957..efd9dae 100644
---- a/net/batman-adv/soft-interface.c
-+++ b/net/batman-adv/soft-interface.c
-@@ -634,7 +634,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
-
- /* set broadcast sequence number */
- bcast_packet->seqno =
-- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
-+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
-
- add_bcast_packet_to_list(bat_priv, skb, 1);
-
-@@ -828,7 +828,7 @@ struct net_device *softif_create(const char *name)
- atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
-
- atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
-- atomic_set(&bat_priv->bcast_seqno, 1);
-+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
- atomic_set(&bat_priv->ttvn, 0);
- atomic_set(&bat_priv->tt_local_changes, 0);
- atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
-diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
-index ab8d0fe..ceba3fd 100644
---- a/net/batman-adv/types.h
-+++ b/net/batman-adv/types.h
-@@ -38,8 +38,8 @@ struct hard_iface {
- int16_t if_num;
- char if_status;
- struct net_device *net_dev;
-- atomic_t seqno;
-- atomic_t frag_seqno;
-+ atomic_unchecked_t seqno;
-+ atomic_unchecked_t frag_seqno;
- unsigned char *packet_buff;
- int packet_len;
- struct kobject *hardif_obj;
-@@ -154,7 +154,7 @@ struct bat_priv {
- atomic_t orig_interval; /* uint */
- atomic_t hop_penalty; /* uint */
- atomic_t log_level; /* uint */
-- atomic_t bcast_seqno;
-+ atomic_unchecked_t bcast_seqno;
- atomic_t bcast_queue_left;
- atomic_t batman_queue_left;
- atomic_t ttvn; /* translation table version number */
-diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
-index 07d1c1d..7e9bea9 100644
---- a/net/batman-adv/unicast.c
-+++ b/net/batman-adv/unicast.c
-@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
- frag1->flags = UNI_FRAG_HEAD | large_tail;
- frag2->flags = large_tail;
-
-- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
-+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
- frag1->seqno = htons(seqno - 1);
- frag2->seqno = htons(seqno);
-
-diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
-index 9b67f3d..f6d7e5c 100644
---- a/net/bluetooth/Makefile
-+++ b/net/bluetooth/Makefile
-@@ -8,6 +8,6 @@ obj-$(CONFIG_BT_BNEP) += bnep/
- obj-$(CONFIG_BT_CMTP) += cmtp/
- obj-$(CONFIG_BT_HIDP) += hidp/
-
--bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
--bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o smp.o
-+bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o smp.o
-+bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o
- bluetooth-$(CONFIG_BT_SCO) += sco.o
-diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
-index f456645..810d5f3 100644
---- a/net/bluetooth/hci_conn.c
-+++ b/net/bluetooth/hci_conn.c
-@@ -235,7 +235,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
- memset(&cp, 0, sizeof(cp));
-
- cp.handle = cpu_to_le16(conn->handle);
-- memcpy(cp.ltk, ltk, sizeof(ltk));
-+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
-
- hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
- }
-diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
-index bb78c75..a48650e 100644
---- a/net/bluetooth/hci_sock.c
-+++ b/net/bluetooth/hci_sock.c
-@@ -605,7 +605,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char
- uf.event_mask[1] = *((u32 *) f->event_mask + 1);
- }
-
-- len = min_t(unsigned int, len, sizeof(uf));
-+ len = min((size_t)len, sizeof(uf));
- if (copy_from_user(&uf, optval, len)) {
- err = -EFAULT;
- break;
-diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
-index 0274157..f8afbf3c7 100644
---- a/net/bluetooth/hidp/core.c
-+++ b/net/bluetooth/hidp/core.c
-@@ -945,9 +945,9 @@ static int hidp_setup_hid(struct hidp_session *session,
- hid->version = req->version;
- hid->country = req->country;
-
-- strncpy(hid->name, req->name, sizeof(req->name) - 1);
-- strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64);
-- strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64);
-+ strncpy(hid->name, req->name, sizeof(hid->name) - 1);
-+ strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), sizeof(hid->phys) - 1);
-+ strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), sizeof(hid->uniq) - 1);
-
- hid->dev.parent = hidp_get_device(session);
- hid->ll_driver = &hidp_hid_driver;
-diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
-index dd7c019..9d19c31 100644
---- a/net/bluetooth/l2cap_core.c
-+++ b/net/bluetooth/l2cap_core.c
-@@ -2181,8 +2181,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
- break;
-
- case L2CAP_CONF_RFC:
-- if (olen == sizeof(rfc))
-- memcpy(&rfc, (void *)val, olen);
-+ if (olen != sizeof(rfc))
-+ break;
-+
-+ memcpy(&rfc, (void *)val, olen);
-
- if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
- rfc.mode != chan->mode)
-diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
-index 74e59cd..cb58017 100644
---- a/net/bluetooth/l2cap_sock.c
-+++ b/net/bluetooth/l2cap_sock.c
-@@ -484,7 +484,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
- struct sock *sk = sock->sk;
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
- struct l2cap_options opts;
-- int len, err = 0;
-+ int err = 0;
-+ size_t len = optlen;
- u32 opt;
-
- BT_DBG("sk %p", sk);
-@@ -506,7 +507,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
- opts.max_tx = chan->max_tx;
- opts.txwin_size = (__u16)chan->tx_win;
-
-- len = min_t(unsigned int, sizeof(opts), optlen);
-+ len = min(sizeof(opts), len);
- if (copy_from_user((char *) &opts, optval, len)) {
- err = -EFAULT;
- break;
-@@ -572,7 +573,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
- struct bt_security sec;
- struct bt_power pwr;
- struct l2cap_conn *conn;
-- int len, err = 0;
-+ int err = 0;
-+ size_t len = optlen;
- u32 opt;
-
- BT_DBG("sk %p", sk);
-@@ -595,7 +597,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
-
- sec.level = BT_SECURITY_LOW;
-
-- len = min_t(unsigned int, sizeof(sec), optlen);
-+ len = min(sizeof(sec), len);
- if (copy_from_user((char *) &sec, optval, len)) {
- err = -EFAULT;
- break;
-@@ -671,7 +673,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
-
- pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
-
-- len = min_t(unsigned int, sizeof(pwr), optlen);
-+ len = min(sizeof(pwr), len);
- if (copy_from_user((char *) &pwr, optval, len)) {
- err = -EFAULT;
- break;
-diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
-index 93a8241..b9ef30c 100644
---- a/net/bluetooth/rfcomm/sock.c
-+++ b/net/bluetooth/rfcomm/sock.c
-@@ -684,7 +684,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
- struct sock *sk = sock->sk;
- struct bt_security sec;
- int err = 0;
-- size_t len;
-+ size_t len = optlen;
- u32 opt;
-
- BT_DBG("sk %p", sk);
-@@ -706,7 +706,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
-
- sec.level = BT_SECURITY_LOW;
-
-- len = min_t(unsigned int, sizeof(sec), optlen);
-+ len = min(sizeof(sec), len);
- if (copy_from_user((char *) &sec, optval, len)) {
- err = -EFAULT;
- break;
-diff --git a/net/bridge/br.c b/net/bridge/br.c
-index f20c4fd..73aee41 100644
---- a/net/bridge/br.c
-+++ b/net/bridge/br.c
-@@ -34,6 +34,8 @@ static int __init br_init(void)
- {
- int err;
-
-+ BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
-+
- err = stp_proto_register(&br_stp_proto);
- if (err < 0) {
- pr_err("bridge: can't register sap for STP\n");
-diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
-index 398a297..83fc29c 100644
---- a/net/bridge/br_multicast.c
-+++ b/net/bridge/br_multicast.c
-@@ -1416,7 +1416,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
- nexthdr = ip6h->nexthdr;
- offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
-
-- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
-+ if (nexthdr != IPPROTO_ICMPV6)
- return 0;
-
- /* Okay, we found ICMPv6 header */
-diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
-index 99a48a39..f0638fe 100644
---- a/net/bridge/br_netlink.c
-+++ b/net/bridge/br_netlink.c
-@@ -225,7 +225,7 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
- return register_netdevice(dev);
- }
-
--struct rtnl_link_ops br_link_ops __read_mostly = {
-+struct rtnl_link_ops br_link_ops = {
- .kind = "bridge",
- .priv_size = sizeof(struct net_bridge),
- .setup = br_dev_setup,
-diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
-index 5449294..c1d8d99 100644
---- a/net/bridge/netfilter/ebt_ulog.c
-+++ b/net/bridge/netfilter/ebt_ulog.c
-@@ -158,6 +158,7 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
- ub->qlen++;
-
- pm = NLMSG_DATA(nlh);
-+ memset(pm, 0, sizeof(*pm));
-
- /* Fill in the ulog data */
- pm->version = EBT_ULOG_VERSION;
-@@ -170,8 +171,6 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
- pm->hook = hooknr;
- if (uloginfo->prefix != NULL)
- strcpy(pm->prefix, uloginfo->prefix);
-- else
-- *(pm->prefix) = '\0';
-
- if (in) {
- strcpy(pm->physindev, in->name);
-@@ -181,16 +180,14 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
- strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name);
- else
- strcpy(pm->indev, in->name);
-- } else
-- pm->indev[0] = pm->physindev[0] = '\0';
-+ }
-
- if (out) {
- /* If out exists, then out is a bridge port */
- strcpy(pm->physoutdev, out->name);
- /* rcu_read_lock()ed by nf_hook_slow */
- strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name);
-- } else
-- pm->outdev[0] = pm->physoutdev[0] = '\0';
-+ }
-
- if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0)
- BUG();
-diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
-index 45f93f8..550f429 100644
---- a/net/bridge/netfilter/ebtables.c
-+++ b/net/bridge/netfilter/ebtables.c
-@@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
- tmp.valid_hooks = t->table->valid_hooks;
- }
- mutex_unlock(&ebt_mutex);
-- if (copy_to_user(user, &tmp, *len) != 0){
-+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
- BUGPRINT("c2u Didn't work\n");
- ret = -EFAULT;
- break;
-@@ -2322,7 +2322,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
- goto out;
- tmp.valid_hooks = t->valid_hooks;
-
-- if (copy_to_user(user, &tmp, *len) != 0) {
-+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
- ret = -EFAULT;
- break;
- }
-@@ -2333,7 +2333,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
- tmp.entries_size = t->table->entries_size;
- tmp.valid_hooks = t->table->valid_hooks;
-
-- if (copy_to_user(user, &tmp, *len) != 0) {
-+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
- ret = -EFAULT;
- break;
- }
-diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
-index 7eed9eb..fd7291e 100644
---- a/net/caif/caif_socket.c
-+++ b/net/caif/caif_socket.c
-@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
- #ifdef CONFIG_DEBUG_FS
- struct debug_fs_counter {
- atomic_t caif_nr_socks;
-- atomic_t caif_sock_create;
-- atomic_t num_connect_req;
-- atomic_t num_connect_resp;
-- atomic_t num_connect_fail_resp;
-- atomic_t num_disconnect;
-- atomic_t num_remote_shutdown_ind;
-- atomic_t num_tx_flow_off_ind;
-- atomic_t num_tx_flow_on_ind;
-- atomic_t num_rx_flow_off;
-- atomic_t num_rx_flow_on;
-+ atomic_unchecked_t caif_sock_create;
-+ atomic_unchecked_t num_connect_req;
-+ atomic_unchecked_t num_connect_resp;
-+ atomic_unchecked_t num_connect_fail_resp;
-+ atomic_unchecked_t num_disconnect;
-+ atomic_unchecked_t num_remote_shutdown_ind;
-+ atomic_unchecked_t num_tx_flow_off_ind;
-+ atomic_unchecked_t num_tx_flow_on_ind;
-+ atomic_unchecked_t num_rx_flow_off;
-+ atomic_unchecked_t num_rx_flow_on;
- };
- static struct debug_fs_counter cnt;
- #define dbfs_atomic_inc(v) atomic_inc_return(v)
-+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
- #define dbfs_atomic_dec(v) atomic_dec_return(v)
- #else
- #define dbfs_atomic_inc(v) 0
-@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
- atomic_read(&cf_sk->sk.sk_rmem_alloc),
- sk_rcvbuf_lowwater(cf_sk));
- set_rx_flow_off(cf_sk);
-- dbfs_atomic_inc(&cnt.num_rx_flow_off);
-+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
- caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
- }
-
-@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
- set_rx_flow_off(cf_sk);
- if (net_ratelimit())
- pr_debug("sending flow OFF due to rmem_schedule\n");
-- dbfs_atomic_inc(&cnt.num_rx_flow_off);
-+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
- caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
- }
- skb->dev = NULL;
-@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
- switch (flow) {
- case CAIF_CTRLCMD_FLOW_ON_IND:
- /* OK from modem to start sending again */
-- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
-+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
- set_tx_flow_on(cf_sk);
- cf_sk->sk.sk_state_change(&cf_sk->sk);
- break;
-
- case CAIF_CTRLCMD_FLOW_OFF_IND:
- /* Modem asks us to shut up */
-- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
-+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
- set_tx_flow_off(cf_sk);
- cf_sk->sk.sk_state_change(&cf_sk->sk);
- break;
-@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
- /* We're now connected */
- caif_client_register_refcnt(&cf_sk->layer,
- cfsk_hold, cfsk_put);
-- dbfs_atomic_inc(&cnt.num_connect_resp);
-+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
- cf_sk->sk.sk_state = CAIF_CONNECTED;
- set_tx_flow_on(cf_sk);
- cf_sk->sk.sk_state_change(&cf_sk->sk);
-@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
-
- case CAIF_CTRLCMD_INIT_FAIL_RSP:
- /* Connect request failed */
-- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
-+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
- cf_sk->sk.sk_err = ECONNREFUSED;
- cf_sk->sk.sk_state = CAIF_DISCONNECTED;
- cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
-@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
-
- case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
- /* Modem has closed this connection, or device is down. */
-- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
-+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
- cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
- cf_sk->sk.sk_err = ECONNRESET;
- set_rx_flow_on(cf_sk);
-@@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
- return;
-
- if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
-- dbfs_atomic_inc(&cnt.num_rx_flow_on);
-+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
- set_rx_flow_on(cf_sk);
- caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
- }
-@@ -852,7 +853,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
- /*ifindex = id of the interface.*/
- cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
-
-- dbfs_atomic_inc(&cnt.num_connect_req);
-+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
- cf_sk->layer.receive = caif_sktrecv_cb;
-
- err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
-@@ -941,7 +942,7 @@ static int caif_release(struct socket *sock)
- spin_unlock_bh(&sk->sk_receive_queue.lock);
- sock->sk = NULL;
-
-- dbfs_atomic_inc(&cnt.num_disconnect);
-+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
-
- WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
- if (cf_sk->debugfs_socket_dir != NULL)
-@@ -1120,7 +1121,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
- cf_sk->conn_req.protocol = protocol;
- /* Increase the number of sockets created. */
- dbfs_atomic_inc(&cnt.caif_nr_socks);
-- num = dbfs_atomic_inc(&cnt.caif_sock_create);
-+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
- #ifdef CONFIG_DEBUG_FS
- if (!IS_ERR(debugfsdir)) {
-
-diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
-index 84efbe4..51d47bc 100644
---- a/net/caif/cfctrl.c
-+++ b/net/caif/cfctrl.c
-@@ -9,6 +9,7 @@
- #include <linux/stddef.h>
- #include <linux/spinlock.h>
- #include <linux/slab.h>
-+#include <linux/sched.h>
- #include <net/caif/caif_layer.h>
- #include <net/caif/cfpkt.h>
- #include <net/caif/cfctrl.h>
-@@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
- memset(&dev_info, 0, sizeof(dev_info));
- dev_info.id = 0xff;
- cfsrvl_init(&this->serv, 0, &dev_info, false);
-- atomic_set(&this->req_seq_no, 1);
-- atomic_set(&this->rsp_seq_no, 1);
-+ atomic_set_unchecked(&this->req_seq_no, 1);
-+ atomic_set_unchecked(&this->rsp_seq_no, 1);
- this->serv.layer.receive = cfctrl_recv;
- sprintf(this->serv.layer.name, "ctrl");
- this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
-@@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
- struct cfctrl_request_info *req)
- {
- spin_lock_bh(&ctrl->info_list_lock);
-- atomic_inc(&ctrl->req_seq_no);
-- req->sequence_no = atomic_read(&ctrl->req_seq_no);
-+ atomic_inc_unchecked(&ctrl->req_seq_no);
-+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
- list_add_tail(&req->list, &ctrl->list);
- spin_unlock_bh(&ctrl->info_list_lock);
- }
-@@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
- if (p != first)
- pr_warn("Requests are not received in order\n");
-
-- atomic_set(&ctrl->rsp_seq_no,
-+ atomic_set_unchecked(&ctrl->rsp_seq_no,
- p->sequence_no);
- list_del(&p->list);
- goto out;
-diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
-index b525aec..9659b25 100644
---- a/net/caif/chnl_net.c
-+++ b/net/caif/chnl_net.c
-@@ -74,7 +74,6 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
- struct sk_buff *skb;
- struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
- int pktlen;
-- int err = 0;
- const u8 *ip_version;
- u8 buf;
-
-@@ -95,8 +94,11 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
-
- /* check the version of IP */
- ip_version = skb_header_pointer(skb, 0, 1, &buf);
-- if (!ip_version)
-+ if (!ip_version) {
-+ kfree_skb(skb);
- return -EINVAL;
-+ }
-+
- switch (*ip_version >> 4) {
- case 4:
- skb->protocol = htons(ETH_P_IP);
-@@ -105,6 +107,8 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
- skb->protocol = htons(ETH_P_IPV6);
- break;
- default:
-+ kfree_skb(skb);
-+ priv->netdev->stats.rx_errors++;
- return -EINVAL;
- }
-
-@@ -123,7 +127,7 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
- priv->netdev->stats.rx_packets++;
- priv->netdev->stats.rx_bytes += pktlen;
-
-- return err;
-+ return 0;
- }
-
- static int delete_device(struct chnl_net *dev)
-@@ -221,12 +225,16 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
-
- if (skb->len > priv->netdev->mtu) {
- pr_warn("Size of skb exceeded MTU\n");
-- return -ENOSPC;
-+ kfree_skb(skb);
-+ dev->stats.tx_errors++;
-+ return NETDEV_TX_OK;
- }
-
- if (!priv->flowenabled) {
- pr_debug("dropping packets flow off\n");
-- return NETDEV_TX_BUSY;
-+ kfree_skb(skb);
-+ dev->stats.tx_dropped++;
-+ return NETDEV_TX_OK;
- }
-
- if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
-@@ -240,9 +248,8 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
- /* Send the packet down the stack. */
- result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
- if (result) {
-- if (result == -EAGAIN)
-- result = NETDEV_TX_BUSY;
-- return result;
-+ dev->stats.tx_dropped++;
-+ return NETDEV_TX_OK;
- }
-
- /* Update statistics. */
-@@ -507,7 +514,7 @@ static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
- };
-
-
--static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
-+static struct rtnl_link_ops ipcaif_link_ops = {
- .kind = "caif",
- .priv_size = sizeof(struct chnl_net),
- .setup = ipcaif_net_setup,
-diff --git a/net/can/af_can.c b/net/can/af_can.c
-index 7d9dff222..a1e901b 100644
---- a/net/can/af_can.c
-+++ b/net/can/af_can.c
-@@ -821,7 +821,7 @@ static const struct net_proto_family can_family_ops = {
- };
-
- /* notifier block for netdevice event */
--static struct notifier_block can_netdev_notifier __read_mostly = {
-+static struct notifier_block can_netdev_notifier = {
- .notifier_call = can_notifier,
- };
-
-diff --git a/net/can/bcm.c b/net/can/bcm.c
-index 3910c1f..268b30e 100644
---- a/net/can/bcm.c
-+++ b/net/can/bcm.c
-@@ -1618,7 +1618,7 @@ static int __init bcm_module_init(void)
- }
-
- /* create /proc/net/can-bcm directory */
-- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
-+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
- return 0;
- }
-
-diff --git a/net/can/gw.c b/net/can/gw.c
-index f78f898..d7aa843 100644
---- a/net/can/gw.c
-+++ b/net/can/gw.c
-@@ -67,7 +67,6 @@ MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
- MODULE_ALIAS("can-gw");
-
- HLIST_HEAD(cgw_list);
--static struct notifier_block notifier;
-
- static struct kmem_cache *cgw_cache __read_mostly;
-
-@@ -911,6 +910,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
- return err;
- }
-
-+static struct notifier_block notifier = {
-+ .notifier_call = cgw_notifier
-+};
-+
- static __init int cgw_module_init(void)
- {
- printk(banner);
-@@ -922,7 +925,6 @@ static __init int cgw_module_init(void)
- return -ENOMEM;
-
- /* set notifier */
-- notifier.notifier_call = cgw_notifier;
- register_netdevice_notifier(&notifier);
-
- if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
-diff --git a/net/can/proc.c b/net/can/proc.c
-index ba873c3..3b00036 100644
---- a/net/can/proc.c
-+++ b/net/can/proc.c
-@@ -472,7 +472,7 @@ static void can_remove_proc_readentry(const char *name)
- void can_init_proc(void)
- {
- /* create /proc/net/can directory */
-- can_dir = proc_mkdir("can", init_net.proc_net);
-+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
-
- if (!can_dir) {
- printk(KERN_INFO "can: failed to create /proc/net/can . "
-diff --git a/net/compat.c b/net/compat.c
-index f06994d..b7fd27f 100644
---- a/net/compat.c
-+++ b/net/compat.c
-@@ -80,9 +80,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
-
- if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
- kmsg->msg_namelen = sizeof(struct sockaddr_storage);
-- kmsg->msg_name = compat_ptr(tmp1);
-- kmsg->msg_iov = compat_ptr(tmp2);
-- kmsg->msg_control = compat_ptr(tmp3);
-+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
-+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
-+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
- return 0;
- }
-
-@@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
-
- if (kern_msg->msg_name && kern_msg->msg_namelen) {
- if (mode == VERIFY_READ) {
-- int err = move_addr_to_kernel(kern_msg->msg_name,
-+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
- kern_msg->msg_namelen,
- kern_address);
- if (err < 0)
-@@ -107,7 +107,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
- }
-
- tot_len = iov_from_user_compat_to_kern(kern_iov,
-- (struct compat_iovec __user *)kern_msg->msg_iov,
-+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
- kern_msg->msg_iovlen);
- if (tot_len >= 0)
- kern_msg->msg_iov = kern_iov;
-@@ -127,20 +127,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
-
- #define CMSG_COMPAT_FIRSTHDR(msg) \
- (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
-- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
-+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
- (struct compat_cmsghdr __user *)NULL)
-
- #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
- ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
- (ucmlen) <= (unsigned long) \
- ((mhdr)->msg_controllen - \
-- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
-+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
-
- static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
- struct compat_cmsghdr __user *cmsg, int cmsg_len)
- {
- char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
-- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
-+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
- msg->msg_controllen)
- return NULL;
- return (struct compat_cmsghdr __user *)ptr;
-@@ -232,7 +232,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
- {
- struct compat_timeval ctv;
- struct compat_timespec cts[3];
-- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
-+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
- struct compat_cmsghdr cmhdr;
- int cmlen;
-
-@@ -284,7 +284,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
-
- void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
- {
-- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
-+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
- int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
- int fdnum = scm->fp->count;
- struct file **fp = scm->fp->fp;
-@@ -337,14 +337,6 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
- __scm_destroy(scm);
- }
-
--/*
-- * A struct sock_filter is architecture independent.
-- */
--struct compat_sock_fprog {
-- u16 len;
-- compat_uptr_t filter; /* struct sock_filter * */
--};
--
- static int do_set_attach_filter(struct socket *sock, int level, int optname,
- char __user *optval, unsigned int optlen)
- {
-@@ -381,7 +373,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
- return -EFAULT;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
-+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
- set_fs(old_fs);
-
- return err;
-@@ -442,7 +434,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
- len = sizeof(ktime);
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
-+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
- set_fs(old_fs);
-
- if (!err) {
-@@ -577,7 +569,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
- case MCAST_JOIN_GROUP:
- case MCAST_LEAVE_GROUP:
- {
-- struct compat_group_req __user *gr32 = (void *)optval;
-+ struct compat_group_req __user *gr32 = (void __user *)optval;
- struct group_req __user *kgr =
- compat_alloc_user_space(sizeof(struct group_req));
- u32 interface;
-@@ -598,7 +590,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
- case MCAST_BLOCK_SOURCE:
- case MCAST_UNBLOCK_SOURCE:
- {
-- struct compat_group_source_req __user *gsr32 = (void *)optval;
-+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
- struct group_source_req __user *kgsr = compat_alloc_user_space(
- sizeof(struct group_source_req));
- u32 interface;
-@@ -619,7 +611,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
- }
- case MCAST_MSFILTER:
- {
-- struct compat_group_filter __user *gf32 = (void *)optval;
-+ struct compat_group_filter __user *gf32 = (void __user *)optval;
- struct group_filter __user *kgf;
- u32 interface, fmode, numsrc;
-
-@@ -657,7 +649,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
- char __user *optval, int __user *optlen,
- int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
- {
-- struct compat_group_filter __user *gf32 = (void *)optval;
-+ struct compat_group_filter __user *gf32 = (void __user *)optval;
- struct group_filter __user *kgf;
- int __user *koptlen;
- u32 interface, fmode, numsrc;
-@@ -801,7 +793,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
-
- if (call < SYS_SOCKET || call > SYS_SENDMMSG)
- return -EINVAL;
-- if (copy_from_user(a, args, nas[call]))
-+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
- return -EFAULT;
- a0 = a[0];
- a1 = a[1];
-diff --git a/net/core/datagram.c b/net/core/datagram.c
-index 68bbf9f..5ef0d12 100644
---- a/net/core/datagram.c
-+++ b/net/core/datagram.c
-@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
- }
-
- kfree_skb(skb);
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- sk_mem_reclaim_partial(sk);
-
- return err;
-diff --git a/net/core/dev.c b/net/core/dev.c
-index 1c0d862..d4946e6 100644
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -1142,10 +1142,14 @@ void dev_load(struct net *net, const char *name)
- if (no_module && capable(CAP_NET_ADMIN))
- no_module = request_module("netdev-%s", name);
- if (no_module && capable(CAP_SYS_MODULE)) {
-+#ifdef CONFIG_GRKERNSEC_MODHARDEN
-+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
-+#else
- if (!request_module("%s", name))
- pr_err("Loading kernel module for a network device "
- "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
- "instead\n", name);
-+#endif
- }
- }
- EXPORT_SYMBOL(dev_load);
-@@ -1597,7 +1601,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
- {
- if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
- if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
-- atomic_long_inc(&dev->rx_dropped);
-+ atomic_long_inc_unchecked(&dev->rx_dropped);
- kfree_skb(skb);
- return NET_RX_DROP;
- }
-@@ -1607,7 +1611,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
- nf_reset(skb);
-
- if (unlikely(!is_skb_forwardable(dev, skb))) {
-- atomic_long_inc(&dev->rx_dropped);
-+ atomic_long_inc_unchecked(&dev->rx_dropped);
- kfree_skb(skb);
- return NET_RX_DROP;
- }
-@@ -1961,13 +1965,13 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
-
- if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
- struct net_device *dev = skb->dev;
-- struct ethtool_drvinfo info = {};
-+ const char *driver = "";
-
-- if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
-- dev->ethtool_ops->get_drvinfo(dev, &info);
-+ if (dev && dev->dev.parent)
-+ driver = dev_driver_string(dev->dev.parent);
-
- WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
-- info.driver, dev ? dev->features : 0L,
-+ driver, dev ? dev->features : 0L,
- skb->sk ? skb->sk->sk_route_caps : 0L,
- skb->len, skb->data_len, skb->ip_summed);
-
-@@ -2048,7 +2052,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
-
- struct dev_gso_cb {
- void (*destructor)(struct sk_buff *skb);
--};
-+} __no_const;
-
- #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
-
-@@ -2972,7 +2976,7 @@ enqueue:
-
- local_irq_restore(flags);
-
-- atomic_long_inc(&skb->dev->rx_dropped);
-+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
- kfree_skb(skb);
- return NET_RX_DROP;
- }
-@@ -3046,7 +3050,7 @@ int netif_rx_ni(struct sk_buff *skb)
- }
- EXPORT_SYMBOL(netif_rx_ni);
-
--static void net_tx_action(struct softirq_action *h)
-+static __latent_entropy void net_tx_action(void)
- {
- struct softnet_data *sd = &__get_cpu_var(softnet_data);
-
-@@ -3345,7 +3349,7 @@ ncls:
- if (pt_prev) {
- ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
- } else {
-- atomic_long_inc(&skb->dev->rx_dropped);
-+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
- kfree_skb(skb);
- /* Jamal, now you will not able to escape explaining
- * me how you were going to use this. :-)
-@@ -3911,7 +3915,7 @@ void netif_napi_del(struct napi_struct *napi)
- }
- EXPORT_SYMBOL(netif_napi_del);
-
--static void net_rx_action(struct softirq_action *h)
-+static __latent_entropy void net_rx_action(void)
- {
- struct softnet_data *sd = &__get_cpu_var(softnet_data);
- unsigned long time_limit = jiffies + 2;
-@@ -4189,7 +4193,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
- struct rtnl_link_stats64 temp;
- const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
-
-- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
-+ if (gr_proc_is_restricted())
-+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
-+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
-+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
-+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
-+ else
-+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
- "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
- dev->name, stats->rx_bytes, stats->rx_packets,
- stats->rx_errors,
-@@ -4264,7 +4274,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
- return 0;
- }
-
--static const struct seq_operations dev_seq_ops = {
-+const struct seq_operations dev_seq_ops = {
- .start = dev_seq_start,
- .next = dev_seq_next,
- .stop = dev_seq_stop,
-@@ -4294,7 +4304,7 @@ static const struct seq_operations softnet_seq_ops = {
-
- static int softnet_seq_open(struct inode *inode, struct file *file)
- {
-- return seq_open(file, &softnet_seq_ops);
-+ return seq_open_restrict(file, &softnet_seq_ops);
- }
-
- static const struct file_operations softnet_seq_fops = {
-@@ -4381,8 +4391,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
- else
- seq_printf(seq, "%04x", ntohs(pt->type));
-
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ seq_printf(seq, " %-8s %p\n",
-+ pt->dev ? pt->dev->name : "", NULL);
-+#else
- seq_printf(seq, " %-8s %pF\n",
- pt->dev ? pt->dev->name : "", pt->func);
-+#endif
- }
-
- return 0;
-@@ -4444,7 +4459,7 @@ static void __net_exit dev_proc_net_exit(struct net *net)
- proc_net_remove(net, "dev");
- }
-
--static struct pernet_operations __net_initdata dev_proc_ops = {
-+static struct pernet_operations __net_initconst dev_proc_ops = {
- .init = dev_proc_net_init,
- .exit = dev_proc_net_exit,
- };
-@@ -5939,7 +5954,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
- } else {
- netdev_stats_to_stats64(storage, &dev->stats);
- }
-- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
-+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
- return storage;
- }
- EXPORT_SYMBOL(dev_get_stats);
-@@ -6528,7 +6543,7 @@ static void __net_exit netdev_exit(struct net *net)
- kfree(net->dev_index_head);
- }
-
--static struct pernet_operations __net_initdata netdev_net_ops = {
-+static struct pernet_operations __net_initconst netdev_net_ops = {
- .init = netdev_init,
- .exit = netdev_exit,
- };
-@@ -6590,7 +6605,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
- rtnl_unlock();
- }
-
--static struct pernet_operations __net_initdata default_device_ops = {
-+static struct pernet_operations __net_initconst default_device_ops = {
- .exit = default_device_exit,
- .exit_batch = default_device_exit_batch,
- };
-diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
-index cd09414..d070f83 100644
---- a/net/core/dev_addr_lists.c
-+++ b/net/core/dev_addr_lists.c
-@@ -723,7 +723,7 @@ static void __net_exit dev_mc_net_exit(struct net *net)
- proc_net_remove(net, "dev_mcast");
- }
-
--static struct pernet_operations __net_initdata dev_mc_net_ops = {
-+static struct pernet_operations __net_initconst dev_mc_net_ops = {
- .init = dev_mc_net_init,
- .exit = dev_mc_net_exit,
- };
-diff --git a/net/core/ethtool.c b/net/core/ethtool.c
-index 2367246..4a0a677 100644
---- a/net/core/ethtool.c
-+++ b/net/core/ethtool.c
-@@ -1612,10 +1612,19 @@ static int ethtool_get_dump_data(struct net_device *dev,
- if (ret)
- return ret;
-
-- len = (tmp.len > dump.len) ? dump.len : tmp.len;
-+ len = min(tmp.len, dump.len);
- if (!len)
- return -EFAULT;
-
-+ /* Don't ever let the driver think there's more space available
-+ * than it requested with .get_dump_flag().
-+ */
-+ dump.len = len;
-+
-+ /* Always allocate enough space to hold the whole thing so that the
-+ * driver does not need to check the length and bother with partial
-+ * dumping.
-+ */
- data = vzalloc(tmp.len);
- if (!data)
- return -ENOMEM;
-@@ -1623,6 +1632,16 @@ static int ethtool_get_dump_data(struct net_device *dev,
- if (ret)
- goto out;
-
-+ /* There are two sane possibilities:
-+ * 1. The driver's .get_dump_data() does not touch dump.len.
-+ * 2. Or it may set dump.len to how much it really writes, which
-+ * should be tmp.len (or len if it can do a partial dump).
-+ * In any case respond to userspace with the actual length of data
-+ * it's receiving.
-+ */
-+ WARN_ON(dump.len != len && dump.len != tmp.len);
-+ dump.len = len;
-+
- if (copy_to_user(useraddr, &dump, sizeof(dump))) {
- ret = -EFAULT;
- goto out;
-diff --git a/net/core/filter.c b/net/core/filter.c
-index 9c88080..403ac26c 100644
---- a/net/core/filter.c
-+++ b/net/core/filter.c
-@@ -39,6 +39,7 @@
- #include <linux/filter.h>
- #include <linux/reciprocal_div.h>
- #include <linux/ratelimit.h>
-+#include <linux/seccomp.h>
-
- /* No hurry in this branch */
- static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
-@@ -115,7 +116,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
- void *ptr;
- u32 A = 0; /* Accumulator */
- u32 X = 0; /* Index Register */
-- u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */
-+ u32 mem[BPF_MEMWORDS] = {}; /* Scratch Memory Store */
- u32 tmp;
- int k;
-
-@@ -266,10 +267,10 @@ load_b:
- X = K;
- continue;
- case BPF_S_LD_MEM:
-- A = mem[K];
-+ A = mem[K&15];
- continue;
- case BPF_S_LDX_MEM:
-- X = mem[K];
-+ X = mem[K&15];
- continue;
- case BPF_S_MISC_TAX:
- X = A;
-@@ -282,10 +283,10 @@ load_b:
- case BPF_S_RET_A:
- return A;
- case BPF_S_ST:
-- mem[K] = A;
-+ mem[K&15] = A;
- continue;
- case BPF_S_STX:
-- mem[K] = X;
-+ mem[K&15] = X;
- continue;
- case BPF_S_ANC_PROTOCOL:
- A = ntohs(skb->protocol);
-@@ -354,10 +355,16 @@ load_b:
- A = 0;
- continue;
- }
-+#ifdef CONFIG_SECCOMP_FILTER
-+ case BPF_S_ANC_SECCOMP_LD_W:
-+ A = seccomp_bpf_load(fentry->k);
-+ continue;
-+#endif
- default:
-- WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
-+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
- fentry->code, fentry->jt,
- fentry->jf, fentry->k);
-+ BUG();
- return 0;
- }
- }
-@@ -380,7 +387,7 @@ static int check_load_and_stores(struct sock_filter *filter, int flen)
- u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
- int pc, ret = 0;
-
-- BUILD_BUG_ON(BPF_MEMWORDS > 16);
-+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
- masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
- if (!masks)
- return -ENOMEM;
-@@ -494,6 +501,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
- [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
- };
- int pc;
-+ bool anc_found;
-
- if (flen == 0 || flen > BPF_MAXINSNS)
- return -EINVAL;
-@@ -549,8 +557,10 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
- case BPF_S_LD_W_ABS:
- case BPF_S_LD_H_ABS:
- case BPF_S_LD_B_ABS:
-+ anc_found = false;
- #define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
- code = BPF_S_ANC_##CODE; \
-+ anc_found = true; \
- break
- switch (ftest->k) {
- ANCILLARY(PROTOCOL);
-@@ -564,6 +574,10 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
- ANCILLARY(RXHASH);
- ANCILLARY(CPU);
- }
-+
-+ /* ancillary operation unknown or unsupported */
-+ if (anc_found == false && ftest->k >= SKF_AD_OFF)
-+ return -EINVAL;
- }
- ftest->code = code;
- }
-diff --git a/net/core/flow.c b/net/core/flow.c
-index e318c7e..168b1d0 100644
---- a/net/core/flow.c
-+++ b/net/core/flow.c
-@@ -61,7 +61,7 @@ struct flow_cache {
- struct timer_list rnd_timer;
- };
-
--atomic_t flow_cache_genid = ATOMIC_INIT(0);
-+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
- EXPORT_SYMBOL(flow_cache_genid);
- static struct flow_cache flow_cache_global;
- static struct kmem_cache *flow_cachep __read_mostly;
-@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
-
- static int flow_entry_valid(struct flow_cache_entry *fle)
- {
-- if (atomic_read(&flow_cache_genid) != fle->genid)
-+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
- return 0;
- if (fle->object && !fle->object->ops->check(fle->object))
- return 0;
-@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
- hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
- fcp->hash_count++;
- }
-- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
-+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
- flo = fle->object;
- if (!flo)
- goto ret_object;
-@@ -280,7 +280,7 @@ nocache:
- }
- flo = resolver(net, key, family, dir, flo, ctx);
- if (fle) {
-- fle->genid = atomic_read(&flow_cache_genid);
-+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
- if (!IS_ERR(flo))
- fle->object = flo;
- else
-diff --git a/net/core/iovec.c b/net/core/iovec.c
-index 66e3f1f..317ae80 100644
---- a/net/core/iovec.c
-+++ b/net/core/iovec.c
-@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
- if (m->msg_name && m->msg_namelen) {
- if (mode == VERIFY_READ) {
- void __user *namep;
-- namep = (void __user __force *) m->msg_name;
-+ namep = (void __force_user *) m->msg_name;
- err = move_addr_to_kernel(namep, m->msg_namelen,
- address);
- if (err < 0)
-@@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
- }
-
- size = m->msg_iovlen * sizeof(struct iovec);
-- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
-+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
- return -EFAULT;
-
- m->msg_iov = iov;
-diff --git a/net/core/neighbour.c b/net/core/neighbour.c
-index 0ea3fd3..d87fef1 100644
---- a/net/core/neighbour.c
-+++ b/net/core/neighbour.c
-@@ -2803,11 +2803,12 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
- /* Terminate the table early */
- memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
- } else {
-+ struct neigh_table *ntable = container_of(p, struct neigh_table, parms);
- dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
-- t->neigh_vars[14].data = (int *)(p + 1);
-- t->neigh_vars[15].data = (int *)(p + 1) + 1;
-- t->neigh_vars[16].data = (int *)(p + 1) + 2;
-- t->neigh_vars[17].data = (int *)(p + 1) + 3;
-+ t->neigh_vars[14].data = &ntable->gc_interval;
-+ t->neigh_vars[15].data = &ntable->gc_thresh1;
-+ t->neigh_vars[16].data = &ntable->gc_thresh2;
-+ t->neigh_vars[17].data = &ntable->gc_thresh3;
- }
-
-
-diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
-index 0329404..ab4e13a 100644
---- a/net/core/net-sysfs.c
-+++ b/net/core/net-sysfs.c
-@@ -1334,7 +1334,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
- }
- EXPORT_SYMBOL(netdev_class_remove_file);
-
--int netdev_kobject_init(void)
-+int __init netdev_kobject_init(void)
- {
- kobj_ns_type_register(&net_ns_type_operations);
- return class_register(&net_class);
-diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
-index dd00b71..74d1779 100644
---- a/net/core/net_namespace.c
-+++ b/net/core/net_namespace.c
-@@ -422,7 +422,7 @@ static int __register_pernet_operations(struct list_head *list,
- int error;
- LIST_HEAD(net_exit_list);
-
-- list_add_tail(&ops->list, list);
-+ pax_list_add_tail((struct list_head *)&ops->list, list);
- if (ops->init || (ops->id && ops->size)) {
- for_each_net(net) {
- error = ops_init(ops, net);
-@@ -435,7 +435,7 @@ static int __register_pernet_operations(struct list_head *list,
-
- out_undo:
- /* If I have an error cleanup all namespaces I initialized */
-- list_del(&ops->list);
-+ pax_list_del((struct list_head *)&ops->list);
- ops_exit_list(ops, &net_exit_list);
- ops_free_list(ops, &net_exit_list);
- return error;
-@@ -446,7 +446,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
- struct net *net;
- LIST_HEAD(net_exit_list);
-
-- list_del(&ops->list);
-+ pax_list_del((struct list_head *)&ops->list);
- for_each_net(net)
- list_add_tail(&net->exit_list, &net_exit_list);
- ops_exit_list(ops, &net_exit_list);
-@@ -580,7 +580,7 @@ int register_pernet_device(struct pernet_operations *ops)
- mutex_lock(&net_mutex);
- error = register_pernet_operations(&pernet_list, ops);
- if (!error && (first_device == &pernet_list))
-- first_device = &ops->list;
-+ first_device = (struct list_head *)&ops->list;
- mutex_unlock(&net_mutex);
- return error;
- }
-diff --git a/net/core/pktgen.c b/net/core/pktgen.c
-index 80aeac9..b08d0a8 100644
---- a/net/core/pktgen.c
-+++ b/net/core/pktgen.c
-@@ -3726,7 +3726,7 @@ static int __init pg_init(void)
-
- pr_info("%s", version);
-
-- pg_proc_dir = proc_mkdir(PG_PROC_DIR, init_net.proc_net);
-+ pg_proc_dir = proc_mkdir_restrict(PG_PROC_DIR, init_net.proc_net);
- if (!pg_proc_dir)
- return -ENODEV;
-
-diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
-index 5b412f0..595dfcd 100644
---- a/net/core/rtnetlink.c
-+++ b/net/core/rtnetlink.c
-@@ -57,7 +57,7 @@ struct rtnl_link {
- rtnl_doit_func doit;
- rtnl_dumpit_func dumpit;
- rtnl_calcit_func calcit;
--};
-+} __no_const;
-
- static DEFINE_MUTEX(rtnl_mutex);
-
-@@ -284,10 +284,13 @@ static LIST_HEAD(link_ops);
- */
- int __rtnl_link_register(struct rtnl_link_ops *ops)
- {
-- if (!ops->dellink)
-- ops->dellink = unregister_netdevice_queue;
-+ if (!ops->dellink) {
-+ pax_open_kernel();
-+ *(void **)&ops->dellink = unregister_netdevice_queue;
-+ pax_close_kernel();
-+ }
-
-- list_add_tail(&ops->list, &link_ops);
-+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
- return 0;
- }
- EXPORT_SYMBOL_GPL(__rtnl_link_register);
-@@ -334,7 +337,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
- for_each_net(net) {
- __rtnl_kill_links(net, ops);
- }
-- list_del(&ops->list);
-+ pax_list_del((struct list_head *)&ops->list);
- }
- EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
-
-@@ -1484,10 +1487,13 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
- goto errout;
-
- nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
-- if (nla_type(attr) != IFLA_VF_PORT)
-- continue;
-- err = nla_parse_nested(port, IFLA_PORT_MAX,
-- attr, ifla_port_policy);
-+ if (nla_type(attr) != IFLA_VF_PORT ||
-+ nla_len(attr) < NLA_HDRLEN) {
-+ err = -EINVAL;
-+ goto errout;
-+ }
-+ err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
-+ ifla_port_policy);
- if (err < 0)
- goto errout;
- if (!port[IFLA_PORT_VF]) {
-diff --git a/net/core/scm.c b/net/core/scm.c
-index ff52ad0..aff1c0f 100644
---- a/net/core/scm.c
-+++ b/net/core/scm.c
-@@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
- int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
- {
- struct cmsghdr __user *cm
-- = (__force struct cmsghdr __user *)msg->msg_control;
-+ = (struct cmsghdr __force_user *)msg->msg_control;
- struct cmsghdr cmhdr;
- int cmlen = CMSG_LEN(len);
- int err;
-@@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
- err = -EFAULT;
- if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
- goto out;
-- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
-+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
- goto out;
- cmlen = CMSG_SPACE(len);
- if (msg->msg_controllen < cmlen)
-@@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
- void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
- {
- struct cmsghdr __user *cm
-- = (__force struct cmsghdr __user*)msg->msg_control;
-+ = (struct cmsghdr __force_user *)msg->msg_control;
-
- int fdmax = 0;
- int fdnum = scm->fp->count;
-@@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
- if (fdnum < fdmax)
- fdmax = fdnum;
-
-- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
-+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
- i++, cmfptr++)
- {
- int new_fd;
-diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
-index fd53919..2b0810e 100644
---- a/net/core/secure_seq.c
-+++ b/net/core/secure_seq.c
-@@ -12,12 +12,10 @@
-
- static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
-
--static int __init net_secret_init(void)
-+void net_secret_init(void)
- {
- get_random_bytes(net_secret, sizeof(net_secret));
-- return 0;
- }
--late_initcall(net_secret_init);
-
- #ifdef CONFIG_INET
- static u32 seq_scale(u32 seq)
-diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index 7121d9b..d256e3c 100644
---- a/net/core/skbuff.c
-+++ b/net/core/skbuff.c
-@@ -2876,13 +2876,15 @@ void __init skb_init(void)
- skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
- sizeof(struct sk_buff),
- 0,
-- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
-+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
-+ SLAB_NO_SANITIZE,
- NULL);
- skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
- (2*sizeof(struct sk_buff)) +
- sizeof(atomic_t),
- 0,
-- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
-+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
-+ SLAB_NO_SANITIZE,
- NULL);
- }
-
-diff --git a/net/core/sock.c b/net/core/sock.c
-index e093528..3966d08 100644
---- a/net/core/sock.c
-+++ b/net/core/sock.c
-@@ -289,7 +289,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
- struct sk_buff_head *list = &sk->sk_receive_queue;
-
- if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- trace_sock_rcvqueue_full(sk, skb);
- return -ENOMEM;
- }
-@@ -299,7 +299,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
- return err;
-
- if (!sk_rmem_schedule(sk, skb->truesize)) {
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- return -ENOBUFS;
- }
-
-@@ -319,7 +319,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
- skb_dst_force(skb);
-
- spin_lock_irqsave(&list->lock, flags);
-- skb->dropcount = atomic_read(&sk->sk_drops);
-+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
- __skb_queue_tail(list, skb);
- spin_unlock_irqrestore(&list->lock, flags);
-
-@@ -339,7 +339,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
- skb->dev = NULL;
-
- if (sk_rcvqueues_full(sk, skb)) {
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- goto discard_and_relse;
- }
- if (nested)
-@@ -357,7 +357,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
- mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
- } else if (sk_add_backlog(sk, skb)) {
- bh_unlock_sock(sk);
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- goto discard_and_relse;
- }
-
-@@ -406,7 +406,7 @@ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
- }
- EXPORT_SYMBOL(sk_dst_check);
-
--static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
-+static int sock_bindtodevice(struct sock *sk, char __user *optval, unsigned int optlen)
- {
- int ret = -ENOPROTOOPT;
- #ifdef CONFIG_NETDEVICES
-@@ -420,7 +420,7 @@ static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
- goto out;
-
- ret = -EINVAL;
-- if (optlen < 0)
-+ if (optlen > INT_MAX)
- goto out;
-
- /* Bind this socket to a particular device like "eth0",
-@@ -786,12 +786,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
- struct timeval tm;
- } v;
-
-- int lv = sizeof(int);
-- int len;
-+ unsigned int lv = sizeof(int);
-+ unsigned int len;
-
- if (get_user(len, optlen))
- return -EFAULT;
-- if (len < 0)
-+ if (len > INT_MAX)
- return -EINVAL;
-
- memset(&v, 0, sizeof(v));
-@@ -932,18 +932,18 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
- if (len > sizeof(peercred))
- len = sizeof(peercred);
- cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
-- if (copy_to_user(optval, &peercred, len))
-+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
- return -EFAULT;
- goto lenout;
- }
-
- case SO_PEERNAME:
- {
-- char address[128];
-+ char address[_K_SS_MAXSIZE];
-
- if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
- return -ENOTCONN;
-- if (lv < len)
-+ if (lv < len || sizeof address < len)
- return -EINVAL;
- if (copy_to_user(optval, address, len))
- return -EFAULT;
-@@ -978,7 +978,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
-
- if (len > lv)
- len = lv;
-- if (copy_to_user(optval, &v, len))
-+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
- return -EFAULT;
- lenout:
- if (put_user(len, optlen))
-@@ -1455,6 +1455,8 @@ EXPORT_SYMBOL(sock_kmalloc);
- */
- void sock_kfree_s(struct sock *sk, void *mem, int size)
- {
-+ if (WARN_ON_ONCE(!mem))
-+ return;
- kfree(mem);
- atomic_sub(size, &sk->sk_omem_alloc);
- }
-@@ -2026,7 +2028,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
- */
- smp_wmb();
- atomic_set(&sk->sk_refcnt, 1);
-- atomic_set(&sk->sk_drops, 0);
-+ atomic_set_unchecked(&sk->sk_drops, 0);
- }
- EXPORT_SYMBOL(sock_init_data);
-
-@@ -2563,7 +2565,7 @@ static __net_exit void proto_exit_net(struct net *net)
- }
-
-
--static __net_initdata struct pernet_operations proto_net_ops = {
-+static __net_initconst struct pernet_operations proto_net_ops = {
- .init = proto_init_net,
- .exit = proto_exit_net,
- };
-diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
-index f9496c4..5345e58 100644
---- a/net/core/sysctl_net_core.c
-+++ b/net/core/sysctl_net_core.c
-@@ -30,7 +30,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
- {
- unsigned int orig_size, size;
- int ret, i;
-- ctl_table tmp = {
-+ ctl_table_no_const tmp = {
- .data = &size,
- .maxlen = sizeof(size),
- .mode = table->mode
-@@ -216,29 +216,27 @@ __net_initdata struct ctl_path net_core_path[] = {
-
- static __net_init int sysctl_core_net_init(struct net *net)
- {
-- struct ctl_table *tbl;
-+ ctl_table_no_const *tbl = NULL;
-
- net->core.sysctl_somaxconn = SOMAXCONN;
-
-- tbl = netns_core_table;
- if (!net_eq(net, &init_net)) {
-- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
-+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
- if (tbl == NULL)
- goto err_dup;
-
- tbl[0].data = &net->core.sysctl_somaxconn;
-- }
-+ net->core.sysctl_hdr = register_net_sysctl_table(net, net_core_path, tbl);
-+ } else
-+ net->core.sysctl_hdr = register_net_sysctl_table(net, net_core_path, netns_core_table);
-
-- net->core.sysctl_hdr = register_net_sysctl_table(net,
-- net_core_path, tbl);
- if (net->core.sysctl_hdr == NULL)
- goto err_reg;
-
- return 0;
-
- err_reg:
-- if (tbl != netns_core_table)
-- kfree(tbl);
-+ kfree(tbl);
- err_dup:
- return -ENOMEM;
- }
-@@ -253,7 +251,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
- kfree(tbl);
- }
-
--static __net_initdata struct pernet_operations sysctl_core_ops = {
-+static __net_initconst struct pernet_operations sysctl_core_ops = {
- .init = sysctl_core_net_init,
- .exit = sysctl_core_net_exit,
- };
-diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
-index 16fbf8c..ff4b0fc 100644
---- a/net/decnet/af_decnet.c
-+++ b/net/decnet/af_decnet.c
-@@ -469,6 +469,7 @@ static struct proto dn_proto = {
- .sysctl_rmem = sysctl_decnet_rmem,
- .max_header = DN_MAX_NSP_DATA_HEADER + 64,
- .obj_size = sizeof(struct dn_sock),
-+ .slab_flags = SLAB_USERCOPY,
- };
-
- static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
-diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
-index 74d321a..3314f68 100644
---- a/net/decnet/dn_dev.c
-+++ b/net/decnet/dn_dev.c
-@@ -201,7 +201,7 @@ static struct dn_dev_sysctl_table {
- .extra1 = &min_t3,
- .extra2 = &max_t3
- },
-- {0}
-+ { }
- },
- };
-
-diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
-index d50a13c..1f612ff 100644
---- a/net/decnet/sysctl_net_decnet.c
-+++ b/net/decnet/sysctl_net_decnet.c
-@@ -175,7 +175,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
-
- if (len > *lenp) len = *lenp;
-
-- if (copy_to_user(buffer, addr, len))
-+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
- return -EFAULT;
-
- *lenp = len;
-@@ -238,7 +238,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
-
- if (len > *lenp) len = *lenp;
-
-- if (copy_to_user(buffer, devname, len))
-+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
- return -EFAULT;
-
- *lenp = len;
-diff --git a/net/econet/Kconfig b/net/econet/Kconfig
-index 39a2d29..f39c0fe 100644
---- a/net/econet/Kconfig
-+++ b/net/econet/Kconfig
-@@ -4,7 +4,7 @@
-
- config ECONET
- tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
-- depends on EXPERIMENTAL && INET
-+ depends on EXPERIMENTAL && INET && BROKEN
- ---help---
- Econet is a fairly old and slow networking protocol mainly used by
- Acorn computers to access file and print servers. It uses native
-diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
-index 5d42df2..e6c3389 100644
---- a/net/ieee802154/6lowpan.c
-+++ b/net/ieee802154/6lowpan.c
-@@ -329,7 +329,7 @@ static int lowpan_header_create(struct sk_buff *skb,
- hc06_ptr += 3;
- } else {
- /* compress nothing */
-- memcpy(hc06_ptr, &hdr, 4);
-+ memcpy(hc06_ptr, hdr, 4);
- /* replace the top byte with new ECN | DSCP format */
- *hc06_ptr = tmp;
- hc06_ptr += 4;
-@@ -837,7 +837,7 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
- dev_put(real_dev);
- }
-
--static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
-+static struct rtnl_link_ops lowpan_link_ops = {
- .kind = "lowpan",
- .priv_size = sizeof(struct lowpan_dev_info),
- .setup = lowpan_setup,
-diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
-index 5d228de..91bdee5 100644
---- a/net/ipv4/af_inet.c
-+++ b/net/ipv4/af_inet.c
-@@ -114,6 +114,7 @@
- #include <net/inet_common.h>
- #include <net/xfrm.h>
- #include <net/net_namespace.h>
-+#include <net/secure_seq.h>
- #ifdef CONFIG_IP_MROUTE
- #include <linux/mroute.h>
- #endif
-@@ -241,8 +242,10 @@ void build_ehash_secret(void)
- get_random_bytes(&rnd, sizeof(rnd));
- } while (rnd == 0);
-
-- if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
-+ if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0) {
- get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
-+ net_secret_init();
-+ }
- }
- EXPORT_SYMBOL(build_ehash_secret);
-
-@@ -1612,7 +1615,7 @@ static __net_exit void ipv4_mib_exit_net(struct net *net)
- snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
- }
-
--static __net_initdata struct pernet_operations ipv4_mib_ops = {
-+static __net_initconst struct pernet_operations ipv4_mib_ops = {
- .init = ipv4_mib_init_net,
- .exit = ipv4_mib_exit_net,
- };
-@@ -1646,13 +1649,9 @@ static int __init inet_init(void)
-
- BUILD_BUG_ON(sizeof(struct inet_skb_parm) > sizeof(dummy_skb->cb));
-
-- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
-- if (!sysctl_local_reserved_ports)
-- goto out;
--
- rc = proto_register(&tcp_prot, 1);
- if (rc)
-- goto out_free_reserved_ports;
-+ goto out;
-
- rc = proto_register(&udp_prot, 1);
- if (rc)
-@@ -1759,8 +1758,6 @@ out_unregister_udp_proto:
- proto_unregister(&udp_prot);
- out_unregister_tcp_proto:
- proto_unregister(&tcp_prot);
--out_free_reserved_ports:
-- kfree(sysctl_local_reserved_ports);
- goto out;
- }
-
-diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
-index d1f56e1..0aee701 100644
---- a/net/ipv4/arp.c
-+++ b/net/ipv4/arp.c
-@@ -947,24 +947,25 @@ static void parp_redo(struct sk_buff *skb)
- static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev)
- {
-- struct arphdr *arp;
-+ const struct arphdr *arp;
-
-- /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
-- if (!pskb_may_pull(skb, arp_hdr_len(dev)))
-- goto freeskb;
--
-- arp = arp_hdr(skb);
-- if (arp->ar_hln != dev->addr_len ||
-- dev->flags & IFF_NOARP ||
-+ if (dev->flags & IFF_NOARP ||
- skb->pkt_type == PACKET_OTHERHOST ||
-- skb->pkt_type == PACKET_LOOPBACK ||
-- arp->ar_pln != 4)
-+ skb->pkt_type == PACKET_LOOPBACK)
- goto freeskb;
-
- skb = skb_share_check(skb, GFP_ATOMIC);
-- if (skb == NULL)
-+ if (!skb)
- goto out_of_mem;
-
-+ /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
-+ if (!pskb_may_pull(skb, arp_hdr_len(dev)))
-+ goto freeskb;
-+
-+ arp = arp_hdr(skb);
-+ if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4)
-+ goto freeskb;
-+
- memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
-
- return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);
-diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
-index e41c40f..f476dfd6 100644
---- a/net/ipv4/devinet.c
-+++ b/net/ipv4/devinet.c
-@@ -68,7 +68,8 @@
-
- static struct ipv4_devconf ipv4_devconf = {
- .data = {
-- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
-+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
-+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
- [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
- [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
- [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
-@@ -77,7 +78,8 @@ static struct ipv4_devconf ipv4_devconf = {
-
- static struct ipv4_devconf ipv4_devconf_dflt = {
- .data = {
-- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
-+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
-+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
- [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
- [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
- [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
-@@ -827,9 +829,9 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
- if (!ifa) {
- ret = -ENOBUFS;
- ifa = inet_alloc_ifa();
-+ if (!ifa)
-+ break;
- INIT_HLIST_NODE(&ifa->hash);
-- if (!ifa)
-- break;
- if (colon)
- memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
- else
-@@ -1584,7 +1586,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
- #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
- DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
-
--static struct devinet_sysctl_table {
-+static const struct devinet_sysctl_table {
- struct ctl_table_header *sysctl_header;
- struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
- char *dev_name;
-@@ -1729,7 +1731,7 @@ static __net_init int devinet_init_net(struct net *net)
- int err;
- struct ipv4_devconf *all, *dflt;
- #ifdef CONFIG_SYSCTL
-- struct ctl_table *tbl = ctl_forward_entry;
-+ ctl_table_no_const *tbl = NULL;
- struct ctl_table_header *forw_hdr;
- #endif
-
-@@ -1747,7 +1749,7 @@ static __net_init int devinet_init_net(struct net *net)
- goto err_alloc_dflt;
-
- #ifdef CONFIG_SYSCTL
-- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
-+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
- if (tbl == NULL)
- goto err_alloc_ctl;
-
-@@ -1767,7 +1769,10 @@ static __net_init int devinet_init_net(struct net *net)
- goto err_reg_dflt;
-
- err = -ENOMEM;
-- forw_hdr = register_net_sysctl_table(net, net_ipv4_path, tbl);
-+ if (!net_eq(net, &init_net))
-+ forw_hdr = register_net_sysctl_table(net, net_ipv4_path, tbl);
-+ else
-+ forw_hdr = register_net_sysctl_table(net, net_ipv4_path, ctl_forward_entry);
- if (forw_hdr == NULL)
- goto err_reg_ctl;
- net->ipv4.forw_hdr = forw_hdr;
-@@ -1783,8 +1788,7 @@ err_reg_ctl:
- err_reg_dflt:
- __devinet_sysctl_unregister(all);
- err_reg_all:
-- if (tbl != ctl_forward_entry)
-- kfree(tbl);
-+ kfree(tbl);
- err_alloc_ctl:
- #endif
- if (dflt != &ipv4_devconf_dflt)
-@@ -1811,7 +1815,7 @@ static __net_exit void devinet_exit_net(struct net *net)
- kfree(net->ipv4.devconf_all);
- }
-
--static __net_initdata struct pernet_operations devinet_ops = {
-+static __net_initconst struct pernet_operations devinet_ops = {
- .init = devinet_init_net,
- .exit = devinet_exit_net,
- };
-diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
-index 238fc3b..4455673 100644
---- a/net/ipv4/esp4.c
-+++ b/net/ipv4/esp4.c
-@@ -472,7 +472,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
- }
-
- return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
-- net_adj) & ~(align - 1)) + (net_adj - 2);
-+ net_adj) & ~(align - 1)) + net_adj - 2;
- }
-
- static void esp4_err(struct sk_buff *skb, u32 info)
-diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
-index 92fc5f6..b790d91 100644
---- a/net/ipv4/fib_frontend.c
-+++ b/net/ipv4/fib_frontend.c
-@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
- #ifdef CONFIG_IP_ROUTE_MULTIPATH
- fib_sync_up(dev);
- #endif
-- atomic_inc(&net->ipv4.dev_addr_genid);
-+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
- rt_cache_flush(dev_net(dev), -1);
- break;
- case NETDEV_DOWN:
- fib_del_ifaddr(ifa, NULL);
-- atomic_inc(&net->ipv4.dev_addr_genid);
-+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
- if (ifa->ifa_dev->ifa_list == NULL) {
- /* Last address was deleted from this interface.
- * Disable IP.
-@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
- #ifdef CONFIG_IP_ROUTE_MULTIPATH
- fib_sync_up(dev);
- #endif
-- atomic_inc(&net->ipv4.dev_addr_genid);
-+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
- rt_cache_flush(dev_net(dev), -1);
- break;
- case NETDEV_DOWN:
-diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
-index 1cdb4a9..b5efed8 100644
---- a/net/ipv4/fib_semantics.c
-+++ b/net/ipv4/fib_semantics.c
-@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
- nh->nh_saddr = inet_select_addr(nh->nh_dev,
- nh->nh_gw,
- nh->nh_parent->fib_scope);
-- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
-+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
-
- return nh->nh_saddr;
- }
-diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
-index ab188ae..662585c 100644
---- a/net/ipv4/icmp.c
-+++ b/net/ipv4/icmp.c
-@@ -1195,7 +1195,7 @@ fail:
- return err;
- }
-
--static struct pernet_operations __net_initdata icmp_sk_ops = {
-+static struct pernet_operations __net_initconst icmp_sk_ops = {
- .init = icmp_sk_init,
- .exit = icmp_sk_exit,
- };
-diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
-index 907ef2c..eba7111 100644
---- a/net/ipv4/inet_connection_sock.c
-+++ b/net/ipv4/inet_connection_sock.c
-@@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
- .range = { 32768, 61000 },
- };
-
--unsigned long *sysctl_local_reserved_ports;
-+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
- EXPORT_SYMBOL(sysctl_local_reserved_ports);
-
- void inet_get_local_port_range(int *low, int *high)
-diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
-index 6be5e8e..daa1ffb 100644
---- a/net/ipv4/inet_diag.c
-+++ b/net/ipv4/inet_diag.c
-@@ -71,6 +71,19 @@ static inline void inet_diag_unlock_handler(
- mutex_unlock(&inet_diag_table_mutex);
- }
-
-+static size_t inet_sk_attr_size(void)
-+{
-+ return nla_total_size(sizeof(struct tcp_info))
-+ + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
-+ + nla_total_size(1) /* INET_DIAG_TOS */
-+ + nla_total_size(1) /* INET_DIAG_TCLASS */
-+ + nla_total_size(sizeof(struct inet_diag_meminfo))
-+ + nla_total_size(sizeof(struct inet_diag_msg))
-+ + nla_total_size(TCP_CA_NAME_MAX)
-+ + nla_total_size(sizeof(struct tcpvegas_info))
-+ + 64;
-+}
-+
- static int inet_csk_diag_fill(struct sock *sk,
- struct sk_buff *skb,
- int ext, u32 pid, u32 seq, u16 nlmsg_flags,
-@@ -114,8 +127,14 @@ static int inet_csk_diag_fill(struct sock *sk,
- r->idiag_retrans = 0;
-
- r->id.idiag_if = sk->sk_bound_dev_if;
-+
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ r->id.idiag_cookie[0] = 0;
-+ r->id.idiag_cookie[1] = 0;
-+#else
- r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
- r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
-+#endif
-
- r->id.idiag_sport = inet->inet_sport;
- r->id.idiag_dport = inet->inet_dport;
-@@ -215,8 +234,14 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
- r->idiag_retrans = 0;
-
- r->id.idiag_if = tw->tw_bound_dev_if;
-+
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ r->id.idiag_cookie[0] = 0;
-+ r->id.idiag_cookie[1] = 0;
-+#else
- r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
- r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
-+#endif
-
- r->id.idiag_sport = tw->tw_sport;
- r->id.idiag_dport = tw->tw_dport;
-@@ -305,18 +330,17 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
- if (sk == NULL)
- goto unlock;
-
-+#ifndef CONFIG_GRKERNSEC_HIDESYM
- err = -ESTALE;
- if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
- req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
- ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
- (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
- goto out;
-+#endif
-
- err = -ENOMEM;
-- rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
-- sizeof(struct inet_diag_meminfo) +
-- handler->idiag_info_size + 64)),
-- GFP_KERNEL);
-+ rep = alloc_skb(inet_sk_attr_size(), GFP_KERNEL);
- if (!rep)
- goto out;
-
-@@ -600,8 +624,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
- r->idiag_retrans = req->retrans;
-
- r->id.idiag_if = sk->sk_bound_dev_if;
-+
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ r->id.idiag_cookie[0] = 0;
-+ r->id.idiag_cookie[1] = 0;
-+#else
- r->id.idiag_cookie[0] = (u32)(unsigned long)req;
- r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
-+#endif
-
- tmo = req->expires - jiffies;
- if (tmo < 0)
-diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
-index 4afcf31..a15c188 100644
---- a/net/ipv4/inet_hashtables.c
-+++ b/net/ipv4/inet_hashtables.c
-@@ -18,12 +18,15 @@
- #include <linux/sched.h>
- #include <linux/slab.h>
- #include <linux/wait.h>
-+#include <linux/security.h>
-
- #include <net/inet_connection_sock.h>
- #include <net/inet_hashtables.h>
- #include <net/secure_seq.h>
- #include <net/ip.h>
-
-+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
-+
- /*
- * Allocate and initialize a new local port bind bucket.
- * The bindhash mutex for snum's hash chain must be held here.
-@@ -530,6 +533,8 @@ ok:
- twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
- spin_unlock(&head->lock);
-
-+ gr_update_task_in_ip_table(inet_sk(sk));
-+
- if (tw) {
- inet_twsk_deschedule(tw, death_row);
- while (twrefcnt) {
-diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
-index 9d74cc7..5a73694 100644
---- a/net/ipv4/inetpeer.c
-+++ b/net/ipv4/inetpeer.c
-@@ -473,7 +473,7 @@ relookup:
- if (p) {
- p->daddr = *daddr;
- atomic_set(&p->refcnt, 1);
-- atomic_set(&p->rid, 0);
-+ atomic_set_unchecked(&p->rid, 0);
- p->tcp_ts_stamp = 0;
- p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
- p->rate_tokens = 0;
-diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
-index 16e25a4..cbb0cd5 100644
---- a/net/ipv4/ip_fragment.c
-+++ b/net/ipv4/ip_fragment.c
-@@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
- return 0;
-
- start = qp->rid;
-- end = atomic_inc_return(&peer->rid);
-+ end = atomic_inc_return_unchecked(&peer->rid);
- qp->rid = end;
-
- rc = qp->q.fragments && (end - start) > max;
-@@ -776,21 +776,21 @@ static struct ctl_table ip4_frags_ctl_table[] = {
-
- static int __net_init ip4_frags_ns_ctl_register(struct net *net)
- {
-- struct ctl_table *table;
-+ ctl_table_no_const *table = NULL;
- struct ctl_table_header *hdr;
-
-- table = ip4_frags_ns_ctl_table;
- if (!net_eq(net, &init_net)) {
-- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
-+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
- if (table == NULL)
- goto err_alloc;
-
- table[0].data = &net->ipv4.frags.high_thresh;
- table[1].data = &net->ipv4.frags.low_thresh;
- table[2].data = &net->ipv4.frags.timeout;
-- }
-+ hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
-+ } else
-+ hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, ip4_frags_ns_ctl_table);
-
-- hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
- if (hdr == NULL)
- goto err_reg;
-
-@@ -798,8 +798,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
- return 0;
-
- err_reg:
-- if (!net_eq(net, &init_net))
-- kfree(table);
-+ kfree(table);
- err_alloc:
- return -ENOMEM;
- }
-diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
-index 5f28fab..ebd7a97 100644
---- a/net/ipv4/ip_gre.c
-+++ b/net/ipv4/ip_gre.c
-@@ -118,7 +118,7 @@
- Alexey Kuznetsov.
- */
-
--static struct rtnl_link_ops ipgre_link_ops __read_mostly;
-+static struct rtnl_link_ops ipgre_link_ops;
- static int ipgre_tunnel_init(struct net_device *dev);
- static void ipgre_tunnel_setup(struct net_device *dev);
- static int ipgre_tunnel_bind_dev(struct net_device *dev);
-@@ -1669,7 +1669,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
- [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
- };
-
--static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
-+static struct rtnl_link_ops ipgre_link_ops = {
- .kind = "gre",
- .maxtype = IFLA_GRE_MAX,
- .policy = ipgre_policy,
-@@ -1682,7 +1682,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
- .fill_info = ipgre_fill_info,
- };
-
--static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
-+static struct rtnl_link_ops ipgre_tap_ops = {
- .kind = "gretap",
- .maxtype = IFLA_GRE_MAX,
- .policy = ipgre_policy,
-diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
-index 073a9b0..8c29a4f 100644
---- a/net/ipv4/ip_input.c
-+++ b/net/ipv4/ip_input.c
-@@ -145,6 +145,10 @@
- #include <linux/mroute.h>
- #include <linux/netlink.h>
-
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+extern int grsec_enable_blackhole;
-+#endif
-+
- /*
- * Process Router Attention IP option (RFC 2113)
- */
-@@ -233,6 +237,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
- if (!raw) {
- if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
- IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
-+#endif
- icmp_send(skb, ICMP_DEST_UNREACH,
- ICMP_PROT_UNREACH, 0);
- }
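The ip_input.c hunk gates the ICMP "protocol unreachable" reply behind the grsecurity blackhole sysctl, so probes for unhandled protocols go unanswered unless they arrive on a loopback interface. A rough sketch of that gate as a standalone helper; grsec_enable_blackhole is defined elsewhere in the grsecurity code, and the helper name here is illustrative.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/icmp.h>

extern int grsec_enable_blackhole;

static void maybe_send_prot_unreach(struct sk_buff *skb)
{
	/* loopback traffic keeps the normal RFC 1122 behaviour for local debugging */
	if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0);
}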
-diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
-index 043d882..d938245 100644
---- a/net/ipv4/ip_sockglue.c
-+++ b/net/ipv4/ip_sockglue.c
-@@ -1117,7 +1117,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
- len = min_t(unsigned int, len, opt->optlen);
- if (put_user(len, optlen))
- return -EFAULT;
-- if (copy_to_user(optval, opt->__data, len))
-+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
-+ copy_to_user(optval, opt->__data, len))
- return -EFAULT;
- return 0;
- }
-@@ -1245,7 +1246,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
- if (sk->sk_type != SOCK_STREAM)
- return -ENOPROTOOPT;
-
-- msg.msg_control = optval;
-+ msg.msg_control = (void __force_kernel *)optval;
- msg.msg_controllen = len;
- msg.msg_flags = flags;
-
-diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
-index efb1ff5..bc97573 100644
---- a/net/ipv4/ipconfig.c
-+++ b/net/ipv4/ipconfig.c
-@@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
-
- mm_segment_t oldfs = get_fs();
- set_fs(get_ds());
-- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
-+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
- set_fs(oldfs);
- return res;
- }
-@@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
-
- mm_segment_t oldfs = get_fs();
- set_fs(get_ds());
-- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
-+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
- set_fs(oldfs);
- return res;
- }
-@@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
-
- mm_segment_t oldfs = get_fs();
- set_fs(get_ds());
-- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
-+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
- set_fs(oldfs);
- return res;
- }
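These ipconfig.c wrappers call ioctl handlers that are declared with __user pointers while actually handing them kernel memory under set_fs(get_ds()); the patch only tightens the sparse annotation of the casts (__force_user is grsecurity's spelling of "__force __user"). A sketch of the general pattern, written with the vanilla annotations and an illustrative helper name:

#include <linux/uaccess.h>

static int call_with_kernel_buf(int (*handler)(void __user *arg), void *kbuf)
{
	mm_segment_t oldfs = get_fs();
	int res;

	set_fs(get_ds());			/* allow kernel addresses */
	res = handler((void __force __user *)kbuf);
	set_fs(oldfs);				/* always restore the limit */
	return res;
}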
-diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
-index 7993d16..8954007 100644
---- a/net/ipv4/ipmr.c
-+++ b/net/ipv4/ipmr.c
-@@ -1323,6 +1323,10 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
- if (get_user(v, (u32 __user *)optval))
- return -EFAULT;
-
-+ /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
-+ if (v != RT_TABLE_DEFAULT && v >= 1000000000)
-+ return -EINVAL;
-+
- rtnl_lock();
- ret = 0;
- if (sk == rtnl_dereference(mrt->mroute_sk)) {
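The ipmr.c hunk rejects multicast routing table ids of one billion or more because the table's device is named "pimreg%u" and must fit in IFNAMSIZ (16) bytes. A small user-space check of the arithmetic behind that bound: "pimreg" is 6 characters, so at most 9 digits plus the terminating NUL fit.

#include <stdio.h>

int main(void)
{
	char name[16];				/* IFNAMSIZ */
	unsigned int ok  = 999999999u;		/* 9 digits: fits exactly */
	unsigned int bad = 1000000000u;		/* 10 digits: would be truncated */

	/* snprintf() returns the length it wanted to write */
	printf("%d\n", snprintf(name, sizeof(name), "pimreg%u", ok));	/* 15 */
	printf("%d\n", snprintf(name, sizeof(name), "pimreg%u", bad));	/* 16, truncated */
	return 0;
}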
-diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
-index bcb6e61..5c995cd 100644
---- a/net/ipv4/netfilter/arp_tables.c
-+++ b/net/ipv4/netfilter/arp_tables.c
-@@ -880,14 +880,14 @@ static int compat_table_info(const struct xt_table_info *info,
- #endif
-
- static int get_info(struct net *net, void __user *user,
-- const int *len, int compat)
-+ int len, int compat)
- {
- char name[XT_TABLE_MAXNAMELEN];
- struct xt_table *t;
- int ret;
-
-- if (*len != sizeof(struct arpt_getinfo)) {
-- duprintf("length %u != %Zu\n", *len,
-+ if (len != sizeof(struct arpt_getinfo)) {
-+ duprintf("length %u != %Zu\n", len,
- sizeof(struct arpt_getinfo));
- return -EINVAL;
- }
-@@ -924,7 +924,7 @@ static int get_info(struct net *net, void __user *user,
- info.size = private->size;
- strcpy(info.name, name);
-
-- if (copy_to_user(user, &info, *len) != 0)
-+ if (copy_to_user(user, &info, len) != 0)
- ret = -EFAULT;
- else
- ret = 0;
-@@ -1685,7 +1685,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
-
- switch (cmd) {
- case ARPT_SO_GET_INFO:
-- ret = get_info(sock_net(sk), user, len, 1);
-+ ret = get_info(sock_net(sk), user, *len, 1);
- break;
- case ARPT_SO_GET_ENTRIES:
- ret = compat_get_entries(sock_net(sk), user, len);
-@@ -1730,7 +1730,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
-
- switch (cmd) {
- case ARPT_SO_GET_INFO:
-- ret = get_info(sock_net(sk), user, len, 0);
-+ ret = get_info(sock_net(sk), user, *len, 0);
- break;
-
- case ARPT_SO_GET_ENTRIES:
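In arp_tables.c (and identically in ip_tables.c and ip6_tables.c below), get_info() now takes the already-validated length by value instead of through a pointer, so the helper works on a single stable copy and the callers dereference *len exactly once. A sketch of that fetch-once, pass-by-value shape for a hypothetical getsockopt-style handler; the names are illustrative.

#include <linux/uaccess.h>
#include <linux/errno.h>

struct example_getinfo { char name[32]; unsigned int size; };

static int example_get_info(void __user *user, int len)
{
	struct example_getinfo info = { .size = 0 };

	if (len != sizeof(info))	/* validate the value, not a pointer */
		return -EINVAL;
	if (copy_to_user(user, &info, len))
		return -EFAULT;
	return 0;
}

/* caller: ret = example_get_info(user, *len);   *len is read exactly once */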
-diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
-index f98a1cf..b05baff 100644
---- a/net/ipv4/netfilter/ip_tables.c
-+++ b/net/ipv4/netfilter/ip_tables.c
-@@ -1069,14 +1069,14 @@ static int compat_table_info(const struct xt_table_info *info,
- #endif
-
- static int get_info(struct net *net, void __user *user,
-- const int *len, int compat)
-+ int len, int compat)
- {
- char name[XT_TABLE_MAXNAMELEN];
- struct xt_table *t;
- int ret;
-
-- if (*len != sizeof(struct ipt_getinfo)) {
-- duprintf("length %u != %zu\n", *len,
-+ if (len != sizeof(struct ipt_getinfo)) {
-+ duprintf("length %u != %zu\n", len,
- sizeof(struct ipt_getinfo));
- return -EINVAL;
- }
-@@ -1113,7 +1113,7 @@ static int get_info(struct net *net, void __user *user,
- info.size = private->size;
- strcpy(info.name, name);
-
-- if (copy_to_user(user, &info, *len) != 0)
-+ if (copy_to_user(user, &info, len) != 0)
- ret = -EFAULT;
- else
- ret = 0;
-@@ -1969,7 +1969,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
-
- switch (cmd) {
- case IPT_SO_GET_INFO:
-- ret = get_info(sock_net(sk), user, len, 1);
-+ ret = get_info(sock_net(sk), user, *len, 1);
- break;
- case IPT_SO_GET_ENTRIES:
- ret = compat_get_entries(sock_net(sk), user, len);
-@@ -2016,7 +2016,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
-
- switch (cmd) {
- case IPT_SO_GET_INFO:
-- ret = get_info(sock_net(sk), user, len, 0);
-+ ret = get_info(sock_net(sk), user, *len, 0);
- break;
-
- case IPT_SO_GET_ENTRIES:
-diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
-index a639967..8f44480 100644
---- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
-+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
-@@ -707,7 +707,7 @@ static int __init clusterip_tg_init(void)
- goto cleanup_target;
-
- #ifdef CONFIG_PROC_FS
-- clusterip_procdir = proc_mkdir("ipt_CLUSTERIP", init_net.proc_net);
-+ clusterip_procdir = proc_mkdir_restrict("ipt_CLUSTERIP", init_net.proc_net);
- if (!clusterip_procdir) {
- pr_err("Unable to proc dir entry\n");
- ret = -ENOMEM;
-diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
-index 7aa6225..31e741e 100644
---- a/net/ipv4/ping.c
-+++ b/net/ipv4/ping.c
-@@ -851,7 +851,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
- sk_rmem_alloc_get(sp),
- 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
- atomic_read(&sp->sk_refcnt), sp,
-- atomic_read(&sp->sk_drops), len);
-+ atomic_read_unchecked(&sp->sk_drops), len);
- }
-
- static int ping_seq_show(struct seq_file *seq, void *v)
-diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
-index f7fdbe9..63740b7 100644
---- a/net/ipv4/proc.c
-+++ b/net/ipv4/proc.c
-@@ -487,7 +487,7 @@ static __net_exit void ip_proc_exit_net(struct net *net)
- proc_net_remove(net, "sockstat");
- }
-
--static __net_initdata struct pernet_operations ip_proc_ops = {
-+static __net_initconst struct pernet_operations ip_proc_ops = {
- .init = ip_proc_init_net,
- .exit = ip_proc_exit_net,
- };
-diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
-index 063bcd5..2402343 100644
---- a/net/ipv4/raw.c
-+++ b/net/ipv4/raw.c
-@@ -305,7 +305,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
- int raw_rcv(struct sock *sk, struct sk_buff *skb)
- {
- if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- kfree_skb(skb);
- return NET_RX_DROP;
- }
-@@ -741,16 +741,20 @@ static int raw_init(struct sock *sk)
-
- static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
- {
-+ struct icmp_filter filter;
-+
- if (optlen > sizeof(struct icmp_filter))
- optlen = sizeof(struct icmp_filter);
-- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
-+ if (copy_from_user(&filter, optval, optlen))
- return -EFAULT;
-+ raw_sk(sk)->filter = filter;
- return 0;
- }
-
- static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
- {
- int len, ret = -EFAULT;
-+ struct icmp_filter filter;
-
- if (get_user(len, optlen))
- goto out;
-@@ -760,8 +764,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
- if (len > sizeof(struct icmp_filter))
- len = sizeof(struct icmp_filter);
- ret = -EFAULT;
-- if (put_user(len, optlen) ||
-- copy_to_user(optval, &raw_sk(sk)->filter, len))
-+ filter = raw_sk(sk)->filter;
-+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
- goto out;
- ret = 0;
- out: return ret;
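raw_seticmpfilter()/raw_geticmpfilter() are changed to stage the ICMP filter in a local struct: the set path copies from userspace into the stack copy first, so a faulting copy_from_user() can no longer leave the socket's live filter half-written, and the get path additionally refuses lengths larger than the filter before copy_to_user(). A sketch of the same staging for a hypothetical fixed-size option:

#include <linux/uaccess.h>
#include <linux/errno.h>

struct example_filter { unsigned long data[8]; };

static int example_set(struct example_filter *dst, const char __user *optval,
		       int optlen)
{
	struct example_filter tmp = *dst;	/* keep bytes beyond optlen */

	if (optlen > sizeof(tmp))
		optlen = sizeof(tmp);
	if (copy_from_user(&tmp, optval, optlen))
		return -EFAULT;			/* dst untouched on failure */
	*dst = tmp;
	return 0;
}

static int example_get(const struct example_filter *src, char __user *optval,
		       int len)
{
	struct example_filter tmp = *src;

	if (len > sizeof(tmp))
		return -EFAULT;			/* mirrors the added length check */
	return copy_to_user(optval, &tmp, len) ? -EFAULT : 0;
}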
-@@ -989,7 +993,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
- sk_wmem_alloc_get(sp),
- sk_rmem_alloc_get(sp),
- 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
-- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
-+ atomic_read(&sp->sk_refcnt),
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ NULL,
-+#else
-+ sp,
-+#endif
-+ atomic_read_unchecked(&sp->sk_drops));
- }
-
- static int raw_seq_show(struct seq_file *seq, void *v)
-@@ -1052,7 +1062,7 @@ static __net_exit void raw_exit_net(struct net *net)
- proc_net_remove(net, "raw");
- }
-
--static __net_initdata struct pernet_operations raw_net_ops = {
-+static __net_initconst struct pernet_operations raw_net_ops = {
- .init = raw_init_net,
- .exit = raw_exit_net,
- };
-diff --git a/net/ipv4/route.c b/net/ipv4/route.c
-index 8e79a9e..3767dfd 100644
---- a/net/ipv4/route.c
-+++ b/net/ipv4/route.c
-@@ -316,7 +316,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
-
- static inline int rt_genid(struct net *net)
- {
-- return atomic_read(&net->ipv4.rt_genid);
-+ return atomic_read_unchecked(&net->ipv4.rt_genid);
- }
-
- #ifdef CONFIG_PROC_FS
-@@ -554,7 +554,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
-
- static int rt_cpu_seq_open(struct inode *inode, struct file *file)
- {
-- return seq_open(file, &rt_cpu_seq_ops);
-+ return seq_open_restrict(file, &rt_cpu_seq_ops);
- }
-
- static const struct file_operations rt_cpu_seq_fops = {
-@@ -592,7 +592,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
-
- static int rt_acct_proc_open(struct inode *inode, struct file *file)
- {
-- return single_open(file, rt_acct_proc_show, NULL);
-+ return single_open_restrict(file, rt_acct_proc_show, NULL);
- }
-
- static const struct file_operations rt_acct_proc_fops = {
-@@ -644,7 +644,7 @@ static void __net_exit ip_rt_do_proc_exit(struct net *net)
- #endif
- }
-
--static struct pernet_operations ip_rt_proc_ops __net_initdata = {
-+static struct pernet_operations ip_rt_proc_ops __net_initconst = {
- .init = ip_rt_do_proc_init,
- .exit = ip_rt_do_proc_exit,
- };
-@@ -940,7 +940,7 @@ static void rt_cache_invalidate(struct net *net)
- unsigned char shuffle;
-
- get_random_bytes(&shuffle, sizeof(shuffle));
-- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
-+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
- redirect_genid++;
- inetpeer_invalidate_tree(AF_INET);
- }
-@@ -1372,11 +1372,11 @@ void rt_bind_peer(struct rtable *rt, __be32 daddr, int create)
-
- #define IP_IDENTS_SZ 2048u
- struct ip_ident_bucket {
-- atomic_t id;
-+ atomic_unchecked_t id;
- u32 stamp32;
- };
-
--static struct ip_ident_bucket *ip_idents __read_mostly;
-+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
-
- /* In order to protect privacy, we add a perturbation to identifiers
- * if one generator is seldom used. This makes hard for an attacker
-@@ -1396,7 +1396,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
- delta = (u32)(x >> 32);
- }
-
-- return atomic_add_return(segs + delta, &bucket->id) - segs;
-+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
- }
- EXPORT_SYMBOL(ip_idents_reserve);
-
-@@ -3254,7 +3254,7 @@ static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
- {
- if (write) {
- int flush_delay;
-- ctl_table ctl;
-+ ctl_table_no_const ctl;
- struct net *net;
-
- memcpy(&ctl, __ctl, sizeof(ctl));
-@@ -3403,6 +3403,7 @@ static struct ctl_table ipv4_route_flush_table[] = {
- .maxlen = sizeof(int),
- .mode = 0200,
- .proc_handler = ipv4_sysctl_rtcache_flush,
-+ .extra1 = &init_net,
- },
- { },
- };
-@@ -3416,25 +3417,23 @@ static __net_initdata struct ctl_path ipv4_route_path[] = {
-
- static __net_init int sysctl_route_net_init(struct net *net)
- {
-- struct ctl_table *tbl;
-+ ctl_table_no_const *tbl = NULL;
-
-- tbl = ipv4_route_flush_table;
- if (!net_eq(net, &init_net)) {
-- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
-+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
- if (tbl == NULL)
- goto err_dup;
-- }
-- tbl[0].extra1 = net;
-
-- net->ipv4.route_hdr =
-- register_net_sysctl_table(net, ipv4_route_path, tbl);
-+ net->ipv4.route_hdr = register_net_sysctl_table(net, ipv4_route_path, tbl);
-+ } else
-+ net->ipv4.route_hdr = register_net_sysctl_table(net, ipv4_route_path, ipv4_route_flush_table);
-+
- if (net->ipv4.route_hdr == NULL)
- goto err_reg;
- return 0;
-
- err_reg:
-- if (tbl != ipv4_route_flush_table)
-- kfree(tbl);
-+ kfree(tbl);
- err_dup:
- return -ENOMEM;
- }
-@@ -3449,7 +3448,7 @@ static __net_exit void sysctl_route_net_exit(struct net *net)
- kfree(tbl);
- }
-
--static __net_initdata struct pernet_operations sysctl_route_ops = {
-+static __net_initconst struct pernet_operations sysctl_route_ops = {
- .init = sysctl_route_net_init,
- .exit = sysctl_route_net_exit,
- };
-@@ -3464,7 +3463,7 @@ static __net_init int rt_genid_init(struct net *net)
- return 0;
- }
-
--static __net_initdata struct pernet_operations rt_genid_ops = {
-+static __net_initconst struct pernet_operations rt_genid_ops = {
- .init = rt_genid_init,
- };
-
-@@ -3487,11 +3486,7 @@ int __init ip_rt_init(void)
- {
- int rc = 0;
-
-- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
-- if (!ip_idents)
-- panic("IP: failed to allocate ip_idents\n");
--
-- get_random_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
-+ get_random_bytes(ip_idents, sizeof(ip_idents));
-
- #ifdef CONFIG_IP_ROUTE_CLASSID
- ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
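Besides the genid and sysctl changes, the route.c diff replaces the boot-time kmalloc of the ip_idents table (and its panic-on-failure path) with a fixed static array of 2048 buckets seeded by get_random_bytes(ip_idents, sizeof(ip_idents)); the per-bucket id becomes an unchecked atomic because IP identification values are expected to wrap. A simplified user-space sketch of the block-reservation idea behind ip_idents_reserve(); the random perturbation and timestamp handling of the real code are omitted.

#include <stdatomic.h>
#include <stdio.h>

#define IDENTS_SZ 2048u

static _Atomic unsigned int idents[IDENTS_SZ];	/* static table, no allocation */

static unsigned int reserve_ids(unsigned int hash, unsigned int segs)
{
	_Atomic unsigned int *bucket = &idents[hash % IDENTS_SZ];

	/* hand out a block of `segs` consecutive ids with one atomic add */
	return atomic_fetch_add(bucket, segs);
}

int main(void)
{
	unsigned int first = reserve_ids(42, 3);

	printf("got ids %u..%u\n", first, first + 2);
	return 0;
}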
-diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
-index 8a1bed2..d41ac11 100644
---- a/net/ipv4/syncookies.c
-+++ b/net/ipv4/syncookies.c
-@@ -89,8 +89,7 @@ __u32 cookie_init_timestamp(struct request_sock *req)
-
-
- static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
-- __be16 dport, __u32 sseq, __u32 count,
-- __u32 data)
-+ __be16 dport, __u32 sseq, __u32 data)
- {
- /*
- * Compute the secure sequence number.
-@@ -102,7 +101,7 @@ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
- * As an extra hack, we add a small "data" value that encodes the
- * MSS into the second hash value.
- */
--
-+ u32 count = tcp_cookie_time();
- return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
- sseq + (count << COOKIEBITS) +
- ((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
-@@ -114,22 +113,21 @@ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
- * If the syncookie is bad, the data returned will be out of
- * range. This must be checked by the caller.
- *
-- * The count value used to generate the cookie must be within
-- * "maxdiff" if the current (passed-in) "count". The return value
-- * is (__u32)-1 if this test fails.
-+ * The count value used to generate the cookie must be less than
-+ * MAX_SYNCOOKIE_AGE minutes in the past.
-+ * The return value is (__u32)-1 if this test fails.
- */
- static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
-- __be16 sport, __be16 dport, __u32 sseq,
-- __u32 count, __u32 maxdiff)
-+ __be16 sport, __be16 dport, __u32 sseq)
- {
-- __u32 diff;
-+ u32 diff, count = tcp_cookie_time();
-
- /* Strip away the layers from the cookie */
- cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
-
- /* Cookie is now reduced to (count * 2^24) ^ (hash % 2^24) */
- diff = (count - (cookie >> COOKIEBITS)) & ((__u32) - 1 >> COOKIEBITS);
-- if (diff >= maxdiff)
-+ if (diff >= MAX_SYNCOOKIE_AGE)
- return (__u32)-1;
-
- return (cookie -
-@@ -138,22 +136,22 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
- }
-
- /*
-- * MSS Values are taken from the 2009 paper
-- * 'Measuring TCP Maximum Segment Size' by S. Alcock and R. Nelson:
-- * - values 1440 to 1460 accounted for 80% of observed mss values
-- * - values outside the 536-1460 range are rare (<0.2%).
-+ * MSS Values are chosen based on the 2011 paper
-+ * 'An Analysis of TCP Maximum Segment Sizes' by S. Alcock and R. Nelson.
-+ * Values ..
-+ * .. lower than 536 are rare (< 0.2%)
-+ * .. between 537 and 1299 account for less than 1.5% of observed values
-+ * .. in the 1300-1349 range account for about 15 to 20% of observed mss values
-+ * .. exceeding 1460 are very rare (< 0.04%)
- *
-- * Table must be sorted.
-+ * 1460 is the single most frequently announced mss value (30 to 46% depending
-+ * on monitor location). Table must be sorted.
- */
- static __u16 const msstab[] = {
-- 64,
-- 512,
- 536,
-- 1024,
-- 1440,
-+ 1300,
-+ 1440, /* 1440, 1452: PPPoE */
- 1460,
-- 4312,
-- 8960,
- };
-
- /*
-@@ -178,17 +176,10 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
-
- return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
- th->source, th->dest, ntohl(th->seq),
-- jiffies / (HZ * 60), mssind);
-+ mssind);
- }
-
- /*
-- * This (misnamed) value is the age of syncookie which is permitted.
-- * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
-- * sysctl_tcp_retries1. It's a rather complicated formula (exponential
-- * backoff) to compute at runtime so it's currently hardcoded here.
-- */
--#define COUNTER_TRIES 4
--/*
- * Check if a ack sequence number is a valid syncookie.
- * Return the decoded mss if it is, or 0 if not.
- */
-@@ -198,9 +189,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
- const struct tcphdr *th = tcp_hdr(skb);
- __u32 seq = ntohl(th->seq) - 1;
- __u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
-- th->source, th->dest, seq,
-- jiffies / (HZ * 60),
-- COUNTER_TRIES);
-+ th->source, th->dest, seq);
-
- return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
- }
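The syncookies.c rewrite drops the caller-supplied count/maxdiff parameters: the minute counter now comes from tcp_cookie_time(), it is stored in the upper COOKIEBITS bits of the cookie, and validation recovers it and rejects cookies older than MAX_SYNCOOKIE_AGE minutes. A small user-space sketch of that bit layout and age check; the keyed hashes of the real code are left out, and the MAX_SYNCOOKIE_AGE value of 2 is the mainline figure, assumed here.

#include <stdio.h>
#include <stdint.h>

#define COOKIEBITS		24
#define COOKIEMASK		(((uint32_t)1 << COOKIEBITS) - 1)
#define MAX_SYNCOOKIE_AGE	2	/* minutes, assumed mainline value */

static uint32_t encode(uint32_t count, uint32_t data)
{
	/* high bits: minute counter, low COOKIEBITS bits: hashed data/MSS index */
	return (count << COOKIEBITS) + (data & COOKIEMASK);
}

static int check(uint32_t cookie, uint32_t now)
{
	uint32_t diff = (now - (cookie >> COOKIEBITS)) &
			((uint32_t)-1 >> COOKIEBITS);

	return diff < MAX_SYNCOOKIE_AGE;	/* stale cookies are rejected */
}

int main(void)
{
	uint32_t c = encode(100, 3);		/* minute 100, msstab index 3 */

	printf("fresh: %d, stale: %d\n", check(c, 101), check(c, 105));
	return 0;
}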
-diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
-index 253dd80..ceba12a 100644
---- a/net/ipv4/sysctl_net_ipv4.c
-+++ b/net/ipv4/sysctl_net_ipv4.c
-@@ -53,7 +53,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
- {
- int ret;
- int range[2];
-- ctl_table tmp = {
-+ ctl_table_no_const tmp = {
- .data = &range,
- .maxlen = sizeof(range),
- .mode = table->mode,
-@@ -104,7 +104,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
- {
- int ret;
- gid_t range[2];
-- ctl_table tmp = {
-+ ctl_table_no_const tmp = {
- .data = &range,
- .maxlen = sizeof(range),
- .mode = table->mode,
-@@ -125,7 +125,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
- {
- char val[TCP_CA_NAME_MAX];
-- ctl_table tbl = {
-+ ctl_table_no_const tbl = {
- .data = val,
- .maxlen = TCP_CA_NAME_MAX,
- };
-@@ -144,7 +144,7 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
-- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
-+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
- int ret;
-
- tbl.data = kmalloc(tbl.maxlen, GFP_USER);
-@@ -161,7 +161,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
-- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
-+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
- int ret;
-
- tbl.data = kmalloc(tbl.maxlen, GFP_USER);
-@@ -361,7 +361,7 @@ static struct ctl_table ipv4_table[] = {
- },
- {
- .procname = "ip_local_reserved_ports",
-- .data = NULL, /* initialized in sysctl_ipv4_init */
-+ .data = sysctl_local_reserved_ports,
- .maxlen = 65536,
- .mode = 0644,
- .proc_handler = proc_do_large_bitmap,
-@@ -746,11 +746,10 @@ EXPORT_SYMBOL_GPL(net_ipv4_ctl_path);
-
- static __net_init int ipv4_sysctl_init_net(struct net *net)
- {
-- struct ctl_table *table;
-+ ctl_table_no_const *table = NULL;
-
-- table = ipv4_net_table;
- if (!net_eq(net, &init_net)) {
-- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
-+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
- if (table == NULL)
- goto err_alloc;
-
-@@ -782,16 +781,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
-
- net->ipv4.sysctl_rt_cache_rebuild_count = 4;
-
-- net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
-- net_ipv4_ctl_path, table);
-+ if (!net_eq(net, &init_net))
-+ net->ipv4.ipv4_hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
-+ else
-+ net->ipv4.ipv4_hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, ipv4_net_table);
- if (net->ipv4.ipv4_hdr == NULL)
- goto err_reg;
-
- return 0;
-
- err_reg:
-- if (!net_eq(net, &init_net))
-- kfree(table);
-+ kfree(table);
- err_alloc:
- return -ENOMEM;
- }
-@@ -805,7 +805,7 @@ static __net_exit void ipv4_sysctl_exit_net(struct net *net)
- kfree(table);
- }
-
--static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
-+static __net_initconst struct pernet_operations ipv4_sysctl_ops = {
- .init = ipv4_sysctl_init_net,
- .exit = ipv4_sysctl_exit_net,
- };
-@@ -813,16 +813,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
- static __init int sysctl_ipv4_init(void)
- {
- struct ctl_table_header *hdr;
-- struct ctl_table *i;
--
-- for (i = ipv4_table; i->procname; i++) {
-- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
-- i->data = sysctl_local_reserved_ports;
-- break;
-- }
-- }
-- if (!i->procname)
-- return -EINVAL;
-
- hdr = register_sysctl_paths(net_ipv4_ctl_path, ipv4_table);
- if (hdr == NULL)
-diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index afe6886..297e5fb 100644
---- a/net/ipv4/tcp_input.c
-+++ b/net/ipv4/tcp_input.c
-@@ -4739,7 +4739,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
- * simplifies code)
- */
- static void
--tcp_collapse(struct sock *sk, struct sk_buff_head *list,
-+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
- struct sk_buff *head, struct sk_buff *tail,
- u32 start, u32 end)
- {
-@@ -5554,6 +5554,9 @@ slow_path:
- if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
- goto csum_error;
-
-+ if (!th->ack && !th->rst)
-+ goto discard;
-+
- /*
- * Standard slow path.
- */
-@@ -5562,8 +5565,7 @@ slow_path:
- return 0;
-
- step5:
-- if (th->ack &&
-- tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)
-+ if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)
- goto discard;
-
- tcp_rcv_rtt_measure_ts(sk, skb);
-@@ -5794,6 +5796,7 @@ discard:
- tcp_paws_reject(&tp->rx_opt, 0))
- goto discard_and_undo;
-
-+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
- if (th->syn) {
- /* We see SYN without ACK. It is attempt of
- * simultaneous connect with crossed SYNs.
-@@ -5842,6 +5845,7 @@ discard:
- goto discard;
- #endif
- }
-+#endif
- /* "fifth, if neither of the SYN or RST bits is set then
- * drop the segment and return."
- */
-@@ -5885,7 +5889,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
- goto discard;
-
- if (th->syn) {
-- if (th->fin)
-+ if (th->fin || th->urg || th->psh)
- goto discard;
- if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
- return 1;
-@@ -5924,11 +5928,14 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
- return 0;
- }
-
-+ if (!th->ack && !th->rst)
-+ goto discard;
-+
- if (!tcp_validate_incoming(sk, skb, th, 0))
- return 0;
-
- /* step 5: check the ACK field */
-- if (th->ack) {
-+ if (true) {
- int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
- FLAG_UPDATE_TS_RECENT) > 0;
-
-@@ -6034,8 +6041,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
- }
- break;
- }
-- } else
-- goto discard;
-+ }
-
- /* step 6: check the URG bit */
- tcp_urg(sk, skb, th);
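The tcp_input.c hunks tighten header validation: segments carrying neither ACK nor RST are discarded before the slow path does any work, SYNs received in LISTEN are refused if they also carry FIN, URG or PSH, and the simultaneous-open handling can be compiled out entirely under GRKERNSEC_NO_SIMULT_CONNECT. The first two checks, written out as standalone predicates with illustrative helper names:

#include <linux/types.h>
#include <linux/tcp.h>

/* drop segments that carry neither ACK nor RST before doing any work */
static bool tcp_flags_worth_processing(const struct tcphdr *th)
{
	return th->ack || th->rst;
}

/* in LISTEN, refuse SYNs that also set FIN, URG or PSH */
static bool tcp_acceptable_listen_syn(const struct tcphdr *th)
{
	return th->syn && !(th->fin || th->urg || th->psh);
}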
-diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
-index b4e0eb4..4df4e3a 100644
---- a/net/ipv4/tcp_ipv4.c
-+++ b/net/ipv4/tcp_ipv4.c
-@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
- int sysctl_tcp_low_latency __read_mostly;
- EXPORT_SYMBOL(sysctl_tcp_low_latency);
-
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+extern int grsec_enable_blackhole;
-+#endif
-
- #ifdef CONFIG_TCP_MD5SIG
- static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
-@@ -1631,6 +1634,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
- return 0;
-
- reset:
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+ if (!grsec_enable_blackhole)
-+#endif
- tcp_v4_send_reset(rsk, skb);
- discard:
- kfree_skb(skb);
-@@ -1693,12 +1699,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
- TCP_SKB_CB(skb)->sacked = 0;
-
- sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
-- if (!sk)
-+ if (!sk) {
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+ ret = 1;
-+#endif
- goto no_tcp_socket;
--
-+ }
- process:
-- if (sk->sk_state == TCP_TIME_WAIT)
-+ if (sk->sk_state == TCP_TIME_WAIT) {
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+ ret = 2;
-+#endif
- goto do_time_wait;
-+ }
-
- if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
- NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
-@@ -1748,6 +1761,10 @@ no_tcp_socket:
- bad_packet:
- TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
- } else {
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+ if (!grsec_enable_blackhole || (ret == 1 &&
-+ (skb->dev->flags & IFF_LOOPBACK)))
-+#endif
- tcp_v4_send_reset(NULL, skb);
- }
-
-@@ -2408,7 +2425,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
- 0, /* non standard timer */
- 0, /* open_requests have no inode */
- atomic_read(&sk->sk_refcnt),
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ NULL,
-+#else
- req,
-+#endif
- len);
- }
-
-@@ -2458,7 +2479,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
- sock_i_uid(sk),
- icsk->icsk_probes_out,
- sock_i_ino(sk),
-- atomic_read(&sk->sk_refcnt), sk,
-+ atomic_read(&sk->sk_refcnt),
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ NULL,
-+#else
-+ sk,
-+#endif
- jiffies_to_clock_t(icsk->icsk_rto),
- jiffies_to_clock_t(icsk->icsk_ack.ato),
- (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
-@@ -2486,7 +2512,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
- " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
- i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
- 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
-- atomic_read(&tw->tw_refcnt), tw, len);
-+ atomic_read(&tw->tw_refcnt),
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ NULL,
-+#else
-+ tw,
-+#endif
-+ len);
- }
-
- #define TMPSZ 150
-@@ -2657,7 +2689,7 @@ static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
- inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
- }
-
--static struct pernet_operations __net_initdata tcp_sk_ops = {
-+static struct pernet_operations __net_initconst tcp_sk_ops = {
- .init = tcp_sk_init,
- .exit = tcp_sk_exit,
- .exit_batch = tcp_sk_exit_batch,
-diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
-index 00e1530..47b4f16 100644
---- a/net/ipv4/tcp_minisocks.c
-+++ b/net/ipv4/tcp_minisocks.c
-@@ -27,6 +27,10 @@
- #include <net/inet_common.h>
- #include <net/xfrm.h>
-
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+extern int grsec_enable_blackhole;
-+#endif
-+
- int sysctl_tcp_syncookies __read_mostly = 1;
- EXPORT_SYMBOL(sysctl_tcp_syncookies);
-
-@@ -746,6 +750,10 @@ listen_overflow:
-
- embryonic_reset:
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
-+
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+ if (!grsec_enable_blackhole)
-+#endif
- if (!(flg & TCP_FLAG_RST))
- req->rsk_ops->send_reset(sk, skb);
-
-diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
-index e614810..c4f2ee7 100644
---- a/net/ipv4/tcp_output.c
-+++ b/net/ipv4/tcp_output.c
-@@ -1319,7 +1319,7 @@ static void tcp_cwnd_validate(struct sock *sk)
- * modulo only when the receiver window alone is the limiting factor or
- * when we would be allowed to send the split-due-to-Nagle skb fully.
- */
--static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
-+static unsigned int __intentional_overflow(0) tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
- unsigned int mss_now, unsigned int cwnd)
- {
- const struct tcp_sock *tp = tcp_sk(sk);
-diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
-index 85ee7eb..53277ab 100644
---- a/net/ipv4/tcp_probe.c
-+++ b/net/ipv4/tcp_probe.c
-@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
- if (cnt + width >= len)
- break;
-
-- if (copy_to_user(buf + cnt, tbuf, width))
-+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
- return -EFAULT;
- cnt += width;
- }
-diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
-index 2e0f0af..e2948bf 100644
---- a/net/ipv4/tcp_timer.c
-+++ b/net/ipv4/tcp_timer.c
-@@ -22,6 +22,10 @@
- #include <linux/gfp.h>
- #include <net/tcp.h>
-
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+extern int grsec_lastack_retries;
-+#endif
-+
- int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
- int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
- int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
-@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
- }
- }
-
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+ if ((sk->sk_state == TCP_LAST_ACK) &&
-+ (grsec_lastack_retries > 0) &&
-+ (grsec_lastack_retries < retry_until))
-+ retry_until = grsec_lastack_retries;
-+#endif
-+
- if (retransmits_timed_out(sk, retry_until,
- syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
- /* Has it gone just too far? */
-diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
-index 8c2e259..90d7b4e 100644
---- a/net/ipv4/udp.c
-+++ b/net/ipv4/udp.c
-@@ -86,6 +86,7 @@
- #include <linux/types.h>
- #include <linux/fcntl.h>
- #include <linux/module.h>
-+#include <linux/security.h>
- #include <linux/socket.h>
- #include <linux/sockios.h>
- #include <linux/igmp.h>
-@@ -108,6 +109,10 @@
- #include <trace/events/udp.h>
- #include "udp_impl.h"
-
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+extern int grsec_enable_blackhole;
-+#endif
-+
- struct udp_table udp_table __read_mostly;
- EXPORT_SYMBOL(udp_table);
-
-@@ -565,6 +570,9 @@ found:
- return s;
- }
-
-+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
-+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
-+
- /*
- * This routine is called by the ICMP module when it gets some
- * sort of error condition. If err < 0 then the socket should
-@@ -857,9 +865,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
- dport = usin->sin_port;
- if (dport == 0)
- return -EINVAL;
-+
-+ err = gr_search_udp_sendmsg(sk, usin);
-+ if (err)
-+ return err;
- } else {
- if (sk->sk_state != TCP_ESTABLISHED)
- return -EDESTADDRREQ;
-+
-+ err = gr_search_udp_sendmsg(sk, NULL);
-+ if (err)
-+ return err;
-+
- daddr = inet->inet_daddr;
- dport = inet->inet_dport;
- /* Open fast path for connected socket.
-@@ -1103,7 +1120,7 @@ static unsigned int first_packet_length(struct sock *sk)
- udp_lib_checksum_complete(skb)) {
- UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
- IS_UDPLITE(sk));
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- __skb_unlink(skb, rcvq);
- __skb_queue_tail(&list_kill, skb);
- }
-@@ -1183,6 +1200,10 @@ try_again:
- if (!skb)
- goto out;
-
-+ err = gr_search_udp_recvmsg(sk, skb);
-+ if (err)
-+ goto out_free;
-+
- ulen = skb->len - sizeof(struct udphdr);
- copied = len;
- if (copied > ulen)
-@@ -1248,10 +1269,8 @@ csum_copy_err:
- UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
- unlock_sock_fast(sk, slow);
-
-- if (noblock)
-- return -EAGAIN;
--
-- /* starting over for a new packet */
-+ /* starting over for a new packet, but check if we need to yield */
-+ cond_resched();
- msg->msg_flags &= ~MSG_TRUNC;
- goto try_again;
- }
-@@ -1486,7 +1505,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
-
- drop:
- UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- kfree_skb(skb);
- return -1;
- }
-@@ -1505,7 +1524,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
- skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
-
- if (!skb1) {
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
- IS_UDPLITE(sk));
- UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
-@@ -1674,6 +1693,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
- goto csum_error;
-
- UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
-+#endif
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
-
- /*
-@@ -2097,8 +2119,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
- sk_wmem_alloc_get(sp),
- sk_rmem_alloc_get(sp),
- 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
-- atomic_read(&sp->sk_refcnt), sp,
-- atomic_read(&sp->sk_drops), len);
-+ atomic_read(&sp->sk_refcnt),
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ NULL,
-+#else
-+ sp,
-+#endif
-+ atomic_read_unchecked(&sp->sk_drops), len);
- }
-
- int udp4_seq_show(struct seq_file *seq, void *v)
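Besides the gr_search_udp_* hooks and the blackhole/HIDESYM changes, the udp.c receive path stops returning -EAGAIN to non-blocking callers on a checksum failure and instead retries for the next datagram, inserting cond_resched() so a stream of corrupt packets cannot monopolize the CPU. A sketch of that retry shape; the packet helpers below are assumptions, not real kernel functions.

#include <linux/sched.h>

struct pkt;
extern struct pkt *next_packet(void);		/* assumed helpers */
extern int checksum_ok(struct pkt *p);
extern void drop_packet(struct pkt *p);

static struct pkt *receive_one(void)
{
	struct pkt *p;

	for (;;) {
		p = next_packet();
		if (!p)
			return NULL;
		if (checksum_ok(p))
			return p;
		drop_packet(p);
		cond_resched();		/* be fair to other runnable tasks */
	}
}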
-diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
-index a0b4c5d..a5818a1 100644
---- a/net/ipv4/xfrm4_policy.c
-+++ b/net/ipv4/xfrm4_policy.c
-@@ -190,11 +190,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
- fl4->flowi4_tos = iph->tos;
- }
-
--static inline int xfrm4_garbage_collect(struct dst_ops *ops)
-+static int xfrm4_garbage_collect(struct dst_ops *ops)
- {
- struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
-
-- xfrm4_policy_afinfo.garbage_collect(net);
-+ xfrm_garbage_collect_deferred(net);
- return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
- }
-
-diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
-index 3afdd78..2f630fb 100644
---- a/net/ipv6/addrconf.c
-+++ b/net/ipv6/addrconf.c
-@@ -169,7 +169,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
- .hop_limit = IPV6_DEFAULT_HOPLIMIT,
- .mtu6 = IPV6_MIN_MTU,
- .accept_ra = 1,
-- .accept_redirects = 1,
-+ .accept_redirects = 0,
- .autoconf = 1,
- .force_mld_version = 0,
- .dad_transmits = 1,
-@@ -204,7 +204,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
- .hop_limit = IPV6_DEFAULT_HOPLIMIT,
- .mtu6 = IPV6_MIN_MTU,
- .accept_ra = 1,
-- .accept_redirects = 1,
-+ .accept_redirects = 0,
- .autoconf = 1,
- .dad_transmits = 1,
- .rtr_solicits = MAX_RTR_SOLICITATIONS,
-@@ -2160,7 +2160,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
- p.iph.ihl = 5;
- p.iph.protocol = IPPROTO_IPV6;
- p.iph.ttl = 64;
-- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
-+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
-
- if (ops->ndo_do_ioctl) {
- mm_segment_t oldfs = get_fs();
-@@ -3237,16 +3237,23 @@ static const struct file_operations if6_fops = {
- .release = seq_release_net,
- };
-
-+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
-+extern void unregister_ipv6_seq_ops_addr(void);
-+
- static int __net_init if6_proc_net_init(struct net *net)
- {
-- if (!proc_net_fops_create(net, "if_inet6", S_IRUGO, &if6_fops))
-+ register_ipv6_seq_ops_addr(&if6_seq_ops);
-+ if (!proc_net_fops_create(net, "if_inet6", S_IRUGO, &if6_fops)) {
-+ unregister_ipv6_seq_ops_addr();
- return -ENOMEM;
-+ }
- return 0;
- }
-
- static void __net_exit if6_proc_net_exit(struct net *net)
- {
-- proc_net_remove(net, "if_inet6");
-+ proc_net_remove(net, "if_inet6");
-+ unregister_ipv6_seq_ops_addr();
- }
-
- static struct pernet_operations if6_proc_net_ops = {
-diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
-index 65dd543..e6c6e6d 100644
---- a/net/ipv6/esp6.c
-+++ b/net/ipv6/esp6.c
-@@ -164,8 +164,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
- struct esp_data *esp = x->data;
-
- /* skb is pure payload to encrypt */
-- err = -ENOMEM;
--
- aead = esp->aead;
- alen = crypto_aead_authsize(aead);
-
-@@ -200,8 +198,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
- }
-
- tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
-- if (!tmp)
-+ if (!tmp) {
-+ err = -ENOMEM;
- goto error;
-+ }
-
- seqhi = esp_tmp_seqhi(tmp);
- iv = esp_tmp_iv(aead, tmp, seqhilen);
-@@ -419,7 +419,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
- net_adj = 0;
-
- return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
-- net_adj) & ~(align - 1)) + (net_adj - 2);
-+ net_adj) & ~(align - 1)) + net_adj - 2;
- }
-
- static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
-index ceced67..72c8c1e 100644
---- a/net/ipv6/icmp.c
-+++ b/net/ipv6/icmp.c
-@@ -969,7 +969,7 @@ ctl_table ipv6_icmp_table_template[] = {
-
- struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
- {
-- struct ctl_table *table;
-+ ctl_table_no_const *table;
-
- table = kmemdup(ipv6_icmp_table_template,
- sizeof(ipv6_icmp_table_template),
-diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
-index 1567fb1..29af910 100644
---- a/net/ipv6/inet6_connection_sock.c
-+++ b/net/ipv6/inet6_connection_sock.c
-@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
- #ifdef CONFIG_XFRM
- {
- struct rt6_info *rt = (struct rt6_info *)dst;
-- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
-+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
- }
- #endif
- }
-@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
- #ifdef CONFIG_XFRM
- if (dst) {
- struct rt6_info *rt = (struct rt6_info *)dst;
-- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
-+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
- __sk_dst_reset(sk);
- dst = NULL;
- }
-diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
-index b204df8..8f274f4 100644
---- a/net/ipv6/ipv6_sockglue.c
-+++ b/net/ipv6/ipv6_sockglue.c
-@@ -961,7 +961,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
- if (sk->sk_type != SOCK_STREAM)
- return -ENOPROTOOPT;
-
-- msg.msg_control = optval;
-+ msg.msg_control = (void __force_kernel *)optval;
- msg.msg_controllen = len;
- msg.msg_flags = flags;
-
-diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
-index 2e752b2..3d54ac42 100644
---- a/net/ipv6/netfilter/ip6_tables.c
-+++ b/net/ipv6/netfilter/ip6_tables.c
-@@ -1091,14 +1091,14 @@ static int compat_table_info(const struct xt_table_info *info,
- #endif
-
- static int get_info(struct net *net, void __user *user,
-- const int *len, int compat)
-+ int len, int compat)
- {
- char name[XT_TABLE_MAXNAMELEN];
- struct xt_table *t;
- int ret;
-
-- if (*len != sizeof(struct ip6t_getinfo)) {
-- duprintf("length %u != %zu\n", *len,
-+ if (len != sizeof(struct ip6t_getinfo)) {
-+ duprintf("length %u != %zu\n", len,
- sizeof(struct ip6t_getinfo));
- return -EINVAL;
- }
-@@ -1135,7 +1135,7 @@ static int get_info(struct net *net, void __user *user,
- info.size = private->size;
- strcpy(info.name, name);
-
-- if (copy_to_user(user, &info, *len) != 0)
-+ if (copy_to_user(user, &info, len) != 0)
- ret = -EFAULT;
- else
- ret = 0;
-@@ -1991,7 +1991,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
-
- switch (cmd) {
- case IP6T_SO_GET_INFO:
-- ret = get_info(sock_net(sk), user, len, 1);
-+ ret = get_info(sock_net(sk), user, *len, 1);
- break;
- case IP6T_SO_GET_ENTRIES:
- ret = compat_get_entries(sock_net(sk), user, len);
-@@ -2038,7 +2038,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
-
- switch (cmd) {
- case IP6T_SO_GET_INFO:
-- ret = get_info(sock_net(sk), user, len, 0);
-+ ret = get_info(sock_net(sk), user, *len, 0);
- break;
-
- case IP6T_SO_GET_ENTRIES:
-diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
-index 1008ce9..db7ea62 100644
---- a/net/ipv6/proc.c
-+++ b/net/ipv6/proc.c
-@@ -307,7 +307,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
- if (!proc_net_fops_create(net, "snmp6", S_IRUGO, &snmp6_seq_fops))
- goto proc_snmp6_fail;
-
-- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
-+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
- if (!net->mib.proc_net_devsnmp6)
- goto proc_dev_snmp6_fail;
- return 0;
-diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
-index 9287f3ea..136d642 100644
---- a/net/ipv6/raw.c
-+++ b/net/ipv6/raw.c
-@@ -109,7 +109,7 @@ found:
- */
- static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
- {
-- struct icmp6hdr *_hdr;
-+ struct icmp6hdr _hdr;
- const struct icmp6hdr *hdr;
-
- hdr = skb_header_pointer(skb, skb_transport_offset(skb),
-@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
- {
- if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
- skb_checksum_complete(skb)) {
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- kfree_skb(skb);
- return NET_RX_DROP;
- }
-@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
- struct raw6_sock *rp = raw6_sk(sk);
-
- if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- kfree_skb(skb);
- return NET_RX_DROP;
- }
-@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
-
- if (inet->hdrincl) {
- if (skb_checksum_complete(skb)) {
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- kfree_skb(skb);
- return NET_RX_DROP;
- }
-@@ -598,7 +598,7 @@ out:
- return err;
- }
-
--static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
-+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
- struct flowi6 *fl6, struct dst_entry **dstp,
- unsigned int flags)
- {
-@@ -908,12 +908,15 @@ do_confirm:
- static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
- char __user *optval, int optlen)
- {
-+ struct icmp6_filter filter;
-+
- switch (optname) {
- case ICMPV6_FILTER:
- if (optlen > sizeof(struct icmp6_filter))
- optlen = sizeof(struct icmp6_filter);
-- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
-+ if (copy_from_user(&filter, optval, optlen))
- return -EFAULT;
-+ raw6_sk(sk)->filter = filter;
- return 0;
- default:
- return -ENOPROTOOPT;
-@@ -926,6 +929,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen)
- {
- int len;
-+ struct icmp6_filter filter;
-
- switch (optname) {
- case ICMPV6_FILTER:
-@@ -937,7 +941,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
- len = sizeof(struct icmp6_filter);
- if (put_user(len, optlen))
- return -EFAULT;
-- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
-+ filter = raw6_sk(sk)->filter;
-+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
- return -EFAULT;
- return 0;
- default:
-@@ -1244,7 +1249,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
- 0, 0L, 0,
- sock_i_uid(sp), 0,
- sock_i_ino(sp),
-- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
-+ atomic_read(&sp->sk_refcnt),
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ NULL,
-+#else
-+ sp,
-+#endif
-+ atomic_read_unchecked(&sp->sk_drops));
- }
-
- static int raw6_seq_show(struct seq_file *seq, void *v)
-diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
-index eba5deb..61e026f 100644
---- a/net/ipv6/reassembly.c
-+++ b/net/ipv6/reassembly.c
-@@ -651,21 +651,21 @@ static struct ctl_table ip6_frags_ctl_table[] = {
-
- static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
- {
-- struct ctl_table *table;
-+ ctl_table_no_const *table = NULL;
- struct ctl_table_header *hdr;
-
-- table = ip6_frags_ns_ctl_table;
- if (!net_eq(net, &init_net)) {
-- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
-+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
- if (table == NULL)
- goto err_alloc;
-
- table[0].data = &net->ipv6.frags.high_thresh;
- table[1].data = &net->ipv6.frags.low_thresh;
- table[2].data = &net->ipv6.frags.timeout;
-- }
-+ hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
-+ } else
-+ hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, ip6_frags_ns_ctl_table);
-
-- hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
- if (hdr == NULL)
- goto err_reg;
-
-@@ -673,8 +673,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
- return 0;
-
- err_reg:
-- if (!net_eq(net, &init_net))
-- kfree(table);
-+ kfree(table);
- err_alloc:
- return -ENOMEM;
- }
-diff --git a/net/ipv6/route.c b/net/ipv6/route.c
-index d89d1a6..61d0fe9 100644
---- a/net/ipv6/route.c
-+++ b/net/ipv6/route.c
-@@ -2806,7 +2806,7 @@ ctl_table ipv6_route_table_template[] = {
-
- struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
- {
-- struct ctl_table *table;
-+ ctl_table_no_const *table;
-
- table = kmemdup(ipv6_route_table_template,
- sizeof(ipv6_route_table_template),
-diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
-index 5a0d664..0177566a 100644
---- a/net/ipv6/syncookies.c
-+++ b/net/ipv6/syncookies.c
-@@ -27,26 +27,21 @@ extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
- #define COOKIEBITS 24 /* Upper bits store count */
- #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
-
--/* Table must be sorted. */
-+/* RFC 2460, Section 8.3:
-+ * [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..]
-+ *
-+ * Due to IPV6_MIN_MTU=1280 the lowest possible MSS is 1220, which allows
-+ * using higher values than ipv4 tcp syncookies.
-+ * The other values are chosen based on ethernet (1500 and 9k MTU), plus
-+ * one that accounts for common encap (PPPoe) overhead. Table must be sorted.
-+ */
- static __u16 const msstab[] = {
-- 64,
-- 512,
-- 536,
-- 1280 - 60,
-+ 1280 - 60, /* IPV6_MIN_MTU - 60 */
- 1480 - 60,
- 1500 - 60,
-- 4460 - 60,
- 9000 - 60,
- };
-
--/*
-- * This (misnamed) value is the age of syncookie which is permitted.
-- * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
-- * sysctl_tcp_retries1. It's a rather complicated formula (exponential
-- * backoff) to compute at runtime so it's currently hardcoded here.
-- */
--#define COUNTER_TRIES 4
--
- static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
- struct request_sock *req,
- struct dst_entry *dst)
-@@ -89,8 +84,9 @@ static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *dadd
- static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
- const struct in6_addr *daddr,
- __be16 sport, __be16 dport, __u32 sseq,
-- __u32 count, __u32 data)
-+ __u32 data)
- {
-+ u32 count = tcp_cookie_time();
- return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
- sseq + (count << COOKIEBITS) +
- ((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
-@@ -99,15 +95,14 @@ static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
-
- static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
- const struct in6_addr *daddr, __be16 sport,
-- __be16 dport, __u32 sseq, __u32 count,
-- __u32 maxdiff)
-+ __be16 dport, __u32 sseq)
- {
-- __u32 diff;
-+ __u32 diff, count = tcp_cookie_time();
-
- cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
-
- diff = (count - (cookie >> COOKIEBITS)) & ((__u32) -1 >> COOKIEBITS);
-- if (diff >= maxdiff)
-+ if (diff >= MAX_SYNCOOKIE_AGE)
- return (__u32)-1;
-
- return (cookie -
-@@ -133,8 +128,7 @@ __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
-
- return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source,
-- th->dest, ntohl(th->seq),
-- jiffies / (HZ * 60), mssind);
-+ th->dest, ntohl(th->seq), mssind);
- }
-
- static inline int cookie_check(const struct sk_buff *skb, __u32 cookie)
-@@ -143,8 +137,7 @@ static inline int cookie_check(const struct sk_buff *skb, __u32 cookie)
- const struct tcphdr *th = tcp_hdr(skb);
- __u32 seq = ntohl(th->seq) - 1;
- __u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
-- th->source, th->dest, seq,
-- jiffies / (HZ * 60), COUNTER_TRIES);
-+ th->source, th->dest, seq);
-
- return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
- }
-diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
-index 166a57c..dc4e6b8 100644
---- a/net/ipv6/sysctl_net_ipv6.c
-+++ b/net/ipv6/sysctl_net_ipv6.c
-@@ -71,7 +71,7 @@ EXPORT_SYMBOL_GPL(net_ipv6_ctl_path);
-
- static int __net_init ipv6_sysctl_net_init(struct net *net)
- {
-- struct ctl_table *ipv6_table;
-+ ctl_table_no_const *ipv6_table;
- struct ctl_table *ipv6_route_table;
- struct ctl_table *ipv6_icmp_table;
- int err;
-diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
-index 655cc60..c49497a 100644
---- a/net/ipv6/tcp_ipv6.c
-+++ b/net/ipv6/tcp_ipv6.c
-@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
- }
- #endif
-
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+extern int grsec_enable_blackhole;
-+#endif
-+
- static void tcp_v6_hash(struct sock *sk)
- {
- if (sk->sk_state != TCP_CLOSE) {
-@@ -1652,6 +1656,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
- return 0;
-
- reset:
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+ if (!grsec_enable_blackhole)
-+#endif
- tcp_v6_send_reset(sk, skb);
- discard:
- if (opt_skb)
-@@ -1731,12 +1738,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
- TCP_SKB_CB(skb)->sacked = 0;
-
- sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
-- if (!sk)
-+ if (!sk) {
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+ ret = 1;
-+#endif
- goto no_tcp_socket;
-+ }
-
- process:
-- if (sk->sk_state == TCP_TIME_WAIT)
-+ if (sk->sk_state == TCP_TIME_WAIT) {
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+ ret = 2;
-+#endif
- goto do_time_wait;
-+ }
-
- if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
- NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
-@@ -1784,6 +1799,10 @@ no_tcp_socket:
- bad_packet:
- TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
- } else {
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+ if (!grsec_enable_blackhole || (ret == 1 &&
-+ (skb->dev->flags & IFF_LOOPBACK)))
-+#endif
- tcp_v6_send_reset(NULL, skb);
- }
-
-@@ -2044,7 +2063,13 @@ static void get_openreq6(struct seq_file *seq,
- uid,
- 0, /* non standard timer */
- 0, /* open_requests have no inode */
-- 0, req);
-+ 0,
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ NULL
-+#else
-+ req
-+#endif
-+ );
- }
-
- static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
-@@ -2094,7 +2119,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
- sock_i_uid(sp),
- icsk->icsk_probes_out,
- sock_i_ino(sp),
-- atomic_read(&sp->sk_refcnt), sp,
-+ atomic_read(&sp->sk_refcnt),
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ NULL,
-+#else
-+ sp,
-+#endif
- jiffies_to_clock_t(icsk->icsk_rto),
- jiffies_to_clock_t(icsk->icsk_ack.ato),
- (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
-@@ -2129,7 +2159,13 @@ static void get_timewait6_sock(struct seq_file *seq,
- dest->s6_addr32[2], dest->s6_addr32[3], destp,
- tw->tw_substate, 0, 0,
- 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
-- atomic_read(&tw->tw_refcnt), tw);
-+ atomic_read(&tw->tw_refcnt),
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ NULL
-+#else
-+ tw
-+#endif
-+ );
- }
-
- static int tcp6_seq_show(struct seq_file *seq, void *v)
-diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
-index d131a95..59d5161 100644
---- a/net/ipv6/udp.c
-+++ b/net/ipv6/udp.c
-@@ -50,6 +50,10 @@
- #include <linux/seq_file.h>
- #include "udp_impl.h"
-
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+extern int grsec_enable_blackhole;
-+#endif
-+
- int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
- {
- const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
-@@ -451,10 +455,8 @@ csum_copy_err:
- }
- unlock_sock_fast(sk, slow);
-
-- if (noblock)
-- return -EAGAIN;
--
-- /* starting over for a new packet */
-+ /* starting over for a new packet, but check if we need to yield */
-+ cond_resched();
- msg->msg_flags &= ~MSG_TRUNC;
- goto try_again;
- }
-@@ -546,7 +548,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
-
- return 0;
- drop:
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- drop_no_sk_drops_inc:
- UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
- kfree_skb(skb);
-@@ -622,7 +624,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
- continue;
- }
- drop:
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- UDP6_INC_STATS_BH(sock_net(sk),
- UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
- UDP6_INC_STATS_BH(sock_net(sk),
-@@ -777,6 +779,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
- UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
- proto == IPPROTO_UDPLITE);
-
-+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
-+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
-+#endif
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
-
- kfree_skb(skb);
-@@ -793,7 +798,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
- if (!sock_owned_by_user(sk))
- udpv6_queue_rcv_skb(sk, skb);
- else if (sk_add_backlog(sk, skb)) {
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- bh_unlock_sock(sk);
- sock_put(sk);
- goto discard;
-@@ -1409,8 +1414,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
- 0, 0L, 0,
- sock_i_uid(sp), 0,
- sock_i_ino(sp),
-- atomic_read(&sp->sk_refcnt), sp,
-- atomic_read(&sp->sk_drops));
-+ atomic_read(&sp->sk_refcnt),
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ NULL,
-+#else
-+ sp,
-+#endif
-+ atomic_read_unchecked(&sp->sk_drops));
- }
-
- int udp6_seq_show(struct seq_file *seq, void *v)
-diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
-index db78e7d..0e6a420 100644
---- a/net/ipv6/xfrm6_policy.c
-+++ b/net/ipv6/xfrm6_policy.c
-@@ -125,8 +125,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
- {
- struct flowi6 *fl6 = &fl->u.ip6;
- int onlyproto = 0;
-- u16 offset = skb_network_header_len(skb);
- const struct ipv6hdr *hdr = ipv6_hdr(skb);
-+ u16 offset = sizeof(*hdr);
- struct ipv6_opt_hdr *exthdr;
- const unsigned char *nh = skb_network_header(skb);
- u8 nexthdr = nh[IP6CB(skb)->nhoff];
-@@ -160,8 +160,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
- case IPPROTO_DCCP:
- if (!onlyproto && (nh + offset + 4 < skb->data ||
- pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
-- __be16 *ports = (__be16 *)exthdr;
-+ __be16 *ports;
-
-+ nh = skb_network_header(skb);
-+ ports = (__be16 *)(nh + offset);
- fl6->fl6_sport = ports[!!reverse];
- fl6->fl6_dport = ports[!reverse];
- }
-@@ -170,8 +172,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
-
- case IPPROTO_ICMPV6:
- if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
-- u8 *icmp = (u8 *)exthdr;
-+ u8 *icmp;
-
-+ nh = skb_network_header(skb);
-+ icmp = (u8 *)(nh + offset);
- fl6->fl6_icmp_type = icmp[0];
- fl6->fl6_icmp_code = icmp[1];
- }
-@@ -182,8 +186,9 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
- case IPPROTO_MH:
- if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
- struct ip6_mh *mh;
-- mh = (struct ip6_mh *)exthdr;
-
-+ nh = skb_network_header(skb);
-+ mh = (struct ip6_mh *)(nh + offset);
- fl6->fl6_mh_type = mh->ip6mh_type;
- }
- fl6->flowi6_proto = nexthdr;
-@@ -202,11 +207,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
- }
- }
-
--static inline int xfrm6_garbage_collect(struct dst_ops *ops)
-+static int xfrm6_garbage_collect(struct dst_ops *ops)
- {
- struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
-
-- xfrm6_policy_afinfo.garbage_collect(net);
-+ xfrm_garbage_collect_deferred(net);
- return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
- }
-
-diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
-index f8ba30d..927a4aa 100644
---- a/net/ipx/ipx_proc.c
-+++ b/net/ipx/ipx_proc.c
-@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
- struct proc_dir_entry *p;
- int rc = -ENOMEM;
-
-- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
-+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
-
- if (!ipx_proc_dir)
- goto out;
-diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
-index cf368dd..d507600 100644
---- a/net/irda/ircomm/ircomm_tty.c
-+++ b/net/irda/ircomm/ircomm_tty.c
-@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
- add_wait_queue(&self->open_wait, &wait);
-
- IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
-- __FILE__,__LINE__, tty->driver->name, self->open_count );
-+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
-
- /* As far as I can see, we protect open_count - Jean II */
- spin_lock_irqsave(&self->spinlock, flags);
- if (!tty_hung_up_p(filp)) {
- extra_count = 1;
-- self->open_count--;
-+ local_dec(&self->open_count);
- }
- spin_unlock_irqrestore(&self->spinlock, flags);
-- self->blocked_open++;
-+ local_inc(&self->blocked_open);
-
- while (1) {
- if (tty->termios->c_cflag & CBAUD) {
-@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
- }
-
- IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
-- __FILE__,__LINE__, tty->driver->name, self->open_count );
-+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
-
- schedule();
- }
-@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
- if (extra_count) {
- /* ++ is not atomic, so this should be protected - Jean II */
- spin_lock_irqsave(&self->spinlock, flags);
-- self->open_count++;
-+ local_inc(&self->open_count);
- spin_unlock_irqrestore(&self->spinlock, flags);
- }
-- self->blocked_open--;
-+ local_dec(&self->blocked_open);
-
- IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
-- __FILE__,__LINE__, tty->driver->name, self->open_count);
-+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
-
- if (!retval)
- self->flags |= ASYNC_NORMAL_ACTIVE;
-@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
- }
- /* ++ is not atomic, so this should be protected - Jean II */
- spin_lock_irqsave(&self->spinlock, flags);
-- self->open_count++;
-+ local_inc(&self->open_count);
-
- tty->driver_data = self;
- self->tty = tty;
- spin_unlock_irqrestore(&self->spinlock, flags);
-
- IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
-- self->line, self->open_count);
-+ self->line, local_read(&self->open_count));
-
- /* Not really used by us, but lets do it anyway */
- self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
-@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
- return;
- }
-
-- if ((tty->count == 1) && (self->open_count != 1)) {
-+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. state->count should always
-@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
- */
- IRDA_DEBUG(0, "%s(), bad serial port count; "
- "tty->count is 1, state->count is %d\n", __func__ ,
-- self->open_count);
-- self->open_count = 1;
-+ local_read(&self->open_count));
-+ local_set(&self->open_count, 1);
- }
-
-- if (--self->open_count < 0) {
-+ if (local_dec_return(&self->open_count) < 0) {
- IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
-- __func__, self->line, self->open_count);
-- self->open_count = 0;
-+ __func__, self->line, local_read(&self->open_count));
-+ local_set(&self->open_count, 0);
- }
-- if (self->open_count) {
-+ if (local_read(&self->open_count)) {
- spin_unlock_irqrestore(&self->spinlock, flags);
-
- IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
-@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
- tty->closing = 0;
- self->tty = NULL;
-
-- if (self->blocked_open) {
-+ if (local_read(&self->blocked_open)) {
- if (self->close_delay)
- schedule_timeout_interruptible(self->close_delay);
- wake_up_interruptible(&self->open_wait);
-@@ -1015,7 +1015,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
- spin_lock_irqsave(&self->spinlock, flags);
- self->flags &= ~ASYNC_NORMAL_ACTIVE;
- self->tty = NULL;
-- self->open_count = 0;
-+ local_set(&self->open_count, 0);
- spin_unlock_irqrestore(&self->spinlock, flags);
-
- wake_up_interruptible(&self->open_wait);
-@@ -1362,7 +1362,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
- seq_putc(m, '\n');
-
- seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
-- seq_printf(m, "Open count: %d\n", self->open_count);
-+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
- seq_printf(m, "Max data size: %d\n", self->max_data_size);
- seq_printf(m, "Max header size: %d\n", self->max_header_size);
-
-diff --git a/net/irda/iriap.c b/net/irda/iriap.c
-index e71e85b..29340a9 100644
---- a/net/irda/iriap.c
-+++ b/net/irda/iriap.c
-@@ -495,8 +495,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
- /* case CS_ISO_8859_9: */
- /* case CS_UNICODE: */
- default:
-- IRDA_DEBUG(0, "%s(), charset %s, not supported\n",
-- __func__, ias_charset_types[charset]);
-+ IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n",
-+ __func__, charset,
-+ charset < ARRAY_SIZE(ias_charset_types) ?
-+ ias_charset_types[charset] :
-+ "(unknown)");
-
- /* Aborting, close connection! */
- iriap_disconnect_request(self);
-diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
-index 8c00416..9ea0c93 100644
---- a/net/irda/irlap_frame.c
-+++ b/net/irda/irlap_frame.c
-@@ -544,7 +544,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
- /*
- * We now have some discovery info to deliver!
- */
-- discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC);
-+ discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC);
- if (!discovery) {
- IRDA_WARNING("%s: unable to malloc!\n", __func__);
- return;
-diff --git a/net/irda/irproc.c b/net/irda/irproc.c
-index b9ac598..f88cc56 100644
---- a/net/irda/irproc.c
-+++ b/net/irda/irproc.c
-@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
- {
- int i;
-
-- proc_irda = proc_mkdir("irda", init_net.proc_net);
-+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
- if (proc_irda == NULL)
- return;
-
-diff --git a/net/irda/irttp.c b/net/irda/irttp.c
-index 32e3bb0..a4e5eb8 100644
---- a/net/irda/irttp.c
-+++ b/net/irda/irttp.c
-@@ -441,6 +441,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
- lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0);
- if (lsap == NULL) {
- IRDA_WARNING("%s: unable to allocate LSAP!!\n", __func__);
-+ __irttp_close_tsap(self);
- return NULL;
- }
-
-diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
-index cf98d62..7bf2972 100644
---- a/net/iucv/af_iucv.c
-+++ b/net/iucv/af_iucv.c
-@@ -786,10 +786,10 @@ static int iucv_sock_autobind(struct sock *sk)
-
- write_lock_bh(&iucv_sk_list.lock);
-
-- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
-+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
- while (__iucv_get_sock_by_name(name)) {
- sprintf(name, "%08x",
-- atomic_inc_return(&iucv_sk_list.autobind_name));
-+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
- }
-
- write_unlock_bh(&iucv_sk_list.lock);
-diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
-index 403be43..87f09da 100644
---- a/net/iucv/iucv.c
-+++ b/net/iucv/iucv.c
-@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __refdata iucv_cpu_notifier = {
-+static struct notifier_block iucv_cpu_notifier = {
- .notifier_call = iucv_cpu_notify,
- };
-
-diff --git a/net/key/af_key.c b/net/key/af_key.c
-index dc8d7ef..9d37285 100644
---- a/net/key/af_key.c
-+++ b/net/key/af_key.c
-@@ -1097,7 +1097,8 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
-
- x->id.proto = proto;
- x->id.spi = sa->sadb_sa_spi;
-- x->props.replay_window = sa->sadb_sa_replay;
-+ x->props.replay_window = min_t(unsigned int, sa->sadb_sa_replay,
-+ (sizeof(x->replay.bitmap) * 8));
- if (sa->sadb_sa_flags & SADB_SAFLAGS_NOECN)
- x->props.flags |= XFRM_STATE_NOECN;
- if (sa->sadb_sa_flags & SADB_SAFLAGS_DECAP_DSCP)
-@@ -1924,6 +1925,9 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
- int len = pol->sadb_x_policy_len*8 - sizeof(struct sadb_x_policy);
- struct sadb_x_ipsecrequest *rq = (void*)(pol+1);
-
-+ if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy))
-+ return -EINVAL;
-+
- while (len >= sizeof(struct sadb_x_ipsecrequest)) {
- if ((err = parse_ipsecrequest(xp, rq)) < 0)
- return err;
-@@ -3020,10 +3024,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
- static u32 get_acqseq(void)
- {
- u32 res;
-- static atomic_t acqseq;
-+ static atomic_unchecked_t acqseq;
-
- do {
-- res = atomic_inc_return(&acqseq);
-+ res = atomic_inc_return_unchecked(&acqseq);
- } while (!res);
- return res;
- }
-diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
-index 334a93d..ee97cfd 100644
---- a/net/l2tp/l2tp_ip.c
-+++ b/net/l2tp/l2tp_ip.c
-@@ -667,7 +667,7 @@ static struct inet_protosw l2tp_ip_protosw = {
- .no_check = 0,
- };
-
--static struct net_protocol l2tp_ip_protocol __read_mostly = {
-+static const struct net_protocol l2tp_ip_protocol = {
- .handler = l2tp_ip_recv,
- };
-
-diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
-index 93a41a0..d4b4edb 100644
---- a/net/l2tp/l2tp_netlink.c
-+++ b/net/l2tp/l2tp_netlink.c
-@@ -78,8 +78,8 @@ static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
-
- hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
- &l2tp_nl_family, 0, L2TP_CMD_NOOP);
-- if (IS_ERR(hdr)) {
-- ret = PTR_ERR(hdr);
-+ if (!hdr) {
-+ ret = -EMSGSIZE;
- goto err_out;
- }
-
-@@ -228,8 +228,8 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
-
- hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
- L2TP_CMD_TUNNEL_GET);
-- if (IS_ERR(hdr))
-- return PTR_ERR(hdr);
-+ if (!hdr)
-+ return -EMSGSIZE;
-
- NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version);
- NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
-@@ -560,8 +560,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags
- sk = tunnel->sock;
-
- hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
-- if (IS_ERR(hdr))
-- return PTR_ERR(hdr);
-+ if (!hdr)
-+ return -EMSGSIZE;
-
- NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
- NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id);
-diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
-index a1839c0..4e06b9b 100644
---- a/net/llc/llc_proc.c
-+++ b/net/llc/llc_proc.c
-@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
- int rc = -ENOMEM;
- struct proc_dir_entry *p;
-
-- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
-+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
- if (!llc_proc_dir)
- goto out;
-
-diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
-index 8da371c5..bd541cf 100644
---- a/net/mac80211/ieee80211_i.h
-+++ b/net/mac80211/ieee80211_i.h
-@@ -27,6 +27,7 @@
- #include <net/ieee80211_radiotap.h>
- #include <net/cfg80211.h>
- #include <net/mac80211.h>
-+#include <asm/local.h>
- #include "key.h"
- #include "sta_info.h"
-
-@@ -781,7 +782,7 @@ struct ieee80211_local {
- /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
- spinlock_t queue_stop_reason_lock;
-
-- int open_count;
-+ local_t open_count;
- int monitors, cooked_mntrs;
- /* number of interfaces with corresponding FIF_ flags */
- int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
-diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
-index 24ec86f..54ee7ef 100644
---- a/net/mac80211/iface.c
-+++ b/net/mac80211/iface.c
-@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
- break;
- }
-
-- if (local->open_count == 0) {
-+ if (local_read(&local->open_count) == 0) {
- res = drv_start(local);
- if (res)
- goto err_del_bss;
-@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
- memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
-
- if (!is_valid_ether_addr(dev->dev_addr)) {
-- if (!local->open_count)
-+ if (!local_read(&local->open_count))
- drv_stop(local);
- return -EADDRNOTAVAIL;
- }
-@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
- mutex_unlock(&local->mtx);
-
- if (coming_up)
-- local->open_count++;
-+ local_inc(&local->open_count);
-
- if (hw_reconf_flags) {
- ieee80211_hw_config(local, hw_reconf_flags);
-@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
- err_del_interface:
- drv_remove_interface(local, &sdata->vif);
- err_stop:
-- if (!local->open_count)
-+ if (!local_read(&local->open_count))
- drv_stop(local);
- err_del_bss:
- sdata->bss = NULL;
-@@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
- }
-
- if (going_down)
-- local->open_count--;
-+ local_dec(&local->open_count);
-
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_AP_VLAN:
-@@ -548,7 +548,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
- if (cancel_scan)
- flush_delayed_work(&local->scan_work);
-
-- if (local->open_count == 0) {
-+ if (local_read(&local->open_count) == 0) {
- if (local->ops->napi_poll)
- napi_disable(&local->napi);
- ieee80211_clear_tx_pending(local);
-diff --git a/net/mac80211/main.c b/net/mac80211/main.c
-index 7d9b21d..0687004 100644
---- a/net/mac80211/main.c
-+++ b/net/mac80211/main.c
-@@ -163,7 +163,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
- local->hw.conf.power_level = power;
- }
-
-- if (changed && local->open_count) {
-+ if (changed && local_read(&local->open_count)) {
- ret = drv_config(local, changed);
- /*
- * Goal:
-diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
-index 9ee7164..56c5061 100644
---- a/net/mac80211/pm.c
-+++ b/net/mac80211/pm.c
-@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
- struct ieee80211_sub_if_data *sdata;
- struct sta_info *sta;
-
-- if (!local->open_count)
-+ if (!local_read(&local->open_count))
- goto suspend;
-
- ieee80211_scan_cancel(local);
-@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
- cancel_work_sync(&local->dynamic_ps_enable_work);
- del_timer_sync(&local->dynamic_ps_timer);
-
-- local->wowlan = wowlan && local->open_count;
-+ local->wowlan = wowlan && local_read(&local->open_count);
- if (local->wowlan) {
- int err = drv_suspend(local, wowlan);
- if (err < 0) {
-@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
- }
-
- /* stop hardware - this must stop RX */
-- if (local->open_count)
-+ if (local_read(&local->open_count))
- ieee80211_stop_device(local);
-
- suspend:
-diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
-index 7d84b87..6a69cd9 100644
---- a/net/mac80211/rate.c
-+++ b/net/mac80211/rate.c
-@@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
-
- ASSERT_RTNL();
-
-- if (local->open_count)
-+ if (local_read(&local->open_count))
- return -EBUSY;
-
- if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
-diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
-index c97a065..ff61928 100644
---- a/net/mac80211/rc80211_pid_debugfs.c
-+++ b/net/mac80211/rc80211_pid_debugfs.c
-@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
-
- spin_unlock_irqrestore(&events->lock, status);
-
-- if (copy_to_user(buf, pb, p))
-+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
- return -EFAULT;
-
- return p;
-diff --git a/net/mac80211/util.c b/net/mac80211/util.c
-index 7095ae5..85ba5e9 100644
---- a/net/mac80211/util.c
-+++ b/net/mac80211/util.c
-@@ -1000,7 +1000,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
- drv_set_coverage_class(local, hw->wiphy->coverage_class);
-
- /* everything else happens only if HW was up & running */
-- if (!local->open_count)
-+ if (!local_read(&local->open_count))
- goto wake_up;
-
- /*
-diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
-index d5597b7..ab6d39c 100644
---- a/net/netfilter/Kconfig
-+++ b/net/netfilter/Kconfig
-@@ -779,6 +779,16 @@ config NETFILTER_XT_MATCH_ESP
-
- To compile it as a module, choose M here. If unsure, say N.
-
-+config NETFILTER_XT_MATCH_GRADM
-+ tristate '"gradm" match support'
-+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
-+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
-+ ---help---
-+ The gradm match allows to match on grsecurity RBAC being enabled.
-+ It is useful when iptables rules are applied early on bootup to
-+ prevent connections to the machine (except from a trusted host)
-+ while the RBAC system is disabled.
-+
- config NETFILTER_XT_MATCH_HASHLIMIT
- tristate '"hashlimit" match support'
- depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
-diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
-index 1a02853..5d8c22e 100644
---- a/net/netfilter/Makefile
-+++ b/net/netfilter/Makefile
-@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
- obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
- obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
- obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
-+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
- obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
- obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
- obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
-diff --git a/net/netfilter/core.c b/net/netfilter/core.c
-index afca6c7..594a841 100644
---- a/net/netfilter/core.c
-+++ b/net/netfilter/core.c
-@@ -269,7 +269,7 @@ void __init netfilter_init(void)
- }
-
- #ifdef CONFIG_PROC_FS
-- proc_net_netfilter = proc_mkdir("netfilter", init_net.proc_net);
-+ proc_net_netfilter = proc_mkdir_restrict("netfilter", init_net.proc_net);
- if (!proc_net_netfilter)
- panic("cannot create netfilter proc entry");
- #endif
-diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
-index b88dcec..d817abf27 100644
---- a/net/netfilter/ipset/ip_set_core.c
-+++ b/net/netfilter/ipset/ip_set_core.c
-@@ -1685,7 +1685,7 @@ done:
- return ret;
- }
-
--static struct nf_sockopt_ops so_set __read_mostly = {
-+static struct nf_sockopt_ops so_set = {
- .pf = PF_INET,
- .get_optmin = SO_IP_SET,
- .get_optmax = SO_IP_SET + 1,
-diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
-index e13095d..6617217 100644
---- a/net/netfilter/ipset/ip_set_hash_netiface.c
-+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
-@@ -761,7 +761,7 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
- [IPSET_ATTR_IP] = { .type = NLA_NESTED },
- [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
- [IPSET_ATTR_IFACE] = { .type = NLA_NUL_STRING,
-- .len = IPSET_MAXNAMELEN - 1 },
-+ .len = IFNAMSIZ - 1 },
- [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
- [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
- [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
-diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
-index 6422845..2c19968 100644
---- a/net/netfilter/ipvs/ip_vs_conn.c
-+++ b/net/netfilter/ipvs/ip_vs_conn.c
-@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
- /* Increase the refcnt counter of the dest */
- atomic_inc(&dest->refcnt);
-
-- conn_flags = atomic_read(&dest->conn_flags);
-+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
- if (cp->protocol != IPPROTO_UDP)
- conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
- /* Bind with the destination and its corresponding transmitter */
-@@ -868,7 +868,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
- atomic_set(&cp->refcnt, 1);
-
- atomic_set(&cp->n_control, 0);
-- atomic_set(&cp->in_pkts, 0);
-+ atomic_set_unchecked(&cp->in_pkts, 0);
-
- atomic_inc(&ipvs->conn_count);
- if (flags & IP_VS_CONN_F_NO_CPORT)
-@@ -1148,7 +1148,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
-
- /* Don't drop the entry if its number of incoming packets is not
- located in [0, 8] */
-- i = atomic_read(&cp->in_pkts);
-+ i = atomic_read_unchecked(&cp->in_pkts);
- if (i > 8 || i < 0) return 0;
-
- if (!todrop_rate[i]) return 0;
-diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
-index d864aaf..d6afbda 100644
---- a/net/netfilter/ipvs/ip_vs_core.c
-+++ b/net/netfilter/ipvs/ip_vs_core.c
-@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
- ret = cp->packet_xmit(skb, cp, pd->pp);
- /* do not touch skb anymore */
-
-- atomic_inc(&cp->in_pkts);
-+ atomic_inc_unchecked(&cp->in_pkts);
- ip_vs_conn_put(cp);
- return ret;
- }
-@@ -1621,7 +1621,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
- if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
- pkts = sysctl_sync_threshold(ipvs);
- else
-- pkts = atomic_add_return(1, &cp->in_pkts);
-+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
-
- if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
- cp->protocol == IPPROTO_SCTP) {
-diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
-index 93acfa1..e846c43 100644
---- a/net/netfilter/ipvs/ip_vs_ctl.c
-+++ b/net/netfilter/ipvs/ip_vs_ctl.c
-@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
- ip_vs_rs_hash(ipvs, dest);
- write_unlock_bh(&ipvs->rs_lock);
- }
-- atomic_set(&dest->conn_flags, conn_flags);
-+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
-
- /* bind the service */
- if (!dest->svc) {
-@@ -1666,7 +1666,7 @@ proc_do_sync_mode(ctl_table *table, int write,
- * align with netns init in ip_vs_control_net_init()
- */
-
--static struct ctl_table vs_vars[] = {
-+static ctl_table_no_const vs_vars[] __read_only = {
- {
- .procname = "amemthresh",
- .maxlen = sizeof(int),
-@@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
- " %-7s %-6d %-10d %-10d\n",
- &dest->addr.in6,
- ntohs(dest->port),
-- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
-+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
- atomic_read(&dest->weight),
- atomic_read(&dest->activeconns),
- atomic_read(&dest->inactconns));
-@@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
- "%-7s %-6d %-10d %-10d\n",
- ntohl(dest->addr.ip),
- ntohs(dest->port),
-- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
-+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
- atomic_read(&dest->weight),
- atomic_read(&dest->activeconns),
- atomic_read(&dest->inactconns));
-@@ -2503,13 +2503,14 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
- struct ip_vs_dest *dest;
- struct ip_vs_dest_entry entry;
-
-+ memset(&entry, 0, sizeof(entry));
- list_for_each_entry(dest, &svc->destinations, n_list) {
- if (count >= get->num_dests)
- break;
-
- entry.addr = dest->addr.ip;
- entry.port = dest->port;
-- entry.conn_flags = atomic_read(&dest->conn_flags);
-+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
- entry.weight = atomic_read(&dest->weight);
- entry.u_threshold = dest->u_threshold;
- entry.l_threshold = dest->l_threshold;
-@@ -3043,7 +3044,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
- NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
-
- NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
-- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
-+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
- NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
- NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
- NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
-@@ -3626,7 +3627,7 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
- {
- int idx;
- struct netns_ipvs *ipvs = net_ipvs(net);
-- struct ctl_table *tbl;
-+ ctl_table_no_const *tbl;
-
- atomic_set(&ipvs->dropentry, 0);
- spin_lock_init(&ipvs->dropentry_lock);
-diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
-index 0f16283..9ec4d21 100644
---- a/net/netfilter/ipvs/ip_vs_lblc.c
-+++ b/net/netfilter/ipvs/ip_vs_lblc.c
-@@ -115,7 +115,7 @@ struct ip_vs_lblc_table {
- * IPVS LBLC sysctl table
- */
- #ifdef CONFIG_SYSCTL
--static ctl_table vs_vars_table[] = {
-+static ctl_table_no_const vs_vars_table[] __read_only = {
- {
- .procname = "lblc_expiration",
- .data = NULL,
-diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
-index eec797f..6f3ec56 100644
---- a/net/netfilter/ipvs/ip_vs_lblcr.c
-+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
-@@ -288,7 +288,7 @@ struct ip_vs_lblcr_table {
- * IPVS LBLCR sysctl table
- */
-
--static ctl_table vs_vars_table[] = {
-+static ctl_table_no_const vs_vars_table[] __read_only = {
- {
- .procname = "lblcr_expiration",
- .data = NULL,
-diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
-index 2cbcc83..2efc3e8 100644
---- a/net/netfilter/ipvs/ip_vs_sync.c
-+++ b/net/netfilter/ipvs/ip_vs_sync.c
-@@ -649,7 +649,7 @@ control:
- * i.e only increment in_pkts for Templates.
- */
- if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
-- int pkts = atomic_add_return(1, &cp->in_pkts);
-+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
-
- if (pkts % sysctl_sync_period(ipvs) != 1)
- return;
-@@ -797,7 +797,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
-
- if (opt)
- memcpy(&cp->in_seq, opt, sizeof(*opt));
-- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
-+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
- cp->state = state;
- cp->old_state = cp->state;
- /*
-diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
-index cc8f8b4..b22ea63 100644
---- a/net/netfilter/ipvs/ip_vs_xmit.c
-+++ b/net/netfilter/ipvs/ip_vs_xmit.c
-@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
- else
- rc = NF_ACCEPT;
- /* do not touch skb anymore */
-- atomic_inc(&cp->in_pkts);
-+ atomic_inc_unchecked(&cp->in_pkts);
- goto out;
- }
-
-@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
- else
- rc = NF_ACCEPT;
- /* do not touch skb anymore */
-- atomic_inc(&cp->in_pkts);
-+ atomic_inc_unchecked(&cp->in_pkts);
- goto out;
- }
-
-diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
-index 369df3f..b660190 100644
---- a/net/netfilter/nf_conntrack_acct.c
-+++ b/net/netfilter/nf_conntrack_acct.c
-@@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
- #ifdef CONFIG_SYSCTL
- static int nf_conntrack_acct_init_sysctl(struct net *net)
- {
-- struct ctl_table *table;
-+ ctl_table_no_const *table;
-
- table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
- GFP_KERNEL);
-diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
-index 7489bd3..b7a282c 100644
---- a/net/netfilter/nf_conntrack_core.c
-+++ b/net/netfilter/nf_conntrack_core.c
-@@ -1491,6 +1491,10 @@ err_proto:
- #define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
- #define DYING_NULLS_VAL ((1<<30)+1)
-
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
-+#endif
-+
- static int nf_conntrack_init_net(struct net *net)
- {
- int ret;
-@@ -1504,7 +1508,11 @@ static int nf_conntrack_init_net(struct net *net)
- goto err_stat;
- }
-
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
-+#else
- net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
-+#endif
- if (!net->ct.slabname) {
- ret = -ENOMEM;
- goto err_slabname;
-diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
-index 14af632..9914188 100644
---- a/net/netfilter/nf_conntrack_ecache.c
-+++ b/net/netfilter/nf_conntrack_ecache.c
-@@ -185,7 +185,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
- #ifdef CONFIG_SYSCTL
- static int nf_conntrack_event_init_sysctl(struct net *net)
- {
-- struct ctl_table *table;
-+ ctl_table_no_const *table;
-
- table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
- GFP_KERNEL);
-diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
-index 8aa94ee..c854e4a 100644
---- a/net/netfilter/nf_conntrack_proto_dccp.c
-+++ b/net/netfilter/nf_conntrack_proto_dccp.c
-@@ -391,7 +391,7 @@ struct dccp_net {
- unsigned int dccp_timeout[CT_DCCP_MAX + 1];
- #ifdef CONFIG_SYSCTL
- struct ctl_table_header *sysctl_header;
-- struct ctl_table *sysctl_table;
-+ ctl_table_no_const *sysctl_table;
- #endif
- };
-
-@@ -459,7 +459,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
-
- out_invalid:
- if (LOG_INVALID(net, IPPROTO_DCCP))
-- nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, msg);
-+ nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, "%s", msg);
- return false;
- }
-
-@@ -612,7 +612,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
-
- out_invalid:
- if (LOG_INVALID(net, IPPROTO_DCCP))
-- nf_log_packet(pf, 0, skb, NULL, NULL, NULL, msg);
-+ nf_log_packet(pf, 0, skb, NULL, NULL, NULL, "%s", msg);
- return -NF_ACCEPT;
- }
-
-diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
-index 57ad466..e53ab60 100644
---- a/net/netfilter/nf_conntrack_proto_tcp.c
-+++ b/net/netfilter/nf_conntrack_proto_tcp.c
-@@ -519,7 +519,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
- const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
- __u32 seq, ack, sack, end, win, swin;
- s16 receiver_offset;
-- bool res;
-+ bool res, in_recv_win;
-
- /*
- * Get the required data from the packet.
-@@ -642,14 +642,18 @@ static bool tcp_in_window(const struct nf_conn *ct,
- receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
- receiver->td_scale);
-
-+ /* Is the ending sequence in the receive window (if available)? */
-+ in_recv_win = !receiver->td_maxwin ||
-+ after(end, sender->td_end - receiver->td_maxwin - 1);
-+
- pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
- before(seq, sender->td_maxend + 1),
-- after(end, sender->td_end - receiver->td_maxwin - 1),
-+ (in_recv_win ? 1 : 0),
- before(sack, receiver->td_end + 1),
- after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
-
- if (before(seq, sender->td_maxend + 1) &&
-- after(end, sender->td_end - receiver->td_maxwin - 1) &&
-+ in_recv_win &&
- before(sack, receiver->td_end + 1) &&
- after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
- /*
-@@ -718,7 +722,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
- nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
- "nf_ct_tcp: %s ",
- before(seq, sender->td_maxend + 1) ?
-- after(end, sender->td_end - receiver->td_maxwin - 1) ?
-+ in_recv_win ?
- before(sack, receiver->td_end + 1) ?
- after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
- : "ACK is under the lower bound (possible overly delayed ACK)"
-diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
-index 05e9feb..3b519f3 100644
---- a/net/netfilter/nf_conntrack_standalone.c
-+++ b/net/netfilter/nf_conntrack_standalone.c
-@@ -475,7 +475,7 @@ static struct ctl_path nf_ct_path[] = {
-
- static int nf_conntrack_standalone_init_sysctl(struct net *net)
- {
-- struct ctl_table *table;
-+ ctl_table_no_const *table;
-
- if (net_eq(net, &init_net)) {
- nf_ct_netfilter_header =
-diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
-index af7dd31..7535cd7 100644
---- a/net/netfilter/nf_conntrack_timestamp.c
-+++ b/net/netfilter/nf_conntrack_timestamp.c
-@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
- #ifdef CONFIG_SYSCTL
- static int nf_conntrack_tstamp_init_sysctl(struct net *net)
- {
-- struct ctl_table *table;
-+ ctl_table_no_const *table;
-
- table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
- GFP_KERNEL);
-diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
-index 957374a..dcbce7a 100644
---- a/net/netfilter/nf_log.c
-+++ b/net/netfilter/nf_log.c
-@@ -222,7 +222,7 @@ static struct ctl_path nf_log_sysctl_path[] = {
- };
-
- static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
--static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
-+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
- static struct ctl_table_header *nf_log_dir_header;
-
- static int nf_log_proc_dostring(ctl_table *table, int write,
-@@ -253,14 +253,16 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
- rcu_assign_pointer(nf_loggers[tindex], logger);
- mutex_unlock(&nf_log_mutex);
- } else {
-+ ctl_table_no_const nf_log_table = *table;
-+
- mutex_lock(&nf_log_mutex);
- logger = rcu_dereference_protected(nf_loggers[tindex],
- lockdep_is_held(&nf_log_mutex));
- if (!logger)
-- table->data = "NONE";
-+ nf_log_table.data = "NONE";
- else
-- table->data = logger->name;
-- r = proc_dostring(table, write, buffer, lenp, ppos);
-+ nf_log_table.data = logger->name;
-+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
- mutex_unlock(&nf_log_mutex);
- }
-
-diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
-index f042ae5..30ea486 100644
---- a/net/netfilter/nf_sockopt.c
-+++ b/net/netfilter/nf_sockopt.c
-@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
- }
- }
-
-- list_add(&reg->list, &nf_sockopts);
-+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
- out:
- mutex_unlock(&nf_sockopt_mutex);
- return ret;
-@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
- void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
- {
- mutex_lock(&nf_sockopt_mutex);
-- list_del(&reg->list);
-+ pax_list_del((struct list_head *)&reg->list);
- mutex_unlock(&nf_sockopt_mutex);
- }
- EXPORT_SYMBOL(nf_unregister_sockopt);
-diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
-index b4f8d84..4ffd251 100644
---- a/net/netfilter/nfnetlink.c
-+++ b/net/netfilter/nfnetlink.c
-@@ -170,8 +170,10 @@ replay:
-
- err = nla_parse(cda, ss->cb[cb_id].attr_count,
- attr, attrlen, ss->cb[cb_id].policy);
-- if (err < 0)
-+ if (err < 0) {
-+ rcu_read_unlock();
- return err;
-+ }
-
- if (nc->call_rcu) {
- err = nc->call_rcu(net->nfnl, skb, nlh,
-diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
-index 66b2c54..4ea49be 100644
---- a/net/netfilter/nfnetlink_log.c
-+++ b/net/netfilter/nfnetlink_log.c
-@@ -70,7 +70,7 @@ struct nfulnl_instance {
- };
-
- static DEFINE_SPINLOCK(instances_lock);
--static atomic_t global_seq;
-+static atomic_unchecked_t global_seq;
-
- #define INSTANCE_BUCKETS 16
- static struct hlist_head instance_table[INSTANCE_BUCKETS];
-@@ -388,6 +388,7 @@ __build_packet_message(struct nfulnl_instance *inst,
- nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = htons(inst->group_num);
-
-+ memset(&pmsg, 0, sizeof(pmsg));
- pmsg.hw_protocol = skb->protocol;
- pmsg.hook = hooknum;
-
-@@ -456,7 +457,10 @@ __build_packet_message(struct nfulnl_instance *inst,
- if (indev && skb->dev &&
- skb->mac_header != skb->network_header) {
- struct nfulnl_msg_packet_hw phw;
-- int len = dev_parse_header(skb, phw.hw_addr);
-+ int len;
-+
-+ memset(&phw, 0, sizeof(phw));
-+ len = dev_parse_header(skb, phw.hw_addr);
- if (len > 0) {
- phw.hw_addrlen = htons(len);
- NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
-@@ -502,7 +506,7 @@ __build_packet_message(struct nfulnl_instance *inst,
- /* global sequence number */
- if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
- NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
-- htonl(atomic_inc_return(&global_seq)));
-+ htonl(atomic_inc_return_unchecked(&global_seq)));
-
- if (data_len) {
- struct nlattr *nla;
-diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
-index a80b0cb..f7e08e7 100644
---- a/net/netfilter/nfnetlink_queue.c
-+++ b/net/netfilter/nfnetlink_queue.c
-@@ -344,7 +344,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
- if (indev && entskb->dev &&
- entskb->mac_header != entskb->network_header) {
- struct nfqnl_msg_packet_hw phw;
-- int len = dev_parse_header(entskb, phw.hw_addr);
-+ int len;
-+
-+ memset(&phw, 0, sizeof(phw));
-+ len = dev_parse_header(entskb, phw.hw_addr);
- if (len) {
- phw.hw_addrlen = htons(len);
- NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
-diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
-index 9e63b43f..a61bc90 100644
---- a/net/netfilter/xt_TCPMSS.c
-+++ b/net/netfilter/xt_TCPMSS.c
-@@ -50,7 +50,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
- unsigned int minlen)
- {
- struct tcphdr *tcph;
-- unsigned int tcplen, i;
-+ int len, tcp_hdrlen;
-+ unsigned int i;
- __be16 oldval;
- u16 newmss;
- u8 *opt;
-@@ -58,11 +59,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
- if (!skb_make_writable(skb, skb->len))
- return -1;
-
-- tcplen = skb->len - tcphoff;
-+ len = skb->len - tcphoff;
-+ if (len < (int)sizeof(struct tcphdr))
-+ return -1;
-+
- tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
-+ tcp_hdrlen = tcph->doff * 4;
-
-- /* Header cannot be larger than the packet */
-- if (tcplen < tcph->doff*4)
-+ if (len < tcp_hdrlen)
- return -1;
-
- if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
-@@ -83,9 +87,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
- newmss = info->mss;
-
- opt = (u_int8_t *)tcph;
-- for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
-- if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
-- opt[i+1] == TCPOLEN_MSS) {
-+ for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
-+ if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
- u_int16_t oldmss;
-
- oldmss = (opt[i+2] << 8) | opt[i+3];
-@@ -108,9 +111,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
- }
-
- /* There is data after the header so the option can't be added
-- without moving it, and doing so may make the SYN packet
-- itself too large. Accept the packet unmodified instead. */
-- if (tcplen > tcph->doff*4)
-+ * without moving it, and doing so may make the SYN packet
-+ * itself too large. Accept the packet unmodified instead.
-+ */
-+ if (len > tcp_hdrlen)
- return 0;
-
- /*
-@@ -127,10 +131,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
- skb_put(skb, TCPOLEN_MSS);
-
- opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
-- memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
-+ memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
-
- inet_proto_csum_replace2(&tcph->check, skb,
-- htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
-+ htons(len), htons(len + TCPOLEN_MSS), 1);
- opt[0] = TCPOPT_MSS;
- opt[1] = TCPOLEN_MSS;
- opt[2] = (newmss & 0xff00) >> 8;
-diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
-new file mode 100644
-index 0000000..c566332
---- /dev/null
-+++ b/net/netfilter/xt_gradm.c
-@@ -0,0 +1,51 @@
-+/*
-+ * gradm match for netfilter
-+ * Copyright © Zbigniew Krzystolik, 2010
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License; either version
-+ * 2 or 3 as published by the Free Software Foundation.
-+ */
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter/x_tables.h>
-+#include <linux/grsecurity.h>
-+#include <linux/netfilter/xt_gradm.h>
-+
-+static bool
-+gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
-+{
-+ const struct xt_gradm_mtinfo *info = par->matchinfo;
-+ bool retval = false;
-+ if (gr_acl_is_enabled())
-+ retval = true;
-+ return retval ^ info->invflags;
-+}
-+
-+static struct xt_match gradm_mt_reg __read_mostly = {
-+ .name = "gradm",
-+ .revision = 0,
-+ .family = NFPROTO_UNSPEC,
-+ .match = gradm_mt,
-+ .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
-+ .me = THIS_MODULE,
-+};
-+
-+static int __init gradm_mt_init(void)
-+{
-+ return xt_register_match(&gradm_mt_reg);
-+}
-+
-+static void __exit gradm_mt_exit(void)
-+{
-+ xt_unregister_match(&gradm_mt_reg);
-+}
-+
-+module_init(gradm_mt_init);
-+module_exit(gradm_mt_exit);
-+MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
-+MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
-+MODULE_LICENSE("GPL");
-+MODULE_ALIAS("ipt_gradm");
-+MODULE_ALIAS("ip6t_gradm");
-diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
-index 8f3f280..3f68faf 100644
---- a/net/netfilter/xt_hashlimit.c
-+++ b/net/netfilter/xt_hashlimit.c
-@@ -755,11 +755,11 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
- {
- struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
-
-- hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net);
-+ hashlimit_net->ipt_hashlimit = proc_mkdir_restrict("ipt_hashlimit", net->proc_net);
- if (!hashlimit_net->ipt_hashlimit)
- return -ENOMEM;
- #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
-- hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
-+ hashlimit_net->ip6t_hashlimit = proc_mkdir_restrict("ip6t_hashlimit", net->proc_net);
- if (!hashlimit_net->ip6t_hashlimit) {
- proc_net_remove(net, "ipt_hashlimit");
- return -ENOMEM;
-diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
-index d2ff15a..cdeb1f2 100644
---- a/net/netfilter/xt_recent.c
-+++ b/net/netfilter/xt_recent.c
-@@ -574,7 +574,7 @@ static int __net_init recent_proc_net_init(struct net *net)
- {
- struct recent_net *recent_net = recent_pernet(net);
-
-- recent_net->xt_recent = proc_mkdir("xt_recent", net->proc_net);
-+ recent_net->xt_recent = proc_mkdir_restrict("xt_recent", net->proc_net);
- if (!recent_net->xt_recent)
- return -ENOMEM;
- return 0;
-diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
-index 4fe4fb4..87a89e5 100644
---- a/net/netfilter/xt_statistic.c
-+++ b/net/netfilter/xt_statistic.c
-@@ -19,7 +19,7 @@
- #include <linux/module.h>
-
- struct xt_statistic_priv {
-- atomic_t count;
-+ atomic_unchecked_t count;
- } ____cacheline_aligned_in_smp;
-
- MODULE_LICENSE("GPL");
-@@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
- break;
- case XT_STATISTIC_MODE_NTH:
- do {
-- oval = atomic_read(&info->master->count);
-+ oval = atomic_read_unchecked(&info->master->count);
- nval = (oval == info->u.nth.every) ? 0 : oval + 1;
-- } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
-+ } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
- if (nval == 0)
- ret = !ret;
- break;
-@@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
- info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
- if (info->master == NULL)
- return -ENOMEM;
-- atomic_set(&info->master->count, info->u.nth.count);
-+ atomic_set_unchecked(&info->master->count, info->u.nth.count);
-
- return 0;
- }
-diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
-index 2369e96..7aadc6a 100644
---- a/net/netlink/af_netlink.c
-+++ b/net/netlink/af_netlink.c
-@@ -706,8 +706,8 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
- if (addr->sa_family != AF_NETLINK)
- return -EINVAL;
-
-- /* Only superuser is allowed to send multicasts */
-- if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
-+ if ((nladdr->nl_groups || nladdr->nl_pid) &&
-+ !netlink_capable(sock, NL_NONROOT_SEND))
- return -EPERM;
-
- if (!nlk->pid)
-@@ -753,7 +753,7 @@ static void netlink_overrun(struct sock *sk)
- sk->sk_error_report(sk);
- }
- }
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- }
-
- static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
-@@ -2011,7 +2011,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
- sk_wmem_alloc_get(s),
- nlk->cb,
- atomic_read(&s->sk_refcnt),
-- atomic_read(&s->sk_drops),
-+ atomic_read_unchecked(&s->sk_drops),
- sock_i_ino(s)
- );
-
-@@ -2118,7 +2118,7 @@ static void __init netlink_add_usersock_entry(void)
- netlink_table_ungrab();
- }
-
--static struct pernet_operations __net_initdata netlink_net_ops = {
-+static struct pernet_operations __net_initconst netlink_net_ops = {
- .init = netlink_net_init,
- .exit = netlink_net_exit,
- };
-diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
-index 874f8ff..339bb58 100644
---- a/net/netlink/genetlink.c
-+++ b/net/netlink/genetlink.c
-@@ -288,18 +288,20 @@ int genl_register_ops(struct genl_family *family, struct genl_ops *ops)
- goto errout;
- }
-
-+ pax_open_kernel();
- if (ops->dumpit)
-- ops->flags |= GENL_CMD_CAP_DUMP;
-+ *(unsigned int *)&ops->flags |= GENL_CMD_CAP_DUMP;
- if (ops->doit)
-- ops->flags |= GENL_CMD_CAP_DO;
-+ *(unsigned int *)&ops->flags |= GENL_CMD_CAP_DO;
- if (ops->policy)
-- ops->flags |= GENL_CMD_CAP_HASPOL;
-+ *(unsigned int *)&ops->flags |= GENL_CMD_CAP_HASPOL;
-+ pax_close_kernel();
-
- genl_lock();
-- list_add_tail(&ops->ops_list, &family->ops_list);
-+ pax_list_add_tail((struct list_head *)&ops->ops_list, &family->ops_list);
- genl_unlock();
-
-- genl_ctrl_event(CTRL_CMD_NEWOPS, ops);
-+ genl_ctrl_event(CTRL_CMD_NEWOPS, (void *)ops);
- err = 0;
- errout:
- return err;
-@@ -329,9 +331,9 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
- genl_lock();
- list_for_each_entry(rc, &family->ops_list, ops_list) {
- if (rc == ops) {
-- list_del(&ops->ops_list);
-+ pax_list_del((struct list_head *)&ops->ops_list);
- genl_unlock();
-- genl_ctrl_event(CTRL_CMD_DELOPS, ops);
-+ genl_ctrl_event(CTRL_CMD_DELOPS, (void *)ops);
- return 0;
- }
- }
-diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
-index b4d889b..bb33240 100644
---- a/net/netrom/af_netrom.c
-+++ b/net/netrom/af_netrom.c
-@@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
- struct sock *sk = sock->sk;
- struct nr_sock *nr = nr_sk(sk);
-
-+ memset(sax, 0, sizeof(*sax));
- lock_sock(sk);
- if (peer != 0) {
- if (sk->sk_state != TCP_ESTABLISHED) {
-@@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
- *uaddr_len = sizeof(struct full_sockaddr_ax25);
- } else {
- sax->fsa_ax25.sax25_family = AF_NETROM;
-- sax->fsa_ax25.sax25_ndigis = 0;
- sax->fsa_ax25.sax25_call = nr->source_addr;
- *uaddr_len = sizeof(struct sockaddr_ax25);
- }
-diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
-index 4f19bf2..e3a2b51 100644
---- a/net/packet/af_packet.c
-+++ b/net/packet/af_packet.c
-@@ -195,6 +195,7 @@ struct tpacket_kbdq_core {
- char *pkblk_start;
- char *pkblk_end;
- int kblk_size;
-+ unsigned int max_frame_len;
- unsigned int knum_blocks;
- uint64_t knxt_seq_num;
- char *prev;
-@@ -616,6 +617,7 @@ static void init_prb_bdqc(struct packet_sock *po,
- p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
- p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
-
-+ p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
- prb_init_ft_ops(p1, req_u);
- prb_setup_retire_blk_timer(po, tx_ring);
- prb_open_block(p1, pbd);
-@@ -1678,7 +1680,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
-
- spin_lock(&sk->sk_receive_queue.lock);
- po->stats.tp_packets++;
-- skb->dropcount = atomic_read(&sk->sk_drops);
-+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
- __skb_queue_tail(&sk->sk_receive_queue, skb);
- spin_unlock(&sk->sk_receive_queue.lock);
- sk->sk_data_ready(sk, skb->len);
-@@ -1687,7 +1689,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
- drop_n_acct:
- spin_lock(&sk->sk_receive_queue.lock);
- po->stats.tp_drops++;
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- spin_unlock(&sk->sk_receive_queue.lock);
-
- drop_n_restore:
-@@ -1778,6 +1780,18 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
- if ((int)snaplen < 0)
- snaplen = 0;
- }
-+ } else if (unlikely(macoff + snaplen >
-+ GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
-+ u32 nval;
-+
-+ nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
-+ pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
-+ snaplen, nval, macoff);
-+ snaplen = nval;
-+ if (unlikely((int)snaplen < 0)) {
-+ snaplen = 0;
-+ macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
-+ }
- }
- spin_lock(&sk->sk_receive_queue.lock);
- h.raw = packet_current_rx_frame(po, skb,
-@@ -2623,6 +2637,7 @@ out:
-
- static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
- {
-+ struct sock_extended_err ee;
- struct sock_exterr_skb *serr;
- struct sk_buff *skb, *skb2;
- int copied, err;
-@@ -2644,8 +2659,9 @@ static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
- sock_recv_timestamp(msg, sk, skb);
-
- serr = SKB_EXT_ERR(skb);
-+ ee = serr->ee;
- put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
-- sizeof(serr->ee), &serr->ee);
-+ sizeof ee, &ee);
-
- msg->msg_flags |= MSG_ERRQUEUE;
- err = copied;
-@@ -3273,7 +3289,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
- case PACKET_HDRLEN:
- if (len > sizeof(int))
- len = sizeof(int);
-- if (copy_from_user(&val, optval, len))
-+ if (len > sizeof(val) || copy_from_user(&val, optval, len))
- return -EFAULT;
- switch (val) {
- case TPACKET_V1:
-@@ -3323,7 +3339,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
-
- if (put_user(len, optlen))
- return -EFAULT;
-- if (copy_to_user(optval, data, len))
-+
-+ if ((data == &val && len > sizeof(val)) ||
-+ (data == &st_u.stats3 && len > sizeof(st_u.stats3)) ||
-+ (data == &st && len > sizeof(st)) ||
-+ copy_to_user(optval, data, len))
- return -EFAULT;
- return 0;
- }
-@@ -3614,6 +3634,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
- goto out;
- if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
- goto out;
-+ if (po->tp_version >= TPACKET_V3 &&
-+ (int)(req->tp_block_size -
-+ BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
-+ goto out;
- if (unlikely(req->tp_frame_size < po->tp_hdrlen +
- po->tp_reserve))
- goto out;
-diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
-index d65f699..855d175 100644
---- a/net/phonet/af_phonet.c
-+++ b/net/phonet/af_phonet.c
-@@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
- {
- int err = 0;
-
-- if (protocol >= PHONET_NPROTO)
-+ if (protocol < 0 || protocol >= PHONET_NPROTO)
- return -EINVAL;
-
- err = proto_register(pp->prot, 1);
-diff --git a/net/phonet/pep.c b/net/phonet/pep.c
-index 007546d..9a8e5c6 100644
---- a/net/phonet/pep.c
-+++ b/net/phonet/pep.c
-@@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
-
- case PNS_PEP_CTRL_REQ:
- if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- break;
- }
- __skb_pull(skb, 4);
-@@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
- }
-
- if (pn->rx_credits == 0) {
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- err = -ENOBUFS;
- break;
- }
-@@ -557,7 +557,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
- }
-
- if (pn->rx_credits == 0) {
-- atomic_inc(&sk->sk_drops);
-+ atomic_inc_unchecked(&sk->sk_drops);
- err = NET_RX_DROP;
- break;
- }
-diff --git a/net/phonet/socket.c b/net/phonet/socket.c
-index 4c7eff3..59c727f 100644
---- a/net/phonet/socket.c
-+++ b/net/phonet/socket.c
-@@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
- pn->resource, sk->sk_state,
- sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
- sock_i_uid(sk), sock_i_ino(sk),
-- atomic_read(&sk->sk_refcnt), sk,
-- atomic_read(&sk->sk_drops), &len);
-+ atomic_read(&sk->sk_refcnt),
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+ NULL,
-+#else
-+ sk,
-+#endif
-+ atomic_read_unchecked(&sk->sk_drops), &len);
- }
- seq_printf(seq, "%*s\n", 127 - len, "");
- return 0;
-diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
-index cea1c7d..e74ee16 100644
---- a/net/phonet/sysctl.c
-+++ b/net/phonet/sysctl.c
-@@ -62,7 +62,7 @@ static int proc_local_port_range(ctl_table *table, int write,
- {
- int ret;
- int range[2] = {local_port_range[0], local_port_range[1]};
-- ctl_table tmp = {
-+ ctl_table_no_const tmp = {
- .data = &range,
- .maxlen = sizeof(range),
- .mode = table->mode,
-diff --git a/net/rds/cong.c b/net/rds/cong.c
-index e5b65ac..f3b6fb7 100644
---- a/net/rds/cong.c
-+++ b/net/rds/cong.c
-@@ -78,7 +78,7 @@
- * finds that the saved generation number is smaller than the global generation
- * number, it wakes up the process.
- */
--static atomic_t rds_cong_generation = ATOMIC_INIT(0);
-+static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
-
- /*
- * Congestion monitoring
-@@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
- rdsdebug("waking map %p for %pI4\n",
- map, &map->m_addr);
- rds_stats_inc(s_cong_update_received);
-- atomic_inc(&rds_cong_generation);
-+ atomic_inc_unchecked(&rds_cong_generation);
- if (waitqueue_active(&map->m_waitq))
- wake_up(&map->m_waitq);
- if (waitqueue_active(&rds_poll_waitq))
-@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
-
- int rds_cong_updated_since(unsigned long *recent)
- {
-- unsigned long gen = atomic_read(&rds_cong_generation);
-+ unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
-
- if (likely(*recent == gen))
- return 0;
-diff --git a/net/rds/connection.c b/net/rds/connection.c
-index 9e07c75..da1c1fe 100644
---- a/net/rds/connection.c
-+++ b/net/rds/connection.c
-@@ -188,6 +188,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
- }
-
- atomic_set(&conn->c_state, RDS_CONN_DOWN);
-+ conn->c_send_gen = 0;
- conn->c_reconnect_jiffies = 0;
- INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
- INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
-diff --git a/net/rds/ib.h b/net/rds/ib.h
-index edfaaaf..8c89879 100644
---- a/net/rds/ib.h
-+++ b/net/rds/ib.h
-@@ -128,7 +128,7 @@ struct rds_ib_connection {
- /* sending acks */
- unsigned long i_ack_flags;
- #ifdef KERNEL_HAS_ATOMIC64
-- atomic64_t i_ack_next; /* next ACK to send */
-+ atomic64_unchecked_t i_ack_next; /* next ACK to send */
- #else
- spinlock_t i_ack_lock; /* protect i_ack_next */
- u64 i_ack_next; /* next ACK to send */
-diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
-index 51c8689..36c555f 100644
---- a/net/rds/ib_cm.c
-+++ b/net/rds/ib_cm.c
-@@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
- /* Clear the ACK state */
- clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
- #ifdef KERNEL_HAS_ATOMIC64
-- atomic64_set(&ic->i_ack_next, 0);
-+ atomic64_set_unchecked(&ic->i_ack_next, 0);
- #else
- ic->i_ack_next = 0;
- #endif
-diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
-index e29e0ca..fa3a6a3 100644
---- a/net/rds/ib_recv.c
-+++ b/net/rds/ib_recv.c
-@@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
- static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
- int ack_required)
- {
-- atomic64_set(&ic->i_ack_next, seq);
-+ atomic64_set_unchecked(&ic->i_ack_next, seq);
- if (ack_required) {
- smp_mb__before_clear_bit();
- set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
-@@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
- clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
- smp_mb__after_clear_bit();
-
-- return atomic64_read(&ic->i_ack_next);
-+ return atomic64_read_unchecked(&ic->i_ack_next);
- }
- #endif
-
-diff --git a/net/rds/iw.h b/net/rds/iw.h
-index 04ce3b1..48119a6 100644
---- a/net/rds/iw.h
-+++ b/net/rds/iw.h
-@@ -134,7 +134,7 @@ struct rds_iw_connection {
- /* sending acks */
- unsigned long i_ack_flags;
- #ifdef KERNEL_HAS_ATOMIC64
-- atomic64_t i_ack_next; /* next ACK to send */
-+ atomic64_unchecked_t i_ack_next; /* next ACK to send */
- #else
- spinlock_t i_ack_lock; /* protect i_ack_next */
- u64 i_ack_next; /* next ACK to send */
-diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
-index 9556d28..f046d0e 100644
---- a/net/rds/iw_cm.c
-+++ b/net/rds/iw_cm.c
-@@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
- /* Clear the ACK state */
- clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
- #ifdef KERNEL_HAS_ATOMIC64
-- atomic64_set(&ic->i_ack_next, 0);
-+ atomic64_set_unchecked(&ic->i_ack_next, 0);
- #else
- ic->i_ack_next = 0;
- #endif
-diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
-index 5e57347..3916042 100644
---- a/net/rds/iw_recv.c
-+++ b/net/rds/iw_recv.c
-@@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
- static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
- int ack_required)
- {
-- atomic64_set(&ic->i_ack_next, seq);
-+ atomic64_set_unchecked(&ic->i_ack_next, seq);
- if (ack_required) {
- smp_mb__before_clear_bit();
- set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
-@@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
- clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
- smp_mb__after_clear_bit();
-
-- return atomic64_read(&ic->i_ack_next);
-+ return atomic64_read_unchecked(&ic->i_ack_next);
- }
- #endif
-
-diff --git a/net/rds/rdma.c b/net/rds/rdma.c
-index 4e37c1c..40084d8 100644
---- a/net/rds/rdma.c
-+++ b/net/rds/rdma.c
-@@ -564,12 +564,12 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
-
- if (rs->rs_bound_addr == 0) {
- ret = -ENOTCONN; /* XXX not a great errno */
-- goto out;
-+ goto out_ret;
- }
-
- if (args->nr_local > UIO_MAXIOV) {
- ret = -EMSGSIZE;
-- goto out;
-+ goto out_ret;
- }
-
- /* Check whether to allocate the iovec area */
-@@ -578,7 +578,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
- iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
- if (!iovs) {
- ret = -ENOMEM;
-- goto out;
-+ goto out_ret;
- }
- }
-
-@@ -696,6 +696,7 @@ out:
- if (iovs != iovstack)
- sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
- kfree(pages);
-+out_ret:
- if (ret)
- rds_rdma_free_op(op);
- else
-diff --git a/net/rds/rds.h b/net/rds/rds.h
-index 7eaba18..b322557 100644
---- a/net/rds/rds.h
-+++ b/net/rds/rds.h
-@@ -110,6 +110,7 @@ struct rds_connection {
- void *c_transport_data;
-
- atomic_t c_state;
-+ unsigned long c_send_gen;
- unsigned long c_flags;
- unsigned long c_reconnect_jiffies;
- struct delayed_work c_send_w;
-@@ -449,7 +450,7 @@ struct rds_transport {
- void (*sync_mr)(void *trans_private, int direction);
- void (*free_mr)(void *trans_private, int invalidate);
- void (*flush_mrs)(void);
--};
-+} __do_const;
-
- struct rds_sock {
- struct sock rs_sk;
-diff --git a/net/rds/send.c b/net/rds/send.c
-index 88eace5..f33ba5d 100644
---- a/net/rds/send.c
-+++ b/net/rds/send.c
-@@ -140,8 +140,11 @@ int rds_send_xmit(struct rds_connection *conn)
- struct scatterlist *sg;
- int ret = 0;
- LIST_HEAD(to_be_dropped);
-+ int batch_count;
-+ unsigned long send_gen = 0;
-
- restart:
-+ batch_count = 0;
-
- /*
- * sendmsg calls here after having queued its message on the send
-@@ -157,6 +160,17 @@ restart:
- }
-
- /*
-+ * we record the send generation after doing the xmit acquire.
-+ * if someone else manages to jump in and do some work, we'll use
-+ * this to avoid a goto restart farther down.
-+ *
-+ * The acquire_in_xmit() check above ensures that only one
-+ * caller can increment c_send_gen at any time.
-+ */
-+ conn->c_send_gen++;
-+ send_gen = conn->c_send_gen;
-+
-+ /*
- * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
- * we do the opposite to avoid races.
- */
-@@ -202,6 +216,16 @@ restart:
- if (!rm) {
- unsigned int len;
-
-+ batch_count++;
-+
-+ /* we want to process as big a batch as we can, but
-+ * we also want to avoid softlockups. If we've been
-+ * through a lot of messages, lets back off and see
-+ * if anyone else jumps in
-+ */
-+ if (batch_count >= 1024)
-+ goto over_batch;
-+
- spin_lock_irqsave(&conn->c_lock, flags);
-
- if (!list_empty(&conn->c_send_queue)) {
-@@ -357,9 +381,9 @@ restart:
- }
- }
-
-+over_batch:
- if (conn->c_trans->xmit_complete)
- conn->c_trans->xmit_complete(conn);
--
- release_in_xmit(conn);
-
- /* Nuke any messages we decided not to retransmit. */
-@@ -380,10 +404,15 @@ restart:
- * If the transport cannot continue (i.e ret != 0), then it must
- * call us when more room is available, such as from the tx
- * completion handler.
-+ *
-+ * We have an extra generation check here so that if someone manages
-+ * to jump in after our release_in_xmit, we'll see that they have done
-+ * some work and we will skip our goto
- */
- if (ret == 0) {
- smp_mb();
-- if (!list_empty(&conn->c_send_queue)) {
-+ if (!list_empty(&conn->c_send_queue) &&
-+ send_gen == conn->c_send_gen) {
- rds_stats_inc(s_send_lock_queue_raced);
- goto restart;
- }
-@@ -593,8 +622,11 @@ static void rds_send_remove_from_sock(struct list_head *messages, int status)
- sock_put(rds_rs_to_sk(rs));
- }
- rs = rm->m_rs;
-- sock_hold(rds_rs_to_sk(rs));
-+ if (rs)
-+ sock_hold(rds_rs_to_sk(rs));
- }
-+ if (!rs)
-+ goto unlock_and_drop;
- spin_lock(&rs->rs_lock);
-
- if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
-@@ -638,9 +670,6 @@ unlock_and_drop:
- * queue. This means that in the TCP case, the message may not have been
- * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
- * checks the RDS_MSG_HAS_ACK_SEQ bit.
-- *
-- * XXX It's not clear to me how this is safely serialized with socket
-- * destruction. Maybe it should bail if it sees SOCK_DEAD.
- */
- void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
- is_acked_func is_acked)
-@@ -711,6 +740,9 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
- */
- if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
- spin_unlock_irqrestore(&conn->c_lock, flags);
-+ spin_lock_irqsave(&rm->m_rs_lock, flags);
-+ rm->m_rs = NULL;
-+ spin_unlock_irqrestore(&rm->m_rs_lock, flags);
- continue;
- }
- list_del_init(&rm->m_conn_item);
-diff --git a/net/rds/tcp.c b/net/rds/tcp.c
-index edac9ef..16bcb98 100644
---- a/net/rds/tcp.c
-+++ b/net/rds/tcp.c
-@@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
- int val = 1;
-
- set_fs(KERNEL_DS);
-- sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
-+ sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
- sizeof(val));
- set_fs(oldfs);
- }
-diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
-index 1b4fd68..2234175 100644
---- a/net/rds/tcp_send.c
-+++ b/net/rds/tcp_send.c
-@@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
-
- oldfs = get_fs();
- set_fs(KERNEL_DS);
-- sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
-+ sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
- sizeof(val));
- set_fs(oldfs);
- }
-diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
-index 74c064c..fdec26f 100644
---- a/net/rxrpc/af_rxrpc.c
-+++ b/net/rxrpc/af_rxrpc.c
-@@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
- __be32 rxrpc_epoch;
-
- /* current debugging ID */
--atomic_t rxrpc_debug_id;
-+atomic_unchecked_t rxrpc_debug_id;
-
- /* count of skbs currently in use */
- atomic_t rxrpc_n_skbs;
-diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
-index f99cfce..cc529dd 100644
---- a/net/rxrpc/ar-ack.c
-+++ b/net/rxrpc/ar-ack.c
-@@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
-
- _enter("{%d,%d,%d,%d},",
- call->acks_hard, call->acks_unacked,
-- atomic_read(&call->sequence),
-+ atomic_read_unchecked(&call->sequence),
- CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
-
- stop = 0;
-@@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
-
- /* each Tx packet has a new serial number */
- sp->hdr.serial =
-- htonl(atomic_inc_return(&call->conn->serial));
-+ htonl(atomic_inc_return_unchecked(&call->conn->serial));
-
- hdr = (struct rxrpc_header *) txb->head;
- hdr->serial = sp->hdr.serial;
-@@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
- */
- static void rxrpc_clear_tx_window(struct rxrpc_call *call)
- {
-- rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
-+ rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
- }
-
- /*
-@@ -629,7 +629,7 @@ process_further:
-
- latest = ntohl(sp->hdr.serial);
- hard = ntohl(ack.firstPacket);
-- tx = atomic_read(&call->sequence);
-+ tx = atomic_read_unchecked(&call->sequence);
-
- _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
- latest,
-@@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
- goto maybe_reschedule;
-
- send_ACK_with_skew:
-- ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
-+ ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
- ntohl(ack.serial));
- send_ACK:
- mtu = call->conn->trans->peer->if_mtu;
-@@ -1173,7 +1173,7 @@ send_ACK:
- ackinfo.rxMTU = htonl(5692);
- ackinfo.jumbo_max = htonl(4);
-
-- hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
-+ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
- _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
- ntohl(hdr.serial),
- ntohs(ack.maxSkew),
-@@ -1191,7 +1191,7 @@ send_ACK:
- send_message:
- _debug("send message");
-
-- hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
-+ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
- _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
- send_message_2:
-
-diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
-index bf656c2..48f9d27 100644
---- a/net/rxrpc/ar-call.c
-+++ b/net/rxrpc/ar-call.c
-@@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
- spin_lock_init(&call->lock);
- rwlock_init(&call->state_lock);
- atomic_set(&call->usage, 1);
-- call->debug_id = atomic_inc_return(&rxrpc_debug_id);
-+ call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
- call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
-
- memset(&call->sock_node, 0xed, sizeof(call->sock_node));
-diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
-index 4106ca9..a338d7a 100644
---- a/net/rxrpc/ar-connection.c
-+++ b/net/rxrpc/ar-connection.c
-@@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
- rwlock_init(&conn->lock);
- spin_lock_init(&conn->state_lock);
- atomic_set(&conn->usage, 1);
-- conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
-+ conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
- conn->avail_calls = RXRPC_MAXCALLS;
- conn->size_align = 4;
- conn->header_size = sizeof(struct rxrpc_header);
-diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
-index e7ed43a..6afa140 100644
---- a/net/rxrpc/ar-connevent.c
-+++ b/net/rxrpc/ar-connevent.c
-@@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
-
- len = iov[0].iov_len + iov[1].iov_len;
-
-- hdr.serial = htonl(atomic_inc_return(&conn->serial));
-+ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
- _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
-
- ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
-diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
-index 1a2b0633..e8d1382e 100644
---- a/net/rxrpc/ar-input.c
-+++ b/net/rxrpc/ar-input.c
-@@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
- /* track the latest serial number on this connection for ACK packet
- * information */
- serial = ntohl(sp->hdr.serial);
-- hi_serial = atomic_read(&call->conn->hi_serial);
-+ hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
- while (serial > hi_serial)
-- hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
-+ hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
- serial);
-
- /* request ACK generation for any ACK or DATA packet that requests
-diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
-index 8e22bd3..f66d1c0 100644
---- a/net/rxrpc/ar-internal.h
-+++ b/net/rxrpc/ar-internal.h
-@@ -272,8 +272,8 @@ struct rxrpc_connection {
- int error; /* error code for local abort */
- int debug_id; /* debug ID for printks */
- unsigned call_counter; /* call ID counter */
-- atomic_t serial; /* packet serial number counter */
-- atomic_t hi_serial; /* highest serial number received */
-+ atomic_unchecked_t serial; /* packet serial number counter */
-+ atomic_unchecked_t hi_serial; /* highest serial number received */
- u8 avail_calls; /* number of calls available */
- u8 size_align; /* data size alignment (for security) */
- u8 header_size; /* rxrpc + security header size */
-@@ -346,7 +346,7 @@ struct rxrpc_call {
- spinlock_t lock;
- rwlock_t state_lock; /* lock for state transition */
- atomic_t usage;
-- atomic_t sequence; /* Tx data packet sequence counter */
-+ atomic_unchecked_t sequence; /* Tx data packet sequence counter */
- u32 abort_code; /* local/remote abort code */
- enum { /* current state of call */
- RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
-@@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
- */
- extern atomic_t rxrpc_n_skbs;
- extern __be32 rxrpc_epoch;
--extern atomic_t rxrpc_debug_id;
-+extern atomic_unchecked_t rxrpc_debug_id;
- extern struct workqueue_struct *rxrpc_workqueue;
-
- /*
-diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
-index 43ea7de..eafaa2e 100644
---- a/net/rxrpc/ar-key.c
-+++ b/net/rxrpc/ar-key.c
-@@ -232,7 +232,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
- if (toklen <= (n_parts + 1) * 4)
- return -EINVAL;
-
-- princ->name_parts = kcalloc(sizeof(char *), n_parts, GFP_KERNEL);
-+ princ->name_parts = kcalloc(n_parts, sizeof(char *), GFP_KERNEL);
- if (!princ->name_parts)
- return -ENOMEM;
-
-@@ -356,7 +356,7 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td,
-
- _debug("n_elem %d", n_elem);
-
-- td = kcalloc(sizeof(struct krb5_tagged_data), n_elem,
-+ td = kcalloc(n_elem, sizeof(struct krb5_tagged_data),
- GFP_KERNEL);
- if (!td)
- return -ENOMEM;
-diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
-index 87f7135..74d3703 100644
---- a/net/rxrpc/ar-local.c
-+++ b/net/rxrpc/ar-local.c
-@@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
- spin_lock_init(&local->lock);
- rwlock_init(&local->services_lock);
- atomic_set(&local->usage, 1);
-- local->debug_id = atomic_inc_return(&rxrpc_debug_id);
-+ local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
- memcpy(&local->srx, srx, sizeof(*srx));
- }
-
-diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
-index 338d793..47391d0 100644
---- a/net/rxrpc/ar-output.c
-+++ b/net/rxrpc/ar-output.c
-@@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
- sp->hdr.cid = call->cid;
- sp->hdr.callNumber = call->call_id;
- sp->hdr.seq =
-- htonl(atomic_inc_return(&call->sequence));
-+ htonl(atomic_inc_return_unchecked(&call->sequence));
- sp->hdr.serial =
-- htonl(atomic_inc_return(&conn->serial));
-+ htonl(atomic_inc_return_unchecked(&conn->serial));
- sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
- sp->hdr.userStatus = 0;
- sp->hdr.securityIndex = conn->security_ix;
-diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
-index 2754f09..b20e38f 100644
---- a/net/rxrpc/ar-peer.c
-+++ b/net/rxrpc/ar-peer.c
-@@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
- INIT_LIST_HEAD(&peer->error_targets);
- spin_lock_init(&peer->lock);
- atomic_set(&peer->usage, 1);
-- peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
-+ peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
- memcpy(&peer->srx, srx, sizeof(*srx));
-
- rxrpc_assess_MTU_size(peer);
-diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
-index 38047f7..9f48511 100644
---- a/net/rxrpc/ar-proc.c
-+++ b/net/rxrpc/ar-proc.c
-@@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
- atomic_read(&conn->usage),
- rxrpc_conn_states[conn->state],
- key_serial(conn->key),
-- atomic_read(&conn->serial),
-- atomic_read(&conn->hi_serial));
-+ atomic_read_unchecked(&conn->serial),
-+ atomic_read_unchecked(&conn->hi_serial));
-
- return 0;
- }
-diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
-index 92df566..87ec1bf 100644
---- a/net/rxrpc/ar-transport.c
-+++ b/net/rxrpc/ar-transport.c
-@@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
- spin_lock_init(&trans->client_lock);
- rwlock_init(&trans->conn_lock);
- atomic_set(&trans->usage, 1);
-- trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
-+ trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
-
- if (peer->srx.transport.family == AF_INET) {
- switch (peer->srx.transport_type) {
-diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
-index 7635107..4670276 100644
---- a/net/rxrpc/rxkad.c
-+++ b/net/rxrpc/rxkad.c
-@@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
-
- len = iov[0].iov_len + iov[1].iov_len;
-
-- hdr.serial = htonl(atomic_inc_return(&conn->serial));
-+ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
- _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
-
- ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
-@@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
-
- len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
-
-- hdr->serial = htonl(atomic_inc_return(&conn->serial));
-+ hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
- _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
-
- ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
-diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
-index 0b6a391..febcef2 100644
---- a/net/sctp/ipv6.c
-+++ b/net/sctp/ipv6.c
-@@ -961,7 +961,7 @@ static const struct inet6_protocol sctpv6_protocol = {
- .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
- };
-
--static struct sctp_af sctp_af_inet6 = {
-+static struct sctp_af sctp_af_inet6 __read_only = {
- .sa_family = AF_INET6,
- .sctp_xmit = sctp_v6_xmit,
- .setsockopt = ipv6_setsockopt,
-@@ -993,7 +993,7 @@ static struct sctp_af sctp_af_inet6 = {
- #endif
- };
-
--static struct sctp_pf sctp_pf_inet6 = {
-+static struct sctp_pf sctp_pf_inet6 __read_only = {
- .event_msgname = sctp_inet6_event_msgname,
- .skb_msgname = sctp_inet6_skb_msgname,
- .af_supported = sctp_inet6_af_supported,
-@@ -1018,7 +1018,7 @@ void sctp_v6_pf_init(void)
-
- void sctp_v6_pf_exit(void)
- {
-- list_del(&sctp_af_inet6.list);
-+ pax_list_del(&sctp_af_inet6.list);
- }
-
- /* Initialize IPv6 support and register with socket layer. */
-diff --git a/net/sctp/probe.c b/net/sctp/probe.c
-index bc6cd75..749e4eb 100644
---- a/net/sctp/probe.c
-+++ b/net/sctp/probe.c
-@@ -63,7 +63,7 @@ static struct {
- struct timespec tstart;
- } sctpw;
-
--static void printl(const char *fmt, ...)
-+static __printf(1, 2) void printl(const char *fmt, ...)
- {
- va_list args;
- int len;
-diff --git a/net/sctp/proc.c b/net/sctp/proc.c
-index 1e2eee8..ce3967e 100644
---- a/net/sctp/proc.c
-+++ b/net/sctp/proc.c
-@@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
- seq_printf(seq,
- "%8pK %8pK %-3d %-3d %-2d %-4d "
- "%4d %8d %8d %7d %5lu %-5d %5d ",
-- assoc, sk, sctp_sk(sk)->type, sk->sk_state,
-+ assoc, sk,
-+ sctp_sk(sk)->type, sk->sk_state,
- assoc->state, hash,
- assoc->assoc_id,
- assoc->sndbuf_used,
-diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
-index de35e01..ef925b0 100644
---- a/net/sctp/protocol.c
-+++ b/net/sctp/protocol.c
-@@ -109,7 +109,7 @@ static __init int sctp_proc_init(void)
- goto out_nomem;
- #ifdef CONFIG_PROC_FS
- if (!proc_net_sctp) {
-- proc_net_sctp = proc_mkdir("sctp", init_net.proc_net);
-+ proc_net_sctp = proc_mkdir_restrict("sctp", init_net.proc_net);
- if (!proc_net_sctp)
- goto out_free_percpu;
- }
-@@ -867,8 +867,10 @@ int sctp_register_af(struct sctp_af *af)
- return 0;
- }
-
-+ pax_open_kernel();
- INIT_LIST_HEAD(&af->list);
-- list_add_tail(&af->list, &sctp_address_families);
-+ pax_close_kernel();
-+ pax_list_add_tail(&af->list, &sctp_address_families);
- return 1;
- }
-
-@@ -999,7 +1001,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
-
- static struct sctp_af sctp_af_inet;
-
--static struct sctp_pf sctp_pf_inet = {
-+static struct sctp_pf sctp_pf_inet __read_only = {
- .event_msgname = sctp_inet_event_msgname,
- .skb_msgname = sctp_inet_skb_msgname,
- .af_supported = sctp_inet_af_supported,
-@@ -1069,7 +1071,7 @@ static const struct net_protocol sctp_protocol = {
- };
-
- /* IPv4 address related functions. */
--static struct sctp_af sctp_af_inet = {
-+static struct sctp_af sctp_af_inet __read_only = {
- .sa_family = AF_INET,
- .sctp_xmit = sctp_v4_xmit,
- .setsockopt = ip_setsockopt,
-@@ -1154,7 +1156,7 @@ static void sctp_v4_pf_init(void)
-
- static void sctp_v4_pf_exit(void)
- {
-- list_del(&sctp_af_inet.list);
-+ pax_list_del(&sctp_af_inet.list);
- }
-
- static int sctp_v4_protosw_init(void)
-diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
-index 76388b0..a967f68 100644
---- a/net/sctp/sm_sideeffect.c
-+++ b/net/sctp/sm_sideeffect.c
-@@ -441,7 +441,7 @@ static void sctp_generate_sack_event(unsigned long data)
- sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
- }
-
--sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
-+sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
- NULL,
- sctp_generate_t1_cookie_event,
- sctp_generate_t1_init_event,
-diff --git a/net/sctp/socket.c b/net/sctp/socket.c
-index fc63664..832978a 100644
---- a/net/sctp/socket.c
-+++ b/net/sctp/socket.c
-@@ -2190,11 +2190,13 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
- {
- struct sctp_association *asoc;
- struct sctp_ulpevent *event;
-+ struct sctp_event_subscribe subscribe;
-
- if (optlen > sizeof(struct sctp_event_subscribe))
- return -EINVAL;
-- if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
-+ if (copy_from_user(&subscribe, optval, optlen))
- return -EFAULT;
-+ sctp_sk(sk)->subscribe = subscribe;
-
- /*
- * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
-@@ -4180,13 +4182,16 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
- static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
- int __user *optlen)
- {
-+ struct sctp_event_subscribe subscribe;
-+
- if (len <= 0)
- return -EINVAL;
- if (len > sizeof(struct sctp_event_subscribe))
- len = sizeof(struct sctp_event_subscribe);
- if (put_user(len, optlen))
- return -EFAULT;
-- if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
-+ subscribe = sctp_sk(sk)->subscribe;
-+ if (copy_to_user(optval, &subscribe, len))
- return -EFAULT;
- return 0;
- }
-@@ -4204,6 +4209,8 @@ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
- */
- static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
- {
-+ __u32 autoclose;
-+
- /* Applicable to UDP-style socket only */
- if (sctp_style(sk, TCP))
- return -EOPNOTSUPP;
-@@ -4212,7 +4219,8 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
- len = sizeof(int);
- if (put_user(len, optlen))
- return -EFAULT;
-- if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
-+ autoclose = sctp_sk(sk)->autoclose;
-+ if (copy_to_user(optval, &autoclose, sizeof(int)))
- return -EFAULT;
- return 0;
- }
-@@ -4576,12 +4584,15 @@ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
- */
- static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
- {
-+ struct sctp_initmsg initmsg;
-+
- if (len < sizeof(struct sctp_initmsg))
- return -EINVAL;
- len = sizeof(struct sctp_initmsg);
- if (put_user(len, optlen))
- return -EFAULT;
-- if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
-+ initmsg = sctp_sk(sk)->initmsg;
-+ if (copy_to_user(optval, &initmsg, len))
- return -EFAULT;
- return 0;
- }
-@@ -4622,6 +4633,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
- addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
- if (space_left < addrlen)
- return -ENOMEM;
-+ if (addrlen > sizeof(temp) || addrlen < 0)
-+ return -EFAULT;
- if (copy_to_user(to, &temp, addrlen))
- return -EFAULT;
- to += addrlen;
-diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c
-index 442ad4e..825ea94 100644
---- a/net/sctp/ssnmap.c
-+++ b/net/sctp/ssnmap.c
-@@ -41,8 +41,6 @@
- #include <net/sctp/sctp.h>
- #include <net/sctp/sm.h>
-
--#define MAX_KMALLOC_SIZE 131072
--
- static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in,
- __u16 out);
-
-@@ -65,7 +63,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
- int size;
-
- size = sctp_ssnmap_size(in, out);
-- if (size <= MAX_KMALLOC_SIZE)
-+ if (size <= KMALLOC_MAX_SIZE)
- retval = kmalloc(size, gfp);
- else
- retval = (struct sctp_ssnmap *)
-@@ -82,7 +80,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
- return retval;
-
- fail_map:
-- if (size <= MAX_KMALLOC_SIZE)
-+ if (size <= KMALLOC_MAX_SIZE)
- kfree(retval);
- else
- free_pages((unsigned long)retval, get_order(size));
-@@ -124,7 +122,7 @@ void sctp_ssnmap_free(struct sctp_ssnmap *map)
- int size;
-
- size = sctp_ssnmap_size(map->in.len, map->out.len);
-- if (size <= MAX_KMALLOC_SIZE)
-+ if (size <= KMALLOC_MAX_SIZE)
- kfree(map);
- else
- free_pages((unsigned long)map, get_order(size));
-diff --git a/net/sctp/transport.c b/net/sctp/transport.c
-index 8da4481..d02565e 100644
---- a/net/sctp/transport.c
-+++ b/net/sctp/transport.c
-@@ -317,7 +317,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
- * 1/8, rto_alpha would be expressed as 3.
- */
- tp->rttvar = tp->rttvar - (tp->rttvar >> sctp_rto_beta)
-- + ((abs(tp->srtt - rtt)) >> sctp_rto_beta);
-+ + (((__u32)abs64((__s64)tp->srtt - (__s64)rtt)) >> sctp_rto_beta);
- tp->srtt = tp->srtt - (tp->srtt >> sctp_rto_alpha)
- + (rtt >> sctp_rto_alpha);
- } else {
-diff --git a/net/socket.c b/net/socket.c
-index 116cf9d..a13ae17 100644
---- a/net/socket.c
-+++ b/net/socket.c
-@@ -88,6 +88,7 @@
- #include <linux/nsproxy.h>
- #include <linux/magic.h>
- #include <linux/slab.h>
-+#include <linux/in.h>
-
- #include <asm/uaccess.h>
- #include <asm/unistd.h>
-@@ -105,6 +106,8 @@
- #include <linux/sockios.h>
- #include <linux/atalk.h>
-
-+#include <linux/grsock.h>
-+
- static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
- static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos);
-@@ -156,7 +159,7 @@ static const struct file_operations socket_file_ops = {
- */
-
- static DEFINE_SPINLOCK(net_family_lock);
--static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
-+const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
-
- /*
- * Statistics counters of the socket lists
-@@ -322,7 +325,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
- &sockfs_dentry_operations, SOCKFS_MAGIC);
- }
-
--static struct vfsmount *sock_mnt __read_mostly;
-+struct vfsmount *sock_mnt __read_mostly;
-
- static struct file_system_type sock_fs_type = {
- .name = "sockfs",
-@@ -1188,6 +1191,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
- return -EAFNOSUPPORT;
- if (type < 0 || type >= SOCK_MAX)
- return -EINVAL;
-+ if (protocol < 0)
-+ return -EINVAL;
-
- /* Compatibility.
-
-@@ -1208,6 +1213,20 @@ int __sock_create(struct net *net, int family, int type, int protocol,
- if (err)
- return err;
-
-+ if(!kern && !gr_search_socket(family, type, protocol)) {
-+ if (rcu_access_pointer(net_families[family]) == NULL)
-+ return -EAFNOSUPPORT;
-+ else
-+ return -EACCES;
-+ }
-+
-+ if (!kern && gr_handle_sock_all(family, type, protocol)) {
-+ if (rcu_access_pointer(net_families[family]) == NULL)
-+ return -EAFNOSUPPORT;
-+ else
-+ return -EACCES;
-+ }
-+
- /*
- * Allocate the socket and allow the family to set things up. if
- * the protocol is 0, the family is instructed to select an appropriate
-@@ -1432,6 +1451,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
- if (sock) {
- err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
- if (err >= 0) {
-+ if (gr_handle_sock_server((struct sockaddr *)&address)) {
-+ err = -EACCES;
-+ goto error;
-+ }
-+ err = gr_search_bind(sock, (struct sockaddr_in *)&address);
-+ if (err)
-+ goto error;
-+
- err = security_socket_bind(sock,
- (struct sockaddr *)&address,
- addrlen);
-@@ -1440,6 +1467,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
- (struct sockaddr *)
- &address, addrlen);
- }
-+error:
- fput_light(sock->file, fput_needed);
- }
- return err;
-@@ -1463,10 +1491,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
- if ((unsigned)backlog > somaxconn)
- backlog = somaxconn;
-
-+ if (gr_handle_sock_server_other(sock->sk)) {
-+ err = -EPERM;
-+ goto error;
-+ }
-+
-+ err = gr_search_listen(sock);
-+ if (err)
-+ goto error;
-+
- err = security_socket_listen(sock, backlog);
- if (!err)
- err = sock->ops->listen(sock, backlog);
-
-+error:
- fput_light(sock->file, fput_needed);
- }
- return err;
-@@ -1510,6 +1548,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
- newsock->type = sock->type;
- newsock->ops = sock->ops;
-
-+ if (gr_handle_sock_server_other(sock->sk)) {
-+ err = -EPERM;
-+ sock_release(newsock);
-+ goto out_put;
-+ }
-+
-+ err = gr_search_accept(sock);
-+ if (err) {
-+ sock_release(newsock);
-+ goto out_put;
-+ }
-+
- /*
- * We don't need try_module_get here, as the listening socket (sock)
- * has the protocol module (sock->ops->owner) held.
-@@ -1548,6 +1598,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
- fd_install(newfd, newfile);
- err = newfd;
-
-+ gr_attach_curr_ip(newsock->sk);
-+
- out_put:
- fput_light(sock->file, fput_needed);
- out:
-@@ -1580,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
- int, addrlen)
- {
- struct socket *sock;
-+ struct sockaddr *sck;
- struct sockaddr_storage address;
- int err, fput_needed;
-
-@@ -1590,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
- if (err < 0)
- goto out_put;
-
-+ sck = (struct sockaddr *)&address;
-+
-+ if (gr_handle_sock_client(sck)) {
-+ err = -EACCES;
-+ goto out_put;
-+ }
-+
-+ err = gr_search_connect(sock, (struct sockaddr_in *)sck);
-+ if (err)
-+ goto out_put;
-+
- err =
- security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
- if (err)
-@@ -1671,6 +1735,8 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
- * the protocol.
- */
-
-+asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, struct sockaddr __user *, int);
-+
- SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
- unsigned, flags, struct sockaddr __user *, addr,
- int, addr_len)
-@@ -1737,7 +1803,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
- struct socket *sock;
- struct iovec iov;
- struct msghdr msg;
-- struct sockaddr_storage address;
-+ struct sockaddr_storage address = { };
- int err, err2;
- int fput_needed;
-
-@@ -1973,7 +2039,7 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
- * checking falls down on this.
- */
- if (copy_from_user(ctl_buf,
-- (void __user __force *)msg_sys->msg_control,
-+ (void __force_user *)msg_sys->msg_control,
- ctl_len))
- goto out_freectl;
- msg_sys->msg_control = ctl_buf;
-@@ -2124,7 +2190,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
- int err, iov_size, total_len, len;
-
- /* kernel mode address */
-- struct sockaddr_storage addr;
-+ struct sockaddr_storage addr = { };
-
- /* user mode address pointers */
- struct sockaddr __user *uaddr;
-@@ -2155,7 +2221,8 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
- /* Save the user-mode address (verify_iovec will change the
- * kernel msghdr to use the kernel address space)
- */
-- uaddr = (__force void __user *)msg_sys->msg_name;
-+
-+ uaddr = (void __force_user *)msg_sys->msg_name;
- uaddr_len = COMPAT_NAMELEN(msg);
- if (MSG_CMSG_COMPAT & flags)
- err = verify_compat_iovec(msg_sys, iov,
-@@ -2799,9 +2866,9 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
- }
-
- ifr = compat_alloc_user_space(buf_size);
-- rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
-+ rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
-
-- if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
-+ if (copy_in_user(ifr->ifr_name, ifr32->ifr_name, IFNAMSIZ))
- return -EFAULT;
-
- if (put_user(convert_in ? rxnfc : compat_ptr(data),
-@@ -2823,12 +2890,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
- offsetof(struct ethtool_rxnfc, fs.ring_cookie));
-
- if (copy_in_user(rxnfc, compat_rxnfc,
-- (void *)(&rxnfc->fs.m_ext + 1) -
-- (void *)rxnfc) ||
-+ (void __user *)(&rxnfc->fs.m_ext + 1) -
-+ (void __user *)rxnfc) ||
- copy_in_user(&rxnfc->fs.ring_cookie,
- &compat_rxnfc->fs.ring_cookie,
-- (void *)(&rxnfc->fs.location + 1) -
-- (void *)&rxnfc->fs.ring_cookie) ||
-+ (void __user *)(&rxnfc->fs.location + 1) -
-+ (void __user *)&rxnfc->fs.ring_cookie) ||
- copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
- sizeof(rxnfc->rule_cnt)))
- return -EFAULT;
-@@ -2840,12 +2907,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
-
- if (convert_out) {
- if (copy_in_user(compat_rxnfc, rxnfc,
-- (const void *)(&rxnfc->fs.m_ext + 1) -
-- (const void *)rxnfc) ||
-+ (const void __user *)(&rxnfc->fs.m_ext + 1) -
-+ (const void __user *)rxnfc) ||
- copy_in_user(&compat_rxnfc->fs.ring_cookie,
- &rxnfc->fs.ring_cookie,
-- (const void *)(&rxnfc->fs.location + 1) -
-- (const void *)&rxnfc->fs.ring_cookie) ||
-+ (const void __user *)(&rxnfc->fs.location + 1) -
-+ (const void __user *)&rxnfc->fs.ring_cookie) ||
- copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
- sizeof(rxnfc->rule_cnt)))
- return -EFAULT;
-@@ -2915,14 +2982,14 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = dev_ioctl(net, cmd,
-- (struct ifreq __user __force *) &kifr);
-+ (struct ifreq __force_user *) &kifr);
- set_fs(old_fs);
-
- return err;
- case SIOCBONDSLAVEINFOQUERY:
- case SIOCBONDINFOQUERY:
- uifr = compat_alloc_user_space(sizeof(*uifr));
-- if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
-+ if (copy_in_user(uifr->ifr_name, ifr32->ifr_name, IFNAMSIZ))
- return -EFAULT;
-
- if (get_user(data, &ifr32->ifr_ifru.ifru_data))
-@@ -3024,7 +3091,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-- err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
-+ err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
- set_fs(old_fs);
-
- if (cmd == SIOCGIFMAP && !err) {
-@@ -3129,7 +3196,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
- ret |= __get_user(rtdev, &(ur4->rt_dev));
- if (rtdev) {
- ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
-- r4.rt_dev = (char __user __force *)devname;
-+ r4.rt_dev = (char __force_user *)devname;
- devname[15] = 0;
- } else
- r4.rt_dev = NULL;
-@@ -3369,8 +3436,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
- int __user *uoptlen;
- int err;
-
-- uoptval = (char __user __force *) optval;
-- uoptlen = (int __user __force *) optlen;
-+ uoptval = (char __force_user *) optval;
-+ uoptlen = (int __force_user *) optlen;
-
- set_fs(KERNEL_DS);
- if (level == SOL_SOCKET)
-@@ -3390,7 +3457,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
- char __user *uoptval;
- int err;
-
-- uoptval = (char __user __force *) optval;
-+ uoptval = (char __force_user *) optval;
-
- set_fs(KERNEL_DS);
- if (level == SOL_SOCKET)
-diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
-index 7de935a..9e69f3e 100644
---- a/net/sunrpc/cache.c
-+++ b/net/sunrpc/cache.c
-@@ -1587,7 +1587,7 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
- struct sunrpc_net *sn;
-
- sn = net_generic(net, sunrpc_net_id);
-- cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc);
-+ cd->u.procfs.proc_ent = proc_mkdir_restrict(cd->name, sn->proc_net_rpc);
- if (cd->u.procfs.proc_ent == NULL)
- goto out_nomem;
- cd->u.procfs.channel_ent = NULL;
-diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
-index a0e55e5..2680674 100644
---- a/net/sunrpc/clnt.c
-+++ b/net/sunrpc/clnt.c
-@@ -163,10 +163,8 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
- err = rpciod_up();
- if (err)
- goto out_no_rpciod;
-- err = -EINVAL;
-- if (!xprt)
-- goto out_no_xprt;
-
-+ err = -EINVAL;
- if (args->version >= program->nrvers)
- goto out_err;
- version = program->version[args->version];
-@@ -259,10 +257,9 @@ out_no_stats:
- kfree(clnt->cl_server);
- kfree(clnt);
- out_err:
-- xprt_put(xprt);
--out_no_xprt:
- rpciod_down();
- out_no_rpciod:
-+ xprt_put(xprt);
- return ERR_PTR(err);
- }
-
-@@ -903,7 +900,9 @@ call_start(struct rpc_task *task)
- (RPC_IS_ASYNC(task) ? "async" : "sync"));
-
- /* Increment call count */
-- task->tk_msg.rpc_proc->p_count++;
-+ pax_open_kernel();
-+ (*(unsigned int *)&task->tk_msg.rpc_proc->p_count)++;
-+ pax_close_kernel();
- clnt->cl_stats->rpccnt++;
- task->tk_action = call_reserve;
- }
-diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
-index bfddd68..567429b 100644
---- a/net/sunrpc/rpc_pipe.c
-+++ b/net/sunrpc/rpc_pipe.c
-@@ -1059,6 +1059,8 @@ static struct file_system_type rpc_pipe_fs_type = {
- .mount = rpc_mount,
- .kill_sb = kill_litter_super,
- };
-+MODULE_ALIAS_FS("rpc_pipefs");
-+MODULE_ALIAS("rpc_pipefs");
-
- static void
- init_once(void *foo)
-@@ -1104,6 +1106,3 @@ void unregister_rpc_pipefs(void)
- kmem_cache_destroy(rpc_inode_cachep);
- unregister_filesystem(&rpc_pipe_fs_type);
- }
--
--/* Make 'mount -t rpc_pipefs ...' autoload this module. */
--MODULE_ALIAS("rpc_pipefs");
-diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
-index 206c61e..e3641fb 100644
---- a/net/sunrpc/sched.c
-+++ b/net/sunrpc/sched.c
-@@ -240,9 +240,9 @@ static int rpc_wait_bit_killable(void *word)
- #ifdef RPC_DEBUG
- static void rpc_task_set_debuginfo(struct rpc_task *task)
- {
-- static atomic_t rpc_pid;
-+ static atomic_unchecked_t rpc_pid;
-
-- task->tk_pid = atomic_inc_return(&rpc_pid);
-+ task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
- }
- #else
- static inline void rpc_task_set_debuginfo(struct rpc_task *task)
-diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
-index 80df89d..2056196 100644
---- a/net/sunrpc/stats.c
-+++ b/net/sunrpc/stats.c
-@@ -262,7 +262,7 @@ int rpc_proc_init(struct net *net)
-
- dprintk("RPC: registering /proc/net/rpc\n");
- sn = net_generic(net, sunrpc_net_id);
-- sn->proc_net_rpc = proc_mkdir("rpc", net->proc_net);
-+ sn->proc_net_rpc = proc_mkdir_restrict("rpc", net->proc_net);
- if (sn->proc_net_rpc == NULL)
- return -ENOMEM;
-
-diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
-index c80c162..83a1e28 100644
---- a/net/sunrpc/svc.c
-+++ b/net/sunrpc/svc.c
-@@ -732,7 +732,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
-
- __module_get(serv->sv_module);
- task = kthread_create_on_node(serv->sv_function, rqstp,
-- node, serv->sv_name);
-+ node, "%s", serv->sv_name);
- if (IS_ERR(task)) {
- error = PTR_ERR(task);
- module_put(serv->sv_module);
-@@ -1145,7 +1145,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
- svc_putnl(resv, RPC_SUCCESS);
-
- /* Bump per-procedure stats counter */
-- procp->pc_count++;
-+ pax_open_kernel();
-+ (*(unsigned int *)&procp->pc_count)++;
-+ pax_close_kernel();
-
- /* Initialize storage for argp and resp */
- memset(rqstp->rq_argp, 0, procp->pc_argsize);
-diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
-index ce13632..144987d 100644
---- a/net/sunrpc/svcauth_unix.c
-+++ b/net/sunrpc/svcauth_unix.c
-@@ -602,7 +602,7 @@ struct cache_detail unix_gid_cache = {
- .alloc = unix_gid_alloc,
- };
-
--static struct unix_gid *unix_gid_lookup(uid_t uid)
-+static struct unix_gid * __intentional_overflow(-1) unix_gid_lookup(uid_t uid)
- {
- struct unix_gid ug;
- struct cache_head *ch;
-diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
-index 5e3080c..df0e1a9 100644
---- a/net/sunrpc/svcsock.c
-+++ b/net/sunrpc/svcsock.c
-@@ -396,7 +396,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
- int buflen, unsigned int base)
- {
- size_t save_iovlen;
-- void __user *save_iovbase;
-+ void *save_iovbase;
- unsigned int i;
- int ret;
-
-diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
-index 09af4fa..677025e 100644
---- a/net/sunrpc/xprtrdma/svc_rdma.c
-+++ b/net/sunrpc/xprtrdma/svc_rdma.c
-@@ -47,6 +47,7 @@
- #include <linux/sunrpc/clnt.h>
- #include <linux/sunrpc/sched.h>
- #include <linux/sunrpc/svc_rdma.h>
-+#include "xprt_rdma.h"
-
- #define RPCDBG_FACILITY RPCDBG_SVCXPRT
-
-@@ -61,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
- static unsigned int min_max_inline = 4096;
- static unsigned int max_max_inline = 65536;
-
--atomic_t rdma_stat_recv;
--atomic_t rdma_stat_read;
--atomic_t rdma_stat_write;
--atomic_t rdma_stat_sq_starve;
--atomic_t rdma_stat_rq_starve;
--atomic_t rdma_stat_rq_poll;
--atomic_t rdma_stat_rq_prod;
--atomic_t rdma_stat_sq_poll;
--atomic_t rdma_stat_sq_prod;
-+atomic_unchecked_t rdma_stat_recv;
-+atomic_unchecked_t rdma_stat_read;
-+atomic_unchecked_t rdma_stat_write;
-+atomic_unchecked_t rdma_stat_sq_starve;
-+atomic_unchecked_t rdma_stat_rq_starve;
-+atomic_unchecked_t rdma_stat_rq_poll;
-+atomic_unchecked_t rdma_stat_rq_prod;
-+atomic_unchecked_t rdma_stat_sq_poll;
-+atomic_unchecked_t rdma_stat_sq_prod;
-
- /* Temporary NFS request map and context caches */
- struct kmem_cache *svc_rdma_map_cachep;
-@@ -109,7 +110,7 @@ static int read_reset_stat(ctl_table *table, int write,
- len -= *ppos;
- if (len > *lenp)
- len = *lenp;
-- if (len && copy_to_user(buffer, str_buf, len))
-+ if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
- return -EFAULT;
- *lenp = len;
- *ppos += len;
-@@ -150,63 +151,63 @@ static ctl_table svcrdma_parm_table[] = {
- {
- .procname = "rdma_stat_read",
- .data = &rdma_stat_read,
-- .maxlen = sizeof(atomic_t),
-+ .maxlen = sizeof(atomic_unchecked_t),
- .mode = 0644,
- .proc_handler = read_reset_stat,
- },
- {
- .procname = "rdma_stat_recv",
- .data = &rdma_stat_recv,
-- .maxlen = sizeof(atomic_t),
-+ .maxlen = sizeof(atomic_unchecked_t),
- .mode = 0644,
- .proc_handler = read_reset_stat,
- },
- {
- .procname = "rdma_stat_write",
- .data = &rdma_stat_write,
-- .maxlen = sizeof(atomic_t),
-+ .maxlen = sizeof(atomic_unchecked_t),
- .mode = 0644,
- .proc_handler = read_reset_stat,
- },
- {
- .procname = "rdma_stat_sq_starve",
- .data = &rdma_stat_sq_starve,
-- .maxlen = sizeof(atomic_t),
-+ .maxlen = sizeof(atomic_unchecked_t),
- .mode = 0644,
- .proc_handler = read_reset_stat,
- },
- {
- .procname = "rdma_stat_rq_starve",
- .data = &rdma_stat_rq_starve,
-- .maxlen = sizeof(atomic_t),
-+ .maxlen = sizeof(atomic_unchecked_t),
- .mode = 0644,
- .proc_handler = read_reset_stat,
- },
- {
- .procname = "rdma_stat_rq_poll",
- .data = &rdma_stat_rq_poll,
-- .maxlen = sizeof(atomic_t),
-+ .maxlen = sizeof(atomic_unchecked_t),
- .mode = 0644,
- .proc_handler = read_reset_stat,
- },
- {
- .procname = "rdma_stat_rq_prod",
- .data = &rdma_stat_rq_prod,
-- .maxlen = sizeof(atomic_t),
-+ .maxlen = sizeof(atomic_unchecked_t),
- .mode = 0644,
- .proc_handler = read_reset_stat,
- },
- {
- .procname = "rdma_stat_sq_poll",
- .data = &rdma_stat_sq_poll,
-- .maxlen = sizeof(atomic_t),
-+ .maxlen = sizeof(atomic_unchecked_t),
- .mode = 0644,
- .proc_handler = read_reset_stat,
- },
- {
- .procname = "rdma_stat_sq_prod",
- .data = &rdma_stat_sq_prod,
-- .maxlen = sizeof(atomic_t),
-+ .maxlen = sizeof(atomic_unchecked_t),
- .mode = 0644,
- .proc_handler = read_reset_stat,
- },
-diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
-index 9530ef2..65b1462 100644
---- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c
-+++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
-@@ -60,21 +60,11 @@ static u32 *decode_read_list(u32 *va, u32 *vaend)
- struct rpcrdma_read_chunk *ch = (struct rpcrdma_read_chunk *)va;
-
- while (ch->rc_discrim != xdr_zero) {
-- u64 ch_offset;
--
- if (((unsigned long)ch + sizeof(struct rpcrdma_read_chunk)) >
- (unsigned long)vaend) {
- dprintk("svcrdma: vaend=%p, ch=%p\n", vaend, ch);
- return NULL;
- }
--
-- ch->rc_discrim = ntohl(ch->rc_discrim);
-- ch->rc_position = ntohl(ch->rc_position);
-- ch->rc_target.rs_handle = ntohl(ch->rc_target.rs_handle);
-- ch->rc_target.rs_length = ntohl(ch->rc_target.rs_length);
-- va = (u32 *)&ch->rc_target.rs_offset;
-- xdr_decode_hyper(va, &ch_offset);
-- put_unaligned(ch_offset, (u64 *)va);
- ch++;
- }
- return (u32 *)&ch->rc_position;
-@@ -91,7 +81,7 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
- *byte_count = 0;
- *ch_count = 0;
- for (; ch->rc_discrim != 0; ch++) {
-- *byte_count = *byte_count + ch->rc_target.rs_length;
-+ *byte_count = *byte_count + ntohl(ch->rc_target.rs_length);
- *ch_count = *ch_count + 1;
- }
- }
-@@ -108,7 +98,9 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
- */
- static u32 *decode_write_list(u32 *va, u32 *vaend)
- {
-- int ch_no;
-+ unsigned long start, end;
-+ int nchunks;
-+
- struct rpcrdma_write_array *ary =
- (struct rpcrdma_write_array *)va;
-
-@@ -121,37 +113,28 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
- dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
- return NULL;
- }
-- ary->wc_discrim = ntohl(ary->wc_discrim);
-- ary->wc_nchunks = ntohl(ary->wc_nchunks);
-- if (((unsigned long)&ary->wc_array[0] +
-- (sizeof(struct rpcrdma_write_chunk) * ary->wc_nchunks)) >
-- (unsigned long)vaend) {
-+ nchunks = ntohl(ary->wc_nchunks);
-+
-+ start = (unsigned long)&ary->wc_array[0];
-+ end = (unsigned long)vaend;
-+ if (nchunks < 0 ||
-+ nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) ||
-+ (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) {
- dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
-- ary, ary->wc_nchunks, vaend);
-+ ary, nchunks, vaend);
- return NULL;
- }
-- for (ch_no = 0; ch_no < ary->wc_nchunks; ch_no++) {
-- u64 ch_offset;
--
-- ary->wc_array[ch_no].wc_target.rs_handle =
-- ntohl(ary->wc_array[ch_no].wc_target.rs_handle);
-- ary->wc_array[ch_no].wc_target.rs_length =
-- ntohl(ary->wc_array[ch_no].wc_target.rs_length);
-- va = (u32 *)&ary->wc_array[ch_no].wc_target.rs_offset;
-- xdr_decode_hyper(va, &ch_offset);
-- put_unaligned(ch_offset, (u64 *)va);
-- }
--
- /*
- * rs_length is the 2nd 4B field in wc_target and taking its
- * address skips the list terminator
- */
-- return (u32 *)&ary->wc_array[ch_no].wc_target.rs_length;
-+ return (u32 *)&ary->wc_array[nchunks].wc_target.rs_length;
- }
-
- static u32 *decode_reply_array(u32 *va, u32 *vaend)
- {
-- int ch_no;
-+ unsigned long start, end;
-+ int nchunks;
- struct rpcrdma_write_array *ary =
- (struct rpcrdma_write_array *)va;
-
-@@ -164,28 +147,18 @@ static u32 *decode_reply_array(u32 *va, u32 *vaend)
- dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
- return NULL;
- }
-- ary->wc_discrim = ntohl(ary->wc_discrim);
-- ary->wc_nchunks = ntohl(ary->wc_nchunks);
-- if (((unsigned long)&ary->wc_array[0] +
-- (sizeof(struct rpcrdma_write_chunk) * ary->wc_nchunks)) >
-- (unsigned long)vaend) {
-+ nchunks = ntohl(ary->wc_nchunks);
-+
-+ start = (unsigned long)&ary->wc_array[0];
-+ end = (unsigned long)vaend;
-+ if (nchunks < 0 ||
-+ nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) ||
-+ (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) {
- dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
-- ary, ary->wc_nchunks, vaend);
-+ ary, nchunks, vaend);
- return NULL;
- }
-- for (ch_no = 0; ch_no < ary->wc_nchunks; ch_no++) {
-- u64 ch_offset;
--
-- ary->wc_array[ch_no].wc_target.rs_handle =
-- ntohl(ary->wc_array[ch_no].wc_target.rs_handle);
-- ary->wc_array[ch_no].wc_target.rs_length =
-- ntohl(ary->wc_array[ch_no].wc_target.rs_length);
-- va = (u32 *)&ary->wc_array[ch_no].wc_target.rs_offset;
-- xdr_decode_hyper(va, &ch_offset);
-- put_unaligned(ch_offset, (u64 *)va);
-- }
--
-- return (u32 *)&ary->wc_array[ch_no];
-+ return (u32 *)&ary->wc_array[nchunks];
- }
-
- int svc_rdma_xdr_decode_req(struct rpcrdma_msg **rdma_req,
-@@ -386,13 +359,14 @@ void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *ary,
-
- void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *ary,
- int chunk_no,
-- u32 rs_handle, u64 rs_offset,
-+ __be32 rs_handle,
-+ __be64 rs_offset,
- u32 write_len)
- {
- struct rpcrdma_segment *seg = &ary->wc_array[chunk_no].wc_target;
-- seg->rs_handle = htonl(rs_handle);
-+ seg->rs_handle = rs_handle;
-+ seg->rs_offset = rs_offset;
- seg->rs_length = htonl(write_len);
-- xdr_encode_hyper((u32 *) &seg->rs_offset, rs_offset);
- }
-
- void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *xprt,
-diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
-index df67211..c4a1489 100644
---- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
-+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
-@@ -147,7 +147,7 @@ static int map_read_chunks(struct svcxprt_rdma *xprt,
- page_off = 0;
- ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
- ch_no = 0;
-- ch_bytes = ch->rc_target.rs_length;
-+ ch_bytes = ntohl(ch->rc_target.rs_length);
- head->arg.head[0] = rqstp->rq_arg.head[0];
- head->arg.tail[0] = rqstp->rq_arg.tail[0];
- head->arg.pages = &head->pages[head->count];
-@@ -183,7 +183,7 @@ static int map_read_chunks(struct svcxprt_rdma *xprt,
- ch_no++;
- ch++;
- chl_map->ch[ch_no].start = sge_no;
-- ch_bytes = ch->rc_target.rs_length;
-+ ch_bytes = ntohl(ch->rc_target.rs_length);
- /* If bytes remaining account for next chunk */
- if (byte_count) {
- head->arg.page_len += ch_bytes;
-@@ -281,11 +281,12 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt,
- offset = 0;
- ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
- for (ch_no = 0; ch_no < ch_count; ch_no++) {
-+ int len = ntohl(ch->rc_target.rs_length);
- rpl_map->sge[ch_no].iov_base = frmr->kva + offset;
-- rpl_map->sge[ch_no].iov_len = ch->rc_target.rs_length;
-+ rpl_map->sge[ch_no].iov_len = len;
- chl_map->ch[ch_no].count = 1;
- chl_map->ch[ch_no].start = ch_no;
-- offset += ch->rc_target.rs_length;
-+ offset += len;
- ch++;
- }
-
-@@ -316,7 +317,7 @@ static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
- for (i = 0; i < count; i++) {
- ctxt->sge[i].length = 0; /* in case map fails */
- if (!frmr) {
-- BUG_ON(0 == virt_to_page(vec[i].iov_base));
-+ BUG_ON(!virt_to_page(vec[i].iov_base));
- off = (unsigned long)vec[i].iov_base & ~PAGE_MASK;
- ctxt->sge[i].addr =
- ib_dma_map_page(xprt->sc_cm_id->device,
-@@ -426,6 +427,7 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
-
- for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
- ch->rc_discrim != 0; ch++, ch_no++) {
-+ u64 rs_offset;
- next_sge:
- ctxt = svc_rdma_get_context(xprt);
- ctxt->direction = DMA_FROM_DEVICE;
-@@ -440,10 +442,10 @@ next_sge:
- read_wr.opcode = IB_WR_RDMA_READ;
- ctxt->wr_op = read_wr.opcode;
- read_wr.send_flags = IB_SEND_SIGNALED;
-- read_wr.wr.rdma.rkey = ch->rc_target.rs_handle;
-- read_wr.wr.rdma.remote_addr =
-- get_unaligned(&(ch->rc_target.rs_offset)) +
-- sgl_offset;
-+ read_wr.wr.rdma.rkey = ntohl(ch->rc_target.rs_handle);
-+ xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
-+ &rs_offset);
-+ read_wr.wr.rdma.remote_addr = rs_offset + sgl_offset;
- read_wr.sg_list = ctxt->sge;
- read_wr.num_sge =
- rdma_read_max_sge(xprt, chl_map->ch[ch_no].count);
-@@ -499,7 +501,7 @@ next_sge:
- svc_rdma_put_context(ctxt, 0);
- goto out;
- }
-- atomic_inc(&rdma_stat_read);
-+ atomic_inc_unchecked(&rdma_stat_read);
-
- if (read_wr.num_sge < chl_map->ch[ch_no].count) {
- chl_map->ch[ch_no].count -= read_wr.num_sge;
-@@ -609,7 +611,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
- dto_q);
- list_del_init(&ctxt->dto_q);
- } else {
-- atomic_inc(&rdma_stat_rq_starve);
-+ atomic_inc_unchecked(&rdma_stat_rq_starve);
- clear_bit(XPT_DATA, &xprt->xpt_flags);
- ctxt = NULL;
- }
-@@ -629,7 +631,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
- dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
- ctxt, rdma_xprt, rqstp, ctxt->wc_status);
- BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
-- atomic_inc(&rdma_stat_recv);
-+ atomic_inc_unchecked(&rdma_stat_recv);
-
- /* Build up the XDR from the receive buffers. */
- rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
-diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
-index 249a835..c887c45 100644
---- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
-+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
-@@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
- write_wr.wr.rdma.remote_addr = to;
-
- /* Post It */
-- atomic_inc(&rdma_stat_write);
-+ atomic_inc_unchecked(&rdma_stat_write);
- if (svc_rdma_send(xprt, &write_wr))
- goto err;
- return 0;
-@@ -409,21 +409,21 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
- u64 rs_offset;
-
- arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
-- write_len = min(xfer_len, arg_ch->rs_length);
-+ write_len = min(xfer_len, ntohl(arg_ch->rs_length));
-
- /* Prepare the response chunk given the length actually
- * written */
-- rs_offset = get_unaligned(&(arg_ch->rs_offset));
-+ xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
- svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
-- arg_ch->rs_handle,
-- rs_offset,
-- write_len);
-+ arg_ch->rs_handle,
-+ arg_ch->rs_offset,
-+ write_len);
- chunk_off = 0;
- while (write_len) {
- int this_write;
- this_write = min(write_len, max_write);
- ret = send_write(xprt, rqstp,
-- arg_ch->rs_handle,
-+ ntohl(arg_ch->rs_handle),
- rs_offset + chunk_off,
- xdr_off,
- this_write,
-@@ -457,6 +457,7 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
- u32 xdr_off;
- int chunk_no;
- int chunk_off;
-+ int nchunks;
- struct rpcrdma_segment *ch;
- struct rpcrdma_write_array *arg_ary;
- struct rpcrdma_write_array *res_ary;
-@@ -476,26 +477,27 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
- max_write = xprt->sc_max_sge * PAGE_SIZE;
-
- /* xdr offset starts at RPC message */
-+ nchunks = ntohl(arg_ary->wc_nchunks);
- for (xdr_off = 0, chunk_no = 0;
-- xfer_len && chunk_no < arg_ary->wc_nchunks;
-+ xfer_len && chunk_no < nchunks;
- chunk_no++) {
- u64 rs_offset;
- ch = &arg_ary->wc_array[chunk_no].wc_target;
-- write_len = min(xfer_len, ch->rs_length);
-+ write_len = min(xfer_len, htonl(ch->rs_length));
-
- /* Prepare the reply chunk given the length actually
- * written */
-- rs_offset = get_unaligned(&(ch->rs_offset));
-+ xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
- svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
-- ch->rs_handle, rs_offset,
-- write_len);
-+ ch->rs_handle, ch->rs_offset,
-+ write_len);
- chunk_off = 0;
- while (write_len) {
- int this_write;
-
- this_write = min(write_len, max_write);
- ret = send_write(xprt, rqstp,
-- ch->rs_handle,
-+ ntohl(ch->rs_handle),
- rs_offset + chunk_off,
- xdr_off,
- this_write,
-diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
-index ba1296d..515ea15 100644
---- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
-+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
-@@ -51,6 +51,7 @@
- #include <rdma/rdma_cm.h>
- #include <linux/sunrpc/svc_rdma.h>
- #include <linux/export.h>
-+#include "xprt_rdma.h"
-
- #define RPCDBG_FACILITY RPCDBG_SVCXPRT
-
-@@ -90,12 +91,6 @@ struct svc_xprt_class svc_rdma_class = {
- .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
- };
-
--/* WR context cache. Created in svc_rdma.c */
--extern struct kmem_cache *svc_rdma_ctxt_cachep;
--
--/* Workqueue created in svc_rdma.c */
--extern struct workqueue_struct *svc_rdma_wq;
--
- struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
- {
- struct svc_rdma_op_ctxt *ctxt;
-@@ -150,9 +145,6 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
- atomic_dec(&xprt->sc_ctxt_used);
- }
-
--/* Temporary NFS request map cache. Created in svc_rdma.c */
--extern struct kmem_cache *svc_rdma_map_cachep;
--
- /*
- * Temporary NFS req mappings are shared across all transport
- * instances. These are short lived and should be bounded by the number
-@@ -300,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
- return;
-
- ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
-- atomic_inc(&rdma_stat_rq_poll);
-+ atomic_inc_unchecked(&rdma_stat_rq_poll);
-
- while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
- ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
-@@ -322,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
- }
-
- if (ctxt)
-- atomic_inc(&rdma_stat_rq_prod);
-+ atomic_inc_unchecked(&rdma_stat_rq_prod);
-
- set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
- /*
-@@ -394,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
- return;
-
- ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
-- atomic_inc(&rdma_stat_sq_poll);
-+ atomic_inc_unchecked(&rdma_stat_sq_poll);
- while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
- if (wc.status != IB_WC_SUCCESS)
- /* Close the transport */
-@@ -412,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
- }
-
- if (ctxt)
-- atomic_inc(&rdma_stat_sq_prod);
-+ atomic_inc_unchecked(&rdma_stat_sq_prod);
- }
-
- static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
-@@ -1274,7 +1266,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
- spin_lock_bh(&xprt->sc_lock);
- if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
- spin_unlock_bh(&xprt->sc_lock);
-- atomic_inc(&rdma_stat_sq_starve);
-+ atomic_inc_unchecked(&rdma_stat_sq_starve);
-
- /* See if we can opportunistically reap SQ WR to make room */
- sq_cq_reap(xprt);
-diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
-index 08c5d5a..9a66c95 100644
---- a/net/sunrpc/xprtrdma/xprt_rdma.h
-+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
-@@ -343,4 +343,11 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *);
- */
- int rpcrdma_marshal_req(struct rpc_rqst *);
-
-+/* Temporary NFS request map cache. Created in svc_rdma.c */
-+extern struct kmem_cache *svc_rdma_map_cachep;
-+/* WR context cache. Created in svc_rdma.c */
-+extern struct kmem_cache *svc_rdma_ctxt_cachep;
-+/* Workqueue created in svc_rdma.c */
-+extern struct workqueue_struct *svc_rdma_wq;
-+
- #endif /* _LINUX_SUNRPC_XPRT_RDMA_H */
-diff --git a/net/sysctl_net.c b/net/sysctl_net.c
-index e758139..d29ea47 100644
---- a/net/sysctl_net.c
-+++ b/net/sysctl_net.c
-@@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
- struct ctl_table *table)
- {
- /* Allow network administrator to have same access as root. */
-- if (capable(CAP_NET_ADMIN)) {
-+ if (capable_nolog(CAP_NET_ADMIN)) {
- int mode = (table->mode >> 6) & 7;
- return (mode << 6) | (mode << 3) | mode;
- }
-diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
-index a224a38..c31d40a 100644
---- a/net/tipc/eth_media.c
-+++ b/net/tipc/eth_media.c
-@@ -58,7 +58,6 @@ struct eth_bearer {
-
- static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
- static int eth_started;
--static struct notifier_block notifier;
-
- /**
- * send_msg - send a TIPC message out over an Ethernet interface
-@@ -277,6 +276,11 @@ static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size
- * with OS for notifications about device state changes.
- */
-
-+static struct notifier_block notifier = {
-+ .notifier_call = &recv_notification,
-+ .priority = 0,
-+};
-+
- int tipc_eth_media_start(void)
- {
- struct tipc_media_addr bcast_addr;
-@@ -297,8 +301,6 @@ int tipc_eth_media_start(void)
- if (res)
- return res;
-
-- notifier.notifier_call = &recv_notification;
-- notifier.priority = 0;
- res = register_netdevice_notifier(&notifier);
- if (!res)
- eth_started = 1;
-diff --git a/net/tipc/link.c b/net/tipc/link.c
-index ae98a72..22f4de0 100644
---- a/net/tipc/link.c
-+++ b/net/tipc/link.c
-@@ -1203,7 +1203,7 @@ static int link_send_sections_long(struct tipc_port *sender,
- struct tipc_msg fragm_hdr;
- struct sk_buff *buf, *buf_chain, *prev;
- u32 fragm_crs, fragm_rest, hsz, sect_rest;
-- const unchar *sect_crs;
-+ const unchar __user *sect_crs;
- int curr_sect;
- u32 fragm_no;
-
-@@ -1247,7 +1247,7 @@ again:
-
- if (!sect_rest) {
- sect_rest = msg_sect[++curr_sect].iov_len;
-- sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
-+ sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
- }
-
- if (sect_rest < fragm_rest)
-@@ -1266,7 +1266,7 @@ error:
- }
- } else
- skb_copy_to_linear_data_offset(buf, fragm_crs,
-- sect_crs, sz);
-+ (const void __force_kernel *)sect_crs, sz);
- sect_crs += sz;
- sect_rest -= sz;
- fragm_crs += sz;
-@@ -2367,8 +2367,11 @@ static int link_recv_changeover_msg(struct link **l_ptr,
- struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
- u32 msg_typ = msg_type(tunnel_msg);
- u32 msg_count = msg_msgcnt(tunnel_msg);
-+ u32 bearer_id = msg_bearer_id(tunnel_msg);
-
-- dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
-+ if (bearer_id >= MAX_BEARERS)
-+ goto exit;
-+ dest_link = (*l_ptr)->owner->links[bearer_id];
- if (!dest_link)
- goto exit;
- if (dest_link == *l_ptr) {
-@@ -2601,14 +2604,16 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
- struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
- u32 msg_sz = msg_size(imsg);
- u32 fragm_sz = msg_data_sz(fragm);
-- u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
-+ u32 exp_fragm_cnt;
- u32 max = TIPC_MAX_USER_MSG_SIZE + NAMED_H_SIZE;
-+
- if (msg_type(imsg) == TIPC_MCAST_MSG)
- max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
-- if (msg_size(imsg) > max) {
-+ if (fragm_sz == 0 || msg_size(imsg) > max) {
- buf_discard(fbuf);
- return 0;
- }
-+ exp_fragm_cnt = msg_sz / fragm_sz + !!(msg_sz % fragm_sz);
- pbuf = tipc_buf_acquire(msg_size(imsg));
- if (pbuf != NULL) {
- pbuf->next = *pending;
-diff --git a/net/tipc/msg.c b/net/tipc/msg.c
-index 83d5096..dcba497 100644
---- a/net/tipc/msg.c
-+++ b/net/tipc/msg.c
-@@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
- msg_sect[cnt].iov_len);
- else
- skb_copy_to_linear_data_offset(*buf, pos,
-- msg_sect[cnt].iov_base,
-+ (const void __force_kernel *)msg_sect[cnt].iov_base,
- msg_sect[cnt].iov_len);
- pos += msg_sect[cnt].iov_len;
- }
-diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
-index 1983717..4d6102c 100644
---- a/net/tipc/subscr.c
-+++ b/net/tipc/subscr.c
-@@ -101,7 +101,7 @@ static void subscr_send_event(struct subscription *sub,
- {
- struct iovec msg_sect;
-
-- msg_sect.iov_base = (void *)&sub->evt;
-+ msg_sect.iov_base = (void __force_user *)&sub->evt;
- msg_sect.iov_len = sizeof(struct tipc_event);
-
- sub->evt.event = htohl(event, sub->swap);
-diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
-index 8705ee3..cf68ef1 100644
---- a/net/unix/af_unix.c
-+++ b/net/unix/af_unix.c
-@@ -768,6 +768,12 @@ static struct sock *unix_find_other(struct net *net,
- err = -ECONNREFUSED;
- if (!S_ISSOCK(inode->i_mode))
- goto put_fail;
-+
-+ if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
-+ err = -EACCES;
-+ goto put_fail;
-+ }
-+
- u = unix_find_socket_byinode(inode);
- if (!u)
- goto put_fail;
-@@ -788,6 +794,13 @@ static struct sock *unix_find_other(struct net *net,
- if (u) {
- struct dentry *dentry;
- dentry = unix_sk(u)->dentry;
-+
-+ if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
-+ err = -EPERM;
-+ sock_put(u);
-+ goto fail;
-+ }
-+
- if (dentry)
- touch_atime(unix_sk(u)->mnt, dentry);
- } else
-@@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
- err = security_path_mknod(&path, dentry, mode, 0);
- if (err)
- goto out_mknod_drop_write;
-+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
-+ err = -EACCES;
-+ goto out_mknod_drop_write;
-+ }
- err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
- out_mknod_drop_write:
- mnt_drop_write(path.mnt);
- if (err)
- goto out_mknod_dput;
-+
-+ gr_handle_create(dentry, path.mnt);
-+
- mutex_unlock(&path.dentry->d_inode->i_mutex);
- dput(path.dentry);
- path.dentry = dentry;
-@@ -2180,11 +2200,14 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
- writable = unix_writable(sk);
- other = unix_peer_get(sk);
- if (other) {
-- if (unix_peer(other) != sk) {
-+ unix_state_lock(other);
-+ if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) != sk) {
-+ unix_state_unlock(other);
- sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
- if (unix_recvq_full(other))
- writable = 0;
-- }
-+ } else
-+ unix_state_unlock(other);
- sock_put(other);
- }
-
-@@ -2276,9 +2299,13 @@ static int unix_seq_show(struct seq_file *seq, void *v)
- seq_puts(seq, "Num RefCount Protocol Flags Type St "
- "Inode Path\n");
- else {
-- struct sock *s = v;
-+ struct sock *s = v, *peer;
- struct unix_sock *u = unix_sk(s);
- unix_state_lock(s);
-+ peer = unix_peer(s);
-+ unix_state_unlock(s);
-+
-+ unix_state_double_lock(s, peer);
-
- seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
- s,
-@@ -2303,10 +2330,29 @@ static int unix_seq_show(struct seq_file *seq, void *v)
- seq_putc(seq, '@');
- i++;
- }
-- for ( ; i < len; i++)
-- seq_putc(seq, u->addr->name->sun_path[i]);
-- }
-- unix_state_unlock(s);
-+ for ( ; i < len; i++) {
-+ char c = u->addr->name->sun_path[i];
-+ switch (c) {
-+ case '\n':
-+ seq_putc(seq, '\\');
-+ seq_putc(seq, 'n');
-+ break;
-+ case '\t':
-+ seq_putc(seq, '\\');
-+ seq_putc(seq, 't');
-+ break;
-+ case '\\':
-+ seq_putc(seq, '\\');
-+ seq_putc(seq, '\\');
-+ break;
-+ default:
-+ seq_putc(seq, c);
-+ }
-+ }
-+ } else if (peer)
-+ seq_printf(seq, " P%lu", sock_i_ino(peer));
-+
-+ unix_state_double_unlock(s, peer);
- seq_putc(seq, '\n');
- }
-
-diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
-index 397cffe..405fdb1 100644
---- a/net/unix/sysctl_net_unix.c
-+++ b/net/unix/sysctl_net_unix.c
-@@ -34,7 +34,7 @@ static struct ctl_path unix_path[] = {
-
- int __net_init unix_sysctl_register(struct net *net)
- {
-- struct ctl_table *table;
-+ ctl_table_no_const *table;
-
- table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL);
- if (table == NULL)
-diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c
-index c43612e..dd69d0c 100644
---- a/net/wanrouter/wanproc.c
-+++ b/net/wanrouter/wanproc.c
-@@ -289,7 +289,7 @@ static const struct file_operations wandev_fops = {
- int __init wanrouter_proc_init(void)
- {
- struct proc_dir_entry *p;
-- proc_router = proc_mkdir(ROUTER_NAME, init_net.proc_net);
-+ proc_router = proc_mkdir_restrict(ROUTER_NAME, init_net.proc_net);
- if (!proc_router)
- goto fail;
-
-diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
-index 0af7f54..c916d2f 100644
---- a/net/wireless/wext-core.c
-+++ b/net/wireless/wext-core.c
-@@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
- */
-
- /* Support for very large requests */
-- if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
-- (user_length > descr->max_tokens)) {
-+ if (user_length > descr->max_tokens) {
- /* Allow userspace to GET more than max so
- * we can support any size GET requests.
- * There is still a limit : -ENOMEM.
-@@ -785,22 +784,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
- }
- }
-
-- if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
-- /*
-- * If this is a GET, but not NOMAX, it means that the extra
-- * data is not bounded by userspace, but by max_tokens. Thus
-- * set the length to max_tokens. This matches the extra data
-- * allocation.
-- * The driver should fill it with the number of tokens it
-- * provided, and it may check iwp->length rather than having
-- * knowledge of max_tokens. If the driver doesn't change the
-- * iwp->length, this ioctl just copies back max_token tokens
-- * filled with zeroes. Hopefully the driver isn't claiming
-- * them to be valid data.
-- */
-- iwp->length = descr->max_tokens;
-- }
--
- err = handler(dev, info, (union iwreq_data *) iwp, extra);
-
- iwp->length += essid_compat;
-diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
-index d2efd29..ffeadf5 100644
---- a/net/x25/sysctl_net_x25.c
-+++ b/net/x25/sysctl_net_x25.c
-@@ -70,7 +70,7 @@ static struct ctl_table x25_table[] = {
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
-- { 0, },
-+ { },
- };
-
- static struct ctl_path x25_path[] = {
-diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c
-index 2ffde46..76f0432 100644
---- a/net/x25/x25_proc.c
-+++ b/net/x25/x25_proc.c
-@@ -217,7 +217,7 @@ int __init x25_proc_init(void)
- struct proc_dir_entry *p;
- int rc = -ENOMEM;
-
-- x25_proc_dir = proc_mkdir("x25", init_net.proc_net);
-+ x25_proc_dir = proc_mkdir_restrict("x25", init_net.proc_net);
- if (!x25_proc_dir)
- goto out;
-
-diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
-index 113d20e..2bb5a4e 100644
---- a/net/xfrm/xfrm_policy.c
-+++ b/net/xfrm/xfrm_policy.c
-@@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
- {
- policy->walk.dead = 1;
-
-- atomic_inc(&policy->genid);
-+ atomic_inc_unchecked(&policy->genid);
-
- if (del_timer(&policy->timer))
- xfrm_pol_put(policy);
-@@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
- hlist_add_head(&policy->bydst, chain);
- xfrm_pol_hold(policy);
- net->xfrm.policy_count[dir]++;
-- atomic_inc(&flow_cache_genid);
-+ atomic_inc_unchecked(&flow_cache_genid);
- if (delpol)
- __xfrm_policy_unlink(delpol, dir);
- policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
-@@ -1530,7 +1530,7 @@ free_dst:
- goto out;
- }
-
--static int inline
-+static inline int
- xfrm_dst_alloc_copy(void **target, const void *src, int size)
- {
- if (!*target) {
-@@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
- return 0;
- }
-
--static int inline
-+static inline int
- xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
- {
- #ifdef CONFIG_XFRM_SUB_POLICY
-@@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
- #endif
- }
-
--static int inline
-+static inline int
- xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
- {
- #ifdef CONFIG_XFRM_SUB_POLICY
-@@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
-
- xdst->num_pols = num_pols;
- memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
-- xdst->policy_genid = atomic_read(&pols[0]->genid);
-+ xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
-
- return xdst;
- }
-@@ -2297,11 +2297,12 @@ static void xfrm_garbage_collect(struct net *net)
- __xfrm_garbage_collect(net);
- }
-
--static void xfrm_garbage_collect_deferred(struct net *net)
-+void xfrm_garbage_collect_deferred(struct net *net)
- {
- flow_cache_flush_deferred();
- __xfrm_garbage_collect(net);
- }
-+EXPORT_SYMBOL(xfrm_garbage_collect_deferred);
-
- static void xfrm_init_pmtu(struct dst_entry *dst)
- {
-@@ -2348,7 +2349,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
- if (xdst->xfrm_genid != dst->xfrm->genid)
- return 0;
- if (xdst->num_pols > 0 &&
-- xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
-+ xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
- return 0;
-
- mtu = dst_mtu(dst->child);
-@@ -2434,8 +2435,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
- dst_ops->link_failure = xfrm_link_failure;
- if (likely(dst_ops->neigh_lookup == NULL))
- dst_ops->neigh_lookup = xfrm_neigh_lookup;
-- if (likely(afinfo->garbage_collect == NULL))
-- afinfo->garbage_collect = xfrm_garbage_collect_deferred;
- xfrm_policy_afinfo[afinfo->family] = afinfo;
- }
- write_unlock_bh(&xfrm_policy_afinfo_lock);
-@@ -2482,7 +2481,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
- dst_ops->check = NULL;
- dst_ops->negative_advice = NULL;
- dst_ops->link_failure = NULL;
-- afinfo->garbage_collect = NULL;
- }
- }
- write_unlock_bh(&xfrm_policy_afinfo_lock);
-@@ -2692,7 +2690,7 @@ static void __net_exit xfrm_net_exit(struct net *net)
- xfrm_statistics_fini(net);
- }
-
--static struct pernet_operations __net_initdata xfrm_net_ops = {
-+static struct pernet_operations __net_initconst xfrm_net_ops = {
- .init = xfrm_net_init,
- .exit = xfrm_net_exit,
- };
-@@ -2885,7 +2883,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
- sizeof(pol->xfrm_vec[i].saddr));
- pol->xfrm_vec[i].encap_family = mp->new_family;
- /* flush bundles */
-- atomic_inc(&pol->genid);
-+ atomic_inc_unchecked(&pol->genid);
- }
- }
-
-diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
-index 3efb07d..2576ee4 100644
---- a/net/xfrm/xfrm_replay.c
-+++ b/net/xfrm/xfrm_replay.c
-@@ -129,8 +129,7 @@ static int xfrm_replay_check(struct xfrm_state *x,
- return 0;
-
- diff = x->replay.seq - seq;
-- if (diff >= min_t(unsigned int, x->props.replay_window,
-- sizeof(x->replay.bitmap) * 8)) {
-+ if (diff >= x->props.replay_window) {
- x->stats.replay_window++;
- goto err;
- }
-diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
-index 9414b9c..2477932 100644
---- a/net/xfrm/xfrm_state.c
-+++ b/net/xfrm/xfrm_state.c
-@@ -194,11 +194,13 @@ int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
-
- if (unlikely(afinfo == NULL))
- return -EAFNOSUPPORT;
-- typemap = afinfo->type_map;
-+ typemap = (const struct xfrm_type **)afinfo->type_map;
-
-- if (likely(typemap[type->proto] == NULL))
-+ if (likely(typemap[type->proto] == NULL)) {
-+ pax_open_kernel();
- typemap[type->proto] = type;
-- else
-+ pax_close_kernel();
-+ } else
- err = -EEXIST;
- xfrm_state_unlock_afinfo(afinfo);
- return err;
-@@ -213,12 +215,15 @@ int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
-
- if (unlikely(afinfo == NULL))
- return -EAFNOSUPPORT;
-- typemap = afinfo->type_map;
-+ typemap = (const struct xfrm_type **)afinfo->type_map;
-
- if (unlikely(typemap[type->proto] != type))
- err = -ENOENT;
-- else
-+ else {
-+ pax_open_kernel();
- typemap[type->proto] = NULL;
-+ pax_close_kernel();
-+ }
- xfrm_state_unlock_afinfo(afinfo);
- return err;
- }
-@@ -227,7 +232,6 @@ EXPORT_SYMBOL(xfrm_unregister_type);
- static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
- {
- struct xfrm_state_afinfo *afinfo;
-- const struct xfrm_type **typemap;
- const struct xfrm_type *type;
- int modload_attempted = 0;
-
-@@ -235,9 +239,8 @@ retry:
- afinfo = xfrm_state_get_afinfo(family);
- if (unlikely(afinfo == NULL))
- return NULL;
-- typemap = afinfo->type_map;
-
-- type = typemap[proto];
-+ type = afinfo->type_map[proto];
- if (unlikely(type && !try_module_get(type->owner)))
- type = NULL;
- if (!type && !modload_attempted) {
-@@ -270,7 +273,7 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
- return -EAFNOSUPPORT;
-
- err = -EEXIST;
-- modemap = afinfo->mode_map;
-+ modemap = (struct xfrm_mode **)afinfo->mode_map;
- if (modemap[mode->encap])
- goto out;
-
-@@ -278,8 +281,10 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
- if (!try_module_get(afinfo->owner))
- goto out;
-
-- mode->afinfo = afinfo;
-+ pax_open_kernel();
-+ *(const void **)&mode->afinfo = afinfo;
- modemap[mode->encap] = mode;
-+ pax_close_kernel();
- err = 0;
-
- out:
-@@ -302,9 +307,11 @@ int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
- return -EAFNOSUPPORT;
-
- err = -ENOENT;
-- modemap = afinfo->mode_map;
-+ modemap = (struct xfrm_mode **)afinfo->mode_map;
- if (likely(modemap[mode->encap] == mode)) {
-+ pax_open_kernel();
- modemap[mode->encap] = NULL;
-+ pax_close_kernel();
- module_put(mode->afinfo->owner);
- err = 0;
- }
-@@ -1497,10 +1504,10 @@ EXPORT_SYMBOL(xfrm_find_acq_byseq);
- u32 xfrm_get_acqseq(void)
- {
- u32 res;
-- static atomic_t acqseq;
-+ static atomic_unchecked_t acqseq;
-
- do {
-- res = atomic_inc_return(&acqseq);
-+ res = atomic_inc_return_unchecked(&acqseq);
- } while (!res);
-
- return res;
-@@ -1985,8 +1992,10 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
- goto error;
-
- x->outer_mode = xfrm_get_mode(x->props.mode, family);
-- if (x->outer_mode == NULL)
-+ if (x->outer_mode == NULL) {
-+ err = -EPROTONOSUPPORT;
- goto error;
-+ }
-
- if (init_replay) {
- err = xfrm_init_replay(x);
-diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
-index 05640bc..b67eaaa 100644
---- a/net/xfrm/xfrm_sysctl.c
-+++ b/net/xfrm/xfrm_sysctl.c
-@@ -42,7 +42,7 @@ static struct ctl_table xfrm_table[] = {
-
- int __net_init xfrm_sysctl_init(struct net *net)
- {
-- struct ctl_table *table;
-+ ctl_table_no_const *table;
-
- __xfrm_sysctl_init(net);
-
-diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
-index ede01a8..756e6bd 100644
---- a/net/xfrm/xfrm_user.c
-+++ b/net/xfrm/xfrm_user.c
-@@ -446,7 +446,8 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
- memcpy(&x->sel, &p->sel, sizeof(x->sel));
- memcpy(&x->lft, &p->lft, sizeof(x->lft));
- x->props.mode = p->mode;
-- x->props.replay_window = p->replay_window;
-+ x->props.replay_window = min_t(unsigned int, p->replay_window,
-+ sizeof(x->replay.bitmap) * 8);
- x->props.reqid = p->reqid;
- x->props.family = p->family;
- memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
-@@ -1816,7 +1817,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
- if (x->km.state != XFRM_STATE_VALID)
- goto out;
-
-- err = xfrm_replay_verify_len(x->replay_esn, rp);
-+ err = xfrm_replay_verify_len(x->replay_esn, re);
- if (err)
- goto out;
-
-diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
-index 978416d..a1c341e 100644
---- a/scripts/Kbuild.include
-+++ b/scripts/Kbuild.include
-@@ -143,7 +143,7 @@ cc-ifversion = $(shell [ $(call cc-version, $(CC)) $(1) $(2) ] && echo $(3))
- # cc-ldoption
- # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
- cc-ldoption = $(call try-run,\
-- $(CC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
-+ $(CC) $(1) -Wl,-r -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
-
- # ld-option
- # Usage: LDFLAGS += $(call ld-option, -X)
-diff --git a/scripts/Makefile.build b/scripts/Makefile.build
-index d2b366c1..2d5a6f8 100644
---- a/scripts/Makefile.build
-+++ b/scripts/Makefile.build
-@@ -109,7 +109,7 @@ endif
- endif
-
- # Do not include host rules unless needed
--ifneq ($(hostprogs-y)$(hostprogs-m),)
-+ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m)$(hostcxxlibs-y)$(hostcxxlibs-m),)
- include scripts/Makefile.host
- endif
-
-diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
-index 686cb0d..9d653bf 100644
---- a/scripts/Makefile.clean
-+++ b/scripts/Makefile.clean
-@@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
- __clean-files := $(extra-y) $(always) \
- $(targets) $(clean-files) \
- $(host-progs) \
-- $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
-+ $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
-+ $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
-
- __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
-
-diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst
-index a57f5bd..d3bae5e 100644
---- a/scripts/Makefile.headersinst
-+++ b/scripts/Makefile.headersinst
-@@ -4,12 +4,16 @@
- # header-y - list files to be installed. They are preprocessed
- # to remove __KERNEL__ section of the file
- # objhdr-y - Same as header-y but for generated files
-+# genhdr-y - Same as objhdr-y but in a generated/ directory
- #
- # ==========================================================================
-
- # called may set destination dir (when installing to asm/)
- _dst := $(if $(dst),$(dst),$(obj))
-
-+# generated header directory
-+gen := $(if $(gen),$(gen),$(subst include/,include/generated/,$(obj)))
-+
- kbuild-file := $(srctree)/$(obj)/Kbuild
- include $(kbuild-file)
-
-@@ -33,9 +37,10 @@ wrapper-files := $(filter $(header-y), $(generic-y))
-
- # all headers files for this dir
- header-y := $(filter-out $(generic-y), $(header-y))
--all-files := $(header-y) $(objhdr-y) $(wrapper-files)
-+all-files := $(header-y) $(objhdr-y) $(genhdr-y) $(wrapper-files)
- input-files := $(addprefix $(srctree)/$(obj)/,$(header-y)) \
-- $(addprefix $(objtree)/$(obj)/,$(objhdr-y))
-+ $(addprefix $(objtree)/$(obj)/,$(objhdr-y)) \
-+ $(addprefix $(objtree)/$(gen)/,$(genhdr-y))
- output-files := $(addprefix $(install)/, $(all-files))
-
- # Work out what needs to be removed
-@@ -52,6 +57,7 @@ quiet_cmd_install = INSTALL $(printdir) ($(words $(all-files))\
- cmd_install = \
- $(PERL) $< $(srctree)/$(obj) $(install) $(SRCARCH) $(header-y); \
- $(PERL) $< $(objtree)/$(obj) $(install) $(SRCARCH) $(objhdr-y); \
-+ $(PERL) $< $(objtree)/$(gen) $(install) $(SRCARCH) $(genhdr-y); \
- for F in $(wrapper-files); do \
- echo "\#include <asm-generic/$$F>" > $(install)/$$F; \
- done; \
-diff --git a/scripts/Makefile.host b/scripts/Makefile.host
-index 1ac414f..38575f7 100644
---- a/scripts/Makefile.host
-+++ b/scripts/Makefile.host
-@@ -31,6 +31,8 @@
- # Note: Shared libraries consisting of C++ files are not supported
-
- __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
-+__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
-+__hostcxxlibs := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
-
- # C code
- # Executables compiled from a single .c file
-@@ -54,11 +56,15 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
- # Shared libaries (only .c supported)
- # Shared libraries (.so) - all .so files referenced in "xxx-objs"
- host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
-+host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
-+host-cxxshlib := $(sort $(filter %.so, $(__hostcxxlibs)))
- # Remove .so files from "xxx-objs"
- host-cobjs := $(filter-out %.so,$(host-cobjs))
-+host-cxxobjs := $(filter-out %.so,$(host-cxxobjs))
-
--#Object (.o) files used by the shared libaries
-+# Object (.o) files used by the shared libaries
- host-cshobjs := $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs))))
-+host-cxxshobjs := $(sort $(foreach m,$(host-cxxshlib),$($(m:.so=-objs))))
-
- # output directory for programs/.o files
- # hostprogs-y := tools/build may have been specified. Retrieve directory
-@@ -82,7 +88,9 @@ host-cobjs := $(addprefix $(obj)/,$(host-cobjs))
- host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti))
- host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs))
- host-cshlib := $(addprefix $(obj)/,$(host-cshlib))
-+host-cxxshlib := $(addprefix $(obj)/,$(host-cxxshlib))
- host-cshobjs := $(addprefix $(obj)/,$(host-cshobjs))
-+host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs))
- host-objdirs := $(addprefix $(obj)/,$(host-objdirs))
-
- obj-dirs += $(host-objdirs)
-@@ -156,6 +164,13 @@ quiet_cmd_host-cshobjs = HOSTCC -fPIC $@
- $(host-cshobjs): $(obj)/%.o: $(src)/%.c FORCE
- $(call if_changed_dep,host-cshobjs)
-
-+# Compile .c file, create position independent .o file
-+# host-cxxshobjs -> .o
-+quiet_cmd_host-cxxshobjs = HOSTCXX -fPIC $@
-+ cmd_host-cxxshobjs = $(HOSTCXX) $(hostcxx_flags) -fPIC -c -o $@ $<
-+$(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE
-+ $(call if_changed_dep,host-cxxshobjs)
-+
- # Link a shared library, based on position independent .o files
- # *.o -> .so shared library (host-cshlib)
- quiet_cmd_host-cshlib = HOSTLLD -shared $@
-@@ -165,6 +180,15 @@ quiet_cmd_host-cshlib = HOSTLLD -shared $@
- $(host-cshlib): $(obj)/%: $(host-cshobjs) FORCE
- $(call if_changed,host-cshlib)
-
-+# Link a shared library, based on position independent .o files
-+# *.o -> .so shared library (host-cxxshlib)
-+quiet_cmd_host-cxxshlib = HOSTLLD -shared $@
-+ cmd_host-cxxshlib = $(HOSTCXX) $(HOSTLDFLAGS) -shared -o $@ \
-+ $(addprefix $(obj)/,$($(@F:.so=-objs))) \
-+ $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
-+$(host-cxxshlib): $(obj)/%: $(host-cxxshobjs) FORCE
-+ $(call if_changed,host-cxxshlib)
-+
- targets += $(host-csingle) $(host-cmulti) $(host-cobjs)\
-- $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs)
-+ $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) $(host-cxxshlib) $(host-cxxshobjs)
-
-diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
-index 5d986d9..7c8f0e8 100644
---- a/scripts/Makefile.lib
-+++ b/scripts/Makefile.lib
-@@ -63,7 +63,7 @@ multi-objs := $(multi-objs-y) $(multi-objs-m)
- subdir-obj-y := $(filter %/built-in.o, $(obj-y))
-
- # $(obj-dirs) is a list of directories that contain object files
--obj-dirs := $(dir $(multi-objs) $(subdir-obj-y))
-+obj-dirs := $(dir $(multi-objs) $(obj-y))
-
- # Replace multi-part objects by their individual parts, look at local dir only
- real-objs-y := $(foreach m, $(filter-out $(subdir-obj-y), $(obj-y)), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m))) $(extra-y)
-diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
-index cb1f50c..cef2a7c 100644
---- a/scripts/basic/fixdep.c
-+++ b/scripts/basic/fixdep.c
-@@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
- /*
- * Lookup a value in the configuration string.
- */
--static int is_defined_config(const char *name, int len, unsigned int hash)
-+static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
- {
- struct item *aux;
-
-@@ -211,10 +211,10 @@ static void clear_config(void)
- /*
- * Record the use of a CONFIG_* word.
- */
--static void use_config(const char *m, int slen)
-+static void use_config(const char *m, unsigned int slen)
- {
- unsigned int hash = strhash(m, slen);
-- int c, i;
-+ unsigned int c, i;
-
- if (is_defined_config(m, slen, hash))
- return;
-@@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
-
- static void parse_config_file(const char *map, size_t len)
- {
-- const int *end = (const int *) (map + len);
-+ const unsigned int *end = (const unsigned int *) (map + len);
- /* start at +1, so that p can never be < map */
-- const int *m = (const int *) map + 1;
-+ const unsigned int *m = (const unsigned int *) map + 1;
- const char *p, *q;
-
- for (; m < end; m++) {
-@@ -406,7 +406,7 @@ static void print_deps(void)
- static void traps(void)
- {
- static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
-- int *p = (int *)test;
-+ unsigned int *p = (unsigned int *)test;
-
- if (*p != INT_CONF) {
- fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
-diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
-new file mode 100644
-index 0000000..eaa4fce
---- /dev/null
-+++ b/scripts/gcc-plugin.sh
-@@ -0,0 +1,51 @@
-+#!/bin/sh
-+srctree=$(dirname "$0")
-+gccplugins_dir=$($3 -print-file-name=plugin)
-+plugincc=$($1 -E -x c++ - -o /dev/null -I"${srctree}"/../tools/gcc -I"${gccplugins_dir}"/include 2>&1 <<EOF
-+#include "gcc-common.h"
-+#if BUILDING_GCC_VERSION >= 4008 || defined(ENABLE_BUILD_WITH_CXX)
-+#warning $2 CXX
-+#else
-+#warning $1 CC
-+#endif
-+EOF
-+)
-+
-+if [ $? -ne 0 ]
-+then
-+ exit 1
-+fi
-+
-+case "$plugincc" in
-+ *"$1 CC"*)
-+ echo "$1"
-+ exit 0
-+ ;;
-+
-+ *"$2 CXX"*)
-+ # the c++ compiler needs another test, see below
-+ ;;
-+
-+ *)
-+ exit 1
-+ ;;
-+esac
-+
-+# we need a c++ compiler that supports the designated initializer GNU extension
-+plugincc=$($2 -c -x c++ -std=gnu++98 - -fsyntax-only -I"${srctree}"/../tools/gcc -I"${gccplugins_dir}"/include 2>&1 <<EOF
-+#include "gcc-common.h"
-+class test {
-+public:
-+ int test;
-+} test = {
-+ .test = 1
-+};
-+EOF
-+)
-+
-+if [ $? -eq 0 ]
-+then
-+ echo "$2"
-+ exit 0
-+fi
-+exit 1
-diff --git a/scripts/gcc-version.sh b/scripts/gcc-version.sh
-old mode 100644
-new mode 100755
-diff --git a/scripts/headers_install.pl b/scripts/headers_install.pl
-index 48462be..3e08f94 100644
---- a/scripts/headers_install.pl
-+++ b/scripts/headers_install.pl
-@@ -33,6 +33,7 @@ foreach my $file (@files) {
- $line =~ s/([\s(])__user\s/$1/g;
- $line =~ s/([\s(])__force\s/$1/g;
- $line =~ s/([\s(])__iomem\s/$1/g;
-+ $line =~ s/(\s?)__intentional_overflow\([-\d\s,]*\)\s?/$1/g;
- $line =~ s/\s__attribute_const__\s/ /g;
- $line =~ s/\s__attribute_const__$//g;
- $line =~ s/\b__packed\b/__attribute__((packed))/g;
-diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
-index 98ff331..9a48619 100644
---- a/scripts/mod/file2alias.c
-+++ b/scripts/mod/file2alias.c
-@@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
- unsigned long size, unsigned long id_size,
- void *symval)
- {
-- int i;
-+ unsigned int i;
-
- if (size % id_size || size < id_size) {
- if (cross_build != 0)
-@@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
- /* USB is special because the bcdDevice can be matched against a numeric range */
- /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
- static void do_usb_entry(struct usb_device_id *id,
-- unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
-+ unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
- unsigned char range_lo, unsigned char range_hi,
- unsigned char max, struct module *mod)
- {
-@@ -203,7 +203,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
- {
- unsigned int devlo, devhi;
- unsigned char chi, clo, max;
-- int ndigits;
-+ unsigned int ndigits;
-
- id->match_flags = TO_NATIVE(id->match_flags);
- id->idVendor = TO_NATIVE(id->idVendor);
-@@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
- for (i = 0; i < count; i++) {
- const char *id = (char *)devs[i].id;
- char acpi_id[sizeof(devs[0].id)];
-- int j;
-+ unsigned int j;
-
- buf_printf(&mod->dev_table_buf,
- "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
-@@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
-
- for (j = 0; j < PNP_MAX_DEVICES; j++) {
- const char *id = (char *)card->devs[j].id;
-- int i2, j2;
-+ unsigned int i2, j2;
- int dup = 0;
-
- if (!id[0])
-@@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
- /* add an individual alias for every device entry */
- if (!dup) {
- char acpi_id[sizeof(card->devs[0].id)];
-- int k;
-+ unsigned int k;
-
- buf_printf(&mod->dev_table_buf,
- "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
-@@ -807,7 +807,7 @@ static void dmi_ascii_filter(char *d, const char *s)
- static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
- char *alias)
- {
-- int i, j;
-+ unsigned int i, j;
-
- sprintf(alias, "dmi*");
-
-diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
-index dc5748f..193bd1d 100644
---- a/scripts/mod/modpost.c
-+++ b/scripts/mod/modpost.c
-@@ -926,6 +926,7 @@ enum mismatch {
- ANY_INIT_TO_ANY_EXIT,
- ANY_EXIT_TO_ANY_INIT,
- EXPORT_TO_INIT_EXIT,
-+ DATA_TO_TEXT
- };
-
- struct sectioncheck {
-@@ -1034,6 +1035,12 @@ const struct sectioncheck sectioncheck[] = {
- .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
- .mismatch = EXPORT_TO_INIT_EXIT,
- .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
-+},
-+/* Do not reference code from writable data */
-+{
-+ .fromsec = { DATA_SECTIONS, NULL },
-+ .tosec = { TEXT_SECTIONS, NULL },
-+ .mismatch = DATA_TO_TEXT
- }
- };
-
-@@ -1156,10 +1163,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
- continue;
- if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
- continue;
-- if (sym->st_value == addr)
-- return sym;
- /* Find a symbol nearby - addr are maybe negative */
- d = sym->st_value - addr;
-+ if (d == 0)
-+ return sym;
- if (d < 0)
- d = addr - sym->st_value;
- if (d < distance) {
-@@ -1438,6 +1445,14 @@ static void report_sec_mismatch(const char *modname,
- tosym, prl_to, prl_to, tosym);
- free(prl_to);
- break;
-+ case DATA_TO_TEXT:
-+#if 0
-+ fprintf(stderr,
-+ "The %s %s:%s references\n"
-+ "the %s %s:%s%s\n",
-+ from, fromsec, fromsym, to, tosec, tosym, to_p);
-+#endif
-+ break;
- }
- fprintf(stderr, "\n");
- }
-@@ -1663,7 +1678,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
- static void check_sec_ref(struct module *mod, const char *modname,
- struct elf_info *elf)
- {
-- int i;
-+ unsigned int i;
- Elf_Shdr *sechdrs = elf->sechdrs;
-
- /* Walk through all sections */
-@@ -1761,7 +1776,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
- va_end(ap);
- }
-
--void buf_write(struct buffer *buf, const char *s, int len)
-+void buf_write(struct buffer *buf, const char *s, unsigned int len)
- {
- if (buf->size - buf->pos < len) {
- buf->size += len + SZ;
-@@ -1979,7 +1994,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
- if (fstat(fileno(file), &st) < 0)
- goto close_write;
-
-- if (st.st_size != b->pos)
-+ if (st.st_size != (off_t)b->pos)
- goto close_write;
-
- tmp = NOFAIL(malloc(b->pos));
-diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
-index 51207e4..f7d603d 100644
---- a/scripts/mod/modpost.h
-+++ b/scripts/mod/modpost.h
-@@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
-
- struct buffer {
- char *p;
-- int pos;
-- int size;
-+ unsigned int pos;
-+ unsigned int size;
- };
-
- void __attribute__((format(printf, 2, 3)))
- buf_printf(struct buffer *buf, const char *fmt, ...);
-
- void
--buf_write(struct buffer *buf, const char *s, int len);
-+buf_write(struct buffer *buf, const char *s, unsigned int len);
-
- struct module {
- struct module *next;
-diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
-index 9dfcd6d..099068e 100644
---- a/scripts/mod/sumversion.c
-+++ b/scripts/mod/sumversion.c
-@@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
- goto out;
- }
-
-- if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
-+ if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
- warn("writing sum in %s failed: %s\n",
- filename, strerror(errno));
- goto out;
-diff --git a/scripts/module-common.lds b/scripts/module-common.lds
-index 0865b3e..7235dd4 100644
---- a/scripts/module-common.lds
-+++ b/scripts/module-common.lds
-@@ -6,6 +6,10 @@
- SECTIONS {
- /DISCARD/ : { *(.discard) }
-
-+ .rodata : {
-+ *(.rodata) *(.rodata.*)
-+ *(.data..read_only)
-+ }
- __ksymtab : { *(SORT(___ksymtab+*)) }
- __ksymtab_gpl : { *(SORT(___ksymtab_gpl+*)) }
- __ksymtab_unused : { *(SORT(___ksymtab_unused+*)) }
-diff --git a/scripts/package/Makefile b/scripts/package/Makefile
-index bc6aa00..51086c8 100644
---- a/scripts/package/Makefile
-+++ b/scripts/package/Makefile
-@@ -45,7 +45,7 @@ rpm-pkg rpm: $(objtree)/kernel.spec FORCE
- $(MAKE) clean
- $(PREV) ln -sf $(srctree) $(KERNELPATH)
- $(CONFIG_SHELL) $(srctree)/scripts/setlocalversion --save-scmversion
-- $(PREV) tar -cz $(RCS_TAR_IGNORE) -f $(KERNELPATH).tar.gz $(KERNELPATH)/.
-+ $(PREV) tar --owner=root --group=root -cz $(RCS_TAR_IGNORE) -f $(KERNELPATH).tar.gz $(KERNELPATH)/.
- $(PREV) rm $(KERNELPATH)
- rm -f $(objtree)/.scmversion
- set -e; \
-diff --git a/scripts/package/builddeb b/scripts/package/builddeb
-index bee55f6..4108c4b 100644
---- a/scripts/package/builddeb
-+++ b/scripts/package/builddeb
-@@ -241,6 +241,7 @@ fi
- (cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
- (cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
- (cd $objtree; find Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
-+(cd $objtree; find tools/gcc -name \*.so >> "$objtree/debian/hdrobjfiles")
- destdir=$kernel_headers_dir/usr/src/linux-headers-$version
- mkdir -p "$destdir"
- (cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
-diff --git a/scripts/package/mkspec b/scripts/package/mkspec
-index 4bf17dd..bca2734 100755
---- a/scripts/package/mkspec
-+++ b/scripts/package/mkspec
-@@ -1,7 +1,7 @@
- #!/bin/sh
- #
--# Output a simple RPM spec file that uses no fancy features requiring
--# RPM v4. This is intended to work with any RPM distro.
-+# Output a simple RPM spec file.
-+# This version assumes a minimum of RPM 4.0.3.
- #
- # The only gothic bit here is redefining install_post to avoid
- # stripping the symbols from files in the kernel which we want
-@@ -59,6 +59,14 @@ echo "header files define structures and constants that are needed for"
- echo "building most standard programs and are also needed for rebuilding the"
- echo "glibc package."
- echo ""
-+echo "%package devel"
-+echo "Summary: Development package for building kernel modules to match the $__KERNELRELEASE kernel"
-+echo "Group: System Environment/Kernel"
-+echo "AutoReqProv: no"
-+echo "%description -n kernel-devel"
-+echo "This package provides kernel headers and makefiles sufficient to build modules"
-+echo "against the $__KERNELRELEASE kernel package."
-+echo ""
-
- if ! $PREBUILT; then
- echo "%prep"
-@@ -74,15 +82,17 @@ echo ""
- fi
-
- echo "%install"
-+echo 'KBUILD_IMAGE=$(make image_name)'
- echo "%ifarch ia64"
- echo 'mkdir -p $RPM_BUILD_ROOT/boot/efi $RPM_BUILD_ROOT/lib/modules'
--echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
- echo "%else"
- echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib/modules'
--echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
- echo "%endif"
-+echo 'mkdir -p $RPM_BUILD_ROOT'"/lib/firmware/$KERNELRELEASE"
-
--echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
-+echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= mod-fw= modules_install'
-+echo 'INSTALL_FW_PATH=$RPM_BUILD_ROOT'"/lib/firmware/$KERNELRELEASE"
-+echo 'make INSTALL_FW_PATH=$INSTALL_FW_PATH' firmware_install
- echo "%ifarch ia64"
- echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
- echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
-@@ -95,7 +105,7 @@ echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/vmlinuz-$KERNELRELEASE"
- echo "%endif"
- echo "%endif"
-
--echo 'make %{?_smp_mflags} INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_install'
-+echo 'make %{?_smp_mflags} INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr KBUILD_SRC= headers_install'
- echo 'cp System.map $RPM_BUILD_ROOT'"/boot/System.map-$KERNELRELEASE"
-
- echo 'cp .config $RPM_BUILD_ROOT'"/boot/config-$KERNELRELEASE"
-@@ -107,18 +117,53 @@ echo 'mv vmlinux.bz2 $RPM_BUILD_ROOT'"/boot/vmlinux-$KERNELRELEASE.bz2"
- echo 'mv vmlinux.orig vmlinux'
- echo "%endif"
-
-+echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/{build,source}"
-+echo "mkdir -p "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE"
-+echo "EXCLUDES=\"$RCS_TAR_IGNORE --exclude .tmp_versions --exclude=*vmlinux* --exclude=*.o --exclude=*.ko --exclude=*.cmd --exclude=Documentation --exclude=firmware --exclude .config.old --exclude .missing-syscalls.d\""
-+echo "tar "'$EXCLUDES'" -cf- . | (cd "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE;tar xvf -)"
-+
- echo ""
- echo "%clean"
- echo 'rm -rf $RPM_BUILD_ROOT'
- echo ""
-+echo "%pre"
-+echo 'chmod -f 0500 /boot'
-+echo 'if [ -d /lib/modules ]; then'
-+echo 'chmod -f 0500 /lib/modules'
-+echo 'fi'
-+echo 'if [ -d /lib32/modules ]; then'
-+echo 'chmod -f 0500 /lib32/modules'
-+echo 'fi'
-+echo 'if [ -d /lib64/modules ]; then'
-+echo 'chmod -f 0500 /lib64/modules'
-+echo 'fi'
-+echo ""
-+echo "%post devel"
-+echo "ln -sf /usr/src/kernels/$KERNELRELEASE /lib/modules/$KERNELRELEASE/build"
-+echo "ln -sf /usr/src/kernels/$KERNELRELEASE /lib/modules/$KERNELRELEASE/source"
-+echo ""
-+echo "%post"
-+echo "if [ -x /sbin/dracut ]; then"
-+echo '/sbin/new-kernel-pkg --dracut --mkinitrd --depmod --install --make-default '"$KERNELRELEASE"' || exit $?'
-+echo "else"
-+echo '/sbin/new-kernel-pkg --mkinitrd --depmod --install --make-default '"$KERNELRELEASE"' || exit $?'
-+echo "fi"
-+echo ""
- echo "%files"
--echo '%defattr (-, root, root)'
-+echo '%defattr (400, root, root, 500)'
- echo "%dir /lib/modules"
-+echo "%exclude /lib/modules/$KERNELRELEASE/build"
-+echo "%exclude /lib/modules/$KERNELRELEASE/source"
- echo "/lib/modules/$KERNELRELEASE"
--echo "/lib/firmware"
-+echo "/lib/firmware/$KERNELRELEASE"
- echo "/boot/*"
- echo ""
- echo "%files headers"
- echo '%defattr (-, root, root)'
- echo "/usr/include"
- echo ""
-+echo "%files devel"
-+echo '%defattr (400, root, root, 500)'
-+echo "%dir /lib/modules/$KERNELRELEASE"
-+echo "/usr/src/kernels/$KERNELRELEASE"
-+echo ""
-diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
-index 5c11312..72742b5 100644
---- a/scripts/pnmtologo.c
-+++ b/scripts/pnmtologo.c
-@@ -237,14 +237,14 @@ static void write_header(void)
- fprintf(out, " * Linux logo %s\n", logoname);
- fputs(" */\n\n", out);
- fputs("#include <linux/linux_logo.h>\n\n", out);
-- fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
-+ fprintf(out, "static unsigned char %s_data[] = {\n",
- logoname);
- }
-
- static void write_footer(void)
- {
- fputs("\n};\n\n", out);
-- fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
-+ fprintf(out, "const struct linux_logo %s = {\n", logoname);
- fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
- fprintf(out, "\t.width\t\t= %d,\n", logo_width);
- fprintf(out, "\t.height\t\t= %d,\n", logo_height);
-@@ -374,7 +374,7 @@ static void write_logo_clut224(void)
- fputs("\n};\n\n", out);
-
- /* write logo clut */
-- fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
-+ fprintf(out, "static unsigned char %s_clut[] = {\n",
- logoname);
- write_hex_cnt = 0;
- for (i = 0; i < logo_clutsize; i++) {
-diff --git a/scripts/tags.sh b/scripts/tags.sh
-index 38f6617..e70b72b 100755
---- a/scripts/tags.sh
-+++ b/scripts/tags.sh
-@@ -116,7 +116,7 @@ docscope()
-
- dogtags()
- {
-- all_sources | gtags -f -
-+ all_sources | gtags -i -f -
- }
-
- exuberant()
-diff --git a/security/Kconfig b/security/Kconfig
-index 51bd5a0..9cb2b83 100644
---- a/security/Kconfig
-+++ b/security/Kconfig
-@@ -4,6 +4,977 @@
-
- menu "Security options"
-
-+menu "Grsecurity"
-+
-+ config ARCH_TRACK_EXEC_LIMIT
-+ bool
-+
-+ config PAX_KERNEXEC_PLUGIN
-+ bool
-+
-+ config PAX_PER_CPU_PGD
-+ bool
-+
-+ config TASK_SIZE_MAX_SHIFT
-+ int
-+ depends on X86_64
-+ default 47 if !PAX_PER_CPU_PGD
-+ default 42 if PAX_PER_CPU_PGD
-+
-+ config PAX_ENABLE_PAE
-+ bool
-+ default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
-+
-+ config PAX_USERCOPY_SLABS
-+ bool
-+
-+config GRKERNSEC
-+ bool "Grsecurity"
-+ select CRYPTO
-+ select CRYPTO_SHA256
-+ select PROC_FS
-+ select STOP_MACHINE
-+ select DEBUG_KERNEL
-+ select DEBUG_LIST
-+ help
-+ If you say Y here, you will be able to configure many features
-+ that will enhance the security of your system. It is highly
-+ recommended that you say Y here and read through the help
-+ for each option so that you fully understand the features and
-+ can evaluate their usefulness for your machine.
-+
-+choice
-+ prompt "Configuration Method"
-+ depends on GRKERNSEC
-+ default GRKERNSEC_CONFIG_CUSTOM
-+ help
-+
-+config GRKERNSEC_CONFIG_AUTO
-+ bool "Automatic"
-+ help
-+ If you choose this configuration method, you'll be able to answer a small
-+ number of simple questions about how you plan to use this kernel.
-+ The settings of grsecurity and PaX will be automatically configured for
-+ the highest commonly-used settings within the provided constraints.
-+
-+ If you require additional configuration, custom changes can still be made
-+ from the "custom configuration" menu.
-+
-+config GRKERNSEC_CONFIG_CUSTOM
-+ bool "Custom"
-+ help
-+ If you choose this configuration method, you'll be able to configure all
-+ grsecurity and PaX settings manually. Via this method, no options are
-+ automatically enabled.
-+
-+ Take note that if menuconfig is exited with this configuration method
-+ chosen, you will not be able to use the automatic configuration methods
-+ without starting again with a kernel configuration with no grsecurity
-+ or PaX options specified inside.
-+
-+endchoice
-+
-+choice
-+ prompt "Usage Type"
-+ depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
-+ default GRKERNSEC_CONFIG_SERVER
-+ help
-+
-+config GRKERNSEC_CONFIG_SERVER
-+ bool "Server"
-+ help
-+ Choose this option if you plan to use this kernel on a server.
-+
-+config GRKERNSEC_CONFIG_DESKTOP
-+ bool "Desktop"
-+ help
-+ Choose this option if you plan to use this kernel on a desktop.
-+
-+endchoice
-+
-+choice
-+ prompt "Virtualization Type"
-+ depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO)
-+ default GRKERNSEC_CONFIG_VIRT_NONE
-+ help
-+
-+config GRKERNSEC_CONFIG_VIRT_NONE
-+ bool "None"
-+ help
-+ Choose this option if this kernel will be run on bare metal.
-+
-+config GRKERNSEC_CONFIG_VIRT_GUEST
-+ bool "Guest"
-+ help
-+ Choose this option if this kernel will be run as a VM guest.
-+
-+config GRKERNSEC_CONFIG_VIRT_HOST
-+ bool "Host"
-+ help
-+ Choose this option if this kernel will be run as a VM host.
-+
-+endchoice
-+
-+choice
-+ prompt "Virtualization Hardware"
-+ depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
-+ help
-+
-+config GRKERNSEC_CONFIG_VIRT_EPT
-+ bool "EPT/RVI Processor Support"
-+ depends on X86
-+ help
-+ Choose this option if your CPU supports the EPT or RVI features of 2nd-gen
-+ hardware virtualization. This allows for additional kernel hardening protections
-+ to operate without additional performance impact.
-+
-+ To see if your Intel processor supports EPT, see:
-+ http://ark.intel.com/Products/VirtualizationTechnology
-+ (Most Core i3/5/7 support EPT)
-+
-+ To see if your AMD processor supports RVI, see:
-+ http://support.amd.com/us/kbarticles/Pages/GPU120AMDRVICPUsHyperVWin8.aspx
-+
-+config GRKERNSEC_CONFIG_VIRT_SOFT
-+ bool "First-gen/No Hardware Virtualization"
-+ help
-+ Choose this option if you use an Atom/Pentium/Core 2 processor that either doesn't
-+ support hardware virtualization or doesn't support the EPT/RVI extensions.
-+
-+endchoice
-+
-+choice
-+ prompt "Virtualization Software"
-+ depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
-+ help
-+
-+config GRKERNSEC_CONFIG_VIRT_XEN
-+ bool "Xen"
-+ help
-+ Choose this option if this kernel is running as a Xen guest or host.
-+
-+config GRKERNSEC_CONFIG_VIRT_VMWARE
-+ bool "VMWare"
-+ help
-+ Choose this option if this kernel is running as a VMWare guest or host.
-+
-+config GRKERNSEC_CONFIG_VIRT_KVM
-+ bool "KVM"
-+ help
-+ Choose this option if this kernel is running as a KVM guest or host.
-+
-+config GRKERNSEC_CONFIG_VIRT_VIRTUALBOX
-+ bool "VirtualBox"
-+ help
-+ Choose this option if this kernel is running as a VirtualBox guest or host.
-+
-+config GRKERNSEC_CONFIG_VIRT_HYPERV
-+ bool "Hyper-V"
-+ help
-+ Choose this option if this kernel is running as a Hyper-V guest.
-+
-+endchoice
-+
-+choice
-+ prompt "Required Priorities"
-+ depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
-+ default GRKERNSEC_CONFIG_PRIORITY_PERF
-+ help
-+
-+config GRKERNSEC_CONFIG_PRIORITY_PERF
-+ bool "Performance"
-+ help
-+ Choose this option if performance is of highest priority for this deployment
-+ of grsecurity. Features like UDEREF on a 64bit kernel, kernel stack clearing,
-+ clearing of structures intended for userland, and freed memory sanitizing will
-+ be disabled.
-+
-+config GRKERNSEC_CONFIG_PRIORITY_SECURITY
-+ bool "Security"
-+ help
-+ Choose this option if security is of highest priority for this deployment of
-+ grsecurity. UDEREF, kernel stack clearing, clearing of structures intended
-+ for userland, and freed memory sanitizing will be enabled for this kernel.
-+ In a worst-case scenario, these features can introduce a 20% performance hit
-+ (UDEREF on x64 contributing half of this hit).
-+
-+endchoice
-+
-+menu "Default Special Groups"
-+depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
-+
-+config GRKERNSEC_PROC_GID
-+ int "GID exempted from /proc restrictions"
-+ default 1001
-+ help
-+ Setting this GID determines which group will be exempted from
-+ grsecurity's /proc restrictions, allowing users of the specified
-+ group to view network statistics and the existence of other users'
-+ processes on the system. This GID may also be chosen at boot time
-+ via "grsec_proc_gid=" on the kernel commandline.
-+
-+config GRKERNSEC_TPE_UNTRUSTED_GID
-+ int "GID for TPE-untrusted users"
-+ depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
-+ default 1005
-+ help
-+ Setting this GID determines which group untrusted users should
-+ be added to. These users will be placed under grsecurity's Trusted Path
-+ Execution mechanism, preventing them from executing their own binaries.
-+ The users will only be able to execute binaries in directories owned and
-+ writable only by the root user. If the sysctl option is enabled, a sysctl
-+ option with name "tpe_gid" is created.
-+
-+config GRKERNSEC_TPE_TRUSTED_GID
-+ int "GID for TPE-trusted users"
-+ depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
-+ default 1005
-+ help
-+ Setting this GID determines what group TPE restrictions will be
-+ *disabled* for. If the sysctl option is enabled, a sysctl option
-+ with name "tpe_gid" is created.
-+
-+config GRKERNSEC_SYMLINKOWN_GID
-+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
-+ depends on GRKERNSEC_CONFIG_SERVER
-+ default 1006
-+ help
-+ Setting this GID determines what group kernel-enforced
-+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
-+ is enabled, a sysctl option with name "symlinkown_gid" is created.
-+
-+
-+endmenu
-+
-+menu "Customize Configuration"
-+depends on GRKERNSEC
-+
-+menu "PaX"
-+
-+config PAX
-+ bool "Enable various PaX features"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
-+ help
-+ This allows you to enable various PaX features. PaX adds
-+ intrusion prevention mechanisms to the kernel that reduce
-+ the risks posed by exploitable memory corruption bugs.
-+
-+menu "PaX Control"
-+ depends on PAX
-+
-+config PAX_SOFTMODE
-+ bool 'Support soft mode'
-+ help
-+ Enabling this option will allow you to run PaX in soft mode, that
-+ is, PaX features will not be enforced by default, only on executables
-+ marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
-+ support as they are the only way to mark executables for soft mode use.
-+
-+ Soft mode can be activated by using the "pax_softmode=1" kernel command
-+ line option on boot. Furthermore you can control various PaX features
-+ at runtime via the entries in /proc/sys/kernel/pax.
-+
-+config PAX_EI_PAX
-+ bool 'Use legacy ELF header marking'
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ help
-+ Enabling this option will allow you to control PaX features on
-+ a per executable basis via the 'chpax' utility available at
-+ http://pax.grsecurity.net/. The control flags will be read from
-+ an otherwise reserved part of the ELF header. This marking has
-+ numerous drawbacks (no support for soft-mode, toolchain does not
-+ know about the non-standard use of the ELF header) therefore it
-+ has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
-+ support.
-+
-+ Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
-+ support as well, they will override the legacy EI_PAX marks.
-+
-+ If you enable none of the marking options then all applications
-+ will run with PaX enabled on them by default.
-+
-+config PAX_PT_PAX_FLAGS
-+ bool 'Use ELF program header marking'
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ help
-+ Enabling this option will allow you to control PaX features on
-+ a per executable basis via the 'paxctl' utility available at
-+ http://pax.grsecurity.net/. The control flags will be read from
-+ a PaX specific ELF program header (PT_PAX_FLAGS). This marking
-+ has the benefits of supporting both soft mode and being fully
-+ integrated into the toolchain (the binutils patch is available
-+ from http://pax.grsecurity.net).
-+
-+ Note that if you enable the legacy EI_PAX marking support as well,
-+ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
-+
-+ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
-+ must make sure that the marks are the same if a binary has both marks.
-+
-+ If you enable none of the marking options then all applications
-+ will run with PaX enabled on them by default.
-+
-+config PAX_XATTR_PAX_FLAGS
-+ bool 'Use filesystem extended attributes marking'
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ select CIFS_XATTR if CIFS
-+ select EXT2_FS_XATTR if EXT2_FS
-+ select EXT3_FS_XATTR if EXT3_FS
-+ select JFFS2_FS_XATTR if JFFS2_FS
-+ select REISERFS_FS_XATTR if REISERFS_FS
-+ select SQUASHFS_XATTR if SQUASHFS
-+ select TMPFS_XATTR if TMPFS
-+ select UBIFS_FS_XATTR if UBIFS_FS
-+ help
-+ Enabling this option will allow you to control PaX features on
-+ a per executable basis via the 'setfattr' utility. The control
-+ flags will be read from the user.pax.flags extended attribute of
-+ the file. This marking has the benefit of supporting binary-only
-+ applications that self-check themselves (e.g., skype) and would
-+ not tolerate chpax/paxctl changes. The main drawback is that
-+ extended attributes are not supported by some filesystems (e.g.,
-+ isofs, udf, vfat) so copying files through such filesystems will
-+ lose the extended attributes and these PaX markings.
-+
-+ Note that if you enable the legacy EI_PAX marking support as well,
-+ the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
-+
-+ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
-+ must make sure that the marks are the same if a binary has both marks.
-+
-+ If you enable none of the marking options then all applications
-+ will run with PaX enabled on them by default.
-+
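A minimal marking sketch, assuming the setfattr/getfattr tools from the attr package; the target path and flag letter below are purely illustrative and the actual letters should be taken from the PaX documentation rather than from this example:

    # illustrative only: mark a single binary via its user.pax.flags xattr
    setfattr -n user.pax.flags -v "m" /usr/local/bin/example-jit
    # inspect the mark
    getfattr -n user.pax.flags /usr/local/bin/example-jit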
-+choice
-+ prompt 'MAC system integration'
-+ default PAX_HAVE_ACL_FLAGS
-+ help
-+ Mandatory Access Control systems have the option of controlling
-+ PaX flags on a per executable basis, choose the method supported
-+ by your particular system.
-+
-+ - "none": if your MAC system does not interact with PaX,
-+ - "direct": if your MAC system defines pax_set_initial_flags() itself,
-+ - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
-+
-+ NOTE: this option is for developers/integrators only.
-+
-+ config PAX_NO_ACL_FLAGS
-+ bool 'none'
-+
-+ config PAX_HAVE_ACL_FLAGS
-+ bool 'direct'
-+
-+ config PAX_HOOK_ACL_FLAGS
-+ bool 'hook'
-+endchoice
-+
-+endmenu
-+
-+menu "Non-executable pages"
-+ depends on PAX
-+
-+config PAX_NOEXEC
-+ bool "Enforce non-executable pages"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
-+ help
-+ By design some architectures do not allow for protecting memory
-+ pages against execution or even if they do, Linux does not make
-+ use of this feature. In practice this means that if a page is
-+ readable (such as the stack or heap) it is also executable.
-+
-+ There is a well known exploit technique that makes use of this
-+ fact and a common programming mistake where an attacker can
-+ introduce code of his choice somewhere in the attacked program's
-+ memory (typically the stack or the heap) and then execute it.
-+
-+ If the attacked program was running with different (typically
-+ higher) privileges than that of the attacker, then he can elevate
-+ his own privilege level (e.g. get a root shell, write to files that
-+ he does not otherwise have write access to, etc).
-+
-+ Enabling this option will let you choose from various features
-+ that prevent the injection and execution of 'foreign' code in
-+ a program.
-+
-+ This will also break programs that rely on the old behaviour and
-+ expect that dynamically allocated memory via the malloc() family
-+ of functions is executable (which it is not). Notable examples
-+ are the XFree86 4.x server, the java runtime and wine.
-+
-+config PAX_PAGEEXEC
-+ bool "Paging based non-executable pages"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
-+ select ARCH_TRACK_EXEC_LIMIT if X86_32
-+ help
-+ This implementation is based on the paging feature of the CPU.
-+ On i386 without hardware non-executable bit support there is a
-+ variable but usually low performance impact, however on Intel's
-+ P4 core based CPUs it is very high so you should not enable this
-+ for kernels meant to be used on such CPUs.
-+
-+ On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
-+ with hardware non-executable bit support there is no performance
-+ impact, on ppc the impact is negligible.
-+
-+ Note that several architectures require various emulations due to
-+ badly designed userland ABIs, this will cause a performance impact
-+ but will disappear as soon as userland is fixed. For example, ppc
-+ userland MUST have been built with secure-plt by a recent toolchain.
-+
-+config PAX_SEGMEXEC
-+ bool "Segmentation based non-executable pages"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on PAX_NOEXEC && X86_32
-+ help
-+ This implementation is based on the segmentation feature of the
-+ CPU and has a very small performance impact, however applications
-+ will be limited to a 1.5 GB address space instead of the normal
-+ 3 GB.
-+
-+config PAX_EMUTRAMP
-+ bool "Emulate trampolines"
-+ default y if PARISC || GRKERNSEC_CONFIG_AUTO
-+ depends on (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
-+ help
-+ There are some programs and libraries that for one reason or
-+ another attempt to execute special small code snippets from
-+ non-executable memory pages. Most notable examples are the
-+ signal handler return code generated by the kernel itself and
-+ the GCC trampolines.
-+
-+ If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
-+ such programs will no longer work under your kernel.
-+
-+ As a remedy you can say Y here and use the 'chpax' or 'paxctl'
-+ utilities to enable trampoline emulation for the affected programs
-+ yet still have the protection provided by the non-executable pages.
-+
-+ On parisc you MUST enable this option and EMUSIGRT as well, otherwise
-+ your system will not even boot.
-+
-+ Alternatively you can say N here and use the 'chpax' or 'paxctl'
-+ utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
-+ for the affected files.
-+
-+ NOTE: enabling this feature *may* open up a loophole in the
-+ protection provided by non-executable pages that an attacker
-+ could abuse. Therefore the best solution is to not have any
-+ files on your system that would require this option. This can
-+ be achieved by not using libc5 (which relies on the kernel
-+ signal handler return code) and not using or rewriting programs
-+ that make use of the nested function implementation of GCC.
-+ Skilled users can just fix GCC itself so that it implements
-+ nested function calls in a way that does not interfere with PaX.
-+
-+config PAX_EMUSIGRT
-+ bool "Automatically emulate sigreturn trampolines"
-+ depends on PAX_EMUTRAMP && PARISC
-+ default y
-+ help
-+ Enabling this option will have the kernel automatically detect
-+ and emulate signal return trampolines executing on the stack
-+ that would otherwise lead to task termination.
-+
-+ This solution is intended as a temporary one for users with
-+ legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
-+ Modula-3 runtime, etc) or executables linked to such, basically
-+ everything that does not specify its own SA_RESTORER function in
-+ normal executable memory like glibc 2.1+ does.
-+
-+ On parisc you MUST enable this option, otherwise your system will
-+ not even boot.
-+
-+ NOTE: this feature cannot be disabled on a per executable basis
-+ and since it *does* open up a loophole in the protection provided
-+ by non-executable pages, the best solution is to not have any
-+ files on your system that would require this option.
-+
-+config PAX_MPROTECT
-+ bool "Restrict mprotect()"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
-+ help
-+ Enabling this option will prevent programs from
-+ - changing the executable status of memory pages that were
-+ not originally created as executable,
-+ - making read-only executable pages writable again,
-+ - creating executable pages from anonymous memory,
-+ - making read-only-after-relocations (RELRO) data pages writable again.
-+
-+ You should say Y here to complete the protection provided by
-+ the enforcement of non-executable pages.
-+
-+ NOTE: you can use the 'chpax' or 'paxctl' utilities to control
-+ this feature on a per file basis.
-+
-+config PAX_MPROTECT_COMPAT
-+ bool "Use legacy/compat protection demoting (read help)"
-+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
-+ depends on PAX_MPROTECT
-+ help
-+ The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
-+ by sending the proper error code to the application. For some broken
-+ userland, this can cause problems with Python or other applications. The
-+ current implementation, however, allows applications like clamav to
-+ detect whether JIT compilation/execution is allowed and to fall back
-+ gracefully to an interpreter-based mode if it is not.
-+ to use the current implementation as-is and push upstream to fix broken
-+ userland (note that the RWX logging option can assist with this), in some
-+ environments this may not be possible. Having to disable MPROTECT
-+ completely on certain binaries reduces the security benefit of PaX,
-+ so this option is provided for those environments to revert to the old
-+ behavior.
-+
-+config PAX_ELFRELOCS
-+ bool "Allow ELF text relocations (read help)"
-+ depends on PAX_MPROTECT
-+ default n
-+ help
-+ Non-executable pages and mprotect() restrictions are effective
-+ in preventing the introduction of new executable code into an
-+ attacked task's address space. There remain only two venues
-+ for this kind of attack: if the attacker can execute already
-+ existing code in the attacked task then he can either have it
-+ create and mmap() a file containing his code or have it mmap()
-+ an already existing ELF library that does not have position
-+ independent code in it and use mprotect() on it to make it
-+ writable and copy his code there. While protecting against
-+ the former approach is beyond PaX, the latter can be prevented
-+ by having only PIC ELF libraries on one's system (which do not
-+ need to relocate their code). If you are sure this is your case,
-+ as is the case with all modern Linux distributions, then leave
-+ this option disabled. You should say 'n' here.
-+
-+config PAX_ETEXECRELOCS
-+ bool "Allow ELF ET_EXEC text relocations"
-+ depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
-+ select PAX_ELFRELOCS
-+ default y
-+ help
-+ On some architectures there are incorrectly created applications
-+ that require text relocations and would not work without enabling
-+ this option. If you are an alpha, ia64 or parisc user, you should
-+ enable this option and disable it once you have made sure that
-+ none of your applications need it.
-+
-+config PAX_EMUPLT
-+ bool "Automatically emulate ELF PLT"
-+ depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
-+ default y
-+ help
-+ Enabling this option will have the kernel automatically detect
-+ and emulate the Procedure Linkage Table entries in ELF files.
-+ On some architectures such entries are in writable memory, and
-+ become non-executable leading to task termination. Therefore
-+ it is mandatory that you enable this option on alpha, parisc,
-+ sparc and sparc64, otherwise your system would not even boot.
-+
-+ NOTE: this feature *does* open up a loophole in the protection
-+ provided by the non-executable pages, therefore the proper
-+ solution is to modify the toolchain to produce a PLT that does
-+ not need to be writable.
-+
-+config PAX_DLRESOLVE
-+ bool 'Emulate old glibc resolver stub'
-+ depends on PAX_EMUPLT && SPARC
-+ default n
-+ help
-+ This option is needed if userland has an old glibc (before 2.4)
-+ that puts a 'save' instruction into the runtime generated resolver
-+ stub that needs special emulation.
-+
-+config PAX_KERNEXEC
-+ bool "Enforce non-executable kernel pages"
-+ default y if GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_NONE || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_GUEST) || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_KVM))
-+ depends on X86 && !XEN && (!X86_32 || X86_WP_WORKS_OK)
-+ select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
-+ select PAX_KERNEXEC_PLUGIN if X86_64
-+ help
-+ This is the kernel land equivalent of PAGEEXEC and MPROTECT,
-+ that is, enabling this option will make it harder to inject
-+ and execute 'foreign' code in kernel memory itself.
-+
-+ Note that on amd64 with CONFIG_EFI enabled, the physical map
-+ is required to be RWX if EFI runtime services are active.
-+ Newer kernels do not have this limitation.
-+
-+ Likewise, if EFI runtime services are active, the memory for
-+ the services is mapped RWX.
-+
-+ If your system is EFI-enabled, it is thus strongly recommended
-+ that you boot with "noefi" on the kernel command-line if
-+ possible.
-+
-+choice
-+ prompt "Return Address Instrumentation Method"
-+ default PAX_KERNEXEC_PLUGIN_METHOD_BTS
-+ depends on PAX_KERNEXEC_PLUGIN
-+ help
-+ Select the method used to instrument function pointer dereferences.
-+ Note that binary modules cannot be instrumented by this approach.
-+
-+ Note that the implementation requires a gcc with plugin support,
-+ i.e., gcc 4.5 or newer. You may need to install the supporting
-+ headers explicitly in addition to the normal gcc package.
-+
-+ config PAX_KERNEXEC_PLUGIN_METHOD_BTS
-+ bool "bts"
-+ help
-+ This method is compatible with binary only modules but has
-+ a higher runtime overhead.
-+
-+ config PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ bool "or"
-+ depends on !PARAVIRT
-+ help
-+ This method is incompatible with binary only modules but has
-+ a lower runtime overhead.
-+endchoice
-+
-+config PAX_KERNEXEC_PLUGIN_METHOD
-+ string
-+ default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
-+ default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ default ""
-+
-+config PAX_KERNEXEC_MODULE_TEXT
-+ int "Minimum amount of memory reserved for module code"
-+ default "8" if (!GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_SERVER)
-+ default "12" if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
-+ depends on PAX_KERNEXEC && X86_32
-+ help
-+ Due to implementation details the kernel must reserve a fixed
-+ amount of memory for runtime allocated code (such as modules)
-+ at compile time that cannot be changed at runtime. Here you
-+ can specify the minimum amount in MB that will be reserved.
-+ Due to the same implementation details this size will always
-+ be rounded up to the next 2/4 MB boundary (depends on PAE) so
-+ the actually available memory for runtime allocated code will
-+ usually be more than this minimum.
-+
-+ The default should be enough for most users, but if you have
-+ an excessive number of modules (e.g., most distribution configs
-+ compile many drivers as modules) or use huge modules such as
-+ nvidia's kernel driver, you will need to adjust this amount.
-+ A good rule of thumb is to look at your currently loaded kernel
-+ modules and add up their sizes.
-+
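One rough way to follow the sizing advice above is to sum the loaded module sizes reported by lsmod, for example:

    # lsmod reports module sizes in bytes in the second column
    lsmod | awk 'NR > 1 { sum += $2 } END { printf "%.1f MB\n", sum / 1024 / 1024 }'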
-+endmenu
-+
-+menu "Address Space Layout Randomization"
-+ depends on PAX
-+
-+config PAX_ASLR
-+ bool "Address Space Layout Randomization"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ help
-+ Many if not most exploit techniques rely on the knowledge of
-+ certain addresses in the attacked program. The following options
-+ will allow the kernel to apply a certain amount of randomization
-+ to specific parts of the program thereby forcing an attacker to
-+ guess them in most cases. Any failed guess will most likely crash
-+ the attacked program which allows the kernel to detect such attempts
-+ and react on them. PaX itself provides no reaction mechanisms,
-+ instead it is strongly encouraged that you make use of grsecurity's
-+ (http://www.grsecurity.net/) built-in crash detection features or
-+ develop one yourself.
-+
-+ By saying Y here you can choose to randomize the following areas:
-+ - top of the task's kernel stack
-+ - top of the task's userland stack
-+ - base address for mmap() requests that do not specify one
-+ (this includes all libraries)
-+ - base address of the main executable
-+
-+ It is strongly recommended to say Y here as address space layout
-+ randomization has negligible impact on performance yet it provides
-+ a very effective protection.
-+
-+ NOTE: you can use the 'chpax' or 'paxctl' utilities to control
-+ this feature on a per file basis.
-+
-+config PAX_RANDKSTACK
-+ bool "Randomize kernel stack base"
-+ default y if GRKERNSEC_CONFIG_AUTO && !(GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_VIRTUALBOX)
-+ depends on X86_TSC && X86
-+ help
-+ By saying Y here the kernel will randomize every task's kernel
-+ stack on every system call. This will not only force an attacker
-+ to guess it but also prevent him from making use of possible
-+ leaked information about it.
-+
-+ Since the kernel stack is a rather scarce resource, randomization
-+ may cause unexpected stack overflows, therefore you should very
-+ carefully test your system. Note that once enabled in the kernel
-+ configuration, this feature cannot be disabled on a per file basis.
-+
-+config PAX_RANDUSTACK
-+ bool
-+
-+config PAX_RANDMMAP
-+ bool "Randomize user stack and mmap() bases"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on PAX_ASLR
-+ select PAX_RANDUSTACK
-+ help
-+ By saying Y here the kernel will randomize every task's userland
-+ stack and use a randomized base address for mmap() requests that
-+ do not specify one themselves.
-+
-+ The stack randomization is done in two steps where the second
-+ one may apply a big amount of shift to the top of the stack and
-+ cause problems for programs that want to use lots of memory (more
-+ than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
-+
-+ As a result of mmap randomization all dynamically loaded libraries
-+ will appear at random addresses and therefore be harder to exploit
-+ by a technique where an attacker attempts to execute library code
-+ for his purposes (e.g. spawn a shell from an exploited program that
-+ is running at an elevated privilege level).
-+
-+ Furthermore, if a program is relinked as a dynamic ELF file, its
-+ base address will be randomized as well, completing the full
-+ randomization of the address space layout. Attacking such programs
-+ becomes a guessing game. You can find an example of doing this at
-+ http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
-+ http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
-+
-+ NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
-+ feature on a per file basis.
-+
-+endmenu
-+
-+menu "Miscellaneous hardening features"
-+
-+config PAX_MEMORY_SANITIZE
-+ bool "Sanitize all freed memory"
-+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
-+ help
-+ By saying Y here the kernel will erase memory pages and slab objects
-+ as soon as they are freed. This in turn reduces the lifetime of data
-+ stored in them, making it less likely that sensitive information such
-+ as passwords, cryptographic secrets, etc stay in memory for too long.
-+
-+ This is especially useful for programs whose runtime is short; long
-+ lived processes and the kernel itself also benefit from this, as long
-+ as they ensure timely freeing of memory that may hold sensitive
-+ information.
-+
-+ A nice side effect of the sanitization of slab objects is the
-+ reduction of possible info leaks caused by padding bytes within the
-+ leaky structures. Use-after-free bugs for structures containing
-+ pointers can also be detected as dereferencing the sanitized pointer
-+ will generate an access violation.
-+
-+ The tradeoff is performance impact: on a single CPU system kernel
-+ compilation sees a 3% slowdown; other systems and workloads may vary,
-+ and you are advised to test this feature on your expected workload
-+ before deploying it.
-+
-+ The slab sanitization feature excludes a few slab caches by default
-+ for performance reasons. To extend the feature to cover those as
-+ well, pass "pax_sanitize_slab=full" as a kernel command line parameter.
-+
-+ To reduce the performance penalty by sanitizing pages only, albeit
-+ limiting the effectiveness of this feature at the same time, slab
-+ sanitization can be disabled with the kernel command line parameter
-+ "pax_sanitize_slab=off".
-+
-+ Note that this feature does not protect data stored in live pages,
-+ e.g., process memory swapped to disk may stay there for a long time.
-+
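As a sketch of the parameters mentioned above (exact bootloader syntax varies), the sanitization scope is widened or narrowed from the kernel command line:

    # append one of these to the kernel command line
    #   pax_sanitize_slab=full   # also sanitize the caches excluded by default
    #   pax_sanitize_slab=off    # sanitize freed pages only, skip slab objects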
-+config PAX_MEMORY_STACKLEAK
-+ bool "Sanitize kernel stack"
-+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
-+ depends on X86
-+ help
-+ By saying Y here the kernel will erase the kernel stack before it
-+ returns from a system call. This in turn reduces the information
-+ that a kernel stack leak bug can reveal.
-+
-+ Note that such a bug can still leak information that was put on
-+ the stack by the current system call (the one eventually triggering
-+ the bug) but traces of earlier system calls on the kernel stack
-+ cannot leak anymore.
-+
-+ The tradeoff is performance impact: on a single CPU system kernel
-+ compilation sees a 1% slowdown; other systems and workloads may vary,
-+ and you are advised to test this feature on your expected workload
-+ before deploying it.
-+
-+ Note that the full feature requires a gcc with plugin support,
-+ i.e., gcc 4.5 or newer. You may need to install the supporting
-+ headers explicitly in addition to the normal gcc package. Using
-+ older gcc versions means that functions with large enough stack
-+ frames may leave uninitialized memory behind that may be exposed
-+ to a later syscall leaking the stack.
-+
-+config PAX_MEMORY_STRUCTLEAK
-+ bool "Forcibly initialize local variables copied to userland"
-+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
-+ help
-+ By saying Y here the kernel will zero initialize some local
-+ variables that are going to be copied to userland. This in
-+ turn prevents unintended information leakage from the kernel
-+ stack should later code forget to explicitly set all parts of
-+ the copied variable.
-+
-+ The tradeoff is less performance impact than PAX_MEMORY_STACKLEAK
-+ at a much smaller coverage.
-+
-+ Note that the implementation requires a gcc with plugin support,
-+ i.e., gcc 4.5 or newer. You may need to install the supporting
-+ headers explicitly in addition to the normal gcc package.
-+
-+config PAX_MEMORY_UDEREF
-+ bool "Prevent invalid userland pointer dereference"
-+ default y if GRKERNSEC_CONFIG_AUTO && (X86_32 || (X86_64 && GRKERNSEC_CONFIG_PRIORITY_SECURITY)) && (GRKERNSEC_CONFIG_VIRT_NONE || GRKERNSEC_CONFIG_VIRT_EPT)
-+ depends on X86 && !UML_X86 && !XEN
-+ select PAX_PER_CPU_PGD if X86_64
-+ help
-+ By saying Y here the kernel will be prevented from dereferencing
-+ userland pointers in contexts where the kernel expects only kernel
-+ pointers. This is both a useful runtime debugging feature and a
-+ security measure that prevents exploiting a class of kernel bugs.
-+
-+ The tradeoff is that some virtualization solutions may experience
-+ a huge slowdown and therefore you should not enable this feature
-+ for kernels meant to run in such environments. Whether a given VM
-+ solution is affected or not is best determined by simply trying it
-+ out, the performance impact will be obvious right on boot as this
-+ mechanism engages from very early on. A good rule of thumb is that
-+ VMs running on CPUs without hardware virtualization support (i.e.,
-+ the majority of IA-32 CPUs) will likely experience the slowdown.
-+
-+config PAX_REFCOUNT
-+ bool "Prevent various kernel object reference counter overflows"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
-+ help
-+ By saying Y here the kernel will detect and prevent overflowing
-+ various (but not all) kinds of object reference counters. Such
-+ overflows can normally occur due to bugs only and are often, if
-+ not always, exploitable.
-+
-+ The tradeoff is that data structures protected by an overflowed
-+ refcount will never be freed and therefore will leak memory. Note
-+ that this leak also happens even without this protection but in
-+ that case the overflow can eventually trigger the freeing of the
-+ data structure while it is still being used elsewhere, resulting
-+ in the exploitable situation that this feature prevents.
-+
-+ Since this has a negligible performance impact, you should enable
-+ this feature.
-+
-+config PAX_USERCOPY
-+ bool "Harden heap object copies between kernel and userland"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on ARM || IA64 || PPC || SPARC || X86
-+ depends on GRKERNSEC && (SLAB || SLUB || SLOB)
-+ select PAX_USERCOPY_SLABS
-+ help
-+ By saying Y here the kernel will enforce the size of heap objects
-+ when they are copied in either direction between the kernel and
-+ userland, even if only a part of the heap object is copied.
-+
-+ Specifically, this checking prevents information leaking from the
-+ kernel heap during kernel to userland copies (if the kernel heap
-+ object is otherwise fully initialized) and prevents kernel heap
-+ overflows during userland to kernel copies.
-+
-+ Note that the current implementation provides the strictest bounds
-+ checks for the SLUB allocator.
-+
-+ Enabling this option also enables per-slab cache protection against
-+ data in a given cache being copied into/out of via userland
-+ accessors. Though the whitelist of regions will be reduced over
-+ time, it notably protects important data structures like task structs.
-+
-+ If frame pointers are enabled on x86, this option will also restrict
-+ copies into and out of the kernel stack to local variables within a
-+ single frame.
-+
-+ Since this has a negligible performance impact, you should enable
-+ this feature.
-+
-+config PAX_USERCOPY_DEBUG
-+ bool
-+ depends on X86 && PAX_USERCOPY
-+ default n
-+
-+config PAX_CONSTIFY_PLUGIN
-+ bool "Automatically constify eligible structures"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on !UML && PAX_KERNEXEC
-+ help
-+ By saying Y here the compiler will automatically constify a class
-+ of types that contain only function pointers. This reduces the
-+ kernel's attack surface and also produces a better memory layout.
-+
-+ Note that the implementation requires a gcc with plugin support,
-+ i.e., gcc 4.5 or newer. You may need to install the supporting
-+ headers explicitly in addition to the normal gcc package.
-+
-+ Note that if some code really has to modify constified variables
-+ then the source code will have to be patched to allow it. Examples
-+ can be found in PaX itself (the no_const attribute) and for some
-+ out-of-tree modules at http://www.grsecurity.net/~paxguy1/ .
-+
-+config PAX_SIZE_OVERFLOW
-+ bool "Prevent various integer overflows in function size parameters"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on X86
-+ help
-+ By saying Y here the kernel recomputes expressions of function
-+ arguments marked by a size_overflow attribute with double integer
-+ precision (DImode/TImode for 32/64 bit integer types).
-+
-+ The recomputed argument is checked against TYPE_MAX; on overflow an
-+ event is logged and the triggering process is killed.
-+
-+ Homepage: http://www.grsecurity.net/~ephox/overflow_plugin/
-+
-+ Note that the implementation requires a gcc with plugin support,
-+ i.e., gcc 4.5 or newer. You may need to install the supporting
-+ headers explicitly in addition to the normal gcc package.
-+
-+config PAX_LATENT_ENTROPY
-+ bool "Generate some entropy during boot and runtime"
-+ default y if GRKERNSEC_CONFIG_AUTO
-+ help
-+ By saying Y here the kernel will instrument some kernel code to
-+ extract some entropy from both original and artificially created
-+ program state. This especially helps embedded systems, where
-+ there is normally little 'natural' source of entropy. The cost
-+ is some slowdown of the boot process and fork and irq processing.
-+
-+ When pax_extra_latent_entropy is passed on the kernel command line,
-+ entropy will be extracted from up to the first 4GB of RAM while the
-+ runtime memory allocator is being initialized. This costs even more
-+ slowdown of the boot process.
-+
-+ Note that the implementation requires a gcc with plugin support,
-+ i.e., gcc 4.5 or newer. You may need to install the supporting
-+ headers explicitly in addition to the normal gcc package.
-+
-+ Note that entropy extracted this way is not cryptographically
-+ secure!
-+
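Correspondingly, the extra boot-time extraction described above is requested via the kernel command line, at the cost of additional boot-time slowdown:

    # append to the kernel command line
    #   pax_extra_latent_entropy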
-+endmenu
-+
-+endmenu
-+
-+source grsecurity/Kconfig
-+
-+endmenu
-+
-+endmenu
-+
- config KEYS
- bool "Enable access key retention support"
- help
-@@ -169,7 +1140,7 @@ config INTEL_TXT
- config LSM_MMAP_MIN_ADDR
- int "Low address space for LSM to protect from user allocation"
- depends on SECURITY && SECURITY_SELINUX
-- default 32768 if ARM
-+ default 32768 if ALPHA || ARM || PARISC || SPARC32
- default 65536
- help
- This is the portion of low virtual memory which should be protected
-diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig
-index 9b9013b..51ebf96 100644
---- a/security/apparmor/Kconfig
-+++ b/security/apparmor/Kconfig
-@@ -29,3 +29,12 @@ config SECURITY_APPARMOR_BOOTPARAM_VALUE
- boot.
-
- If you are unsure how to answer this question, answer 1.
-+
-+config SECURITY_APPARMOR_COMPAT_24
-+ bool "Enable AppArmor 2.4 compatability"
-+ depends on SECURITY_APPARMOR
-+ default y
-+ help
-+ This option enables compatibility with AppArmor 2.4. It is
-+ recommended if compatibility with older versions of AppArmor
-+ is desired.
-diff --git a/security/apparmor/Makefile b/security/apparmor/Makefile
-index 2dafe50..0bb604b 100644
---- a/security/apparmor/Makefile
-+++ b/security/apparmor/Makefile
-@@ -4,9 +4,10 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor.o
-
- apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \
- path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \
-- resource.o sid.o file.o
-+ resource.o sid.o file.o net.o
-+apparmor-$(CONFIG_SECURITY_APPARMOR_COMPAT_24) += apparmorfs-24.o
-
--clean-files := capability_names.h rlim_names.h
-+clean-files := capability_names.h rlim_names.h af_names.h
-
-
- # Build a lower case string table of capability names
-@@ -44,9 +45,24 @@ cmd_make-rlim = echo "static const char *rlim_names[] = {" > $@ ;\
- sed -r -n "s/^\# ?define[ \t]+(RLIMIT_[A-Z0-9_]+).*/\1,/p" $< >> $@ ;\
- echo "};" >> $@
-
-+# Build a lower case string table of address family names.
-+# Transform lines from
-+# #define AF_INET 2 /* Internet IP Protocol */
-+# to
-+# [2] = "inet",
-+quiet_cmd_make-af = GEN $@
-+cmd_make-af = echo "static const char *address_family_names[] = {" > $@ ;\
-+ sed $< >> $@ -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e \
-+ 's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+).*/[\2] = "\L\1",/p';\
-+ echo "};" >> $@
-+
-+
- $(obj)/capability.o : $(obj)/capability_names.h
- $(obj)/resource.o : $(obj)/rlim_names.h
-+$(obj)/net.o : $(obj)/af_names.h
- $(obj)/capability_names.h : $(srctree)/include/linux/capability.h
- $(call cmd,make-caps)
- $(obj)/rlim_names.h : $(srctree)/include/asm-generic/resource.h
- $(call cmd,make-rlim)
-+$(obj)/af_names.h : $(srctree)/include/linux/socket.h
-+ $(call cmd,make-af)
-\ No newline at end of file
-diff --git a/security/apparmor/apparmorfs-24.c b/security/apparmor/apparmorfs-24.c
-new file mode 100644
-index 0000000..dc8c744
---- /dev/null
-+++ b/security/apparmor/apparmorfs-24.c
-@@ -0,0 +1,287 @@
-+/*
-+ * AppArmor security module
-+ *
-+ * This file contains AppArmor /sys/kernel/security/apparmor interface functions
-+ *
-+ * Copyright (C) 1998-2008 Novell/SUSE
-+ * Copyright 2009-2010 Canonical Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation, version 2 of the
-+ * License.
-+ *
-+ *
-+ * This file contains functions providing an interface for <= AppArmor 2.4
-+ * compatibility. It is dependent on CONFIG_SECURITY_APPARMOR_COMPAT_24
-+ * being set (see Makefile).
-+ */
-+
-+#include <linux/security.h>
-+#include <linux/vmalloc.h>
-+#include <linux/module.h>
-+#include <linux/seq_file.h>
-+#include <linux/uaccess.h>
-+#include <linux/namei.h>
-+
-+#include "include/apparmor.h"
-+#include "include/audit.h"
-+#include "include/context.h"
-+#include "include/policy.h"
-+
-+
-+/* apparmor/matching */
-+static ssize_t aa_matching_read(struct file *file, char __user *buf,
-+ size_t size, loff_t *ppos)
-+{
-+ const char matching[] = "pattern=aadfa audit perms=crwxamlk/ "
-+ "user::other";
-+
-+ return simple_read_from_buffer(buf, size, ppos, matching,
-+ sizeof(matching) - 1);
-+}
-+
-+const struct file_operations aa_fs_matching_fops = {
-+ .read = aa_matching_read,
-+};
-+
-+/* apparmor/features */
-+static ssize_t aa_features_read(struct file *file, char __user *buf,
-+ size_t size, loff_t *ppos)
-+{
-+ const char features[] = "file=3.1 capability=2.0 network=1.0 "
-+ "change_hat=1.5 change_profile=1.1 " "aanamespaces=1.1 rlimit=1.1";
-+
-+ return simple_read_from_buffer(buf, size, ppos, features,
-+ sizeof(features) - 1);
-+}
-+
-+const struct file_operations aa_fs_features_fops = {
-+ .read = aa_features_read,
-+};
-+
-+/**
-+ * __next_namespace - find the next namespace to list
-+ * @root: root namespace to stop search at (NOT NULL)
-+ * @ns: current ns position (NOT NULL)
-+ *
-+ * Find the next namespace from @ns under @root and handle all locking needed
-+ * while switching current namespace.
-+ *
-+ * Returns: next namespace or NULL if at last namespace under @root
-+ * NOTE: will not unlock root->lock
-+ */
-+static struct aa_namespace *__next_namespace(struct aa_namespace *root,
-+ struct aa_namespace *ns)
-+{
-+ struct aa_namespace *parent;
-+
-+ /* is next namespace a child */
-+ if (!list_empty(&ns->sub_ns)) {
-+ struct aa_namespace *next;
-+ next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list);
-+ read_lock(&next->lock);
-+ return next;
-+ }
-+
-+ /* check if the next ns is a sibling, parent, gp, .. */
-+ parent = ns->parent;
-+ while (parent) {
-+ read_unlock(&ns->lock);
-+ list_for_each_entry_continue(ns, &parent->sub_ns, base.list) {
-+ read_lock(&ns->lock);
-+ return ns;
-+ }
-+ if (parent == root)
-+ return NULL;
-+ ns = parent;
-+ parent = parent->parent;
-+ }
-+
-+ return NULL;
-+}
-+
-+/**
-+ * __first_profile - find the first profile in a namespace
-+ * @root: namespace that is root of profiles being displayed (NOT NULL)
-+ * @ns: namespace to start in (NOT NULL)
-+ *
-+ * Returns: unrefcounted profile or NULL if no profile
-+ */
-+static struct aa_profile *__first_profile(struct aa_namespace *root,
-+ struct aa_namespace *ns)
-+{
-+ for ( ; ns; ns = __next_namespace(root, ns)) {
-+ if (!list_empty(&ns->base.profiles))
-+ return list_first_entry(&ns->base.profiles,
-+ struct aa_profile, base.list);
-+ }
-+ return NULL;
-+}
-+
-+/**
-+ * __next_profile - step to the next profile in a profile tree
-+ * @profile: current profile in tree (NOT NULL)
-+ *
-+ * Perform a depth first traversal on the profile tree in a namespace
-+ *
-+ * Returns: next profile or NULL if done
-+ * Requires: profile->ns.lock to be held
-+ */
-+static struct aa_profile *__next_profile(struct aa_profile *p)
-+{
-+ struct aa_profile *parent;
-+ struct aa_namespace *ns = p->ns;
-+
-+ /* is next profile a child */
-+ if (!list_empty(&p->base.profiles))
-+ return list_first_entry(&p->base.profiles, typeof(*p),
-+ base.list);
-+
-+ /* is next profile a sibling, parent sibling, gp sibling, .. */
-+ parent = p->parent;
-+ while (parent) {
-+ list_for_each_entry_continue(p, &parent->base.profiles,
-+ base.list)
-+ return p;
-+ p = parent;
-+ parent = parent->parent;
-+ }
-+
-+ /* is next another profile in the namespace */
-+ list_for_each_entry_continue(p, &ns->base.profiles, base.list)
-+ return p;
-+
-+ return NULL;
-+}
-+
-+/**
-+ * next_profile - step to the next profile wherever it may be
-+ * @root: root namespace (NOT NULL)
-+ * @profile: current profile (NOT NULL)
-+ *
-+ * Returns: next profile or NULL if there isn't one
-+ */
-+static struct aa_profile *next_profile(struct aa_namespace *root,
-+ struct aa_profile *profile)
-+{
-+ struct aa_profile *next = __next_profile(profile);
-+ if (next)
-+ return next;
-+
-+ /* finished all profiles in namespace move to next namespace */
-+ return __first_profile(root, __next_namespace(root, profile->ns));
-+}
-+
-+/**
-+ * p_start - start a depth first traversal of profile tree
-+ * @f: seq_file to fill
-+ * @pos: current position
-+ *
-+ * Returns: first profile under current namespace or NULL if none found
-+ *
-+ * acquires first ns->lock
-+ */
-+static void *p_start(struct seq_file *f, loff_t *pos)
-+ __acquires(root->lock)
-+{
-+ struct aa_profile *profile = NULL;
-+ struct aa_namespace *root = aa_current_profile()->ns;
-+ loff_t l = *pos;
-+ f->private = aa_get_namespace(root);
-+
-+
-+ /* find the first profile */
-+ read_lock(&root->lock);
-+ profile = __first_profile(root, root);
-+
-+ /* skip to position */
-+ for (; profile && l > 0; l--)
-+ profile = next_profile(root, profile);
-+
-+ return profile;
-+}
-+
-+/**
-+ * p_next - read the next profile entry
-+ * @f: seq_file to fill
-+ * @p: profile previously returned
-+ * @pos: current position
-+ *
-+ * Returns: next profile after @p or NULL if none
-+ *
-+ * may acquire/release locks in namespace tree as necessary
-+ */
-+static void *p_next(struct seq_file *f, void *p, loff_t *pos)
-+{
-+ struct aa_profile *profile = p;
-+ struct aa_namespace *root = f->private;
-+ (*pos)++;
-+
-+ return next_profile(root, profile);
-+}
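A brief sketch of how soft mode is typically driven, assuming the sysctl entries mentioned above are available on the running kernel:

    # at boot, append to the kernel command line:
    #   pax_softmode=1
    # at runtime, if /proc/sys/kernel/pax is present:
    cat /proc/sys/kernel/pax/softmode
    echo 1 > /proc/sys/kernel/pax/softmode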
-+
-+/**
-+ * p_stop - stop depth first traversal
-+ * @f: seq_file we are filling
-+ * @p: the last profile written
-+ *
-+ * Release all locking done by p_start/p_next on namespace tree
-+ */
-+static void p_stop(struct seq_file *f, void *p)
-+ __releases(root->lock)
-+{
-+ struct aa_profile *profile = p;
-+ struct aa_namespace *root = f->private, *ns;
-+
-+ if (profile) {
-+ for (ns = profile->ns; ns && ns != root; ns = ns->parent)
-+ read_unlock(&ns->lock);
-+ }
-+ read_unlock(&root->lock);
-+ aa_put_namespace(root);
-+}
-+
-+/**
-+ * seq_show_profile - show a profile entry
-+ * @f: seq_file to fill
-+ * @p: current position (profile) (NOT NULL)
-+ *
-+ * Returns: error on failure
-+ */
-+static int seq_show_profile(struct seq_file *f, void *p)
-+{
-+ struct aa_profile *profile = (struct aa_profile *)p;
-+ struct aa_namespace *root = f->private;
-+
-+ if (profile->ns != root)
-+ seq_printf(f, ":%s://", aa_ns_name(root, profile->ns));
-+ seq_printf(f, "%s (%s)\n", profile->base.hname,
-+ COMPLAIN_MODE(profile) ? "complain" : "enforce");
-+
-+ return 0;
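For illustration, assuming the userland paxctl tool from pax.grsecurity.net, PT_PAX_FLAGS marks are usually inspected and changed along these lines (target path and flags are examples only):

    paxctl -v /usr/bin/example    # view the current PaX marks
    paxctl -c /usr/bin/example    # convert the ELF file to carry a PT_PAX_FLAGS header
    paxctl -m /usr/bin/example    # e.g. disable MPROTECT for just this binary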
-+}
-+
-+static const struct seq_operations aa_fs_profiles_op = {
-+ .start = p_start,
-+ .next = p_next,
-+ .stop = p_stop,
-+ .show = seq_show_profile,
-+};
-+
-+static int profiles_open(struct inode *inode, struct file *file)
-+{
-+ return seq_open(file, &aa_fs_profiles_op);
-+}
-+
-+static int profiles_release(struct inode *inode, struct file *file)
-+{
-+ return seq_release(inode, file);
-+}
-+
-+const struct file_operations aa_fs_profiles_fops = {
-+ .open = profiles_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = profiles_release,
-+};
-diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
-index 69ddb47..be0f0f9 100644
---- a/security/apparmor/apparmorfs.c
-+++ b/security/apparmor/apparmorfs.c
-@@ -187,7 +187,11 @@ void __init aa_destroy_aafs(void)
- aafs_remove(".remove");
- aafs_remove(".replace");
- aafs_remove(".load");
--
-+#ifdef CONFIG_SECURITY_APPARMOR_COMPAT_24
-+ aafs_remove("profiles");
-+ aafs_remove("matching");
-+ aafs_remove("features");
-+#endif
- securityfs_remove(aa_fs_dentry);
- aa_fs_dentry = NULL;
- }
-@@ -218,7 +222,17 @@ static int __init aa_create_aafs(void)
- aa_fs_dentry = NULL;
- goto error;
- }
--
-+#ifdef CONFIG_SECURITY_APPARMOR_COMPAT_24
-+ error = aafs_create("matching", 0444, &aa_fs_matching_fops);
-+ if (error)
-+ goto error;
-+ error = aafs_create("features", 0444, &aa_fs_features_fops);
-+ if (error)
-+ goto error;
-+ error = aafs_create("profiles", 0440, &aa_fs_profiles_fops);
-+ if (error)
-+ goto error;
-+#endif
- error = aafs_create(".load", 0640, &aa_fs_profile_load);
- if (error)
- goto error;
-diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
-index c1e18ba..7316d77 100644
---- a/security/apparmor/domain.c
-+++ b/security/apparmor/domain.c
-@@ -395,6 +395,11 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
- new_profile = find_attach(ns, &ns->base.profiles, name);
- if (!new_profile)
- goto cleanup;
-+ /*
-+ * NOTE: Domain transitions from unconfined are allowed
-+ * even when no_new_privs is set because this always results
-+ * in a further reduction of permissions.
-+ */
- goto apply;
- }
-
-@@ -455,6 +460,16 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
- /* fail exec */
- error = -EACCES;
-
-+ /*
-+ * Policy has specified a domain transition, if no_new_privs then
-+ * fail the exec.
-+ */
-+ if (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) {
-+ aa_put_profile(new_profile);
-+ error = -EPERM;
-+ goto cleanup;
-+ }
-+
- if (!new_profile)
- goto audit;
-
-@@ -609,6 +624,14 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
- const char *target = NULL, *info = NULL;
- int error = 0;
-
-+ /*
-+ * Fail explicitly requested domain transitions if no_new_privs.
-+ * There is no exception for unconfined as change_hat is not
-+ * available.
-+ */
-+ if (current->no_new_privs)
-+ return -EPERM;
-+
- /* released below */
- cred = get_current_cred();
- cxt = cred->security;
-@@ -750,6 +773,18 @@ int aa_change_profile(const char *ns_name, const char *hname, bool onexec,
- cxt = cred->security;
- profile = aa_cred_profile(cred);
-
-+ /*
-+ * Fail explicitly requested domain transitions if no_new_privs
-+ * and not unconfined.
-+ * Domain transitions from unconfined are allowed even when
-+ * no_new_privs is set because this always results in a reduction
-+ * of permissions.
-+ */
-+ if (current->no_new_privs && !unconfined(profile)) {
-+ put_cred(cred);
-+ return -EPERM;
-+ }
-+
- if (ns_name) {
- /* released below */
- ns = aa_find_namespace(profile->ns, ns_name);
-diff --git a/security/apparmor/file.c b/security/apparmor/file.c
-index 7312db7..faf3f03 100644
---- a/security/apparmor/file.c
-+++ b/security/apparmor/file.c
-@@ -349,8 +349,8 @@ static inline bool xindex_is_subset(u32 link, u32 target)
- int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry,
- struct path *new_dir, struct dentry *new_dentry)
- {
-- struct path link = { new_dir->mnt, new_dentry };
-- struct path target = { new_dir->mnt, old_dentry };
-+ struct path link = { .mnt = new_dir->mnt, .dentry = new_dentry };
-+ struct path target = { .mnt = new_dir->mnt, .dentry = old_dentry };
- struct path_cond cond = {
- old_dentry->d_inode->i_uid,
- old_dentry->d_inode->i_mode
-diff --git a/security/apparmor/include/apparmorfs.h b/security/apparmor/include/apparmorfs.h
-index cb1e93a..14f955c 100644
---- a/security/apparmor/include/apparmorfs.h
-+++ b/security/apparmor/include/apparmorfs.h
-@@ -17,4 +17,10 @@
-
- extern void __init aa_destroy_aafs(void);
-
-+#ifdef CONFIG_SECURITY_APPARMOR_COMPAT_24
-+extern const struct file_operations aa_fs_matching_fops;
-+extern const struct file_operations aa_fs_features_fops;
-+extern const struct file_operations aa_fs_profiles_fops;
-+#endif
-+
- #endif /* __AA_APPARMORFS_H */
-diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h
-new file mode 100644
-index 0000000..3c7d599
---- /dev/null
-+++ b/security/apparmor/include/net.h
-@@ -0,0 +1,40 @@
-+/*
-+ * AppArmor security module
-+ *
-+ * This file contains AppArmor network mediation definitions.
-+ *
-+ * Copyright (C) 1998-2008 Novell/SUSE
-+ * Copyright 2009-2010 Canonical Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation, version 2 of the
-+ * License.
-+ */
-+
-+#ifndef __AA_NET_H
-+#define __AA_NET_H
-+
-+#include <net/sock.h>
-+
-+/* struct aa_net - network confinement data
-+ * @allow: basic network families permissions
-+ * @audit: which network permissions to force audit
-+ * @quiet: which network permissions to quiet rejects
-+ */
-+struct aa_net {
-+ u16 allow[AF_MAX];
-+ u16 audit[AF_MAX];
-+ u16 quiet[AF_MAX];
-+};
-+
-+extern int aa_net_perm(int op, struct aa_profile *profile, u16 family,
-+ int type, int protocol, struct sock *sk);
-+extern int aa_revalidate_sk(int op, struct sock *sk);
-+
-+static inline void aa_free_net_rules(struct aa_net *new)
-+{
-+ /* NOP */
-+}
-+
-+#endif /* __AA_NET_H */
-diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
-index aeda5cf..6776929 100644
---- a/security/apparmor/include/policy.h
-+++ b/security/apparmor/include/policy.h
-@@ -27,6 +27,7 @@
- #include "capability.h"
- #include "domain.h"
- #include "file.h"
-+#include "net.h"
- #include "resource.h"
-
- extern const char *profile_mode_names[];
-@@ -145,6 +146,7 @@ struct aa_namespace {
- * @size: the memory consumed by this profiles rules
- * @file: The set of rules governing basic file access and domain transitions
- * @caps: capabilities for the profile
-+ * @net: network controls for the profile
- * @rlimits: rlimits for the profile
- *
- * The AppArmor profile contains the basic confinement data. Each profile
-@@ -181,6 +183,7 @@ struct aa_profile {
-
- struct aa_file_rules file;
- struct aa_caps caps;
-+ struct aa_net net;
- struct aa_rlimit rlimits;
- };
-
-diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
-index 3783202..4cc8dbf 100644
---- a/security/apparmor/lsm.c
-+++ b/security/apparmor/lsm.c
-@@ -32,6 +32,7 @@
- #include "include/context.h"
- #include "include/file.h"
- #include "include/ipc.h"
-+#include "include/net.h"
- #include "include/path.h"
- #include "include/policy.h"
- #include "include/procattr.h"
-@@ -186,7 +187,7 @@ static int common_perm_dir_dentry(int op, struct path *dir,
- struct dentry *dentry, u32 mask,
- struct path_cond *cond)
- {
-- struct path path = { dir->mnt, dentry };
-+ struct path path = { .mnt = dir->mnt, .dentry = dentry };
-
- return common_perm(op, &path, mask, cond);
- }
-@@ -203,7 +204,7 @@ static int common_perm_dir_dentry(int op, struct path *dir,
- static int common_perm_mnt_dentry(int op, struct vfsmount *mnt,
- struct dentry *dentry, u32 mask)
- {
-- struct path path = { mnt, dentry };
-+ struct path path = { .mnt = mnt, .dentry = dentry };
- struct path_cond cond = { dentry->d_inode->i_uid,
- dentry->d_inode->i_mode
- };
-@@ -325,8 +326,8 @@ static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry,
-
- profile = aa_current_profile();
- if (!unconfined(profile)) {
-- struct path old_path = { old_dir->mnt, old_dentry };
-- struct path new_path = { new_dir->mnt, new_dentry };
-+ struct path old_path = { .mnt = old_dir->mnt, .dentry = old_dentry };
-+ struct path new_path = { .mnt = new_dir->mnt, .dentry = new_dentry };
- struct path_cond cond = { old_dentry->d_inode->i_uid,
- old_dentry->d_inode->i_mode
- };
-@@ -621,7 +622,105 @@ static int apparmor_task_setrlimit(struct task_struct *task,
- return error;
- }
-
--static struct security_operations apparmor_ops = {
-+static int apparmor_socket_create(int family, int type, int protocol, int kern)
-+{
-+ struct aa_profile *profile;
-+ int error = 0;
-+
-+ if (kern)
-+ return 0;
-+
-+ profile = __aa_current_profile();
-+ if (!unconfined(profile))
-+ error = aa_net_perm(OP_CREATE, profile, family, type, protocol,
-+ NULL);
-+ return error;
-+}
-+
-+static int apparmor_socket_bind(struct socket *sock,
-+ struct sockaddr *address, int addrlen)
-+{
-+ struct sock *sk = sock->sk;
-+
-+ return aa_revalidate_sk(OP_BIND, sk);
-+}
-+
-+static int apparmor_socket_connect(struct socket *sock,
-+ struct sockaddr *address, int addrlen)
-+{
-+ struct sock *sk = sock->sk;
-+
-+ return aa_revalidate_sk(OP_CONNECT, sk);
-+}
-+
-+static int apparmor_socket_listen(struct socket *sock, int backlog)
-+{
-+ struct sock *sk = sock->sk;
-+
-+ return aa_revalidate_sk(OP_LISTEN, sk);
-+}
-+
-+static int apparmor_socket_accept(struct socket *sock, struct socket *newsock)
-+{
-+ struct sock *sk = sock->sk;
-+
-+ return aa_revalidate_sk(OP_ACCEPT, sk);
-+}
-+
-+static int apparmor_socket_sendmsg(struct socket *sock,
-+ struct msghdr *msg, int size)
-+{
-+ struct sock *sk = sock->sk;
-+
-+ return aa_revalidate_sk(OP_SENDMSG, sk);
-+}
-+
-+static int apparmor_socket_recvmsg(struct socket *sock,
-+ struct msghdr *msg, int size, int flags)
-+{
-+ struct sock *sk = sock->sk;
-+
-+ return aa_revalidate_sk(OP_RECVMSG, sk);
-+}
-+
-+static int apparmor_socket_getsockname(struct socket *sock)
-+{
-+ struct sock *sk = sock->sk;
-+
-+ return aa_revalidate_sk(OP_GETSOCKNAME, sk);
-+}
-+
-+static int apparmor_socket_getpeername(struct socket *sock)
-+{
-+ struct sock *sk = sock->sk;
-+
-+ return aa_revalidate_sk(OP_GETPEERNAME, sk);
-+}
-+
-+static int apparmor_socket_getsockopt(struct socket *sock, int level,
-+ int optname)
-+{
-+ struct sock *sk = sock->sk;
-+
-+ return aa_revalidate_sk(OP_GETSOCKOPT, sk);
-+}
-+
-+static int apparmor_socket_setsockopt(struct socket *sock, int level,
-+ int optname)
-+{
-+ struct sock *sk = sock->sk;
-+
-+ return aa_revalidate_sk(OP_SETSOCKOPT, sk);
-+}
-+
-+static int apparmor_socket_shutdown(struct socket *sock, int how)
-+{
-+ struct sock *sk = sock->sk;
-+
-+ return aa_revalidate_sk(OP_SOCK_SHUTDOWN, sk);
-+}
-+
-+static struct security_operations apparmor_ops __read_only = {
- .name = "apparmor",
-
- .ptrace_access_check = apparmor_ptrace_access_check,
-@@ -652,6 +751,19 @@ static struct security_operations apparmor_ops = {
- .getprocattr = apparmor_getprocattr,
- .setprocattr = apparmor_setprocattr,
-
-+ .socket_create = apparmor_socket_create,
-+ .socket_bind = apparmor_socket_bind,
-+ .socket_connect = apparmor_socket_connect,
-+ .socket_listen = apparmor_socket_listen,
-+ .socket_accept = apparmor_socket_accept,
-+ .socket_sendmsg = apparmor_socket_sendmsg,
-+ .socket_recvmsg = apparmor_socket_recvmsg,
-+ .socket_getsockname = apparmor_socket_getsockname,
-+ .socket_getpeername = apparmor_socket_getpeername,
-+ .socket_getsockopt = apparmor_socket_getsockopt,
-+ .socket_setsockopt = apparmor_socket_setsockopt,
-+ .socket_shutdown = apparmor_socket_shutdown,
-+
- .cred_alloc_blank = apparmor_cred_alloc_blank,
- .cred_free = apparmor_cred_free,
- .cred_prepare = apparmor_cred_prepare,
-diff --git a/security/apparmor/match.c b/security/apparmor/match.c
-index 94de6b4..081491e 100644
---- a/security/apparmor/match.c
-+++ b/security/apparmor/match.c
-@@ -57,8 +57,17 @@ static struct table_header *unpack_table(char *blob, size_t bsize)
- if (bsize < tsize)
- goto out;
-
-+ /* Pad table allocation for next/check by 256 entries to remain
-+ * backwards compatible with old (buggy) tools and remain safe without
-+ * run time checks
-+ */
-+ if (th.td_id == YYTD_ID_NXT || th.td_id == YYTD_ID_CHK)
-+ tsize += 256 * th.td_flags;
-+
- table = kvmalloc(tsize);
- if (table) {
-+ /* ensure the pad is clear, else there will be errors */
-+ memset(table, 0, tsize);
- *table = th;
- if (th.td_flags == YYTD_DATA8)
- UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
-@@ -134,11 +143,19 @@ static int verify_dfa(struct aa_dfa *dfa, int flags)
- goto out;
-
- if (flags & DFA_FLAG_VERIFY_STATES) {
-+ int warning = 0;
- for (i = 0; i < state_count; i++) {
- if (DEFAULT_TABLE(dfa)[i] >= state_count)
- goto out;
- /* TODO: do check that DEF state recursion terminates */
- if (BASE_TABLE(dfa)[i] + 255 >= trans_count) {
-+ if (warning)
-+ continue;
-+ printk(KERN_WARNING "AppArmor DFA next/check "
-+ "upper bounds error fixed, upgrade "
-+ "user space tools \n");
-+ warning = 1;
-+ } else if (BASE_TABLE(dfa)[i] >= trans_count) {
- printk(KERN_ERR "AppArmor DFA next/check upper "
- "bounds error\n");
- goto out;
-diff --git a/security/apparmor/net.c b/security/apparmor/net.c
-new file mode 100644
-index 0000000..1765901
---- /dev/null
-+++ b/security/apparmor/net.c
-@@ -0,0 +1,170 @@
-+/*
-+ * AppArmor security module
-+ *
-+ * This file contains AppArmor network mediation
-+ *
-+ * Copyright (C) 1998-2008 Novell/SUSE
-+ * Copyright 2009-2010 Canonical Ltd.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation, version 2 of the
-+ * License.
-+ */
-+
-+#include "include/apparmor.h"
-+#include "include/audit.h"
-+#include "include/context.h"
-+#include "include/net.h"
-+#include "include/policy.h"
-+
-+#include "af_names.h"
-+
-+static const char *sock_type_names[] = {
-+ "unknown(0)",
-+ "stream",
-+ "dgram",
-+ "raw",
-+ "rdm",
-+ "seqpacket",
-+ "dccp",
-+ "unknown(7)",
-+ "unknown(8)",
-+ "unknown(9)",
-+ "packet",
-+};
-+
-+/* audit callback for net specific fields */
-+static void audit_cb(struct audit_buffer *ab, void *va)
-+{
-+ struct common_audit_data *sa = va;
-+
-+ audit_log_format(ab, " family=");
-+ if (address_family_names[sa->u.net.family]) {
-+ audit_log_string(ab, address_family_names[sa->u.net.family]);
-+ } else {
-+ audit_log_format(ab, " \"unknown(%d)\"", sa->u.net.family);
-+ }
-+
-+ audit_log_format(ab, " sock_type=");
-+ if (sock_type_names[sa->aad.net.type]) {
-+ audit_log_string(ab, sock_type_names[sa->aad.net.type]);
-+ } else {
-+ audit_log_format(ab, "\"unknown(%d)\"", sa->aad.net.type);
-+ }
-+
-+ audit_log_format(ab, " protocol=%d", sa->aad.net.protocol);
-+}
-+
-+/**
-+ * audit_net - audit network access
-+ * @profile: profile being enforced (NOT NULL)
-+ * @op: operation being checked
-+ * @family: network family
-+ * @type: network type
-+ * @protocol: network protocol
-+ * @sk: socket auditing is being applied to
-+ * @error: error code for failure else 0
-+ *
-+ * Returns: %0 or sa->error, else another error code on failure
-+ */
-+static int audit_net(struct aa_profile *profile, int op, u16 family, int type,
-+ int protocol, struct sock *sk, int error)
-+{
-+ int audit_type = AUDIT_APPARMOR_AUTO;
-+ struct common_audit_data sa;
-+ if (sk) {
-+ COMMON_AUDIT_DATA_INIT(&sa, NET);
-+ } else {
-+ COMMON_AUDIT_DATA_INIT(&sa, NONE);
-+ }
-+ /* todo fill in socket addr info */
-+
-+ sa.aad.op = op,
-+ sa.u.net.family = family;
-+ sa.u.net.sk = sk;
-+ sa.aad.net.type = type;
-+ sa.aad.net.protocol = protocol;
-+ sa.aad.error = error;
-+
-+ if (likely(!sa.aad.error)) {
-+ u16 audit_mask = profile->net.audit[sa.u.net.family];
-+ if (likely((AUDIT_MODE(profile) != AUDIT_ALL) &&
-+ !(1 << sa.aad.net.type & audit_mask)))
-+ return 0;
-+ audit_type = AUDIT_APPARMOR_AUDIT;
-+ } else {
-+ u16 quiet_mask = profile->net.quiet[sa.u.net.family];
-+ u16 kill_mask = 0;
-+ u16 denied = (1 << sa.aad.net.type) & ~quiet_mask;
-+
-+ if (denied & kill_mask)
-+ audit_type = AUDIT_APPARMOR_KILL;
-+
-+ if ((denied & quiet_mask) &&
-+ AUDIT_MODE(profile) != AUDIT_NOQUIET &&
-+ AUDIT_MODE(profile) != AUDIT_ALL)
-+ return COMPLAIN_MODE(profile) ? 0 : sa.aad.error;
-+ }
-+
-+ return aa_audit(audit_type, profile, GFP_KERNEL, &sa, audit_cb);
-+}
-+
-+/**
-+ * aa_net_perm - very coarse network access check
-+ * @op: operation being checked
-+ * @profile: profile being enforced (NOT NULL)
-+ * @family: network family
-+ * @type: network type
-+ * @protocol: network protocol
-+ *
-+ * Returns: %0 else error if permission denied
-+ */
-+int aa_net_perm(int op, struct aa_profile *profile, u16 family, int type,
-+ int protocol, struct sock *sk)
-+{
-+ u16 family_mask;
-+ int error;
-+
-+ if ((family < 0) || (family >= AF_MAX))
-+ return -EINVAL;
-+
-+ if ((type < 0) || (type >= SOCK_MAX))
-+ return -EINVAL;
-+
-+ /* unix domain and netlink sockets are handled by ipc */
-+ if (family == AF_UNIX || family == AF_NETLINK)
-+ return 0;
-+
-+ family_mask = profile->net.allow[family];
-+
-+ error = (family_mask & (1 << type)) ? 0 : -EACCES;
-+
-+ return audit_net(profile, op, family, type, protocol, sk, error);
-+}
-+
-+/**
-+ * aa_revalidate_sk - Revalidate access to a sock
-+ * @op: operation being checked
-+ * @sk: sock being revalidated (NOT NULL)
-+ *
-+ * Returns: %0 else error if permission denied
-+ */
-+int aa_revalidate_sk(int op, struct sock *sk)
-+{
-+ struct aa_profile *profile;
-+ int error = 0;
-+
-+ /* aa_revalidate_sk should not be called from interrupt context;
-+ * don't mediate these calls as they are not task related
-+ */
-+ if (in_interrupt())
-+ return 0;
-+
-+ profile = __aa_current_profile();
-+ if (!unconfined(profile))
-+ error = aa_net_perm(op, profile, sk->sk_family, sk->sk_type,
-+ sk->sk_protocol, sk);
-+
-+ return error;
-+}
-diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
-index 4f0eade..4d5ce13 100644
---- a/security/apparmor/policy.c
-+++ b/security/apparmor/policy.c
-@@ -745,6 +745,7 @@ static void free_profile(struct aa_profile *profile)
-
- aa_free_file_rules(&profile->file);
- aa_free_cap_rules(&profile->caps);
-+ aa_free_net_rules(&profile->net);
- aa_free_rlimit_rules(&profile->rlimits);
-
- aa_free_sid(profile->sid);
-diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
-index 741dd13..ee8043e 100644
---- a/security/apparmor/policy_unpack.c
-+++ b/security/apparmor/policy_unpack.c
-@@ -190,6 +190,19 @@ fail:
- return 0;
- }
-
-+static bool unpack_u16(struct aa_ext *e, u16 *data, const char *name)
-+{
-+ if (unpack_nameX(e, AA_U16, name)) {
-+ if (!inbounds(e, sizeof(u16)))
-+ return 0;
-+ if (data)
-+ *data = le16_to_cpu(get_unaligned((u16 *) e->pos));
-+ e->pos += sizeof(u16);
-+ return 1;
-+ }
-+ return 0;
-+}
-+
- static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
- {
- if (unpack_nameX(e, AA_U32, name)) {
-@@ -468,7 +481,8 @@ static struct aa_profile *unpack_profile(struct aa_ext *e)
- {
- struct aa_profile *profile = NULL;
- const char *name = NULL;
-- int error = -EPROTO;
-+ size_t size = 0;
-+ int i, error = -EPROTO;
- kernel_cap_t tmpcap;
- u32 tmp;
-
-@@ -559,6 +573,38 @@ static struct aa_profile *unpack_profile(struct aa_ext *e)
- if (!unpack_rlimits(e, profile))
- goto fail;
-
-+ size = unpack_array(e, "net_allowed_af");
-+ if (size) {
-+
-+ for (i = 0; i < size; i++) {
-+ /* discard extraneous rules that this kernel will
-+ * never request
-+ */
-+ if (i >= AF_MAX) {
-+ u16 tmp;
-+ if (!unpack_u16(e, &tmp, NULL) ||
-+ !unpack_u16(e, &tmp, NULL) ||
-+ !unpack_u16(e, &tmp, NULL))
-+ goto fail;
-+ continue;
-+ }
-+ if (!unpack_u16(e, &profile->net.allow[i], NULL))
-+ goto fail;
-+ if (!unpack_u16(e, &profile->net.audit[i], NULL))
-+ goto fail;
-+ if (!unpack_u16(e, &profile->net.quiet[i], NULL))
-+ goto fail;
-+ }
-+ if (!unpack_nameX(e, AA_ARRAYEND, NULL))
-+ goto fail;
-+ /*
-+ * allow unix domain and netlink sockets they are handled
-+ * by IPC
-+ */
-+ }
-+ profile->net.allow[AF_UNIX] = 0xffff;
-+ profile->net.allow[AF_NETLINK] = 0xffff;
-+
- /* get file rules */
- profile->file.dfa = unpack_dfa(e);
- if (IS_ERR(profile->file.dfa)) {
-diff --git a/security/commoncap.c b/security/commoncap.c
-index 12440ee..2ec6d88 100644
---- a/security/commoncap.c
-+++ b/security/commoncap.c
-@@ -29,6 +29,7 @@
- #include <linux/securebits.h>
- #include <linux/user_namespace.h>
- #include <linux/personality.h>
-+#include <net/sock.h>
-
- /*
- * If a non-root user executes a setuid-root binary in
-@@ -59,7 +60,7 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
-
- int cap_netlink_recv(struct sk_buff *skb, int cap)
- {
-- if (!cap_raised(current_cap(), cap))
-+ if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
- return -EPERM;
- return 0;
- }
-@@ -424,6 +425,45 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
- return 0;
- }
-
-+/* returns:
-+ 1 for suid privilege
-+ 2 for sgid privilege
-+ 3 for fscap privilege
-+*/
-+int is_privileged_binary(const struct dentry *dentry)
-+{
-+ struct cpu_vfs_cap_data capdata;
-+ struct inode *inode = dentry->d_inode;
-+
-+ if (!inode || S_ISDIR(inode->i_mode))
-+ return 0;
-+
-+ if (inode->i_mode & S_ISUID)
-+ return 1;
-+ if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
-+ return 2;
-+
-+ if (!get_vfs_caps_from_disk(dentry, &capdata)) {
-+ if (!cap_isclear(capdata.inheritable) || !cap_isclear(capdata.permitted))
-+ return 3;
-+ }
-+
-+ return 0;
-+}
-+
-+/* returns 1 for suid root privilege
-+ returns 3 for fscap privilege
-+*/
-+int is_root_privileged_binary(const struct dentry *dentry)
-+{
-+ int ret = is_privileged_binary(dentry);
-+ if (ret == 3)
-+ return ret;
-+ if (ret == 1 && dentry->d_inode->i_uid == 0)
-+ return ret;
-+ return 0;
-+}
-+
- /*
- * Attempt to get the on-exec apply capability sets for an executable file from
- * its xattrs and, if present, apply them to the proposed credentials being
-@@ -521,14 +561,17 @@ skip:
-
-
- /* Don't let someone trace a set[ug]id/setpcap binary with the revised
-- * credentials unless they have the appropriate permit
-+ * credentials unless they have the appropriate permit.
-+ *
-+ * In addition, if NO_NEW_PRIVS, then ensure we get no new privs.
- */
- if ((new->euid != old->uid ||
- new->egid != old->gid ||
- !cap_issubset(new->cap_permitted, old->cap_permitted)) &&
- bprm->unsafe & ~LSM_UNSAFE_PTRACE_CAP) {
- /* downgrade; they get no more than they had, and maybe less */
-- if (!capable(CAP_SETUID)) {
-+ if (!capable(CAP_SETUID) ||
-+ (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS)) {
- new->euid = new->uid;
- new->egid = new->gid;
- }
-@@ -585,6 +628,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
- {
- const struct cred *cred = current_cred();
-
-+ if (gr_acl_enable_at_secure())
-+ return 1;
-+
- if (cred->uid != 0) {
- if (bprm->cap_effective)
- return 1;
-diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
-index 3ccf7ac..d73ad64 100644
---- a/security/integrity/ima/ima.h
-+++ b/security/integrity/ima/ima.h
-@@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
- extern spinlock_t ima_queue_lock;
-
- struct ima_h_table {
-- atomic_long_t len; /* number of stored measurements in the list */
-- atomic_long_t violations;
-+ atomic_long_unchecked_t len; /* number of stored measurements in the list */
-+ atomic_long_unchecked_t violations;
- struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
- };
- extern struct ima_h_table ima_htable;
-diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
-index 88a2788..581ab92 100644
---- a/security/integrity/ima/ima_api.c
-+++ b/security/integrity/ima/ima_api.c
-@@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
- int result;
-
- /* can overflow, only indicator */
-- atomic_long_inc(&ima_htable.violations);
-+ atomic_long_inc_unchecked(&ima_htable.violations);
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
-diff --git a/security/integrity/ima/ima_audit.c b/security/integrity/ima/ima_audit.c
-index c5c5a72..2ad942f 100644
---- a/security/integrity/ima/ima_audit.c
-+++ b/security/integrity/ima/ima_audit.c
-@@ -56,9 +56,11 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
- audit_log_format(ab, " name=");
- audit_log_untrustedstring(ab, fname);
- }
-- if (inode)
-- audit_log_format(ab, " dev=%s ino=%lu",
-- inode->i_sb->s_id, inode->i_ino);
-+ if (inode) {
-+ audit_log_format(ab, " dev=");
-+ audit_log_untrustedstring(ab, inode->i_sb->s_id);
-+ audit_log_format(ab, " ino=%lu", inode->i_ino);
-+ }
- audit_log_format(ab, " res=%d", !result ? 0 : 1);
- audit_log_end(ab);
- }
-diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
-index e1aa2b4..52027bf 100644
---- a/security/integrity/ima/ima_fs.c
-+++ b/security/integrity/ima/ima_fs.c
-@@ -28,12 +28,12 @@
- static int valid_policy = 1;
- #define TMPBUFLEN 12
- static ssize_t ima_show_htable_value(char __user *buf, size_t count,
-- loff_t *ppos, atomic_long_t *val)
-+ loff_t *ppos, atomic_long_unchecked_t *val)
- {
- char tmpbuf[TMPBUFLEN];
- ssize_t len;
-
-- len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
-+ len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
- return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
- }
-
-diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
-index 55a6271..ad829c3 100644
---- a/security/integrity/ima/ima_queue.c
-+++ b/security/integrity/ima/ima_queue.c
-@@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
- INIT_LIST_HEAD(&qe->later);
- list_add_tail_rcu(&qe->later, &ima_measurements);
-
-- atomic_long_inc(&ima_htable.len);
-+ atomic_long_inc_unchecked(&ima_htable.len);
- key = ima_hash_key(entry->digest);
- hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
- return 0;
-diff --git a/security/keys/compat.c b/security/keys/compat.c
-index 1b0b7bf..9476b92 100644
---- a/security/keys/compat.c
-+++ b/security/keys/compat.c
-@@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
- if (ret == 0)
- goto no_payload_free;
-
-- ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
-+ ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
- err:
- if (iov != iovstack)
- kfree(iov);
-diff --git a/security/keys/key.c b/security/keys/key.c
-index 4414abd..bb89c73 100644
---- a/security/keys/key.c
-+++ b/security/keys/key.c
-@@ -956,7 +956,7 @@ int register_key_type(struct key_type *ktype)
- }
-
- /* store the type */
-- list_add(&ktype->link, &key_types_list);
-+ pax_list_add((struct list_head *)&ktype->link, &key_types_list);
- ret = 0;
-
- out:
-@@ -976,7 +976,7 @@ EXPORT_SYMBOL(register_key_type);
- void unregister_key_type(struct key_type *ktype)
- {
- down_write(&key_types_sem);
-- list_del_init(&ktype->link);
-+ pax_list_del_init((struct list_head *)&ktype->link);
- downgrade_write(&key_types_sem);
- key_gc_keytype(ktype);
- up_read(&key_types_sem);
-@@ -993,9 +993,9 @@ void __init key_init(void)
- 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
-
- /* add the special key types */
-- list_add_tail(&key_type_keyring.link, &key_types_list);
-- list_add_tail(&key_type_dead.link, &key_types_list);
-- list_add_tail(&key_type_user.link, &key_types_list);
-+ pax_list_add_tail((struct list_head *)&key_type_keyring.link, &key_types_list);
-+ pax_list_add_tail((struct list_head *)&key_type_dead.link, &key_types_list);
-+ pax_list_add_tail((struct list_head *)&key_type_user.link, &key_types_list);
-
- /* record the root user tracking */
- rb_link_node(&root_key_user.node,
-diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
-index b70eaa2..35b5b71 100644
---- a/security/keys/keyctl.c
-+++ b/security/keys/keyctl.c
-@@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
- /*
- * Copy the iovec data from userspace
- */
--static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
-+static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
- unsigned ioc)
- {
- for (; ioc > 0; ioc--) {
-@@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
- * If successful, 0 will be returned.
- */
- long keyctl_instantiate_key_common(key_serial_t id,
-- const struct iovec *payload_iov,
-+ const struct iovec __user *payload_iov,
- unsigned ioc,
- size_t plen,
- key_serial_t ringid)
-@@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id,
- [0].iov_len = plen
- };
-
-- return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
-+ return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
- }
-
- return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
-@@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
- if (ret == 0)
- goto no_payload_free;
-
-- ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
-+ ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
- err:
- if (iov != iovstack)
- kfree(iov);
-diff --git a/security/keys/keyring.c b/security/keys/keyring.c
-index 37a7f3b..86dc19f 100644
---- a/security/keys/keyring.c
-+++ b/security/keys/keyring.c
-@@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
- ret = -EFAULT;
-
- for (loop = 0; loop < klist->nkeys; loop++) {
-+ key_serial_t serial;
- key = klist->keys[loop];
-+ serial = key->serial;
-
- tmp = sizeof(key_serial_t);
- if (tmp > buflen)
- tmp = buflen;
-
-- if (copy_to_user(buffer,
-- &key->serial,
-- tmp) != 0)
-+ if (copy_to_user(buffer, &serial, tmp))
- goto error;
-
- buflen -= tmp;
-diff --git a/security/keys/request_key.c b/security/keys/request_key.c
-index 8246532..45c403f 100644
---- a/security/keys/request_key.c
-+++ b/security/keys/request_key.c
-@@ -423,6 +423,7 @@ link_check_failed:
-
- link_prealloc_failed:
- mutex_unlock(&user->cons_lock);
-+ key_put(key);
- kleave(" = %d [prelink]", ret);
- return ret;
-
-diff --git a/security/lsm_audit.c b/security/lsm_audit.c
-index 893af8a..ba9237c 100644
---- a/security/lsm_audit.c
-+++ b/security/lsm_audit.c
-@@ -234,10 +234,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
- audit_log_d_path(ab, "path=", &a->u.path);
-
- inode = a->u.path.dentry->d_inode;
-- if (inode)
-- audit_log_format(ab, " dev=%s ino=%lu",
-- inode->i_sb->s_id,
-- inode->i_ino);
-+ if (inode) {
-+ audit_log_format(ab, " dev=");
-+ audit_log_untrustedstring(ab, inode->i_sb->s_id);
-+ audit_log_format(ab, " ino=%lu", inode->i_ino);
-+ }
- break;
- }
- case LSM_AUDIT_DATA_DENTRY: {
-@@ -247,10 +248,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
- audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
-
- inode = a->u.dentry->d_inode;
-- if (inode)
-- audit_log_format(ab, " dev=%s ino=%lu",
-- inode->i_sb->s_id,
-- inode->i_ino);
-+ if (inode) {
-+ audit_log_format(ab, " dev=");
-+ audit_log_untrustedstring(ab, inode->i_sb->s_id);
-+ audit_log_format(ab, " ino=%lu", inode->i_ino);
-+ }
- break;
- }
- case LSM_AUDIT_DATA_INODE: {
-@@ -265,8 +267,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
- dentry->d_name.name);
- dput(dentry);
- }
-- audit_log_format(ab, " dev=%s ino=%lu", inode->i_sb->s_id,
-- inode->i_ino);
-+ audit_log_format(ab, " dev=");
-+ audit_log_untrustedstring(ab, inode->i_sb->s_id);
-+ audit_log_format(ab, " ino=%lu", inode->i_ino);
- break;
- }
- case LSM_AUDIT_DATA_TASK:
-diff --git a/security/min_addr.c b/security/min_addr.c
-index f728728..6457a0c 100644
---- a/security/min_addr.c
-+++ b/security/min_addr.c
-@@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
- */
- static void update_mmap_min_addr(void)
- {
-+#ifndef SPARC
- #ifdef CONFIG_LSM_MMAP_MIN_ADDR
- if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
- mmap_min_addr = dac_mmap_min_addr;
-@@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
- #else
- mmap_min_addr = dac_mmap_min_addr;
- #endif
-+#endif
- }
-
- /*
-diff --git a/security/security.c b/security/security.c
-index e2f684a..1649b69 100644
---- a/security/security.c
-+++ b/security/security.c
-@@ -26,8 +26,8 @@
- static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
- CONFIG_DEFAULT_SECURITY;
-
--static struct security_operations *security_ops;
--static struct security_operations default_security_ops = {
-+struct security_operations *security_ops __read_only;
-+struct security_operations default_security_ops __read_only = {
- .name = "default",
- };
-
-@@ -66,11 +66,6 @@ int __init security_init(void)
- return 0;
- }
-
--void reset_security_ops(void)
--{
-- security_ops = &default_security_ops;
--}
--
- /* Save user chosen LSM */
- static int __init choose_lsm(char *str)
- {
-@@ -162,6 +157,13 @@ int security_capable(struct user_namespace *ns, const struct cred *cred,
- SECURITY_CAP_AUDIT);
- }
-
-+int security_capable_noaudit(struct user_namespace *ns, const struct cred *cred,
-+ int cap)
-+{
-+ return security_ops->capable(current, cred, ns, cap,
-+ SECURITY_CAP_NOAUDIT);
-+}
-+
- int security_real_capable(struct task_struct *tsk, struct user_namespace *ns,
- int cap)
- {
-diff --git a/security/selinux/avc.c b/security/selinux/avc.c
-index dca1c22..4fa4591 100644
---- a/security/selinux/avc.c
-+++ b/security/selinux/avc.c
-@@ -59,7 +59,7 @@ struct avc_node {
- struct avc_cache {
- struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
- spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
-- atomic_t lru_hint; /* LRU hint for reclaim scan */
-+ atomic_unchecked_t lru_hint; /* LRU hint for reclaim scan */
- atomic_t active_nodes;
- u32 latest_notif; /* latest revocation notification */
- };
-@@ -173,7 +173,7 @@ void __init avc_init(void)
- spin_lock_init(&avc_cache.slots_lock[i]);
- }
- atomic_set(&avc_cache.active_nodes, 0);
-- atomic_set(&avc_cache.lru_hint, 0);
-+ atomic_set_unchecked(&avc_cache.lru_hint, 0);
-
- avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
- 0, SLAB_PANIC, NULL);
-@@ -251,7 +251,7 @@ static inline int avc_reclaim_node(void)
- spinlock_t *lock;
-
- for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
-- hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
-+ hvalue = atomic_inc_return_unchecked(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
- head = &avc_cache.slots[hvalue];
- lock = &avc_cache.slots_lock[hvalue];
-
-diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
-index 0cd7097a..56b85a0 100644
---- a/security/selinux/hooks.c
-+++ b/security/selinux/hooks.c
-@@ -95,8 +95,6 @@
-
- #define NUM_SEL_MNT_OPTS 5
-
--extern struct security_operations *security_ops;
--
- /* SECMARK reference count */
- static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
-
-@@ -2035,6 +2033,13 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
- new_tsec->sid = old_tsec->exec_sid;
- /* Reset exec SID on execve. */
- new_tsec->exec_sid = 0;
-+
-+ /*
-+ * Minimize confusion: if no_new_privs and a transition is
-+ * explicitly requested, then fail the exec.
-+ */
-+ if (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS)
-+ return -EPERM;
- } else {
- /* Check for a default transition on this program. */
- rc = security_transition_sid(old_tsec->sid, isec->sid,
-@@ -2047,7 +2052,8 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
- COMMON_AUDIT_DATA_INIT(&ad, PATH);
- ad.u.path = bprm->file->f_path;
-
-- if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
-+ if ((bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) ||
-+ (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS))
- new_tsec->sid = old_tsec->sid;
-
- if (new_tsec->sid == old_tsec->sid) {
-@@ -3049,7 +3055,8 @@ static int file_map_prot_check(struct file *file, unsigned long prot, int shared
- int rc = 0;
-
- if (default_noexec &&
-- (prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) {
-+ (prot & PROT_EXEC) && (!file || IS_PRIVATE(file->f_path.dentry->d_inode) ||
-+ (!shared && (prot & PROT_WRITE)))) {
- /*
- * We are making executable an anonymous mapping or a
- * private file mapping that will also be writable.
-@@ -5572,7 +5579,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
-
- #endif
-
--static struct security_operations selinux_ops = {
-+static struct security_operations selinux_ops __read_only = {
- .name = "selinux",
-
- .ptrace_access_check = selinux_ptrace_access_check,
-@@ -5918,6 +5925,9 @@ static void selinux_nf_ip_exit(void)
- #ifdef CONFIG_SECURITY_SELINUX_DISABLE
- static int selinux_disabled;
-
-+extern struct security_operations *security_ops;
-+extern struct security_operations default_security_ops;
-+
- int selinux_disable(void)
- {
- if (ss_initialized) {
-@@ -5935,7 +5945,9 @@ int selinux_disable(void)
- selinux_disabled = 1;
- selinux_enabled = 0;
-
-- reset_security_ops();
-+ pax_open_kernel();
-+ security_ops = &default_security_ops;
-+ pax_close_kernel();
-
- /* Try to destroy the avc node cache */
- avc_disable();
-diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
-index b43813c..74be837 100644
---- a/security/selinux/include/xfrm.h
-+++ b/security/selinux/include/xfrm.h
-@@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
-
- static inline void selinux_xfrm_notify_policyload(void)
- {
-- atomic_inc(&flow_cache_genid);
-+ atomic_inc_unchecked(&flow_cache_genid);
- }
- #else
- static inline int selinux_xfrm_enabled(void)
-diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
-index 774c159..801c50d 100644
---- a/security/smack/smack_lsm.c
-+++ b/security/smack/smack_lsm.c
-@@ -3483,7 +3483,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
- return 0;
- }
-
--struct security_operations smack_ops = {
-+struct security_operations smack_ops __read_only = {
- .name = "smack",
-
- .ptrace_access_check = smack_ptrace_access_check,
-diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
-index 4003907..13a2b55 100644
---- a/security/tomoyo/file.c
-+++ b/security/tomoyo/file.c
-@@ -692,7 +692,7 @@ int tomoyo_path_number_perm(const u8 type, struct path *path,
- {
- struct tomoyo_request_info r;
- struct tomoyo_obj_info obj = {
-- .path1 = *path,
-+ .path1 = { .mnt = path->mnt, .dentry = path->dentry },
- };
- int error = -ENOMEM;
- struct tomoyo_path_info buf;
-@@ -740,7 +740,7 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
- struct tomoyo_path_info buf;
- struct tomoyo_request_info r;
- struct tomoyo_obj_info obj = {
-- .path1 = *path,
-+ .path1 = { .mnt = path->mnt, .dentry = path->dentry },
- };
- int idx;
-
-@@ -786,7 +786,7 @@ int tomoyo_path_perm(const u8 operation, struct path *path, const char *target)
- {
- struct tomoyo_request_info r;
- struct tomoyo_obj_info obj = {
-- .path1 = *path,
-+ .path1 = { .mnt = path->mnt, .dentry = path->dentry },
- };
- int error;
- struct tomoyo_path_info buf;
-@@ -843,7 +843,7 @@ int tomoyo_mkdev_perm(const u8 operation, struct path *path,
- {
- struct tomoyo_request_info r;
- struct tomoyo_obj_info obj = {
-- .path1 = *path,
-+ .path1 = { .mnt = path->mnt, .dentry = path->dentry },
- };
- int error = -ENOMEM;
- struct tomoyo_path_info buf;
-@@ -890,8 +890,8 @@ int tomoyo_path2_perm(const u8 operation, struct path *path1,
- struct tomoyo_path_info buf2;
- struct tomoyo_request_info r;
- struct tomoyo_obj_info obj = {
-- .path1 = *path1,
-- .path2 = *path2,
-+ .path1 = { .mnt = path1->mnt, .dentry = path1->dentry },
-+ .path2 = { .mnt = path2->mnt, .dentry = path2->dentry }
- };
- int idx;
-
-diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
-index 4b327b6..c1f0860 100644
---- a/security/tomoyo/tomoyo.c
-+++ b/security/tomoyo/tomoyo.c
-@@ -146,7 +146,7 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
- */
- static int tomoyo_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
- {
-- struct path path = { mnt, dentry };
-+ struct path path = { .mnt = mnt, .dentry = dentry };
- return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, &path, NULL);
- }
-
-@@ -172,7 +172,7 @@ static int tomoyo_path_truncate(struct path *path)
- */
- static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry)
- {
-- struct path path = { parent->mnt, dentry };
-+ struct path path = { .mnt = parent->mnt, .dentry = dentry };
- return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path, NULL);
- }
-
-@@ -188,7 +188,7 @@ static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry)
- static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry,
- int mode)
- {
-- struct path path = { parent->mnt, dentry };
-+ struct path path = { .mnt = parent->mnt, .dentry = dentry };
- return tomoyo_path_number_perm(TOMOYO_TYPE_MKDIR, &path,
- mode & S_IALLUGO);
- }
-@@ -203,7 +203,7 @@ static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry,
- */
- static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry)
- {
-- struct path path = { parent->mnt, dentry };
-+ struct path path = { .mnt = parent->mnt, .dentry = dentry };
- return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path, NULL);
- }
-
-@@ -219,7 +219,7 @@ static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry)
- static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry,
- const char *old_name)
- {
-- struct path path = { parent->mnt, dentry };
-+ struct path path = { .mnt = parent->mnt, .dentry = dentry };
- return tomoyo_path_perm(TOMOYO_TYPE_SYMLINK, &path, old_name);
- }
-
-@@ -236,7 +236,7 @@ static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry,
- static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry,
- int mode, unsigned int dev)
- {
-- struct path path = { parent->mnt, dentry };
-+ struct path path = { .mnt = parent->mnt, .dentry = dentry };
- int type = TOMOYO_TYPE_CREATE;
- const unsigned int perm = mode & S_IALLUGO;
-
-@@ -275,8 +275,8 @@ static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry,
- static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir,
- struct dentry *new_dentry)
- {
-- struct path path1 = { new_dir->mnt, old_dentry };
-- struct path path2 = { new_dir->mnt, new_dentry };
-+ struct path path1 = { .mnt = new_dir->mnt, .dentry = old_dentry };
-+ struct path path2 = { .mnt = new_dir->mnt, .dentry = new_dentry };
- return tomoyo_path2_perm(TOMOYO_TYPE_LINK, &path1, &path2);
- }
-
-@@ -295,8 +295,8 @@ static int tomoyo_path_rename(struct path *old_parent,
- struct path *new_parent,
- struct dentry *new_dentry)
- {
-- struct path path1 = { old_parent->mnt, old_dentry };
-- struct path path2 = { new_parent->mnt, new_dentry };
-+ struct path path1 = { .mnt = old_parent->mnt, .dentry = old_dentry };
-+ struct path path2 = { .mnt = new_parent->mnt, .dentry = new_dentry };
- return tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path1, &path2);
- }
-
-@@ -362,7 +362,7 @@ static int tomoyo_file_ioctl(struct file *file, unsigned int cmd,
- static int tomoyo_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
- mode_t mode)
- {
-- struct path path = { mnt, dentry };
-+ struct path path = { .mnt = mnt, .dentry = dentry };
- return tomoyo_path_number_perm(TOMOYO_TYPE_CHMOD, &path,
- mode & S_IALLUGO);
- }
-@@ -425,7 +425,7 @@ static int tomoyo_sb_mount(char *dev_name, struct path *path,
- */
- static int tomoyo_sb_umount(struct vfsmount *mnt, int flags)
- {
-- struct path path = { mnt, mnt->mnt_root };
-+ struct path path = { .mnt = mnt, .dentry = mnt->mnt_root };
- return tomoyo_path_perm(TOMOYO_TYPE_UMOUNT, &path, NULL);
- }
-
-@@ -504,7 +504,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
- * tomoyo_security_ops is a "struct security_operations" which is used for
- * registering TOMOYO.
- */
--static struct security_operations tomoyo_security_ops = {
-+static struct security_operations tomoyo_security_ops __read_only = {
- .name = "tomoyo",
- .cred_alloc_blank = tomoyo_cred_alloc_blank,
- .cred_prepare = tomoyo_cred_prepare,
-diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
-index 762af68..7103453 100644
---- a/sound/aoa/codecs/onyx.c
-+++ b/sound/aoa/codecs/onyx.c
-@@ -54,7 +54,7 @@ struct onyx {
- spdif_locked:1,
- analog_locked:1,
- original_mute:2;
-- int open_count;
-+ local_t open_count;
- struct codec_info *codec_info;
-
- /* mutex serializes concurrent access to the device
-@@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
- struct onyx *onyx = cii->codec_data;
-
- mutex_lock(&onyx->mutex);
-- onyx->open_count++;
-+ local_inc(&onyx->open_count);
- mutex_unlock(&onyx->mutex);
-
- return 0;
-@@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
- struct onyx *onyx = cii->codec_data;
-
- mutex_lock(&onyx->mutex);
-- onyx->open_count--;
-- if (!onyx->open_count)
-+ if (local_dec_and_test(&onyx->open_count))
- onyx->spdif_locked = onyx->analog_locked = 0;
- mutex_unlock(&onyx->mutex);
-
-diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
-index ffd2025..df062c9 100644
---- a/sound/aoa/codecs/onyx.h
-+++ b/sound/aoa/codecs/onyx.h
-@@ -11,6 +11,7 @@
- #include <linux/i2c.h>
- #include <asm/pmac_low_i2c.h>
- #include <asm/prom.h>
-+#include <asm/local.h>
-
- /* PCM3052 register definitions */
-
-diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
-index 542f69e..fe6e8c3 100644
---- a/sound/core/oss/pcm_oss.c
-+++ b/sound/core/oss/pcm_oss.c
-@@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
- if (in_kernel) {
- mm_segment_t fs;
- fs = snd_enter_user();
-- ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
-+ ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
- snd_leave_user(fs);
- } else {
-- ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
-+ ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
- }
- if (ret != -EPIPE && ret != -ESTRPIPE)
- break;
-@@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
- if (in_kernel) {
- mm_segment_t fs;
- fs = snd_enter_user();
-- ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
-+ ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
- snd_leave_user(fs);
- } else {
-- ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
-+ ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
- }
- if (ret == -EPIPE) {
- if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
-@@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
- struct snd_pcm_plugin_channel *channels;
- size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
- if (!in_kernel) {
-- if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
-+ if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
- return -EFAULT;
- buf = runtime->oss.buffer;
- }
-@@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
- }
- } else {
- tmp = snd_pcm_oss_write2(substream,
-- (const char __force *)buf,
-+ (const char __force_kernel *)buf,
- runtime->oss.period_bytes, 0);
- if (tmp <= 0)
- goto err;
-@@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
- struct snd_pcm_runtime *runtime = substream->runtime;
- snd_pcm_sframes_t frames, frames1;
- #ifdef CONFIG_SND_PCM_OSS_PLUGINS
-- char __user *final_dst = (char __force __user *)buf;
-+ char __user *final_dst = (char __force_user *)buf;
- if (runtime->oss.plugin_first) {
- struct snd_pcm_plugin_channel *channels;
- size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
-@@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
- xfer += tmp;
- runtime->oss.buffer_used -= tmp;
- } else {
-- tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
-+ tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
- runtime->oss.period_bytes, 0);
- if (tmp <= 0)
- goto err;
-@@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
- size1);
- size1 /= runtime->channels; /* frames */
- fs = snd_enter_user();
-- snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
-+ snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
- snd_leave_user(fs);
- }
- } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
-diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
-index 4dbb66e..eda2998 100644
---- a/sound/core/pcm_compat.c
-+++ b/sound/core/pcm_compat.c
-@@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
- int err;
-
- fs = snd_enter_user();
-- err = snd_pcm_delay(substream, &delay);
-+ err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
- snd_leave_user(fs);
- if (err < 0)
- return err;
-diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
-index 8a00555..48f07fe 100644
---- a/sound/core/pcm_native.c
-+++ b/sound/core/pcm_native.c
-@@ -2790,11 +2790,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
- switch (substream->stream) {
- case SNDRV_PCM_STREAM_PLAYBACK:
- result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
-- (void __user *)arg);
-+ (void __force_user *)arg);
- break;
- case SNDRV_PCM_STREAM_CAPTURE:
- result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
-- (void __user *)arg);
-+ (void __force_user *)arg);
- break;
- default:
- result = -EINVAL;
-diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
-index 8d4d5e8..fdd0826 100644
---- a/sound/core/seq/oss/seq_oss.c
-+++ b/sound/core/seq/oss/seq_oss.c
-@@ -75,8 +75,8 @@ static int __init alsa_seq_oss_init(void)
- {
- int rc;
- static struct snd_seq_dev_ops ops = {
-- snd_seq_oss_synth_register,
-- snd_seq_oss_synth_unregister,
-+ .init_device = snd_seq_oss_synth_register,
-+ .free_device = snd_seq_oss_synth_unregister,
- };
-
- snd_seq_autoload_lock();
-diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
-index 5cf8d65..912a79c 100644
---- a/sound/core/seq/seq_device.c
-+++ b/sound/core/seq/seq_device.c
-@@ -64,7 +64,7 @@ struct ops_list {
- int argsize; /* argument size */
-
- /* operators */
-- struct snd_seq_dev_ops ops;
-+ struct snd_seq_dev_ops *ops;
-
- 	/* registered devices */
- struct list_head dev_list; /* list of devices */
-@@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
-
- mutex_lock(&ops->reg_mutex);
- /* copy driver operators */
-- ops->ops = *entry;
-+ ops->ops = entry;
- ops->driver |= DRIVER_LOADED;
- ops->argsize = argsize;
-
-@@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
- dev->name, ops->id, ops->argsize, dev->argsize);
- return -EINVAL;
- }
-- if (ops->ops.init_device(dev) >= 0) {
-+ if (ops->ops->init_device(dev) >= 0) {
- dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
- ops->num_init_devices++;
- } else {
-@@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
- dev->name, ops->id, ops->argsize, dev->argsize);
- return -EINVAL;
- }
-- if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
-+ if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
- dev->status = SNDRV_SEQ_DEVICE_FREE;
- dev->driver_data = NULL;
- ops->num_init_devices--;
-diff --git a/sound/core/seq/seq_midi.c b/sound/core/seq/seq_midi.c
-index 64069db..3c6d392 100644
---- a/sound/core/seq/seq_midi.c
-+++ b/sound/core/seq/seq_midi.c
-@@ -462,8 +462,8 @@ snd_seq_midisynth_unregister_port(struct snd_seq_device *dev)
- static int __init alsa_seq_midi_init(void)
- {
- static struct snd_seq_dev_ops ops = {
-- snd_seq_midisynth_register_port,
-- snd_seq_midisynth_unregister_port,
-+ .init_device = snd_seq_midisynth_register_port,
-+ .free_device = snd_seq_midisynth_unregister_port,
- };
- memset(&synths, 0, sizeof(synths));
- snd_seq_autoload_lock();
-diff --git a/sound/core/sound.c b/sound/core/sound.c
-index 8e17b4d..6819e80 100644
---- a/sound/core/sound.c
-+++ b/sound/core/sound.c
-@@ -87,7 +87,7 @@ static void snd_request_other(int minor)
- case SNDRV_MINOR_TIMER: str = "snd-timer"; break;
- default: return;
- }
-- request_module(str);
-+ request_module("%s", str);
- }
-
- #endif /* modular kernel */
-diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
-index f24bf9a..1f7b67c 100644
---- a/sound/drivers/mts64.c
-+++ b/sound/drivers/mts64.c
-@@ -29,6 +29,7 @@
- #include <sound/initval.h>
- #include <sound/rawmidi.h>
- #include <sound/control.h>
-+#include <asm/local.h>
-
- #define CARD_NAME "Miditerminal 4140"
- #define DRIVER_NAME "MTS64"
-@@ -67,7 +68,7 @@ struct mts64 {
- struct pardevice *pardev;
- int pardev_claimed;
-
-- int open_count;
-+ local_t open_count;
- int current_midi_output_port;
- int current_midi_input_port;
- u8 mode[MTS64_NUM_INPUT_PORTS];
-@@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
- {
- struct mts64 *mts = substream->rmidi->private_data;
-
-- if (mts->open_count == 0) {
-+ if (local_read(&mts->open_count) == 0) {
- /* We don't need a spinlock here, because this is just called
- if the device has not been opened before.
- So there aren't any IRQs from the device */
-@@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
-
- msleep(50);
- }
-- ++(mts->open_count);
-+ local_inc(&mts->open_count);
-
- return 0;
- }
-@@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
- struct mts64 *mts = substream->rmidi->private_data;
- unsigned long flags;
-
-- --(mts->open_count);
-- if (mts->open_count == 0) {
-+ if (local_dec_return(&mts->open_count) == 0) {
- /* We need the spinlock_irqsave here because we can still
- have IRQs at this point */
- spin_lock_irqsave(&mts->lock, flags);
-@@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
-
- msleep(500);
-
-- } else if (mts->open_count < 0)
-- mts->open_count = 0;
-+ } else if (local_read(&mts->open_count) < 0)
-+ local_set(&mts->open_count, 0);
-
- return 0;
- }
-diff --git a/sound/drivers/opl3/opl3_seq.c b/sound/drivers/opl3/opl3_seq.c
-index 723562e..c3ff2f5 100644
---- a/sound/drivers/opl3/opl3_seq.c
-+++ b/sound/drivers/opl3/opl3_seq.c
-@@ -281,8 +281,8 @@ static int __init alsa_opl3_seq_init(void)
- {
- static struct snd_seq_dev_ops ops =
- {
-- snd_opl3_seq_new_device,
-- snd_opl3_seq_delete_device
-+ .init_device = snd_opl3_seq_new_device,
-+ .free_device = snd_opl3_seq_delete_device
- };
-
- return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_OPL3, &ops,
-diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
-index b953fb4..1999c01 100644
---- a/sound/drivers/opl4/opl4_lib.c
-+++ b/sound/drivers/opl4/opl4_lib.c
-@@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
- MODULE_DESCRIPTION("OPL4 driver");
- MODULE_LICENSE("GPL");
-
--static void inline snd_opl4_wait(struct snd_opl4 *opl4)
-+static inline void snd_opl4_wait(struct snd_opl4 *opl4)
- {
- int timeout = 10;
- while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
-diff --git a/sound/drivers/opl4/opl4_seq.c b/sound/drivers/opl4/opl4_seq.c
-index 9919769..d7de36c 100644
---- a/sound/drivers/opl4/opl4_seq.c
-+++ b/sound/drivers/opl4/opl4_seq.c
-@@ -198,8 +198,8 @@ static int snd_opl4_seq_delete_device(struct snd_seq_device *dev)
- static int __init alsa_opl4_synth_init(void)
- {
- static struct snd_seq_dev_ops ops = {
-- snd_opl4_seq_new_device,
-- snd_opl4_seq_delete_device
-+ .init_device = snd_opl4_seq_new_device,
-+ .free_device = snd_opl4_seq_delete_device
- };
-
- return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_OPL4, &ops,
-diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
-index f664823..590c745 100644
---- a/sound/drivers/portman2x4.c
-+++ b/sound/drivers/portman2x4.c
-@@ -48,6 +48,7 @@
- #include <sound/initval.h>
- #include <sound/rawmidi.h>
- #include <sound/control.h>
-+#include <asm/local.h>
-
- #define CARD_NAME "Portman 2x4"
- #define DRIVER_NAME "portman"
-@@ -85,7 +86,7 @@ struct portman {
- struct pardevice *pardev;
- int pardev_claimed;
-
-- int open_count;
-+ local_t open_count;
- int mode[PORTMAN_NUM_INPUT_PORTS];
- struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
- };
-diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
-index 87657dd..a8268d4 100644
---- a/sound/firewire/amdtp.c
-+++ b/sound/firewire/amdtp.c
-@@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
- ptr = s->pcm_buffer_pointer + data_blocks;
- if (ptr >= pcm->runtime->buffer_size)
- ptr -= pcm->runtime->buffer_size;
-- ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
-+ ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
-
- s->pcm_period_pointer += data_blocks;
- if (s->pcm_period_pointer >= pcm->runtime->period_size) {
-@@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
- */
- void amdtp_out_stream_update(struct amdtp_out_stream *s)
- {
-- ACCESS_ONCE(s->source_node_id_field) =
-+ ACCESS_ONCE_RW(s->source_node_id_field) =
- (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
- }
- EXPORT_SYMBOL(amdtp_out_stream_update);
-diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
-index 537a9cb..8e8c8e9 100644
---- a/sound/firewire/amdtp.h
-+++ b/sound/firewire/amdtp.h
-@@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
- static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
- struct snd_pcm_substream *pcm)
- {
-- ACCESS_ONCE(s->pcm) = pcm;
-+ ACCESS_ONCE_RW(s->pcm) = pcm;
- }
-
- /**
-diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
-index cd094ec..eca1277 100644
---- a/sound/firewire/isight.c
-+++ b/sound/firewire/isight.c
-@@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
- ptr += count;
- if (ptr >= runtime->buffer_size)
- ptr -= runtime->buffer_size;
-- ACCESS_ONCE(isight->buffer_pointer) = ptr;
-+ ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
-
- isight->period_counter += count;
- if (isight->period_counter >= runtime->period_size) {
-@@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
- if (err < 0)
- return err;
-
-- ACCESS_ONCE(isight->pcm_active) = true;
-+ ACCESS_ONCE_RW(isight->pcm_active) = true;
-
- return 0;
- }
-@@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
- {
- struct isight *isight = substream->private_data;
-
-- ACCESS_ONCE(isight->pcm_active) = false;
-+ ACCESS_ONCE_RW(isight->pcm_active) = false;
-
- mutex_lock(&isight->mutex);
- isight_stop_streaming(isight);
-@@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
-- ACCESS_ONCE(isight->pcm_running) = true;
-+ ACCESS_ONCE_RW(isight->pcm_running) = true;
- break;
- case SNDRV_PCM_TRIGGER_STOP:
-- ACCESS_ONCE(isight->pcm_running) = false;
-+ ACCESS_ONCE_RW(isight->pcm_running) = false;
- break;
- default:
- return -EINVAL;
-diff --git a/sound/isa/sb/emu8000_synth.c b/sound/isa/sb/emu8000_synth.c
-index 4e3fcfb..ab45a9d 100644
---- a/sound/isa/sb/emu8000_synth.c
-+++ b/sound/isa/sb/emu8000_synth.c
-@@ -120,8 +120,8 @@ static int __init alsa_emu8000_init(void)
- {
-
- static struct snd_seq_dev_ops ops = {
-- snd_emu8000_new_device,
-- snd_emu8000_delete_device,
-+ .init_device = snd_emu8000_new_device,
-+ .free_device = snd_emu8000_delete_device,
- };
- return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_EMU8000, &ops,
- sizeof(struct snd_emu8000*));
-diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
-index 733b014..56ce96f 100644
---- a/sound/oss/sb_audio.c
-+++ b/sound/oss/sb_audio.c
-@@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
- buf16 = (signed short *)(localbuf + localoffs);
- while (c)
- {
-- locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
-+ locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
- if (copy_from_user(lbuf8,
- userbuf+useroffs + p,
- locallen))
-diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
-index 09d4648..cf234c7 100644
---- a/sound/oss/swarm_cs4297a.c
-+++ b/sound/oss/swarm_cs4297a.c
-@@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
- {
- struct cs4297a_state *s;
- u32 pwr, id;
-- mm_segment_t fs;
- int rval;
- #ifndef CONFIG_BCM_CS4297A_CSWARM
- u64 cfg;
-@@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
- if (!rval) {
- char *sb1250_duart_present;
-
-+#if 0
-+ mm_segment_t fs;
- fs = get_fs();
- set_fs(KERNEL_DS);
--#if 0
- val = SOUND_MASK_LINE;
- mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
- for (i = 0; i < ARRAY_SIZE(initvol); i++) {
- val = initvol[i].vol;
- mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
- }
-+ set_fs(fs);
- // cs4297a_write_ac97(s, 0x18, 0x0808);
- #else
- // cs4297a_write_ac97(s, 0x5e, 0x180);
- cs4297a_write_ac97(s, 0x02, 0x0808);
- cs4297a_write_ac97(s, 0x18, 0x0808);
- #endif
-- set_fs(fs);
-
- list_add(&s->list, &cs4297a_devs);
-
-diff --git a/sound/pci/emu10k1/emu10k1_synth.c b/sound/pci/emu10k1/emu10k1_synth.c
-index 4c41c90..37f3631 100644
---- a/sound/pci/emu10k1/emu10k1_synth.c
-+++ b/sound/pci/emu10k1/emu10k1_synth.c
-@@ -108,8 +108,8 @@ static int __init alsa_emu10k1_synth_init(void)
- {
-
- static struct snd_seq_dev_ops ops = {
-- snd_emu10k1_synth_new_device,
-- snd_emu10k1_synth_delete_device,
-+ .init_device = snd_emu10k1_synth_new_device,
-+ .free_device = snd_emu10k1_synth_delete_device,
- };
- return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_EMU10K1_SYNTH, &ops,
- sizeof(struct snd_emu10k1_synth_arg));
-diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
-index ee95618..7752241 100644
---- a/sound/pci/hda/hda_codec.c
-+++ b/sound/pci/hda/hda_codec.c
-@@ -852,14 +852,10 @@ find_codec_preset(struct hda_codec *codec)
- mutex_unlock(&preset_mutex);
-
- if (mod_requested < HDA_MODREQ_MAX_COUNT) {
-- char name[32];
- if (!mod_requested)
-- snprintf(name, sizeof(name), "snd-hda-codec-id:%08x",
-- codec->vendor_id);
-+ request_module("snd-hda-codec-id:%08x", codec->vendor_id);
- else
-- snprintf(name, sizeof(name), "snd-hda-codec-id:%04x*",
-- (codec->vendor_id >> 16) & 0xffff);
-- request_module(name);
-+ request_module("snd-hda-codec-id:%04x*", (codec->vendor_id >> 16) & 0xffff);
- mod_requested++;
- goto again;
- }
-diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
-index 03ee4e3..be86b46 100644
---- a/sound/pci/ymfpci/ymfpci_main.c
-+++ b/sound/pci/ymfpci/ymfpci_main.c
-@@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
- if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
- break;
- }
-- if (atomic_read(&chip->interrupt_sleep_count)) {
-- atomic_set(&chip->interrupt_sleep_count, 0);
-+ if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
-+ atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
- wake_up(&chip->interrupt_sleep);
- }
- __end:
-@@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
- continue;
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&chip->interrupt_sleep, &wait);
-- atomic_inc(&chip->interrupt_sleep_count);
-+ atomic_inc_unchecked(&chip->interrupt_sleep_count);
- schedule_timeout_uninterruptible(msecs_to_jiffies(50));
- remove_wait_queue(&chip->interrupt_sleep, &wait);
- }
-@@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
- snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
- spin_unlock(&chip->reg_lock);
-
-- if (atomic_read(&chip->interrupt_sleep_count)) {
-- atomic_set(&chip->interrupt_sleep_count, 0);
-+ if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
-+ atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
- wake_up(&chip->interrupt_sleep);
- }
- }
-@@ -2382,7 +2382,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
- spin_lock_init(&chip->reg_lock);
- spin_lock_init(&chip->voice_lock);
- init_waitqueue_head(&chip->interrupt_sleep);
-- atomic_set(&chip->interrupt_sleep_count, 0);
-+ atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
- chip->card = card;
- chip->pci = pci;
- chip->irq = -1;
-diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
-index 83c4bd5..f75658c 100644
---- a/sound/soc/fsl/fsl_ssi.c
-+++ b/sound/soc/fsl/fsl_ssi.c
-@@ -608,7 +608,7 @@ static int __devinit fsl_ssi_probe(struct platform_device *pdev)
- {
- struct fsl_ssi_private *ssi_private;
- int ret = 0;
-- struct device_attribute *dev_attr = NULL;
-+ device_attribute_no_const *dev_attr = NULL;
- struct device_node *np = pdev->dev.of_node;
- const char *p, *sprop;
- const uint32_t *iprop;
-diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
-index ee15337..ab0ec34 100644
---- a/sound/soc/soc-pcm.c
-+++ b/sound/soc/soc-pcm.c
-@@ -627,13 +627,15 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
- rtd->pcm = pcm;
- pcm->private_data = rtd;
- if (platform->driver->ops) {
-- soc_pcm_ops.mmap = platform->driver->ops->mmap;
-- soc_pcm_ops.pointer = platform->driver->ops->pointer;
-- soc_pcm_ops.ioctl = platform->driver->ops->ioctl;
-- soc_pcm_ops.copy = platform->driver->ops->copy;
-- soc_pcm_ops.silence = platform->driver->ops->silence;
-- soc_pcm_ops.ack = platform->driver->ops->ack;
-- soc_pcm_ops.page = platform->driver->ops->page;
-+ pax_open_kernel();
-+ *(void **)&soc_pcm_ops.mmap = platform->driver->ops->mmap;
-+ *(void **)&soc_pcm_ops.pointer = platform->driver->ops->pointer;
-+ *(void **)&soc_pcm_ops.ioctl = platform->driver->ops->ioctl;
-+ *(void **)&soc_pcm_ops.copy = platform->driver->ops->copy;
-+ *(void **)&soc_pcm_ops.silence = platform->driver->ops->silence;
-+ *(void **)&soc_pcm_ops.ack = platform->driver->ops->ack;
-+ *(void **)&soc_pcm_ops.page = platform->driver->ops->page;
-+ pax_close_kernel();
- }
-
- if (playback)
-diff --git a/sound/sound_core.c b/sound/sound_core.c
-index 6ce2778..f25c378 100644
---- a/sound/sound_core.c
-+++ b/sound/sound_core.c
-@@ -293,7 +293,7 @@ retry:
- }
-
- device_create(sound_class, dev, MKDEV(SOUND_MAJOR, s->unit_minor),
-- NULL, s->name+6);
-+ NULL, "%s", s->name+6);
- return s->unit_minor;
-
- fail:
-diff --git a/sound/synth/emux/emux_seq.c b/sound/synth/emux/emux_seq.c
-index 7778b8e..3d619fc 100644
---- a/sound/synth/emux/emux_seq.c
-+++ b/sound/synth/emux/emux_seq.c
-@@ -33,13 +33,13 @@ static int snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *inf
- * MIDI emulation operators
- */
- static struct snd_midi_op emux_ops = {
-- snd_emux_note_on,
-- snd_emux_note_off,
-- snd_emux_key_press,
-- snd_emux_terminate_note,
-- snd_emux_control,
-- snd_emux_nrpn,
-- snd_emux_sysex,
-+ .note_on = snd_emux_note_on,
-+ .note_off = snd_emux_note_off,
-+ .key_press = snd_emux_key_press,
-+ .note_terminate = snd_emux_terminate_note,
-+ .control = snd_emux_control,
-+ .nrpn = snd_emux_nrpn,
-+ .sysex = snd_emux_sysex,
- };
-
-
-diff --git a/sound/usb/card.h b/sound/usb/card.h
-index 0a7ca6c..f4b948c 100644
---- a/sound/usb/card.h
-+++ b/sound/usb/card.h
-@@ -45,6 +45,7 @@ struct snd_urb_ops {
- int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
- int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
- };
-+typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
-
- struct snd_usb_substream {
- struct snd_usb_stream *stream;
-@@ -96,7 +97,7 @@ struct snd_usb_substream {
- struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
- spinlock_t lock;
-
-- struct snd_urb_ops ops; /* callbacks (must be filled at init) */
-+ snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
- int last_frame_number; /* stored frame number */
- int last_delay; /* stored delay */
- };
-diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore
-new file mode 100644
-index 0000000..60e7af2
---- /dev/null
-+++ b/tools/gcc/.gitignore
-@@ -0,0 +1,2 @@
-+randomize_layout_seed.h
-+randomize_layout_hash.h
-diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
-new file mode 100644
-index 0000000..a51677e
---- /dev/null
-+++ b/tools/gcc/Makefile
-@@ -0,0 +1,52 @@
-+#CC := gcc
-+#PLUGIN_SOURCE_FILES := pax_plugin.c
-+#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
-+GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
-+#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
-+
-+ifeq ($(PLUGINCC),$(HOSTCC))
-+HOSTLIBS := hostlibs
-+HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(src) -std=gnu99 -ggdb
-+export HOST_EXTRACFLAGS
-+else
-+HOSTLIBS := hostcxxlibs
-+HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(src) -std=gnu++98 -fno-rtti -fno-exceptions -fasynchronous-unwind-tables -ggdb -Wno-unused-parameter -Wno-narrowing -Wno-unused-variable
-+export HOST_EXTRACXXFLAGS
-+endif
-+
-+export GCCPLUGINS_DIR HOSTLIBS
-+
-+$(HOSTLIBS)-$(CONFIG_PAX_CONSTIFY_PLUGIN) := constify_plugin.so
-+$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
-+$(HOSTLIBS)-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
-+$(HOSTLIBS)-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
-+$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
-+$(HOSTLIBS)-y += colorize_plugin.so
-+$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
-+$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STRUCTLEAK) += structleak_plugin.so
-+$(HOSTLIBS)-$(CONFIG_GRKERNSEC_RANDSTRUCT) += randomize_layout_plugin.so
-+
-+subdir-$(CONFIG_PAX_SIZE_OVERFLOW) := size_overflow_plugin
-+subdir- += size_overflow_plugin
-+
-+always := $($(HOSTLIBS)-y)
-+
-+constify_plugin-objs := constify_plugin.o
-+stackleak_plugin-objs := stackleak_plugin.o
-+kallocstat_plugin-objs := kallocstat_plugin.o
-+kernexec_plugin-objs := kernexec_plugin.o
-+checker_plugin-objs := checker_plugin.o
-+colorize_plugin-objs := colorize_plugin.o
-+latent_entropy_plugin-objs := latent_entropy_plugin.o
-+structleak_plugin-objs := structleak_plugin.o
-+randomize_layout_plugin-objs := randomize_layout_plugin.o
-+
-+$(obj)/randomize_layout_plugin.o: $(objtree)/$(obj)/randomize_layout_seed.h
-+
-+quiet_cmd_create_randomize_layout_seed = GENSEED $@
-+ cmd_create_randomize_layout_seed = \
-+ $(CONFIG_SHELL) $(srctree)/$(src)/gen-random-seed.sh $@ $(objtree)/include/generated/randomize_layout_hash.h
-+$(objtree)/$(obj)/randomize_layout_seed.h: FORCE
-+ $(call if_changed,create_randomize_layout_seed)
-+
-+targets += randomize_layout_seed.h randomize_layout_hash.h
-diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
-new file mode 100644
-index 0000000..5452feea
---- /dev/null
-+++ b/tools/gcc/checker_plugin.c
-@@ -0,0 +1,150 @@
-+/*
-+ * Copyright 2011-2014 by the PaX Team <pageexec@freemail.hu>
-+ * Licensed under the GPL v2
-+ *
-+ * Note: the choice of the license means that the compilation process is
-+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
-+ * but for the kernel it doesn't matter since it doesn't link against
-+ * any of the gcc libraries
-+ *
-+ * gcc plugin to implement various sparse (source code checker) features
-+ *
-+ * TODO:
-+ * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
-+ *
-+ * BUGS:
-+ * - none known
-+ */
-+
-+#include "gcc-common.h"
-+
-+extern void c_register_addr_space (const char *str, addr_space_t as);
-+extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
-+extern enum machine_mode default_addr_space_address_mode (addr_space_t);
-+extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
-+extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
-+extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
-+
-+int plugin_is_GPL_compatible;
-+
-+static struct plugin_info checker_plugin_info = {
-+ .version = "201304082245",
-+ .help = NULL,
-+};
-+
-+#define ADDR_SPACE_KERNEL 0
-+#define ADDR_SPACE_FORCE_KERNEL 1
-+#define ADDR_SPACE_USER 2
-+#define ADDR_SPACE_FORCE_USER 3
-+#define ADDR_SPACE_IOMEM 0
-+#define ADDR_SPACE_FORCE_IOMEM 0
-+#define ADDR_SPACE_PERCPU 0
-+#define ADDR_SPACE_FORCE_PERCPU 0
-+#define ADDR_SPACE_RCU 0
-+#define ADDR_SPACE_FORCE_RCU 0
-+
-+static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
-+{
-+ return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
-+}
-+
-+static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
-+{
-+ return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
-+}
-+
-+static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
-+{
-+ return default_addr_space_valid_pointer_mode(mode, as);
-+}
-+
-+static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
-+{
-+ return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
-+}
-+
-+static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
-+{
-+ return default_addr_space_legitimize_address(x, oldx, mode, as);
-+}
-+
-+static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
-+{
-+ if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
-+ return true;
-+
-+ if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
-+ return true;
-+
-+ if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
-+ return true;
-+
-+ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
-+ return true;
-+
-+ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
-+ return true;
-+
-+ if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
-+ return true;
-+
-+ if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
-+ return true;
-+
-+ return subset == superset;
-+}
-+
-+static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
-+{
-+// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
-+// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
-+
-+ return op;
-+}
-+
-+static void register_checker_address_spaces(void *event_data, void *data)
-+{
-+ c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
-+ c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
-+ c_register_addr_space("__user", ADDR_SPACE_USER);
-+ c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
-+// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
-+// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
-+// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
-+// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
-+// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
-+// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
-+
-+ targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
-+ targetm.addr_space.address_mode = checker_addr_space_address_mode;
-+ targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
-+ targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
-+// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
-+ targetm.addr_space.subset_p = checker_addr_space_subset_p;
-+ targetm.addr_space.convert = checker_addr_space_convert;
-+}
-+
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
-+{
-+ const char * const plugin_name = plugin_info->base_name;
-+ const int argc = plugin_info->argc;
-+ const struct plugin_argument * const argv = plugin_info->argv;
-+ int i;
-+
-+ if (!plugin_default_version_check(version, &gcc_version)) {
-+ error(G_("incompatible gcc/plugin versions"));
-+ return 1;
-+ }
-+
-+ register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
-+
-+ for (i = 0; i < argc; ++i)
-+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
-+
-+ if (TARGET_64BIT == 0)
-+ return 0;
-+
-+ register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
-+
-+ return 0;
-+}
-diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
-new file mode 100644
-index 0000000..0c96d8a
---- /dev/null
-+++ b/tools/gcc/colorize_plugin.c
-@@ -0,0 +1,215 @@
-+/*
-+ * Copyright 2012-2015 by PaX Team <pageexec@freemail.hu>
-+ * Licensed under the GPL v2
-+ *
-+ * Note: the choice of the license means that the compilation process is
-+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
-+ * but for the kernel it doesn't matter since it doesn't link against
-+ * any of the gcc libraries
-+ *
-+ * gcc plugin to colorize diagnostic output
-+ *
-+ */
-+
-+#include "gcc-common.h"
-+
-+int plugin_is_GPL_compatible;
-+
-+static struct plugin_info colorize_plugin_info = {
-+ .version = "201404202350",
-+ .help = "color=[never|always|auto]\tdetermine when to colorize\n",
-+};
-+
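-+// ANSI SGR color sequences; the trailing \033[K erases to the end of the line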
-+#define GREEN "\033[32m\033[K"
-+#define LIGHTGREEN "\033[1;32m\033[K"
-+#define YELLOW "\033[33m\033[K"
-+#define LIGHTYELLOW "\033[1;33m\033[K"
-+#define RED "\033[31m\033[K"
-+#define LIGHTRED "\033[1;31m\033[K"
-+#define BLUE "\033[34m\033[K"
-+#define LIGHTBLUE "\033[1;34m\033[K"
-+#define BRIGHT "\033[1;m\033[K"
-+#define NORMAL "\033[m\033[K"
-+
-+static diagnostic_starter_fn old_starter;
-+static diagnostic_finalizer_fn old_finalizer;
-+
-+static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
-+{
-+ const char *color;
-+ char *newprefix;
-+
-+ switch (diagnostic->kind) {
-+ case DK_NOTE:
-+ color = LIGHTBLUE;
-+ break;
-+
-+ case DK_PEDWARN:
-+ case DK_WARNING:
-+ color = LIGHTYELLOW;
-+ break;
-+
-+ case DK_ERROR:
-+ case DK_FATAL:
-+ case DK_ICE:
-+ case DK_PERMERROR:
-+ case DK_SORRY:
-+ color = LIGHTRED;
-+ break;
-+
-+ default:
-+ color = NORMAL;
-+ }
-+
-+ old_starter(context, diagnostic);
-+ if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
-+ return;
-+ pp_destroy_prefix(context->printer);
-+ pp_set_prefix(context->printer, newprefix);
-+}
-+
-+static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
-+{
-+ old_finalizer(context, diagnostic);
-+}
-+
-+static void colorize_arm(void)
-+{
-+ old_starter = diagnostic_starter(global_dc);
-+ old_finalizer = diagnostic_finalizer(global_dc);
-+
-+ diagnostic_starter(global_dc) = start_colorize;
-+ diagnostic_finalizer(global_dc) = finalize_colorize;
-+}
-+
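-+// re-install our diagnostic hooks in case something replaced them after start_unit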
-+static unsigned int execute_colorize_rearm(void)
-+{
-+ if (diagnostic_starter(global_dc) == start_colorize)
-+ return 0;
-+
-+ colorize_arm();
-+ return 0;
-+}
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
-+static const struct pass_data colorize_rearm_pass_data = {
-+#else
-+struct simple_ipa_opt_pass colorize_rearm_pass = {
-+ .pass = {
-+#endif
-+ .type = SIMPLE_IPA_PASS,
-+ .name = "colorize_rearm",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 5000
-+#elif BUILDING_GCC_VERSION == 4009
-+ .has_gate = false,
-+ .has_execute = true,
-+#else
-+ .gate = NULL,
-+ .execute = execute_colorize_rearm,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = 0,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0,
-+ .todo_flags_finish = 0
-+#if BUILDING_GCC_VERSION < 4009
-+ }
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+class colorize_rearm_pass : public simple_ipa_opt_pass {
-+public:
-+ colorize_rearm_pass() : simple_ipa_opt_pass(colorize_rearm_pass_data, g) {}
-+#if BUILDING_GCC_VERSION >= 5000
-+ virtual unsigned int execute(function *) { return execute_colorize_rearm(); }
-+#else
-+ unsigned int execute() { return execute_colorize_rearm(); }
-+#endif
-+};
-+}
-+
-+static opt_pass *make_colorize_rearm_pass(void)
-+{
-+ return new colorize_rearm_pass();
-+}
-+#else
-+static struct opt_pass *make_colorize_rearm_pass(void)
-+{
-+ return &colorize_rearm_pass.pass;
-+}
-+#endif
-+
-+static void colorize_start_unit(void *gcc_data, void *user_data)
-+{
-+ colorize_arm();
-+}
-+
-+static bool should_colorize(void)
-+{
-+#if BUILDING_GCC_VERSION >= 4009
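-+	// gcc 4.9+ colorizes diagnostics itself (-fdiagnostics-color/GCC_COLORS), so default to off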
-+ return false;
-+#else
-+ char const *t = getenv("TERM");
-+
-+ return t && strcmp(t, "dumb") && isatty(STDERR_FILENO);
-+#endif
-+}
-+
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
-+{
-+ const char * const plugin_name = plugin_info->base_name;
-+ const int argc = plugin_info->argc;
-+ const struct plugin_argument * const argv = plugin_info->argv;
-+ int i;
-+ struct register_pass_info colorize_rearm_pass_info;
-+ bool colorize;
-+
-+ colorize_rearm_pass_info.pass = make_colorize_rearm_pass();
-+ colorize_rearm_pass_info.reference_pass_name = "*free_lang_data";
-+ colorize_rearm_pass_info.ref_pass_instance_number = 1;
-+ colorize_rearm_pass_info.pos_op = PASS_POS_INSERT_AFTER;
-+
-+ if (!plugin_default_version_check(version, &gcc_version)) {
-+ error(G_("incompatible gcc/plugin versions"));
-+ return 1;
-+ }
-+
-+ register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
-+
-+ colorize = getenv("GCC_COLORS") ? should_colorize() : false;
-+
-+ for (i = 0; i < argc; ++i) {
-+ if (!strcmp(argv[i].key, "color")) {
-+ if (!argv[i].value) {
-+ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
-+ continue;
-+ }
-+ if (!strcmp(argv[i].value, "always"))
-+ colorize = true;
-+ else if (!strcmp(argv[i].value, "never"))
-+ colorize = false;
-+ else if (!strcmp(argv[i].value, "auto"))
-+ colorize = should_colorize();
-+ else
-+ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
-+ continue;
-+ }
-+ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
-+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
-+
-+ if (colorize) {
-+ // TODO: parse GCC_COLORS as used by gcc 4.9+
-+ register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
-+ }
-+ return 0;
-+}
-diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
-new file mode 100644
-index 0000000..c5de280
---- /dev/null
-+++ b/tools/gcc/constify_plugin.c
-@@ -0,0 +1,568 @@
-+/*
-+ * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
-+ * Copyright 2011-2015 by PaX Team <pageexec@freemail.hu>
-+ * Licensed under the GPL v2, or (at your option) v3
-+ *
-+ * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
-+ *
-+ * Homepage:
-+ * http://www.grsecurity.net/~ephox/const_plugin/
-+ *
-+ * Usage:
-+ * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
-+ * $ gcc -fplugin=constify_plugin.so test.c -O2
-+ */
-+
-+#include "gcc-common.h"
-+
-+// unused C type flag in all versions 4.5-5.0
-+#define TYPE_CONSTIFY_VISITED(TYPE) TYPE_LANG_FLAG_4(TYPE)
-+
-+int plugin_is_GPL_compatible;
-+
-+static struct plugin_info const_plugin_info = {
-+ .version = "201401270210",
-+ .help = "no-constify\tturn off constification\n",
-+};
-+
-+typedef struct {
-+ bool has_fptr_field;
-+ bool has_writable_field;
-+ bool has_do_const_field;
-+ bool has_no_const_field;
-+} constify_info;
-+
-+static const_tree get_field_type(const_tree field)
-+{
-+ return strip_array_types(TREE_TYPE(field));
-+}
-+
-+static bool is_fptr(const_tree field)
-+{
-+ const_tree ptr = get_field_type(field);
-+
-+ if (TREE_CODE(ptr) != POINTER_TYPE)
-+ return false;
-+
-+ return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
-+}
-+
-+/*
-+ * determine whether the given structure type meets the requirements for automatic constification,
-+ * including the constification attributes on nested structure types
-+ */
-+static void constifiable(const_tree node, constify_info *cinfo)
-+{
-+ const_tree field;
-+
-+ gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE);
-+
-+ // e.g., pointer to structure fields while still constructing the structure type
-+ if (TYPE_FIELDS(node) == NULL_TREE)
-+ return;
-+
-+ for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
-+ const_tree type = get_field_type(field);
-+ enum tree_code code = TREE_CODE(type);
-+
-+ if (node == type)
-+ continue;
-+
-+ if (is_fptr(field))
-+ cinfo->has_fptr_field = true;
-+ else if (!TREE_READONLY(field))
-+ cinfo->has_writable_field = true;
-+
-+ if (code == RECORD_TYPE || code == UNION_TYPE) {
-+ if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
-+ cinfo->has_do_const_field = true;
-+ else if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
-+ cinfo->has_no_const_field = true;
-+ else
-+ constifiable(type, cinfo);
-+ }
-+ }
-+}
-+
-+static bool constified(const_tree node)
-+{
-+ constify_info cinfo = {
-+ .has_fptr_field = false,
-+ .has_writable_field = false,
-+ .has_do_const_field = false,
-+ .has_no_const_field = false
-+ };
-+
-+ gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE);
-+
-+ if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) {
-+// gcc_assert(!TYPE_READONLY(node));
-+ return false;
-+ }
-+
-+ if (lookup_attribute("do_const", TYPE_ATTRIBUTES(node))) {
-+ gcc_assert(TYPE_READONLY(node));
-+ return true;
-+ }
-+
-+ constifiable(node, &cinfo);
-+ if ((!cinfo.has_fptr_field || cinfo.has_writable_field) && !cinfo.has_do_const_field)
-+ return false;
-+
-+ return TYPE_READONLY(node);
-+}
-+
-+static void deconstify_tree(tree node);
-+
-+static void deconstify_type(tree type)
-+{
-+ tree field;
-+
-+ gcc_assert(TREE_CODE(type) == RECORD_TYPE || TREE_CODE(type) == UNION_TYPE);
-+
-+ for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
-+ const_tree fieldtype = get_field_type(field);
-+
-+ // special case handling of simple ptr-to-same-array-type members
-+ if (TREE_CODE(TREE_TYPE(field)) == POINTER_TYPE) {
-+ tree ptrtype = TREE_TYPE(TREE_TYPE(field));
-+
-+ if (TREE_TYPE(TREE_TYPE(field)) == type)
-+ continue;
-+ if (TREE_CODE(ptrtype) != RECORD_TYPE && TREE_CODE(ptrtype) != UNION_TYPE)
-+ continue;
-+ if (!constified(ptrtype))
-+ continue;
-+ if (TYPE_MAIN_VARIANT(ptrtype) == TYPE_MAIN_VARIANT(type)) {
-+ TREE_TYPE(field) = copy_node(TREE_TYPE(field));
-+ TREE_TYPE(TREE_TYPE(field)) = build_qualified_type(type, TYPE_QUALS(ptrtype) & ~TYPE_QUAL_CONST);
-+ }
-+ continue;
-+ }
-+ if (TREE_CODE(fieldtype) != RECORD_TYPE && TREE_CODE(fieldtype) != UNION_TYPE)
-+ continue;
-+ if (!constified(fieldtype))
-+ continue;
-+
-+ deconstify_tree(field);
-+ TREE_READONLY(field) = 0;
-+ }
-+ TYPE_READONLY(type) = 0;
-+ C_TYPE_FIELDS_READONLY(type) = 0;
-+ if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
-+ TYPE_ATTRIBUTES(type) = copy_list(TYPE_ATTRIBUTES(type));
-+ TYPE_ATTRIBUTES(type) = remove_attribute("do_const", TYPE_ATTRIBUTES(type));
-+ }
-+}
-+
-+static void deconstify_tree(tree node)
-+{
-+ tree old_type, new_type, field;
-+
-+ old_type = TREE_TYPE(node);
-+ while (TREE_CODE(old_type) == ARRAY_TYPE && TREE_CODE(TREE_TYPE(old_type)) != ARRAY_TYPE) {
-+ node = TREE_TYPE(node) = copy_node(old_type);
-+ old_type = TREE_TYPE(old_type);
-+ }
-+
-+ gcc_assert(TREE_CODE(old_type) == RECORD_TYPE || TREE_CODE(old_type) == UNION_TYPE);
-+ gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST));
-+
-+ new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
-+ TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
-+ for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
-+ DECL_FIELD_CONTEXT(field) = new_type;
-+
-+ deconstify_type(new_type);
-+
-+ TREE_TYPE(node) = new_type;
-+}
-+
-+static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
-+{
-+ tree type;
-+ constify_info cinfo = {
-+ .has_fptr_field = false,
-+ .has_writable_field = false,
-+ .has_do_const_field = false,
-+ .has_no_const_field = false
-+ };
-+
-+ *no_add_attrs = true;
-+ if (TREE_CODE(*node) == FUNCTION_DECL) {
-+ error("%qE attribute does not apply to functions (%qF)", name, *node);
-+ return NULL_TREE;
-+ }
-+
-+ if (TREE_CODE(*node) == PARM_DECL) {
-+ error("%qE attribute does not apply to function parameters (%qD)", name, *node);
-+ return NULL_TREE;
-+ }
-+
-+ if (TREE_CODE(*node) == VAR_DECL) {
-+ error("%qE attribute does not apply to variables (%qD)", name, *node);
-+ return NULL_TREE;
-+ }
-+
-+ if (TYPE_P(*node)) {
-+ type = *node;
-+ } else {
-+ gcc_assert(TREE_CODE(*node) == TYPE_DECL);
-+ type = TREE_TYPE(*node);
-+ }
-+
-+ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
-+ error("%qE attribute used on %qT applies to struct and union types only", name, type);
-+ return NULL_TREE;
-+ }
-+
-+ if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
-+ error("%qE attribute is already applied to the type %qT", name, type);
-+ return NULL_TREE;
-+ }
-+
-+ if (TYPE_P(*node)) {
-+ if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
-+ error("%qE attribute used on type %qT is incompatible with 'do_const'", name, type);
-+ else
-+ *no_add_attrs = false;
-+ return NULL_TREE;
-+ }
-+
-+ constifiable(type, &cinfo);
-+ if ((cinfo.has_fptr_field && !cinfo.has_writable_field) || lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
-+ deconstify_tree(*node);
-+ TYPE_CONSTIFY_VISITED(TREE_TYPE(*node)) = 1;
-+ return NULL_TREE;
-+ }
-+
-+ if (TYPE_FIELDS(type))
-+ error("%qE attribute used on type %qT that is not constified", name, type);
-+ return NULL_TREE;
-+}
-+
-+static void constify_type(tree type)
-+{
-+ TYPE_READONLY(type) = 1;
-+ C_TYPE_FIELDS_READONLY(type) = 1;
-+ TYPE_CONSTIFY_VISITED(type) = 1;
-+// TYPE_ATTRIBUTES(type) = copy_list(TYPE_ATTRIBUTES(type));
-+// TYPE_ATTRIBUTES(type) = tree_cons(get_identifier("do_const"), NULL_TREE, TYPE_ATTRIBUTES(type));
-+}
-+
-+static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
-+{
-+ *no_add_attrs = true;
-+ if (!TYPE_P(*node)) {
-+ error("%qE attribute applies to types only (%qD)", name, *node);
-+ return NULL_TREE;
-+ }
-+
-+ if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
-+ error("%qE attribute used on %qT applies to struct and union types only", name, *node);
-+ return NULL_TREE;
-+ }
-+
-+ if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(*node))) {
-+ error("%qE attribute used on %qT is already applied to the type", name, *node);
-+ return NULL_TREE;
-+ }
-+
-+ if (lookup_attribute("no_const", TYPE_ATTRIBUTES(*node))) {
-+ error("%qE attribute used on %qT is incompatible with 'no_const'", name, *node);
-+ return NULL_TREE;
-+ }
-+
-+ *no_add_attrs = false;
-+ return NULL_TREE;
-+}
-+
-+static struct attribute_spec no_const_attr = {
-+ .name = "no_const",
-+ .min_length = 0,
-+ .max_length = 0,
-+ .decl_required = false,
-+ .type_required = false,
-+ .function_type_required = false,
-+ .handler = handle_no_const_attribute,
-+#if BUILDING_GCC_VERSION >= 4007
-+ .affects_type_identity = true
-+#endif
-+};
-+
-+static struct attribute_spec do_const_attr = {
-+ .name = "do_const",
-+ .min_length = 0,
-+ .max_length = 0,
-+ .decl_required = false,
-+ .type_required = false,
-+ .function_type_required = false,
-+ .handler = handle_do_const_attribute,
-+#if BUILDING_GCC_VERSION >= 4007
-+ .affects_type_identity = true
-+#endif
-+};
-+
-+static void register_attributes(void *event_data, void *data)
-+{
-+ register_attribute(&no_const_attr);
-+ register_attribute(&do_const_attr);
-+}
-+
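-+// PLUGIN_FINISH_TYPE callback: auto-constify struct/union types that contain only
-+// function pointers (or carry the do_const attribute), deconstify everything else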
-+static void finish_type(void *event_data, void *data)
-+{
-+ tree type = (tree)event_data;
-+ constify_info cinfo = {
-+ .has_fptr_field = false,
-+ .has_writable_field = false,
-+ .has_do_const_field = false,
-+ .has_no_const_field = false
-+ };
-+
-+ if (type == NULL_TREE || type == error_mark_node)
-+ return;
-+
-+#if BUILDING_GCC_VERSION >= 5000
-+ if (TREE_CODE(type) == ENUMERAL_TYPE)
-+ return;
-+#endif
-+
-+ if (TYPE_FIELDS(type) == NULL_TREE || TYPE_CONSTIFY_VISITED(type))
-+ return;
-+
-+ constifiable(type, &cinfo);
-+
-+ if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type))) {
-+ if ((cinfo.has_fptr_field && !cinfo.has_writable_field) || cinfo.has_do_const_field) {
-+ deconstify_type(type);
-+ TYPE_CONSTIFY_VISITED(type) = 1;
-+ } else
-+ error("'no_const' attribute used on type %qT that is not constified", type);
-+ return;
-+ }
-+
-+ if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
-+ if (!cinfo.has_writable_field) {
-+ error("'do_const' attribute used on type %qT that is%sconstified", type, cinfo.has_fptr_field ? " " : " not ");
-+ return;
-+ }
-+ constify_type(type);
-+ return;
-+ }
-+
-+ if (cinfo.has_fptr_field && !cinfo.has_writable_field) {
-+ if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
-+ error("'do_const' attribute used on type %qT that is constified", type);
-+ return;
-+ }
-+ constify_type(type);
-+ return;
-+ }
-+
-+ deconstify_type(type);
-+ TYPE_CONSTIFY_VISITED(type) = 1;
-+}
-+
-+static void check_global_variables(void *event_data, void *data)
-+{
-+#if BUILDING_GCC_VERSION >= 4009
-+ varpool_node *node;
-+#else
-+ struct varpool_node *node;
-+#endif
-+
-+ FOR_EACH_VARIABLE(node) {
-+ tree var = NODE_DECL(node);
-+ tree type = TREE_TYPE(var);
-+
-+ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
-+ continue;
-+
-+ if (!TYPE_READONLY(type) || !C_TYPE_FIELDS_READONLY(type))
-+ continue;
-+
-+ if (!TYPE_CONSTIFY_VISITED(type))
-+ continue;
-+
-+ if (DECL_EXTERNAL(var))
-+ continue;
-+
-+ if (DECL_INITIAL(var))
-+ continue;
-+
-+ // this works around a gcc bug/feature where uninitialized globals
-+ // are moved into the .bss section regardless of any constification
-+ DECL_INITIAL(var) = build_constructor(type, NULL);
-+// inform(DECL_SOURCE_LOCATION(var), "constified variable %qE moved into .rodata", var);
-+ }
-+}
-+
-+static unsigned int check_local_variables(void)
-+{
-+ unsigned int ret = 0;
-+ tree var;
-+
-+ unsigned int i;
-+
-+ FOR_EACH_LOCAL_DECL(cfun, i, var) {
-+ tree type = TREE_TYPE(var);
-+
-+ gcc_assert(DECL_P(var));
-+ if (is_global_var(var))
-+ continue;
-+
-+ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
-+ continue;
-+
-+ if (!TYPE_READONLY(type) || !C_TYPE_FIELDS_READONLY(type))
-+ continue;
-+
-+ if (!TYPE_CONSTIFY_VISITED(type))
-+ continue;
-+
-+ error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
-+ ret = 1;
-+ }
-+ return ret;
-+}
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
-+static const struct pass_data check_local_variables_pass_data = {
-+#else
-+static struct gimple_opt_pass check_local_variables_pass = {
-+ .pass = {
-+#endif
-+ .type = GIMPLE_PASS,
-+ .name = "check_local_variables",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 5000
-+#elif BUILDING_GCC_VERSION == 4009
-+ .has_gate = false,
-+ .has_execute = true,
-+#else
-+ .gate = NULL,
-+ .execute = check_local_variables,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = 0,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0,
-+ .todo_flags_finish = 0
-+#if BUILDING_GCC_VERSION < 4009
-+ }
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+class check_local_variables_pass : public gimple_opt_pass {
-+public:
-+ check_local_variables_pass() : gimple_opt_pass(check_local_variables_pass_data, g) {}
-+#if BUILDING_GCC_VERSION >= 5000
-+ virtual unsigned int execute(function *) { return check_local_variables(); }
-+#else
-+ unsigned int execute() { return check_local_variables(); }
-+#endif
-+};
-+}
-+
-+static opt_pass *make_check_local_variables_pass(void)
-+{
-+ return new check_local_variables_pass();
-+}
-+#else
-+static struct opt_pass *make_check_local_variables_pass(void)
-+{
-+ return &check_local_variables_pass.pass;
-+}
-+#endif
-+
-+static struct {
-+ const char *name;
-+ const char *asm_op;
-+} sections[] = {
-+ {".init.rodata", "\t.section\t.init.rodata,\"a\""},
-+ {".ref.rodata", "\t.section\t.ref.rodata,\"a\""},
-+ {".devinit.rodata", "\t.section\t.devinit.rodata,\"a\""},
-+ {".devexit.rodata", "\t.section\t.devexit.rodata,\"a\""},
-+ {".cpuinit.rodata", "\t.section\t.cpuinit.rodata,\"a\""},
-+ {".cpuexit.rodata", "\t.section\t.cpuexit.rodata,\"a\""},
-+ {".meminit.rodata", "\t.section\t.meminit.rodata,\"a\""},
-+ {".memexit.rodata", "\t.section\t.memexit.rodata,\"a\""},
-+ {".data..read_only", "\t.section\t.data..read_only,\"a\""},
-+};
-+
-+static unsigned int (*old_section_type_flags)(tree decl, const char *name, int reloc);
-+
-+static unsigned int constify_section_type_flags(tree decl, const char *name, int reloc)
-+{
-+ size_t i;
-+
-+ for (i = 0; i < ARRAY_SIZE(sections); i++)
-+ if (!strcmp(sections[i].name, name))
-+ return 0;
-+ return old_section_type_flags(decl, name, reloc);
-+}
-+
-+static void constify_start_unit(void *gcc_data, void *user_data)
-+{
-+// size_t i;
-+
-+// for (i = 0; i < ARRAY_SIZE(sections); i++)
-+// sections[i].section = get_unnamed_section(0, output_section_asm_op, sections[i].asm_op);
-+// sections[i].section = get_section(sections[i].name, 0, NULL);
-+
-+ old_section_type_flags = targetm.section_type_flags;
-+ targetm.section_type_flags = constify_section_type_flags;
-+}
-+
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
-+{
-+ const char * const plugin_name = plugin_info->base_name;
-+ const int argc = plugin_info->argc;
-+ const struct plugin_argument * const argv = plugin_info->argv;
-+ int i;
-+ bool constify = true;
-+
-+ struct register_pass_info check_local_variables_pass_info;
-+
-+ check_local_variables_pass_info.pass = make_check_local_variables_pass();
-+ check_local_variables_pass_info.reference_pass_name = "ssa";
-+ check_local_variables_pass_info.ref_pass_instance_number = 1;
-+ check_local_variables_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
-+
-+ if (!plugin_default_version_check(version, &gcc_version)) {
-+ error(G_("incompatible gcc/plugin versions"));
-+ return 1;
-+ }
-+
-+ for (i = 0; i < argc; ++i) {
-+ if (!(strcmp(argv[i].key, "no-constify"))) {
-+ constify = false;
-+ continue;
-+ }
-+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
-+ }
-+
-+	if (strncmp(lang_hooks.name, "GNU C", 5) || !strncmp(lang_hooks.name, "GNU C+", 6)) {
-+ inform(UNKNOWN_LOCATION, G_("%s supports C only, not %s"), plugin_name, lang_hooks.name);
-+ constify = false;
-+ }
-+
-+ register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
-+ if (constify) {
-+ register_callback(plugin_name, PLUGIN_ALL_IPA_PASSES_START, check_global_variables, NULL);
-+ register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &check_local_variables_pass_info);
-+ register_callback(plugin_name, PLUGIN_START_UNIT, constify_start_unit, NULL);
-+ }
-+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
-+
-+ return 0;
-+}
-diff --git a/tools/gcc/gcc-common.h b/tools/gcc/gcc-common.h
-new file mode 100644
-index 0000000..70924d4
---- /dev/null
-+++ b/tools/gcc/gcc-common.h
-@@ -0,0 +1,787 @@
-+#ifndef GCC_COMMON_H_INCLUDED
-+#define GCC_COMMON_H_INCLUDED
-+
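-+// common include and compatibility layer for the gcc plugins: pulls in the
-+// required gcc headers and papers over API differences between gcc 4.5 and 5.x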
-+#include "plugin.h"
-+#include "bversion.h"
-+#include "plugin-version.h"
-+#include "config.h"
-+#include "system.h"
-+#include "coretypes.h"
-+#include "tm.h"
-+#include "line-map.h"
-+#include "input.h"
-+#include "tree.h"
-+
-+#include "tree-inline.h"
-+#include "version.h"
-+#include "rtl.h"
-+#include "tm_p.h"
-+#include "flags.h"
-+//#include "insn-attr.h"
-+//#include "insn-config.h"
-+//#include "insn-flags.h"
-+#include "hard-reg-set.h"
-+//#include "recog.h"
-+#include "output.h"
-+#include "except.h"
-+#include "function.h"
-+#include "toplev.h"
-+//#include "expr.h"
-+#include "basic-block.h"
-+#include "intl.h"
-+#include "ggc.h"
-+//#include "regs.h"
-+#include "timevar.h"
-+
-+#include "params.h"
-+
-+#if BUILDING_GCC_VERSION <= 4009
-+#include "pointer-set.h"
-+#else
-+#include "hash-map.h"
-+#endif
-+
-+#include "emit-rtl.h"
-+//#include "reload.h"
-+//#include "ira.h"
-+//#include "dwarf2asm.h"
-+#include "debug.h"
-+#include "target.h"
-+#include "langhooks.h"
-+#include "cfgloop.h"
-+//#include "hosthooks.h"
-+#include "cgraph.h"
-+#include "opts.h"
-+//#include "coverage.h"
-+//#include "value-prof.h"
-+
-+#if BUILDING_GCC_VERSION == 4005
-+#include <sys/mman.h>
-+#endif
-+
-+#if BUILDING_GCC_VERSION >= 4007
-+#include "tree-pretty-print.h"
-+#include "gimple-pretty-print.h"
-+#endif
-+
-+#if BUILDING_GCC_VERSION >= 4006
-+//#include "c-tree.h"
-+//#include "cp/cp-tree.h"
-+#include "c-family/c-common.h"
-+#else
-+#include "c-common.h"
-+#endif
-+
-+#if BUILDING_GCC_VERSION <= 4008
-+#include "tree-flow.h"
-+#else
-+#include "tree-cfgcleanup.h"
-+#include "tree-ssa-operands.h"
-+#include "tree-into-ssa.h"
-+#endif
-+
-+#if BUILDING_GCC_VERSION >= 4008
-+#include "is-a.h"
-+#endif
-+
-+#include "diagnostic.h"
-+//#include "tree-diagnostic.h"
-+#include "tree-dump.h"
-+#include "tree-pass.h"
-+//#include "df.h"
-+#include "predict.h"
-+#include "ipa-utils.h"
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+#include "varasm.h"
-+#include "stor-layout.h"
-+#include "internal-fn.h"
-+#include "gimple-expr.h"
-+#include "gimple-fold.h"
-+//#include "diagnostic-color.h"
-+#include "context.h"
-+#include "tree-ssa-alias.h"
-+#include "tree-ssa.h"
-+#include "stringpool.h"
-+#include "tree-ssanames.h"
-+#include "print-tree.h"
-+#include "tree-eh.h"
-+#include "stmt.h"
-+#include "gimplify.h"
-+#endif
-+
-+#include "gimple.h"
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+#include "tree-ssa-operands.h"
-+#include "tree-phinodes.h"
-+#include "tree-cfg.h"
-+#include "gimple-iterator.h"
-+#include "gimple-ssa.h"
-+#include "ssa-iterators.h"
-+#endif
-+
-+//#include "lto/lto.h"
-+#if BUILDING_GCC_VERSION >= 4007
-+//#include "data-streamer.h"
-+#else
-+//#include "lto-streamer.h"
-+#endif
-+//#include "lto-compress.h"
-+#if BUILDING_GCC_VERSION >= 5000
-+//#include "lto-section-names.h"
-+#include "builtins.h"
-+#endif
-+
-+//#include "expr.h" where are you...
-+extern rtx emit_move_insn(rtx x, rtx y);
-+
-+// missing from basic_block.h...
-+extern void debug_dominance_info(enum cdi_direction dir);
-+extern void debug_dominance_tree(enum cdi_direction dir, basic_block root);
-+
-+#ifdef __cplusplus
-+static inline void debug_tree(const_tree t)
-+{
-+ debug_tree(CONST_CAST_TREE(t));
-+}
-+#else
-+#define debug_tree(t) debug_tree(CONST_CAST_TREE(t))
-+#endif
-+
-+#define __unused __attribute__((__unused__))
-+
-+#define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node))
-+#define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node))
-+#define TYPE_NAME_POINTER(node) IDENTIFIER_POINTER(TYPE_NAME(node))
-+#define TYPE_NAME_LENGTH(node) IDENTIFIER_LENGTH(TYPE_NAME(node))
-+
-+// should come from c-tree.h if only it were installed for gcc 4.5...
-+#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
-+
-+#if BUILDING_GCC_VERSION == 4005
-+#define FOR_EACH_LOCAL_DECL(FUN, I, D) for (tree vars = (FUN)->local_decls, (I) = 0; vars && ((D) = TREE_VALUE(vars)); vars = TREE_CHAIN(vars), (I)++)
-+#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
-+#define FOR_EACH_VEC_ELT(T, V, I, P) for (I = 0; VEC_iterate(T, (V), (I), (P)); ++(I))
-+#define TODO_rebuild_cgraph_edges 0
-+#define SCOPE_FILE_SCOPE_P(EXP) (!(EXP))
-+
-+#ifndef O_BINARY
-+#define O_BINARY 0
-+#endif
-+
-+static inline bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
-+{
-+ tree fndecl;
-+
-+ if (!is_gimple_call(stmt))
-+ return false;
-+ fndecl = gimple_call_fndecl(stmt);
-+ if (!fndecl || DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
-+ return false;
-+// print_node(stderr, "pax", fndecl, 4);
-+ return DECL_FUNCTION_CODE(fndecl) == code;
-+}
-+
-+static inline bool is_simple_builtin(tree decl)
-+{
-+ if (decl && DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
-+ return false;
-+
-+ switch (DECL_FUNCTION_CODE(decl)) {
-+ /* Builtins that expand to constants. */
-+ case BUILT_IN_CONSTANT_P:
-+ case BUILT_IN_EXPECT:
-+ case BUILT_IN_OBJECT_SIZE:
-+ case BUILT_IN_UNREACHABLE:
-+ /* Simple register moves or loads from stack. */
-+ case BUILT_IN_RETURN_ADDRESS:
-+ case BUILT_IN_EXTRACT_RETURN_ADDR:
-+ case BUILT_IN_FROB_RETURN_ADDR:
-+ case BUILT_IN_RETURN:
-+ case BUILT_IN_AGGREGATE_INCOMING_ADDRESS:
-+ case BUILT_IN_FRAME_ADDRESS:
-+ case BUILT_IN_VA_END:
-+ case BUILT_IN_STACK_SAVE:
-+ case BUILT_IN_STACK_RESTORE:
-+ /* Exception state returns or moves registers around. */
-+ case BUILT_IN_EH_FILTER:
-+ case BUILT_IN_EH_POINTER:
-+ case BUILT_IN_EH_COPY_VALUES:
-+ return true;
-+
-+ default:
-+ return false;
-+ }
-+}
-+
-+static inline void add_local_decl(struct function *fun, tree d)
-+{
-+ gcc_assert(TREE_CODE(d) == VAR_DECL);
-+ fun->local_decls = tree_cons(NULL_TREE, d, fun->local_decls);
-+}
-+#endif
-+
-+#if BUILDING_GCC_VERSION <= 4006
-+#define ANY_RETURN_P(rtx) (GET_CODE(rtx) == RETURN)
-+#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4(EXP)
-+#define EDGE_PRESERVE 0ULL
-+#define HOST_WIDE_INT_PRINT_HEX_PURE "%" HOST_WIDE_INT_PRINT "x"
-+#define flag_fat_lto_objects true
-+
-+#define get_random_seed(noinit) ({ \
-+ unsigned HOST_WIDE_INT seed; \
-+ sscanf(get_random_seed(noinit), "%" HOST_WIDE_INT_PRINT "x", &seed); \
-+ seed * seed; })
-+
-+#define int_const_binop(code, arg1, arg2) int_const_binop((code), (arg1), (arg2), 0)
-+
-+static inline bool gimple_clobber_p(gimple s __unused)
-+{
-+ return false;
-+}
-+
-+static inline bool gimple_asm_clobbers_memory_p(const_gimple stmt)
-+{
-+ unsigned i;
-+
-+ for (i = 0; i < gimple_asm_nclobbers(stmt); i++) {
-+ tree op = gimple_asm_clobber_op(stmt, i);
-+ if (!strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "memory"))
-+ return true;
-+ }
-+
-+ return false;
-+}
-+
-+static inline tree builtin_decl_implicit(enum built_in_function fncode)
-+{
-+ return implicit_built_in_decls[fncode];
-+}
-+
-+static inline int ipa_reverse_postorder(struct cgraph_node **order)
-+{
-+ return cgraph_postorder(order);
-+}
-+
-+static inline struct cgraph_node *cgraph_get_create_node(tree decl)
-+{
-+ struct cgraph_node *node = cgraph_get_node(decl);
-+
-+ return node ? node : cgraph_node(decl);
-+}
-+
-+static inline bool cgraph_function_with_gimple_body_p(struct cgraph_node *node)
-+{
-+ return node->analyzed && !node->thunk.thunk_p && !node->alias;
-+}
-+
-+static inline struct cgraph_node *cgraph_first_function_with_gimple_body(void)
-+{
-+ struct cgraph_node *node;
-+
-+ for (node = cgraph_nodes; node; node = node->next)
-+ if (cgraph_function_with_gimple_body_p(node))
-+ return node;
-+ return NULL;
-+}
-+
-+static inline struct cgraph_node *cgraph_next_function_with_gimple_body(struct cgraph_node *node)
-+{
-+ for (node = node->next; node; node = node->next)
-+ if (cgraph_function_with_gimple_body_p(node))
-+ return node;
-+ return NULL;
-+}
-+
-+#define FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) \
-+ for ((node) = cgraph_first_function_with_gimple_body(); (node); \
-+ (node) = cgraph_next_function_with_gimple_body(node))
-+
-+static inline void varpool_add_new_variable(tree decl)
-+{
-+ varpool_finalize_decl(decl);
-+}
-+#endif
-+
-+#if BUILDING_GCC_VERSION == 4006
-+extern void debug_gimple_stmt(gimple);
-+extern void debug_gimple_seq(gimple_seq);
-+extern void print_gimple_seq(FILE *, gimple_seq, int, int);
-+extern void print_gimple_stmt(FILE *, gimple, int, int);
-+extern void print_gimple_expr(FILE *, gimple, int, int);
-+extern void dump_gimple_stmt(pretty_printer *, gimple, int, int);
-+#endif
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+#define FOR_EACH_FUNCTION(node) for (node = cgraph_nodes; node; node = node->next)
-+#define FOR_EACH_VARIABLE(node) for (node = varpool_nodes; node; node = node->next)
-+#define PROP_loops 0
-+#define NODE_SYMBOL(node) (node)
-+#define NODE_DECL(node) (node)->decl
-+#define INSN_LOCATION(INSN) RTL_LOCATION(INSN)
-+
-+static inline int bb_loop_depth(const_basic_block bb)
-+{
-+ return bb->loop_father ? loop_depth(bb->loop_father) : 0;
-+}
-+
-+static inline bool gimple_store_p(gimple gs)
-+{
-+ tree lhs = gimple_get_lhs(gs);
-+ return lhs && !is_gimple_reg(lhs);
-+}
-+#endif
-+
-+#if BUILDING_GCC_VERSION == 4007 || BUILDING_GCC_VERSION == 4008
-+static inline struct cgraph_node *cgraph_alias_target(struct cgraph_node *n)
-+{
-+ return cgraph_alias_aliased_node(n);
-+}
-+#endif
-+
-+#if BUILDING_GCC_VERSION >= 4007 && BUILDING_GCC_VERSION <= 4009
-+#define cgraph_create_edge(caller, callee, call_stmt, count, freq, nest) \
-+ cgraph_create_edge((caller), (callee), (call_stmt), (count), (freq))
-+#define cgraph_create_edge_including_clones(caller, callee, old_call_stmt, call_stmt, count, freq, nest, reason) \
-+ cgraph_create_edge_including_clones((caller), (callee), (old_call_stmt), (call_stmt), (count), (freq), (reason))
-+#endif
-+
-+#if BUILDING_GCC_VERSION <= 4008
-+#define ENTRY_BLOCK_PTR_FOR_FN(FN) ENTRY_BLOCK_PTR_FOR_FUNCTION(FN)
-+#define EXIT_BLOCK_PTR_FOR_FN(FN) EXIT_BLOCK_PTR_FOR_FUNCTION(FN)
-+#define basic_block_info_for_fn(FN) ((FN)->cfg->x_basic_block_info)
-+#define n_basic_blocks_for_fn(FN) ((FN)->cfg->x_n_basic_blocks)
-+#define n_edges_for_fn(FN) ((FN)->cfg->x_n_edges)
-+#define last_basic_block_for_fn(FN) ((FN)->cfg->x_last_basic_block)
-+#define label_to_block_map_for_fn(FN) ((FN)->cfg->x_label_to_block_map)
-+#define profile_status_for_fn(FN) ((FN)->cfg->x_profile_status)
-+#define BASIC_BLOCK_FOR_FN(FN, N) BASIC_BLOCK_FOR_FUNCTION((FN), (N))
-+#define NODE_IMPLICIT_ALIAS(node) (node)->same_body_alias
-+
-+static inline bool tree_fits_shwi_p(const_tree t)
-+{
-+ if (t == NULL_TREE || TREE_CODE(t) != INTEGER_CST)
-+ return false;
-+
-+ if (TREE_INT_CST_HIGH(t) == 0 && (HOST_WIDE_INT)TREE_INT_CST_LOW(t) >= 0)
-+ return true;
-+
-+ if (TREE_INT_CST_HIGH(t) == -1 && (HOST_WIDE_INT)TREE_INT_CST_LOW(t) < 0 && !TYPE_UNSIGNED(TREE_TYPE(t)))
-+ return true;
-+
-+ return false;
-+}
-+
-+static inline bool tree_fits_uhwi_p(const_tree t)
-+{
-+ if (t == NULL_TREE || TREE_CODE(t) != INTEGER_CST)
-+ return false;
-+
-+ return TREE_INT_CST_HIGH(t) == 0;
-+}
-+
-+static inline HOST_WIDE_INT tree_to_shwi(const_tree t)
-+{
-+ gcc_assert(tree_fits_shwi_p(t));
-+ return TREE_INT_CST_LOW(t);
-+}
-+
-+static inline unsigned HOST_WIDE_INT tree_to_uhwi(const_tree t)
-+{
-+ gcc_assert(tree_fits_uhwi_p(t));
-+ return TREE_INT_CST_LOW(t);
-+}
-+
-+static inline const char *get_tree_code_name(enum tree_code code)
-+{
-+ gcc_assert(code < MAX_TREE_CODES);
-+ return tree_code_name[code];
-+}
-+
-+#define ipa_remove_stmt_references(cnode, stmt)
-+
-+typedef union gimple_statement_d gasm;
-+typedef union gimple_statement_d gassign;
-+typedef union gimple_statement_d gcall;
-+typedef union gimple_statement_d gcond;
-+typedef union gimple_statement_d gdebug;
-+typedef union gimple_statement_d gphi;
-+typedef union gimple_statement_d greturn;
-+
-+static inline gasm *as_a_gasm(gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline const gasm *as_a_const_gasm(const_gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline gassign *as_a_gassign(gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline const gassign *as_a_const_gassign(const_gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline gcall *as_a_gcall(gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline const gcall *as_a_const_gcall(const_gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline gcond *as_a_gcond(gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline const gcond *as_a_const_gcond(const_gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline gdebug *as_a_gdebug(gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline const gdebug *as_a_const_gdebug(const_gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline gphi *as_a_gphi(gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline const gphi *as_a_const_gphi(const_gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline greturn *as_a_greturn(gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline const greturn *as_a_const_greturn(const_gimple stmt)
-+{
-+ return stmt;
-+}
-+#endif
-+
-+#if BUILDING_GCC_VERSION == 4008
-+#define NODE_SYMBOL(node) (&(node)->symbol)
-+#define NODE_DECL(node) (node)->symbol.decl
-+#endif
-+
-+#if BUILDING_GCC_VERSION >= 4008
-+#define add_referenced_var(var)
-+#define mark_sym_for_renaming(var)
-+#define varpool_mark_needed_node(node)
-+#define create_var_ann(var)
-+#define TODO_dump_func 0
-+#define TODO_dump_cgraph 0
-+#endif
-+
-+#if BUILDING_GCC_VERSION <= 4009
-+#define TODO_verify_il 0
-+#define AVAIL_INTERPOSABLE AVAIL_OVERWRITABLE
-+
-+#define section_name_prefix LTO_SECTION_NAME_PREFIX
-+#define fatal_error(loc, gmsgid, ...) fatal_error((gmsgid), __VA_ARGS__)
-+
-+typedef struct rtx_def rtx_insn;
-+
-+static inline void set_decl_section_name(tree node, const char *value)
-+{
-+ DECL_SECTION_NAME(node) = build_string(strlen(value) + 1, value);
-+}
-+#endif
-+
-+#if BUILDING_GCC_VERSION == 4009
-+typedef struct gimple_statement_asm gasm;
-+typedef struct gimple_statement_base gassign;
-+typedef struct gimple_statement_call gcall;
-+typedef struct gimple_statement_base gcond;
-+typedef struct gimple_statement_base gdebug;
-+typedef struct gimple_statement_phi gphi;
-+typedef struct gimple_statement_base greturn;
-+
-+static inline gasm *as_a_gasm(gimple stmt)
-+{
-+ return as_a<gasm>(stmt);
-+}
-+
-+static inline const gasm *as_a_const_gasm(const_gimple stmt)
-+{
-+ return as_a<const gasm>(stmt);
-+}
-+
-+static inline gassign *as_a_gassign(gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline const gassign *as_a_const_gassign(const_gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline gcall *as_a_gcall(gimple stmt)
-+{
-+ return as_a<gcall>(stmt);
-+}
-+
-+static inline const gcall *as_a_const_gcall(const_gimple stmt)
-+{
-+ return as_a<const gcall>(stmt);
-+}
-+
-+static inline gcond *as_a_gcond(gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline const gcond *as_a_const_gcond(const_gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline gdebug *as_a_gdebug(gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline const gdebug *as_a_const_gdebug(const_gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline gphi *as_a_gphi(gimple stmt)
-+{
-+ return as_a<gphi>(stmt);
-+}
-+
-+static inline const gphi *as_a_const_gphi(const_gimple stmt)
-+{
-+ return as_a<const gphi>(stmt);
-+}
-+
-+static inline greturn *as_a_greturn(gimple stmt)
-+{
-+ return stmt;
-+}
-+
-+static inline const greturn *as_a_const_greturn(const_gimple stmt)
-+{
-+ return stmt;
-+}
-+#endif
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+#define TODO_ggc_collect 0
-+#define NODE_SYMBOL(node) (node)
-+#define NODE_DECL(node) (node)->decl
-+#define cgraph_node_name(node) (node)->name()
-+#define NODE_IMPLICIT_ALIAS(node) (node)->cpp_implicit_alias
-+#endif
-+
-+#if BUILDING_GCC_VERSION >= 5000
-+#define TODO_verify_ssa TODO_verify_il
-+#define TODO_verify_flow TODO_verify_il
-+#define TODO_verify_stmts TODO_verify_il
-+#define TODO_verify_rtl_sharing TODO_verify_il
-+
-+//#define TREE_INT_CST_HIGH(NODE) ({ TREE_INT_CST_EXT_NUNITS(NODE) > 1 ? (unsigned HOST_WIDE_INT)TREE_INT_CST_ELT(NODE, 1) : 0; })
-+
-+#define INSN_DELETED_P(insn) (insn)->deleted()
-+
-+// symtab/cgraph related
-+#define debug_cgraph_node(node) (node)->debug()
-+#define cgraph_get_node(decl) cgraph_node::get(decl)
-+#define cgraph_get_create_node(decl) cgraph_node::get_create(decl)
-+#define cgraph_n_nodes symtab->cgraph_count
-+#define cgraph_max_uid symtab->cgraph_max_uid
-+#define varpool_get_node(decl) varpool_node::get(decl)
-+
-+#define cgraph_create_edge(caller, callee, call_stmt, count, freq, nest) \
-+ (caller)->create_edge((callee), (call_stmt), (count), (freq))
-+#define cgraph_create_edge_including_clones(caller, callee, old_call_stmt, call_stmt, count, freq, nest, reason) \
-+ (caller)->create_edge_including_clones((callee), (old_call_stmt), (call_stmt), (count), (freq), (reason))
-+
-+typedef struct cgraph_node *cgraph_node_ptr;
-+typedef struct cgraph_edge *cgraph_edge_p;
-+typedef struct varpool_node *varpool_node_ptr;
-+
-+static inline void change_decl_assembler_name(tree decl, tree name)
-+{
-+ symtab->change_decl_assembler_name(decl, name);
-+}
-+
-+static inline void varpool_finalize_decl(tree decl)
-+{
-+ varpool_node::finalize_decl(decl);
-+}
-+
-+static inline void varpool_add_new_variable(tree decl)
-+{
-+ varpool_node::add(decl);
-+}
-+
-+static inline unsigned int rebuild_cgraph_edges(void)
-+{
-+ return cgraph_edge::rebuild_edges();
-+}
-+
-+static inline cgraph_node_ptr cgraph_function_node(cgraph_node_ptr node, enum availability *availability)
-+{
-+ return node->function_symbol(availability);
-+}
-+
-+static inline cgraph_node_ptr cgraph_function_or_thunk_node(cgraph_node_ptr node, enum availability *availability = NULL)
-+{
-+ return node->ultimate_alias_target(availability);
-+}
-+
-+static inline bool cgraph_only_called_directly_p(cgraph_node_ptr node)
-+{
-+ return node->only_called_directly_p();
-+}
-+
-+static inline enum availability cgraph_function_body_availability(cgraph_node_ptr node)
-+{
-+ return node->get_availability();
-+}
-+
-+static inline cgraph_node_ptr cgraph_alias_target(cgraph_node_ptr node)
-+{
-+ return node->get_alias_target();
-+}
-+
-+static inline struct cgraph_node_hook_list *cgraph_add_function_insertion_hook(cgraph_node_hook hook, void *data)
-+{
-+ return symtab->add_cgraph_insertion_hook(hook, data);
-+}
-+
-+static inline void cgraph_remove_function_insertion_hook(struct cgraph_node_hook_list *entry)
-+{
-+ symtab->remove_cgraph_insertion_hook(entry);
-+}
-+
-+static inline struct cgraph_node_hook_list *cgraph_add_node_removal_hook(cgraph_node_hook hook, void *data)
-+{
-+ return symtab->add_cgraph_removal_hook(hook, data);
-+}
-+
-+static inline void cgraph_remove_node_removal_hook(struct cgraph_node_hook_list *entry)
-+{
-+ symtab->remove_cgraph_removal_hook(entry);
-+}
-+
-+static inline struct cgraph_2node_hook_list *cgraph_add_node_duplication_hook(cgraph_2node_hook hook, void *data)
-+{
-+ return symtab->add_cgraph_duplication_hook(hook, data);
-+}
-+
-+static inline void cgraph_remove_node_duplication_hook(struct cgraph_2node_hook_list *entry)
-+{
-+ symtab->remove_cgraph_duplication_hook(entry);
-+}
-+
-+// gimple related
-+static inline gimple gimple_build_assign_with_ops(enum tree_code subcode, tree lhs, tree op1, tree op2 MEM_STAT_DECL)
-+{
-+ return gimple_build_assign(lhs, subcode, op1, op2 PASS_MEM_STAT);
-+}
-+
-+template <>
-+template <>
-+inline bool is_a_helper<const gassign *>::test(const_gimple gs)
-+{
-+ return gs->code == GIMPLE_ASSIGN;
-+}
-+
-+template <>
-+template <>
-+inline bool is_a_helper<const greturn *>::test(const_gimple gs)
-+{
-+ return gs->code == GIMPLE_RETURN;
-+}
-+
-+static inline gasm *as_a_gasm(gimple stmt)
-+{
-+ return as_a<gasm *>(stmt);
-+}
-+
-+static inline const gasm *as_a_const_gasm(const_gimple stmt)
-+{
-+ return as_a<const gasm *>(stmt);
-+}
-+
-+static inline gassign *as_a_gassign(gimple stmt)
-+{
-+ return as_a<gassign *>(stmt);
-+}
-+
-+static inline const gassign *as_a_const_gassign(const_gimple stmt)
-+{
-+ return as_a<const gassign *>(stmt);
-+}
-+
-+static inline gcall *as_a_gcall(gimple stmt)
-+{
-+ return as_a<gcall *>(stmt);
-+}
-+
-+static inline const gcall *as_a_const_gcall(const_gimple stmt)
-+{
-+ return as_a<const gcall *>(stmt);
-+}
-+
-+static inline gphi *as_a_gphi(gimple stmt)
-+{
-+ return as_a<gphi *>(stmt);
-+}
-+
-+static inline const gphi *as_a_const_gphi(const_gimple stmt)
-+{
-+ return as_a<const gphi *>(stmt);
-+}
-+
-+static inline greturn *as_a_greturn(gimple stmt)
-+{
-+ return as_a<greturn *>(stmt);
-+}
-+
-+static inline const greturn *as_a_const_greturn(const_gimple stmt)
-+{
-+ return as_a<const greturn *>(stmt);
-+}
-+
-+// IPA/LTO related
-+#define ipa_ref_list_referring_iterate(L,I,P) (L)->referring.iterate((I), &(P))
-+#define ipa_ref_list_reference_iterate(L,I,P) (L)->reference.iterate((I), &(P))
-+
-+static inline cgraph_node_ptr ipa_ref_referring_node(struct ipa_ref *ref)
-+{
-+ return dyn_cast<cgraph_node_ptr>(ref->referring);
-+}
-+
-+static inline void ipa_remove_stmt_references(symtab_node *referring_node, gimple stmt)
-+{
-+ referring_node->remove_stmt_references(stmt);
-+}
-+#endif
-+
-+#endif
-diff --git a/tools/gcc/gen-random-seed.sh b/tools/gcc/gen-random-seed.sh
-new file mode 100644
-index 0000000..7514850
---- /dev/null
-+++ b/tools/gcc/gen-random-seed.sh
-@@ -0,0 +1,8 @@
-+#!/bin/sh
-+
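-+# generate a 256-bit random seed (plus its sha256 hash) for the randstruct plugin,
-+# but only if it does not already exist so rebuilds keep using the same seed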
-+if [ ! -f "$1" ]; then
-+ SEED=`od -A n -t x8 -N 32 /dev/urandom | tr -d ' \n'`
-+ echo "const char *randstruct_seed = \"$SEED\";" > "$1"
-+ HASH=`echo -n "$SEED" | sha256sum | cut -d" " -f1 | tr -d ' \n'`
-+ echo "#define RANDSTRUCT_HASHED_SEED \"$HASH\"" > "$2"
-+fi
-diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
-new file mode 100644
-index 0000000..457d54e
---- /dev/null
-+++ b/tools/gcc/kallocstat_plugin.c
-@@ -0,0 +1,188 @@
-+/*
-+ * Copyright 2011-2015 by the PaX Team <pageexec@freemail.hu>
-+ * Licensed under the GPL v2
-+ *
-+ * Note: the choice of the license means that the compilation process is
-+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
-+ * but for the kernel it doesn't matter since it doesn't link against
-+ * any of the gcc libraries
-+ *
-+ * gcc plugin to find the distribution of k*alloc sizes
-+ *
-+ * TODO:
-+ *
-+ * BUGS:
-+ * - none known
-+ */
-+
-+#include "gcc-common.h"
-+
-+int plugin_is_GPL_compatible;
-+
-+static struct plugin_info kallocstat_plugin_info = {
-+ .version = "201401260140",
-+ .help = NULL
-+};
-+
-+static const char * const kalloc_functions[] = {
-+ "__kmalloc",
-+ "kmalloc",
-+ "kmalloc_large",
-+ "kmalloc_node",
-+ "kmalloc_order",
-+ "kmalloc_order_trace",
-+ "kmalloc_slab",
-+ "kzalloc",
-+ "kzalloc_node",
-+};
-+
-+static bool is_kalloc(const char *fnname)
-+{
-+ size_t i;
-+
-+ for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
-+ if (!strcmp(fnname, kalloc_functions[i]))
-+ return true;
-+ return false;
-+}
-+
-+static unsigned int execute_kallocstat(void)
-+{
-+ basic_block bb;
-+
-+ // 1. loop through BBs and GIMPLE statements
-+ FOR_EACH_BB_FN(bb, cfun) {
-+ gimple_stmt_iterator gsi;
-+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
-+ // gimple match:
-+ tree fndecl, size;
-+ gimple stmt;
-+ const char *fnname;
-+
-+ // is it a call
-+ stmt = gsi_stmt(gsi);
-+ if (!is_gimple_call(stmt))
-+ continue;
-+ fndecl = gimple_call_fndecl(stmt);
-+ if (fndecl == NULL_TREE)
-+ continue;
-+ if (TREE_CODE(fndecl) != FUNCTION_DECL)
-+ continue;
-+
-+ // is it a call to k*alloc
-+ fnname = DECL_NAME_POINTER(fndecl);
-+ if (!is_kalloc(fnname))
-+ continue;
-+
-+ // is the size arg const or the result of a simple const assignment
-+ size = gimple_call_arg(stmt, 0);
-+ while (true) {
-+ expanded_location xloc;
-+ size_t size_val;
-+
-+ if (TREE_CONSTANT(size)) {
-+ xloc = expand_location(gimple_location(stmt));
-+ if (!xloc.file)
-+ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
-+ size_val = TREE_INT_CST_LOW(size);
-+ fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
-+ break;
-+ }
-+
-+ if (TREE_CODE(size) != SSA_NAME)
-+ break;
-+ stmt = SSA_NAME_DEF_STMT(size);
-+//debug_gimple_stmt(stmt);
-+//debug_tree(size);
-+ if (!stmt || !is_gimple_assign(stmt))
-+ break;
-+ if (gimple_num_ops(stmt) != 2)
-+ break;
-+ size = gimple_assign_rhs1(stmt);
-+ }
-+//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
-+//debug_tree(gimple_call_fn(call_stmt));
-+//print_node(stderr, "pax", fndecl, 4);
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
-+static const struct pass_data kallocstat_pass_data = {
-+#else
-+static struct gimple_opt_pass kallocstat_pass = {
-+ .pass = {
-+#endif
-+ .type = GIMPLE_PASS,
-+ .name = "kallocstat",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 5000
-+#elif BUILDING_GCC_VERSION == 4009
-+ .has_gate = false,
-+ .has_execute = true,
-+#else
-+ .gate = NULL,
-+ .execute = execute_kallocstat,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = 0,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0,
-+ .todo_flags_finish = 0
-+#if BUILDING_GCC_VERSION < 4009
-+ }
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+class kallocstat_pass : public gimple_opt_pass {
-+public:
-+ kallocstat_pass() : gimple_opt_pass(kallocstat_pass_data, g) {}
-+#if BUILDING_GCC_VERSION >= 5000
-+ virtual unsigned int execute(function *) { return execute_kallocstat(); }
-+#else
-+ unsigned int execute() { return execute_kallocstat(); }
-+#endif
-+};
-+}
-+
-+static opt_pass *make_kallocstat_pass(void)
-+{
-+ return new kallocstat_pass();
-+}
-+#else
-+static struct opt_pass *make_kallocstat_pass(void)
-+{
-+ return &kallocstat_pass.pass;
-+}
-+#endif
-+
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
-+{
-+ const char * const plugin_name = plugin_info->base_name;
-+ struct register_pass_info kallocstat_pass_info;
-+
-+ kallocstat_pass_info.pass = make_kallocstat_pass();
-+ kallocstat_pass_info.reference_pass_name = "ssa";
-+ kallocstat_pass_info.ref_pass_instance_number = 1;
-+ kallocstat_pass_info.pos_op = PASS_POS_INSERT_AFTER;
-+
-+ if (!plugin_default_version_check(version, &gcc_version)) {
-+ error(G_("incompatible gcc/plugin versions"));
-+ return 1;
-+ }
-+
-+ register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
-+
-+ return 0;
-+}
-diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
-new file mode 100644
-index 0000000..b0d8255
---- /dev/null
-+++ b/tools/gcc/kernexec_plugin.c
-@@ -0,0 +1,547 @@
-+/*
-+ * Copyright 2011-2015 by the PaX Team <pageexec@freemail.hu>
-+ * Licensed under the GPL v2
-+ *
-+ * Note: the choice of the license means that the compilation process is
-+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
-+ * but for the kernel it doesn't matter since it doesn't link against
-+ * any of the gcc libraries
-+ *
-+ * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
-+ *
-+ * TODO:
-+ *
-+ * BUGS:
-+ * - none known
-+ */
-+
-+#include "gcc-common.h"
-+
-+int plugin_is_GPL_compatible;
-+
-+static struct plugin_info kernexec_plugin_info = {
-+ .version = "201401260140",
-+ .help = "method=[bts|or]\tinstrumentation method\n"
-+};
-+
-+static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
-+static void (*kernexec_instrument_retaddr)(rtx);
-+
-+/*
-+ * add special KERNEXEC instrumentation: reload %r12 after it has been clobbered
-+ */
-+static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
-+{
-+ gasm *asm_movabs_stmt;
-+
-+ // build asm volatile("movabs $0x8000000000000000, %%r12\n\t" : : : );
-+ asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r12\n\t", NULL, NULL, NULL, NULL);
-+ gimple_asm_set_volatile(asm_movabs_stmt, true);
-+ gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
-+ update_stmt(asm_movabs_stmt);
-+}
-+
-+/*
-+ * find all asm() stmts that clobber r12 and add a reload of r12
-+ */
-+static unsigned int execute_kernexec_reload(void)
-+{
-+ basic_block bb;
-+
-+ // 1. loop through BBs and GIMPLE statements
-+ FOR_EACH_BB_FN(bb, cfun) {
-+ gimple_stmt_iterator gsi;
-+
-+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
-+ // gimple match: __asm__ ("" : : : "r12");
-+ gimple stmt;
-+ gasm *asm_stmt;
-+ size_t nclobbers;
-+
-+ // is it an asm ...
-+ stmt = gsi_stmt(gsi);
-+ if (gimple_code(stmt) != GIMPLE_ASM)
-+ continue;
-+
-+ asm_stmt = as_a_gasm(stmt);
-+
-+ // ... clobbering r12
-+ nclobbers = gimple_asm_nclobbers(asm_stmt);
-+ while (nclobbers--) {
-+ tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
-+ if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r12"))
-+ continue;
-+ kernexec_reload_fptr_mask(&gsi);
-+//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
-+ break;
-+ }
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+/*
-+ * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
-+ * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
-+ */
-+static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
-+{
-+ gimple assign_intptr, assign_new_fptr;
-+ gcall *call_stmt;
-+ tree intptr, orptr, old_fptr, new_fptr, kernexec_mask;
-+
-+ call_stmt = as_a_gcall(gsi_stmt(*gsi));
-+ old_fptr = gimple_call_fn(call_stmt);
-+
-+ // create temporary unsigned long variable used for bitops and cast fptr to it
-+ intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
-+ add_referenced_var(intptr);
-+ intptr = make_ssa_name(intptr, NULL);
-+ assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
-+ SSA_NAME_DEF_STMT(intptr) = assign_intptr;
-+ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
-+ update_stmt(assign_intptr);
-+
-+ // apply logical or to temporary unsigned long and bitmask
-+ kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
-+// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
-+ orptr = fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask);
-+ intptr = make_ssa_name(SSA_NAME_VAR(intptr), NULL);
-+ assign_intptr = gimple_build_assign(intptr, orptr);
-+ SSA_NAME_DEF_STMT(intptr) = assign_intptr;
-+ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
-+ update_stmt(assign_intptr);
-+
-+ // cast temporary unsigned long back to a temporary fptr variable
-+ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
-+ add_referenced_var(new_fptr);
-+ new_fptr = make_ssa_name(new_fptr, NULL);
-+ assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
-+ SSA_NAME_DEF_STMT(new_fptr) = assign_new_fptr;
-+ gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
-+ update_stmt(assign_new_fptr);
-+
-+ // replace call stmt fn with the new fptr
-+ gimple_call_set_fn(call_stmt, new_fptr);
-+ update_stmt(call_stmt);
-+}
-+
-+static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
-+{
-+ gasm *asm_or_stmt;
-+ gcall *call_stmt;
-+ tree old_fptr, new_fptr, input, output;
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC(tree, gc) *inputs = NULL;
-+ VEC(tree, gc) *outputs = NULL;
-+#else
-+ vec<tree, va_gc> *inputs = NULL;
-+ vec<tree, va_gc> *outputs = NULL;
-+#endif
-+
-+ call_stmt = as_a_gcall(gsi_stmt(*gsi));
-+ old_fptr = gimple_call_fn(call_stmt);
-+
-+ // create temporary fptr variable
-+ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
-+ add_referenced_var(new_fptr);
-+ new_fptr = make_ssa_name(new_fptr, NULL);
-+
-+ // build asm volatile("orq %%r12, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
-+ input = build_tree_list(NULL_TREE, build_string(2, "0"));
-+ input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
-+ output = build_tree_list(NULL_TREE, build_string(3, "=r"));
-+ output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC_safe_push(tree, gc, inputs, input);
-+ VEC_safe_push(tree, gc, outputs, output);
-+#else
-+ vec_safe_push(inputs, input);
-+ vec_safe_push(outputs, output);
-+#endif
-+ asm_or_stmt = gimple_build_asm_vec("orq %%r12, %0\n\t", inputs, outputs, NULL, NULL);
-+ SSA_NAME_DEF_STMT(new_fptr) = asm_or_stmt;
-+ gimple_asm_set_volatile(asm_or_stmt, true);
-+ gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
-+ update_stmt(asm_or_stmt);
-+
-+ // replace call stmt fn with the new fptr
-+ gimple_call_set_fn(call_stmt, new_fptr);
-+ update_stmt(call_stmt);
-+}
-+
-+/*
-+ * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
-+ */
-+static unsigned int execute_kernexec_fptr(void)
-+{
-+ basic_block bb;
-+
-+ // 1. loop through BBs and GIMPLE statements
-+ FOR_EACH_BB_FN(bb, cfun) {
-+ gimple_stmt_iterator gsi;
-+
-+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
-+ // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
-+ tree fn;
-+ gimple stmt;
-+ gcall *call_stmt;
-+
-+ // is it a call ...
-+ stmt = gsi_stmt(gsi);
-+ if (!is_gimple_call(stmt))
-+ continue;
-+ call_stmt = as_a_gcall(stmt);
-+ fn = gimple_call_fn(call_stmt);
-+ if (TREE_CODE(fn) == ADDR_EXPR)
-+ continue;
-+ if (TREE_CODE(fn) != SSA_NAME)
-+ gcc_unreachable();
-+
-+ // ... through a function pointer
-+ if (SSA_NAME_VAR(fn) != NULL_TREE) {
-+ fn = SSA_NAME_VAR(fn);
-+ if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL) {
-+ debug_tree(fn);
-+ gcc_unreachable();
-+ }
-+ }
-+ fn = TREE_TYPE(fn);
-+ if (TREE_CODE(fn) != POINTER_TYPE)
-+ continue;
-+ fn = TREE_TYPE(fn);
-+ if (TREE_CODE(fn) != FUNCTION_TYPE)
-+ continue;
-+
-+ kernexec_instrument_fptr(&gsi);
-+
-+//debug_tree(gimple_call_fn(call_stmt));
-+//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
-+static void kernexec_instrument_retaddr_bts(rtx insn)
-+{
-+ rtx btsq;
-+ rtvec argvec, constraintvec, labelvec;
-+ int line;
-+
-+ // create asm volatile("btsq $63,(%%rsp)":::)
-+ argvec = rtvec_alloc(0);
-+ constraintvec = rtvec_alloc(0);
-+ labelvec = rtvec_alloc(0);
-+ line = expand_location(RTL_LOCATION(insn)).line;
-+ btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
-+ MEM_VOLATILE_P(btsq) = 1;
-+// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
-+ emit_insn_before(btsq, insn);
-+}
-+
-+// add special KERNEXEC instrumentation: orq %r12,(%rsp) just before retn
-+static void kernexec_instrument_retaddr_or(rtx insn)
-+{
-+ rtx orq;
-+ rtvec argvec, constraintvec, labelvec;
-+ int line;
-+
-+ // create asm volatile("orq %%r12,(%%rsp)":::)
-+ argvec = rtvec_alloc(0);
-+ constraintvec = rtvec_alloc(0);
-+ labelvec = rtvec_alloc(0);
-+ line = expand_location(RTL_LOCATION(insn)).line;
-+ orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r12,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
-+ MEM_VOLATILE_P(orq) = 1;
-+// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
-+ emit_insn_before(orq, insn);
-+}
-+
-+/*
-+ * find all asm level function returns and forcibly set the highest bit of the return address
-+ */
-+static unsigned int execute_kernexec_retaddr(void)
-+{
-+ rtx_insn *insn;
-+
-+// if (stack_realign_drap)
-+// inform(DECL_SOURCE_LOCATION(current_function_decl), "drap detected in %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
-+
-+ // 1. find function returns
-+ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
-+ // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
-+ // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
-+ // (jump_insn 97 96 98 6 (simple_return) fptr.c:50 -1 (nil) -> simple_return)
-+ rtx body;
-+
-+ // is it a retn
-+ if (!JUMP_P(insn))
-+ continue;
-+ body = PATTERN(insn);
-+ if (GET_CODE(body) == PARALLEL)
-+ body = XVECEXP(body, 0, 0);
-+ if (!ANY_RETURN_P(body))
-+ continue;
-+ kernexec_instrument_retaddr(insn);
-+ }
-+
-+// print_simple_rtl(stderr, get_insns());
-+// print_rtl(stderr, get_insns());
-+
-+ return 0;
-+}
-+
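-+// gate: instrument only code compiled with the kernel code model, and skip functions placed in .vsyscall_* sections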
-+static bool kernexec_cmodel_check(void)
-+{
-+ tree section;
-+
-+ if (ix86_cmodel != CM_KERNEL)
-+ return false;
-+
-+ section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
-+ if (!section || !TREE_VALUE(section))
-+ return true;
-+
-+ section = TREE_VALUE(TREE_VALUE(section));
-+ if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
-+ return true;
-+
-+ return false;
-+}
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
-+static const struct pass_data kernexec_reload_pass_data = {
-+#else
-+static struct gimple_opt_pass kernexec_reload_pass = {
-+ .pass = {
-+#endif
-+ .type = GIMPLE_PASS,
-+ .name = "kernexec_reload",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 5000
-+#elif BUILDING_GCC_VERSION == 4009
-+ .has_gate = true,
-+ .has_execute = true,
-+#else
-+ .gate = kernexec_cmodel_check,
-+ .execute = execute_kernexec_reload,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = 0,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0,
-+ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
-+#if BUILDING_GCC_VERSION < 4009
-+ }
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+static const struct pass_data kernexec_fptr_pass_data = {
-+#else
-+static struct gimple_opt_pass kernexec_fptr_pass = {
-+ .pass = {
-+#endif
-+ .type = GIMPLE_PASS,
-+ .name = "kernexec_fptr",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 5000
-+#elif BUILDING_GCC_VERSION == 4009
-+ .has_gate = true,
-+ .has_execute = true,
-+#else
-+ .gate = kernexec_cmodel_check,
-+ .execute = execute_kernexec_fptr,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = 0,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0,
-+ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
-+#if BUILDING_GCC_VERSION < 4009
-+ }
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+static const struct pass_data kernexec_retaddr_pass_data = {
-+#else
-+static struct rtl_opt_pass kernexec_retaddr_pass = {
-+ .pass = {
-+#endif
-+ .type = RTL_PASS,
-+ .name = "kernexec_retaddr",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 5000
-+#elif BUILDING_GCC_VERSION == 4009
-+ .has_gate = true,
-+ .has_execute = true,
-+#else
-+ .gate = kernexec_cmodel_check,
-+ .execute = execute_kernexec_retaddr,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = 0,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0,
-+ .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
-+#if BUILDING_GCC_VERSION < 4009
-+ }
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+class kernexec_reload_pass : public gimple_opt_pass {
-+public:
-+ kernexec_reload_pass() : gimple_opt_pass(kernexec_reload_pass_data, g) {}
-+#if BUILDING_GCC_VERSION >= 5000
-+ virtual bool gate(function *) { return kernexec_cmodel_check(); }
-+ virtual unsigned int execute(function *) { return execute_kernexec_reload(); }
-+#else
-+ bool gate() { return kernexec_cmodel_check(); }
-+ unsigned int execute() { return execute_kernexec_reload(); }
-+#endif
-+};
-+
-+class kernexec_fptr_pass : public gimple_opt_pass {
-+public:
-+ kernexec_fptr_pass() : gimple_opt_pass(kernexec_fptr_pass_data, g) {}
-+#if BUILDING_GCC_VERSION >= 5000
-+ virtual bool gate(function *) { return kernexec_cmodel_check(); }
-+ virtual unsigned int execute(function *) { return execute_kernexec_fptr(); }
-+#else
-+ bool gate() { return kernexec_cmodel_check(); }
-+ unsigned int execute() { return execute_kernexec_fptr(); }
-+#endif
-+};
-+
-+class kernexec_retaddr_pass : public rtl_opt_pass {
-+public:
-+ kernexec_retaddr_pass() : rtl_opt_pass(kernexec_retaddr_pass_data, g) {}
-+#if BUILDING_GCC_VERSION >= 5000
-+ virtual bool gate(function *) { return kernexec_cmodel_check(); }
-+ virtual unsigned int execute(function *) { return execute_kernexec_retaddr(); }
-+#else
-+ bool gate() { return kernexec_cmodel_check(); }
-+ unsigned int execute() { return execute_kernexec_retaddr(); }
-+#endif
-+};
-+}
-+
-+static opt_pass *make_kernexec_reload_pass(void)
-+{
-+ return new kernexec_reload_pass();
-+}
-+
-+static opt_pass *make_kernexec_fptr_pass(void)
-+{
-+ return new kernexec_fptr_pass();
-+}
-+
-+static opt_pass *make_kernexec_retaddr_pass(void)
-+{
-+ return new kernexec_retaddr_pass();
-+}
-+#else
-+static struct opt_pass *make_kernexec_reload_pass(void)
-+{
-+ return &kernexec_reload_pass.pass;
-+}
-+
-+static struct opt_pass *make_kernexec_fptr_pass(void)
-+{
-+ return &kernexec_fptr_pass.pass;
-+}
-+
-+static struct opt_pass *make_kernexec_retaddr_pass(void)
-+{
-+ return &kernexec_retaddr_pass.pass;
-+}
-+#endif
-+
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
-+{
-+ const char * const plugin_name = plugin_info->base_name;
-+ const int argc = plugin_info->argc;
-+ const struct plugin_argument * const argv = plugin_info->argv;
-+ int i;
-+ struct register_pass_info kernexec_reload_pass_info;
-+ struct register_pass_info kernexec_fptr_pass_info;
-+ struct register_pass_info kernexec_retaddr_pass_info;
-+
-+ kernexec_reload_pass_info.pass = make_kernexec_reload_pass();
-+ kernexec_reload_pass_info.reference_pass_name = "ssa";
-+ kernexec_reload_pass_info.ref_pass_instance_number = 1;
-+ kernexec_reload_pass_info.pos_op = PASS_POS_INSERT_AFTER;
-+
-+ kernexec_fptr_pass_info.pass = make_kernexec_fptr_pass();
-+ kernexec_fptr_pass_info.reference_pass_name = "ssa";
-+ kernexec_fptr_pass_info.ref_pass_instance_number = 1;
-+ kernexec_fptr_pass_info.pos_op = PASS_POS_INSERT_AFTER;
-+
-+ kernexec_retaddr_pass_info.pass = make_kernexec_retaddr_pass();
-+ kernexec_retaddr_pass_info.reference_pass_name = "pro_and_epilogue";
-+ kernexec_retaddr_pass_info.ref_pass_instance_number = 1;
-+ kernexec_retaddr_pass_info.pos_op = PASS_POS_INSERT_AFTER;
-+
-+ if (!plugin_default_version_check(version, &gcc_version)) {
-+ error(G_("incompatible gcc/plugin versions"));
-+ return 1;
-+ }
-+
-+ register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
-+
-+ if (TARGET_64BIT == 0)
-+ return 0;
-+
-+ for (i = 0; i < argc; ++i) {
-+ if (!strcmp(argv[i].key, "method")) {
-+ if (!argv[i].value) {
-+ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
-+ continue;
-+ }
-+ if (!strcmp(argv[i].value, "bts")) {
-+ kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
-+ kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
-+ } else if (!strcmp(argv[i].value, "or")) {
-+ kernexec_instrument_fptr = kernexec_instrument_fptr_or;
-+ kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
-+ fix_register("r12", 1, 1);
-+ } else
-+ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
-+ continue;
-+ }
-+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
-+ }
-+ if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
-+ error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
-+
-+ if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
-+
-+ return 0;
-+}
-diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
-new file mode 100644
-index 0000000..ac6f9b4
---- /dev/null
-+++ b/tools/gcc/latent_entropy_plugin.c
-@@ -0,0 +1,474 @@
-+/*
-+ * Copyright 2012-2015 by the PaX Team <pageexec@freemail.hu>
-+ * Licensed under the GPL v2
-+ *
-+ * Note: the choice of the license means that the compilation process is
-+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
-+ * but for the kernel it doesn't matter since it doesn't link against
-+ * any of the gcc libraries
-+ *
-+ * gcc plugin to help generate a little bit of entropy from program state,
-+ * used throughout the uptime of the kernel
-+ *
-+ * TODO:
-+ * - add ipa pass to identify not explicitly marked candidate functions
-+ * - mix in more program state (function arguments/return values, loop variables, etc)
-+ * - more instrumentation control via attribute parameters
-+ *
-+ * BUGS:
-+ * - none known
-+ */
-+
-+#include "gcc-common.h"
-+
-+int plugin_is_GPL_compatible;
-+
-+static GTY(()) tree latent_entropy_decl;
-+
-+static struct plugin_info latent_entropy_plugin_info = {
-+ .version = "201504282240",
-+ .help = NULL
-+};
-+
-+static unsigned HOST_WIDE_INT seed;
-+static unsigned HOST_WIDE_INT get_random_const(void)
-+{
-+ unsigned int i;
-+ unsigned HOST_WIDE_INT ret = 0;
-+
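-+	// derive a deterministic pseudo-random constant by stepping the seed with a simple shift-and-xor (LFSR-style) recurrence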
-+ for (i = 0; i < 8 * sizeof ret; i++) {
-+ ret = (ret << 1) | (seed & 1);
-+ seed >>= 1;
-+ if (ret & 1)
-+ seed ^= 0xD800000000000000ULL;
-+ }
-+
-+ return ret;
-+}
-+
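-+// validate the latent_entropy attribute and give marked global variables a random compile-time initializer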
-+static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
-+{
-+ tree type;
-+ unsigned long long mask;
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC(constructor_elt, gc) *vals;
-+#else
-+ vec<constructor_elt, va_gc> *vals;
-+#endif
-+
-+ switch (TREE_CODE(*node)) {
-+ default:
-+ *no_add_attrs = true;
-+ error("%qE attribute only applies to functions and variables", name);
-+ break;
-+
-+ case VAR_DECL:
-+ if (DECL_INITIAL(*node)) {
-+ *no_add_attrs = true;
-+ error("variable %qD with %qE attribute must not be initialized", *node, name);
-+ break;
-+ }
-+
-+ if (!TREE_STATIC(*node)) {
-+ *no_add_attrs = true;
-+ error("variable %qD with %qE attribute must not be local", *node, name);
-+ break;
-+ }
-+
-+ type = TREE_TYPE(*node);
-+ switch (TREE_CODE(type)) {
-+ default:
-+ *no_add_attrs = true;
-+ error("variable %qD with %qE attribute must be an integer or a fixed length integer array type or a fixed sized structure with integer fields", *node, name);
-+ break;
-+
-+ case RECORD_TYPE: {
-+ tree field;
-+ unsigned int nelt = 0;
-+
-+ for (field = TYPE_FIELDS(type); field; nelt++, field = TREE_CHAIN(field)) {
-+ tree fieldtype;
-+
-+ fieldtype = TREE_TYPE(field);
-+ if (TREE_CODE(fieldtype) != INTEGER_TYPE) {
-+ *no_add_attrs = true;
-+ error("structure variable %qD with %qE attribute has a non-integer field %qE", *node, name, field);
-+ break;
-+ }
-+ }
-+
-+ if (field)
-+ break;
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ vals = VEC_alloc(constructor_elt, gc, nelt);
-+#else
-+ vec_alloc(vals, nelt);
-+#endif
-+
-+ for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
-+ tree fieldtype;
-+
-+ fieldtype = TREE_TYPE(field);
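-+			// build an all-ones mask of the field's bit width without shifting by the full width (which would be undefined)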
-+ mask = 1ULL << (TREE_INT_CST_LOW(TYPE_SIZE(fieldtype)) - 1);
-+ mask = 2 * (mask - 1) + 1;
-+
-+ if (TYPE_UNSIGNED(fieldtype))
-+ CONSTRUCTOR_APPEND_ELT(vals, field, build_int_cstu(fieldtype, mask & get_random_const()));
-+ else
-+ CONSTRUCTOR_APPEND_ELT(vals, field, build_int_cst(fieldtype, mask & get_random_const()));
-+ }
-+
-+ DECL_INITIAL(*node) = build_constructor(type, vals);
-+//debug_tree(DECL_INITIAL(*node));
-+ break;
-+ }
-+
-+ case INTEGER_TYPE:
-+ mask = 1ULL << (TREE_INT_CST_LOW(TYPE_SIZE(type)) - 1);
-+ mask = 2 * (mask - 1) + 1;
-+
-+ if (TYPE_UNSIGNED(type))
-+ DECL_INITIAL(*node) = build_int_cstu(type, mask & get_random_const());
-+ else
-+ DECL_INITIAL(*node) = build_int_cst(type, mask & get_random_const());
-+ break;
-+
-+ case ARRAY_TYPE: {
-+ tree elt_type, array_size, elt_size;
-+ unsigned int i, nelt;
-+
-+ elt_type = TREE_TYPE(type);
-+ elt_size = TYPE_SIZE_UNIT(TREE_TYPE(type));
-+ array_size = TYPE_SIZE_UNIT(type);
-+
-+ if (TREE_CODE(elt_type) != INTEGER_TYPE || !array_size || TREE_CODE(array_size) != INTEGER_CST) {
-+ *no_add_attrs = true;
-+ error("array variable %qD with %qE attribute must be a fixed length integer array type", *node, name);
-+ break;
-+ }
-+
-+ nelt = TREE_INT_CST_LOW(array_size) / TREE_INT_CST_LOW(elt_size);
-+#if BUILDING_GCC_VERSION <= 4007
-+ vals = VEC_alloc(constructor_elt, gc, nelt);
-+#else
-+ vec_alloc(vals, nelt);
-+#endif
-+
-+ mask = 1ULL << (TREE_INT_CST_LOW(TYPE_SIZE(elt_type)) - 1);
-+ mask = 2 * (mask - 1) + 1;
-+
-+ for (i = 0; i < nelt; i++)
-+ if (TYPE_UNSIGNED(elt_type))
-+ CONSTRUCTOR_APPEND_ELT(vals, size_int(i), build_int_cstu(elt_type, mask & get_random_const()));
-+ else
-+ CONSTRUCTOR_APPEND_ELT(vals, size_int(i), build_int_cst(elt_type, mask & get_random_const()));
-+
-+ DECL_INITIAL(*node) = build_constructor(type, vals);
-+//debug_tree(DECL_INITIAL(*node));
-+ break;
-+ }
-+ }
-+ break;
-+
-+ case FUNCTION_DECL:
-+ break;
-+ }
-+
-+ return NULL_TREE;
-+}
-+
-+static struct attribute_spec latent_entropy_attr = {
-+ .name = "latent_entropy",
-+ .min_length = 0,
-+ .max_length = 0,
-+ .decl_required = true,
-+ .type_required = false,
-+ .function_type_required = false,
-+ .handler = handle_latent_entropy_attribute,
-+#if BUILDING_GCC_VERSION >= 4007
-+ .affects_type_identity = false
-+#endif
-+};
-+
-+static void register_attributes(void *event_data, void *data)
-+{
-+ register_attribute(&latent_entropy_attr);
-+}
-+
-+static bool gate_latent_entropy(void)
-+{
-+ // don't bother with noreturn functions for now
-+ if (TREE_THIS_VOLATILE(current_function_decl))
-+ return false;
-+
-+ // gcc-4.5 doesn't discover some trivial noreturn functions
-+ if (EDGE_COUNT(EXIT_BLOCK_PTR_FOR_FN(cfun)->preds) == 0)
-+ return false;
-+
-+ return lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl)) != NULL_TREE;
-+}
-+
-+static enum tree_code get_op(tree *rhs)
-+{
-+ static enum tree_code op;
-+ unsigned HOST_WIDE_INT random_const;
-+
-+ random_const = get_random_const();
-+
-+ switch (op) {
-+ case BIT_XOR_EXPR:
-+ op = PLUS_EXPR;
-+ break;
-+
-+ case PLUS_EXPR:
-+ if (rhs) {
-+ op = LROTATE_EXPR;
-+ random_const &= HOST_BITS_PER_WIDE_INT - 1;
-+ break;
-+ }
-+
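-+		// deliberate fall through when rhs is NULL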
-+ case LROTATE_EXPR:
-+ default:
-+ op = BIT_XOR_EXPR;
-+ break;
-+ }
-+ if (rhs)
-+ *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
-+ return op;
-+}
-+
-+static void perturb_local_entropy(basic_block bb, tree local_entropy)
-+{
-+ gimple_stmt_iterator gsi;
-+ gimple assign;
-+ tree addxorrol, rhs;
-+ enum tree_code op;
-+
-+ op = get_op(&rhs);
-+ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
-+ assign = gimple_build_assign(local_entropy, addxorrol);
-+ gsi = gsi_after_labels(bb);
-+ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
-+ update_stmt(assign);
-+//debug_bb(bb);
-+}
-+
-+static void perturb_latent_entropy(basic_block bb, tree rhs)
-+{
-+ gimple_stmt_iterator gsi;
-+ gimple assign;
-+ tree addxorrol, temp;
-+
-+ // 1. create temporary copy of latent_entropy
-+ temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
-+ add_referenced_var(temp);
-+
-+ // 2. read...
-+ temp = make_ssa_name(temp, NULL);
-+ assign = gimple_build_assign(temp, latent_entropy_decl);
-+ SSA_NAME_DEF_STMT(temp) = assign;
-+ add_referenced_var(latent_entropy_decl);
-+ gsi = gsi_after_labels(bb);
-+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
-+ update_stmt(assign);
-+
-+ // 3. ...modify...
-+ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
-+ temp = make_ssa_name(SSA_NAME_VAR(temp), NULL);
-+ assign = gimple_build_assign(temp, addxorrol);
-+ SSA_NAME_DEF_STMT(temp) = assign;
-+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
-+ update_stmt(assign);
-+
-+ // 4. ...write latent_entropy
-+ assign = gimple_build_assign(latent_entropy_decl, temp);
-+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
-+ update_stmt(assign);
-+}
-+
-+static unsigned int execute_latent_entropy(void)
-+{
-+ basic_block bb;
-+ gimple assign;
-+ gimple_stmt_iterator gsi;
-+ tree local_entropy;
-+
-+ if (!latent_entropy_decl) {
-+#if BUILDING_GCC_VERSION >= 4009
-+ varpool_node *node;
-+#else
-+ struct varpool_node *node;
-+#endif
-+
-+ FOR_EACH_VARIABLE(node) {
-+ tree var = NODE_DECL(node);
-+
-+ if (DECL_NAME_LENGTH(var) < sizeof("latent_entropy") - 1)
-+ continue;
-+ if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
-+ continue;
-+ latent_entropy_decl = var;
-+// debug_tree(var);
-+ break;
-+ }
-+ if (!latent_entropy_decl) {
-+// debug_tree(current_function_decl);
-+ return 0;
-+ }
-+ }
-+
-+//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
-+
-+ // 1. create local entropy variable
-+ local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
-+ add_referenced_var(local_entropy);
-+ mark_sym_for_renaming(local_entropy);
-+
-+ // 2. initialize local entropy variable
-+ bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest;
-+ if (dom_info_available_p(CDI_DOMINATORS))
-+ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR_FOR_FN(cfun));
-+ gsi = gsi_start_bb(bb);
-+
-+ assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
-+// gimple_set_location(assign, loc);
-+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
-+ update_stmt(assign);
-+//debug_bb(bb);
-+ gcc_assert(single_succ_p(bb));
-+ bb = single_succ(bb);
-+
-+ // 3. instrument each BB with an operation on the local entropy variable
-+ while (bb != EXIT_BLOCK_PTR_FOR_FN(cfun)) {
-+ perturb_local_entropy(bb, local_entropy);
-+//debug_bb(bb);
-+ bb = bb->next_bb;
-+ };
-+	}
-+ // 4. mix local entropy into the global entropy variable
-+ gcc_assert(single_pred_p(EXIT_BLOCK_PTR_FOR_FN(cfun)));
-+ perturb_latent_entropy(single_pred(EXIT_BLOCK_PTR_FOR_FN(cfun)), local_entropy);
-+//debug_bb(single_pred(EXIT_BLOCK_PTR_FOR_FN(cfun)));
-+ return 0;
-+}
-+
-+static void latent_entropy_start_unit(void *gcc_data, void *user_data)
-+{
-+ tree latent_entropy_type;
-+
-+ seed = get_random_seed(false);
-+
-+ if (in_lto_p)
-+ return;
-+
-+ // extern volatile u64 latent_entropy
-+ gcc_assert(TYPE_PRECISION(long_long_unsigned_type_node) == 64);
-+ latent_entropy_type = build_qualified_type(long_long_unsigned_type_node, TYPE_QUALS(long_long_unsigned_type_node) | TYPE_QUAL_VOLATILE);
-+ latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), latent_entropy_type);
-+
-+ TREE_STATIC(latent_entropy_decl) = 1;
-+ TREE_PUBLIC(latent_entropy_decl) = 1;
-+ TREE_USED(latent_entropy_decl) = 1;
-+ DECL_PRESERVE_P(latent_entropy_decl) = 1;
-+ TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
-+ DECL_EXTERNAL(latent_entropy_decl) = 1;
-+ DECL_ARTIFICIAL(latent_entropy_decl) = 1;
-+ lang_hooks.decls.pushdecl(latent_entropy_decl);
-+// DECL_ASSEMBLER_NAME(latent_entropy_decl);
-+// varpool_finalize_decl(latent_entropy_decl);
-+// varpool_mark_needed_node(latent_entropy_decl);
-+}
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
-+static const struct pass_data latent_entropy_pass_data = {
-+#else
-+static struct gimple_opt_pass latent_entropy_pass = {
-+ .pass = {
-+#endif
-+ .type = GIMPLE_PASS,
-+ .name = "latent_entropy",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 5000
-+#elif BUILDING_GCC_VERSION == 4009
-+ .has_gate = true,
-+ .has_execute = true,
-+#else
-+ .gate = gate_latent_entropy,
-+ .execute = execute_latent_entropy,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = PROP_gimple_leh | PROP_cfg,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
-+ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
-+#if BUILDING_GCC_VERSION < 4009
-+ }
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+class latent_entropy_pass : public gimple_opt_pass {
-+public:
-+ latent_entropy_pass() : gimple_opt_pass(latent_entropy_pass_data, g) {}
-+#if BUILDING_GCC_VERSION >= 5000
-+ virtual bool gate(function *) { return gate_latent_entropy(); }
-+ virtual unsigned int execute(function *) { return execute_latent_entropy(); }
-+#else
-+ bool gate() { return gate_latent_entropy(); }
-+ unsigned int execute() { return execute_latent_entropy(); }
-+#endif
-+};
-+}
-+
-+static opt_pass *make_latent_entropy_pass(void)
-+{
-+ return new latent_entropy_pass();
-+}
-+#else
-+static struct opt_pass *make_latent_entropy_pass(void)
-+{
-+ return &latent_entropy_pass.pass;
-+}
-+#endif
-+
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
-+{
-+ const char * const plugin_name = plugin_info->base_name;
-+ struct register_pass_info latent_entropy_pass_info;
-+
-+ latent_entropy_pass_info.pass = make_latent_entropy_pass();
-+ latent_entropy_pass_info.reference_pass_name = "optimized";
-+ latent_entropy_pass_info.ref_pass_instance_number = 1;
-+ latent_entropy_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
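-+	// register latent_entropy_decl as a GGC root so gcc's garbage collector keeps the decl alive across collections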
-+ static const struct ggc_root_tab gt_ggc_r_gt_latent_entropy[] = {
-+ {
-+ .base = &latent_entropy_decl,
-+ .nelt = 1,
-+ .stride = sizeof(latent_entropy_decl),
-+ .cb = &gt_ggc_mx_tree_node,
-+ .pchw = &gt_pch_nx_tree_node
-+ },
-+ LAST_GGC_ROOT_TAB
-+ };
-+
-+ if (!plugin_default_version_check(version, &gcc_version)) {
-+ error(G_("incompatible gcc/plugin versions"));
-+ return 1;
-+ }
-+
-+ register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
-+ register_callback(plugin_name, PLUGIN_START_UNIT, &latent_entropy_start_unit, NULL);
-+ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_latent_entropy);
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
-+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
-+
-+ return 0;
-+}
-diff --git a/tools/gcc/randomize_layout_plugin.c b/tools/gcc/randomize_layout_plugin.c
-new file mode 100644
-index 0000000..40dcfa9
---- /dev/null
-+++ b/tools/gcc/randomize_layout_plugin.c
-@@ -0,0 +1,922 @@
-+/*
-+ * Copyright 2014,2015 by Open Source Security, Inc., Brad Spengler <spender@grsecurity.net>
-+ * and PaX Team <pageexec@freemail.hu>
-+ * Licensed under the GPL v2
-+ *
-+ * Usage:
-+ * $ # for 4.5/4.6/C based 4.7
-+ * $ gcc -I`gcc -print-file-name=plugin`/include -I`gcc -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o randomize_layout_plugin.so randomize_layout_plugin.c
-+ * $ # for C++ based 4.7/4.8+
-+ * $ g++ -I`g++ -print-file-name=plugin`/include -I`g++ -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o randomize_layout_plugin.so randomize_layout_plugin.c
-+ * $ gcc -fplugin=./randomize_layout_plugin.so test.c -O2
-+ */
-+
-+#include "gcc-common.h"
-+#include "randomize_layout_seed.h"
-+
-+#if BUILDING_GCC_MAJOR < 4 || (BUILDING_GCC_MAJOR == 4 && BUILDING_GCC_MINOR < 6) || \
-+ (BUILDING_GCC_MAJOR == 4 && BUILDING_GCC_MINOR == 6 && BUILDING_GCC_PATCHLEVEL < 4)
-+#error "The RANDSTRUCT plugin requires GCC 4.6.4 or newer."
-+#endif
-+
-+#define ORIG_TYPE_NAME(node) \
-+ (TYPE_NAME(TYPE_MAIN_VARIANT(node)) != NULL_TREE ? ((const unsigned char *)IDENTIFIER_POINTER(TYPE_NAME(TYPE_MAIN_VARIANT(node)))) : (const unsigned char *)"anonymous")
-+
-+int plugin_is_GPL_compatible;
-+
-+static int performance_mode;
-+
-+static struct plugin_info randomize_layout_plugin_info = {
-+ .version = "201402201816",
-+ .help = "disable\t\t\tdo not activate plugin\n"
-+ "performance-mode\tenable cacheline-aware layout randomization\n"
-+};
-+
-+/* from old Linux dcache.h */
-+static inline unsigned long
-+partial_name_hash(unsigned long c, unsigned long prevhash)
-+{
-+ return (prevhash + (c << 4) + (c >> 4)) * 11;
-+}
-+static inline unsigned int
-+name_hash(const unsigned char *name)
-+{
-+ unsigned long hash = 0;
-+ unsigned int len = strlen((const char *)name);
-+ while (len--)
-+ hash = partial_name_hash(*name++, hash);
-+ return (unsigned int)hash;
-+}
-+
-+static tree handle_randomize_layout_attr(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
-+{
-+ tree type;
-+
-+ *no_add_attrs = true;
-+ if (TREE_CODE(*node) == FUNCTION_DECL) {
-+ error("%qE attribute does not apply to functions (%qF)", name, *node);
-+ return NULL_TREE;
-+ }
-+
-+ if (TREE_CODE(*node) == PARM_DECL) {
-+ error("%qE attribute does not apply to function parameters (%qD)", name, *node);
-+ return NULL_TREE;
-+ }
-+
-+ if (TREE_CODE(*node) == VAR_DECL) {
-+ error("%qE attribute does not apply to variables (%qD)", name, *node);
-+ return NULL_TREE;
-+ }
-+
-+ if (TYPE_P(*node)) {
-+ type = *node;
-+ } else {
-+ gcc_assert(TREE_CODE(*node) == TYPE_DECL);
-+ type = TREE_TYPE(*node);
-+ }
-+
-+ if (TREE_CODE(type) != RECORD_TYPE) {
-+ error("%qE attribute used on %qT applies to struct types only", name, type);
-+ return NULL_TREE;
-+ }
-+
-+ if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
-+ error("%qE attribute is already applied to the type %qT", name, type);
-+ return NULL_TREE;
-+ }
-+
-+ *no_add_attrs = false;
-+
-+ return NULL_TREE;
-+}
-+
-+/* set on complete types that we don't need to inspect further at all */
-+static tree handle_randomize_considered_attr(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
-+{
-+ *no_add_attrs = false;
-+ return NULL_TREE;
-+}
-+
-+/*
-+ * set on types that we've performed a shuffle on, to prevent re-shuffling
-+ * this does not preclude us from inspecting its fields for potential shuffles
-+ */
-+static tree handle_randomize_performed_attr(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
-+{
-+ *no_add_attrs = false;
-+ return NULL_TREE;
-+}
-+
-+/*
-+ * 64bit variant of Bob Jenkins' public domain PRNG
-+ * 256 bits of internal state
-+ */
-+
-+typedef unsigned long long u64;
-+
-+typedef struct ranctx { u64 a; u64 b; u64 c; u64 d; } ranctx;
-+
-+#define rot(x,k) (((x)<<(k))|((x)>>(64-(k))))
-+static u64 ranval(ranctx *x) {
-+ u64 e = x->a - rot(x->b, 7);
-+ x->a = x->b ^ rot(x->c, 13);
-+ x->b = x->c + rot(x->d, 37);
-+ x->c = x->d + e;
-+ x->d = e + x->a;
-+ return x->d;
-+}
-+
-+static void raninit(ranctx *x, u64 *seed) {
-+ int i;
-+
-+ x->a = seed[0];
-+ x->b = seed[1];
-+ x->c = seed[2];
-+ x->d = seed[3];
-+
-+ for (i=0; i < 30; ++i)
-+ (void)ranval(x);
-+}
-+
-+static u64 shuffle_seed[4];
-+
-+struct partition_group {
-+ tree tree_start;
-+ unsigned long start;
-+ unsigned long length;
-+};
-+
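-+/* group consecutive fields into runs of about one cacheline (64 bytes) for performance-mode shuffling */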
-+static void partition_struct(tree *fields, unsigned long length, struct partition_group *size_groups, unsigned long *num_groups)
-+{
-+ unsigned long i;
-+ unsigned long accum_size = 0;
-+ unsigned long accum_length = 0;
-+ unsigned long group_idx = 0;
-+
-+ gcc_assert(length < INT_MAX);
-+
-+ memset(size_groups, 0, sizeof(struct partition_group) * length);
-+
-+ for (i = 0; i < length; i++) {
-+ if (size_groups[group_idx].tree_start == NULL_TREE) {
-+ size_groups[group_idx].tree_start = fields[i];
-+ size_groups[group_idx].start = i;
-+ accum_length = 0;
-+ accum_size = 0;
-+ }
-+ accum_size += (unsigned long)int_size_in_bytes(TREE_TYPE(fields[i]));
-+ accum_length++;
-+ if (accum_size >= 64) {
-+ size_groups[group_idx].length = accum_length;
-+ accum_length = 0;
-+ group_idx++;
-+ }
-+ }
-+
-+ if (size_groups[group_idx].tree_start != NULL_TREE &&
-+ !size_groups[group_idx].length) {
-+ size_groups[group_idx].length = accum_length;
-+ group_idx++;
-+ }
-+
-+ *num_groups = group_idx;
-+}
-+
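-+/* performance mode: randomize field order only within each cacheline-sized group, leaving bitfield members in place */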
-+static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prng_state)
-+{
-+ unsigned long i, x;
-+ struct partition_group size_group[length];
-+ unsigned long num_groups = 0;
-+ unsigned long randnum;
-+
-+ partition_struct(newtree, length, (struct partition_group *)&size_group, &num_groups);
-+ for (i = num_groups - 1; i > 0; i--) {
-+ struct partition_group tmp;
-+ randnum = ranval(prng_state) % (i + 1);
-+ tmp = size_group[i];
-+ size_group[i] = size_group[randnum];
-+ size_group[randnum] = tmp;
-+ }
-+
-+ for (x = 0; x < num_groups; x++) {
-+ for (i = size_group[x].start + size_group[x].length - 1; i > size_group[x].start; i--) {
-+ tree tmp;
-+ if (DECL_BIT_FIELD_TYPE(newtree[i]))
-+ continue;
-+ randnum = ranval(prng_state) % (i + 1);
-+ // we could handle this case differently if desired
-+ if (DECL_BIT_FIELD_TYPE(newtree[randnum]))
-+ continue;
-+ tmp = newtree[i];
-+ newtree[i] = newtree[randnum];
-+ newtree[randnum] = tmp;
-+ }
-+ }
-+}
-+
-+static void full_shuffle(tree *newtree, unsigned long length, ranctx *prng_state)
-+{
-+ unsigned long i, randnum;
-+
-+ for (i = length - 1; i > 0; i--) {
-+ tree tmp;
-+ randnum = ranval(prng_state) % (i + 1);
-+ tmp = newtree[i];
-+ newtree[i] = newtree[randnum];
-+ newtree[randnum] = tmp;
-+ }
-+}
-+
-+/* modern in-place Fisher-Yates shuffle */
-+static void shuffle(const_tree type, tree *newtree, unsigned long length)
-+{
-+ unsigned long i;
-+ u64 seed[4];
-+ ranctx prng_state;
-+ const unsigned char *structname;
-+
-+ if (length == 0)
-+ return;
-+
-+ gcc_assert(TREE_CODE(type) == RECORD_TYPE);
-+
-+ structname = ORIG_TYPE_NAME(type);
-+
-+#ifdef __DEBUG_PLUGIN
-+ fprintf(stderr, "Shuffling struct %s %p\n", (const char *)structname, type);
-+#ifdef __DEBUG_VERBOSE
-+ debug_tree((tree)type);
-+#endif
-+#endif
-+
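-+	// mix the struct name into the global seed so each struct gets its own deterministic, reproducible layout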
-+ for (i = 0; i < 4; i++) {
-+ seed[i] = shuffle_seed[i];
-+ seed[i] ^= name_hash(structname);
-+ }
-+
-+ raninit(&prng_state, (u64 *)&seed);
-+
-+ if (performance_mode)
-+ performance_shuffle(newtree, length, &prng_state);
-+ else
-+ full_shuffle(newtree, length, &prng_state);
-+}
-+
-+static bool is_flexible_array(const_tree field)
-+{
-+ const_tree fieldtype;
-+ const_tree typesize;
-+ const_tree elemtype;
-+ const_tree elemsize;
-+
-+ fieldtype = TREE_TYPE(field);
-+ typesize = TYPE_SIZE(fieldtype);
-+
-+ if (TREE_CODE(fieldtype) != ARRAY_TYPE)
-+ return false;
-+
-+ elemtype = TREE_TYPE(fieldtype);
-+ elemsize = TYPE_SIZE(elemtype);
-+
-+ /* size of type is represented in bits */
-+
-+ if (typesize == NULL_TREE && TYPE_DOMAIN(fieldtype) != NULL_TREE &&
-+ TYPE_MAX_VALUE(TYPE_DOMAIN(fieldtype)) == NULL_TREE)
-+ return true;
-+
-+ if (typesize != NULL_TREE &&
-+ (TREE_CONSTANT(typesize) && (!TREE_INT_CST_LOW(typesize) ||
-+ TREE_INT_CST_LOW(typesize) == TREE_INT_CST_LOW(elemsize))))
-+ return true;
-+
-+ return false;
-+}
-+
-+static int relayout_struct(tree type)
-+{
-+ unsigned long num_fields = (unsigned long)list_length(TYPE_FIELDS(type));
-+ unsigned long shuffle_length = num_fields;
-+ tree field;
-+ tree newtree[num_fields];
-+ unsigned long i;
-+ tree list;
-+ tree variant;
-+ expanded_location xloc;
-+
-+ if (TYPE_FIELDS(type) == NULL_TREE)
-+ return 0;
-+
-+ if (num_fields < 2)
-+ return 0;
-+
-+ gcc_assert(TREE_CODE(type) == RECORD_TYPE);
-+
-+ gcc_assert(num_fields < INT_MAX);
-+
-+ if (lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(type)) ||
-+ lookup_attribute("no_randomize_layout", TYPE_ATTRIBUTES(TYPE_MAIN_VARIANT(type))))
-+ return 0;
-+
-+ /* Workaround for 3rd-party VirtualBox source that we can't modify ourselves */
-+ if (!strcmp((const char *)ORIG_TYPE_NAME(type), "INTNETTRUNKFACTORY") ||
-+ !strcmp((const char *)ORIG_TYPE_NAME(type), "RAWPCIFACTORY"))
-+ return 0;
-+
-+ /* throw out any structs in uapi */
-+ xloc = expand_location(DECL_SOURCE_LOCATION(TYPE_FIELDS(type)));
-+
-+ if (strstr(xloc.file, "/uapi/"))
-+ error(G_("attempted to randomize userland API struct %s"), ORIG_TYPE_NAME(type));
-+
-+ for (field = TYPE_FIELDS(type), i = 0; field; field = TREE_CHAIN(field), i++) {
-+ gcc_assert(TREE_CODE(field) == FIELD_DECL);
-+ newtree[i] = field;
-+ }
-+
-+ /*
-+ * enforce that we don't randomize the layout of the last
-+ * element of a struct if it's a 0 or 1-length array
-+ * or a proper flexible array
-+ */
-+ if (is_flexible_array(newtree[num_fields - 1]))
-+ shuffle_length--;
-+
-+ shuffle(type, (tree *)newtree, shuffle_length);
-+
-+ /*
-+ * set up a bogus anonymous struct field designed to error out on unnamed struct initializers
-+ * as gcc provides no other way to detect such code
-+ */
-+ list = make_node(FIELD_DECL);
-+ TREE_CHAIN(list) = newtree[0];
-+ TREE_TYPE(list) = void_type_node;
-+ DECL_SIZE(list) = bitsize_zero_node;
-+ DECL_NONADDRESSABLE_P(list) = 1;
-+ DECL_FIELD_BIT_OFFSET(list) = bitsize_zero_node;
-+ DECL_SIZE_UNIT(list) = size_zero_node;
-+ DECL_FIELD_OFFSET(list) = size_zero_node;
-+ DECL_CONTEXT(list) = type;
-+ // to satisfy the constify plugin
-+ TREE_READONLY(list) = 1;
-+
-+ for (i = 0; i < num_fields - 1; i++)
-+ TREE_CHAIN(newtree[i]) = newtree[i+1];
-+ TREE_CHAIN(newtree[num_fields - 1]) = NULL_TREE;
-+
-+ for (variant = TYPE_MAIN_VARIANT(type); variant; variant = TYPE_NEXT_VARIANT(variant)) {
-+ TYPE_FIELDS(variant) = list;
-+ TYPE_ATTRIBUTES(variant) = copy_list(TYPE_ATTRIBUTES(variant));
-+ TYPE_ATTRIBUTES(variant) = tree_cons(get_identifier("randomize_performed"), NULL_TREE, TYPE_ATTRIBUTES(variant));
-+ // force a re-layout
-+ TYPE_SIZE(variant) = NULL_TREE;
-+ layout_type(variant);
-+ }
-+
-+ return 1;
-+}
-+
-+/* from constify plugin */
-+static const_tree get_field_type(const_tree field)
-+{
-+ return strip_array_types(TREE_TYPE(field));
-+}
-+
-+/* from constify plugin */
-+static bool is_fptr(const_tree fieldtype)
-+{
-+ if (TREE_CODE(fieldtype) != POINTER_TYPE)
-+ return false;
-+
-+ return TREE_CODE(TREE_TYPE(fieldtype)) == FUNCTION_TYPE;
-+}
-+
-+/* derived from constify plugin */
-+static int is_pure_ops_struct(const_tree node)
-+{
-+ const_tree field;
-+
-+ gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE);
-+
-+ for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
-+ const_tree fieldtype = get_field_type(field);
-+ enum tree_code code = TREE_CODE(fieldtype);
-+
-+ if (node == fieldtype)
-+ continue;
-+
-+ if (!is_fptr(fieldtype))
-+ return 0;
-+
-+ if (code != RECORD_TYPE && code != UNION_TYPE)
-+ continue;
-+
-+ if (!is_pure_ops_struct(fieldtype))
-+ return 0;
-+ }
-+
-+ return 1;
-+}
-+
-+static void randomize_type(tree type)
-+{
-+ tree variant;
-+
-+ gcc_assert(TREE_CODE(type) == RECORD_TYPE);
-+
-+ if (lookup_attribute("randomize_considered", TYPE_ATTRIBUTES(type)))
-+ return;
-+
-+ if (lookup_attribute("randomize_layout", TYPE_ATTRIBUTES(TYPE_MAIN_VARIANT(type))) || is_pure_ops_struct(type))
-+ relayout_struct(type);
-+
-+ for (variant = TYPE_MAIN_VARIANT(type); variant; variant = TYPE_NEXT_VARIANT(variant)) {
-+ TYPE_ATTRIBUTES(type) = copy_list(TYPE_ATTRIBUTES(type));
-+ TYPE_ATTRIBUTES(type) = tree_cons(get_identifier("randomize_considered"), NULL_TREE, TYPE_ATTRIBUTES(type));
-+ }
-+#ifdef __DEBUG_PLUGIN
-+ fprintf(stderr, "Marking randomize_considered on struct %s\n", ORIG_TYPE_NAME(type));
-+#ifdef __DEBUG_VERBOSE
-+ debug_tree(type);
-+#endif
-+#endif
-+}
-+
-+static void randomize_layout_finish_decl(void *event_data, void *data)
-+{
-+ tree decl = (tree)event_data;
-+ tree type;
-+
-+ if (decl == NULL_TREE || decl == error_mark_node)
-+ return;
-+
-+ type = TREE_TYPE(decl);
-+
-+ if (TREE_CODE(decl) != VAR_DECL)
-+ return;
-+
-+ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
-+ return;
-+
-+ if (!lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(type)))
-+ return;
-+
-+ relayout_decl(decl);
-+}
-+
-+static void finish_type(void *event_data, void *data)
-+{
-+ tree type = (tree)event_data;
-+
-+ if (type == NULL_TREE || type == error_mark_node)
-+ return;
-+
-+ if (TREE_CODE(type) != RECORD_TYPE)
-+ return;
-+
-+ if (TYPE_FIELDS(type) == NULL_TREE)
-+ return;
-+
-+ if (lookup_attribute("randomize_considered", TYPE_ATTRIBUTES(type)))
-+ return;
-+
-+#ifdef __DEBUG_PLUGIN
-+ fprintf(stderr, "Calling randomize_type on %s\n", ORIG_TYPE_NAME(type));
-+#endif
-+#ifdef __DEBUG_VERBOSE
-+ debug_tree(type);
-+#endif
-+ randomize_type(type);
-+
-+ return;
-+}
-+
-+static struct attribute_spec randomize_layout_attr = {
-+ .name = "randomize_layout",
-+ // related to args
-+ .min_length = 0,
-+ .max_length = 0,
-+ .decl_required = false,
-+ // need type declaration
-+ .type_required = true,
-+ .function_type_required = false,
-+ .handler = handle_randomize_layout_attr,
-+#if BUILDING_GCC_VERSION >= 4007
-+ .affects_type_identity = true
-+#endif
-+};
-+
-+static struct attribute_spec no_randomize_layout_attr = {
-+ .name = "no_randomize_layout",
-+ // related to args
-+ .min_length = 0,
-+ .max_length = 0,
-+ .decl_required = false,
-+ // need type declaration
-+ .type_required = true,
-+ .function_type_required = false,
-+ .handler = handle_randomize_layout_attr,
-+#if BUILDING_GCC_VERSION >= 4007
-+ .affects_type_identity = true
-+#endif
-+};
-+
-+static struct attribute_spec randomize_considered_attr = {
-+ .name = "randomize_considered",
-+ // related to args
-+ .min_length = 0,
-+ .max_length = 0,
-+ .decl_required = false,
-+ // need type declaration
-+ .type_required = true,
-+ .function_type_required = false,
-+ .handler = handle_randomize_considered_attr,
-+#if BUILDING_GCC_VERSION >= 4007
-+ .affects_type_identity = false
-+#endif
-+};
-+
-+static struct attribute_spec randomize_performed_attr = {
-+ .name = "randomize_performed",
-+ // related to args
-+ .min_length = 0,
-+ .max_length = 0,
-+ .decl_required = false,
-+ // need type declaration
-+ .type_required = true,
-+ .function_type_required = false,
-+ .handler = handle_randomize_performed_attr,
-+#if BUILDING_GCC_VERSION >= 4007
-+ .affects_type_identity = false
-+#endif
-+};
-+
-+static void register_attributes(void *event_data, void *data)
-+{
-+ register_attribute(&randomize_layout_attr);
-+ register_attribute(&no_randomize_layout_attr);
-+ register_attribute(&randomize_considered_attr);
-+ register_attribute(&randomize_performed_attr);
-+}
-+
-+static void check_bad_casts_in_constructor(tree var, tree init)
-+{
-+ unsigned HOST_WIDE_INT idx;
-+ tree field, val;
-+ tree field_type, val_type;
-+
-+ FOR_EACH_CONSTRUCTOR_ELT(CONSTRUCTOR_ELTS(init), idx, field, val) {
-+ if (TREE_CODE(val) == CONSTRUCTOR) {
-+ check_bad_casts_in_constructor(var, val);
-+ continue;
-+ }
-+
-+		/* pipacs' plugin creates franken-arrays whose constructor elements, unlike
-+		   those built from normal code, may lack a valid 'field' tree; work around this */
-+ if (field == NULL_TREE)
-+ continue;
-+ field_type = TREE_TYPE(field);
-+ val_type = TREE_TYPE(val);
-+
-+ if (TREE_CODE(field_type) != POINTER_TYPE || TREE_CODE(val_type) != POINTER_TYPE)
-+ continue;
-+
-+ if (field_type == val_type)
-+ continue;
-+
-+ field_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(field_type))));
-+ val_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(val_type))));
-+
-+ if (field_type == void_type_node)
-+ continue;
-+ if (field_type == val_type)
-+ continue;
-+ if (TREE_CODE(val_type) != RECORD_TYPE)
-+ continue;
-+
-+ if (!lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(val_type)))
-+ continue;
-+ inform(DECL_SOURCE_LOCATION(var), "found mismatched struct pointer types: %qT and %qT\n", TYPE_MAIN_VARIANT(field_type), TYPE_MAIN_VARIANT(val_type));
-+ }
-+}
-+
-+/* derived from the constify plugin */
-+static void check_global_variables(void *event_data, void *data)
-+{
-+ struct varpool_node *node;
-+ tree init;
-+
-+ FOR_EACH_VARIABLE(node) {
-+ tree var = NODE_DECL(node);
-+ init = DECL_INITIAL(var);
-+ if (init == NULL_TREE)
-+ continue;
-+
-+ if (TREE_CODE(init) != CONSTRUCTOR)
-+ continue;
-+
-+ check_bad_casts_in_constructor(var, init);
-+ }
-+}
-+
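-+/* detect whether bb is dominated by an "IS_ERR(rhs) != 0" test so find_bad_casts() can skip casts guarded by error-pointer checks */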
-+static bool dominated_by_is_err(const_tree rhs, basic_block bb)
-+{
-+ basic_block dom;
-+ gimple dom_stmt;
-+ gimple call_stmt;
-+ const_tree dom_lhs;
-+ const_tree poss_is_err_cond;
-+ const_tree poss_is_err_func;
-+ const_tree is_err_arg;
-+
-+ dom = get_immediate_dominator(CDI_DOMINATORS, bb);
-+ if (!dom)
-+ return false;
-+
-+ dom_stmt = last_stmt(dom);
-+ if (!dom_stmt)
-+ return false;
-+
-+ if (gimple_code(dom_stmt) != GIMPLE_COND)
-+ return false;
-+
-+ if (gimple_cond_code(dom_stmt) != NE_EXPR)
-+ return false;
-+
-+ if (!integer_zerop(gimple_cond_rhs(dom_stmt)))
-+ return false;
-+
-+ poss_is_err_cond = gimple_cond_lhs(dom_stmt);
-+
-+ if (TREE_CODE(poss_is_err_cond) != SSA_NAME)
-+ return false;
-+
-+ call_stmt = SSA_NAME_DEF_STMT(poss_is_err_cond);
-+
-+ if (gimple_code(call_stmt) != GIMPLE_CALL)
-+ return false;
-+
-+ dom_lhs = gimple_get_lhs(call_stmt);
-+ poss_is_err_func = gimple_call_fndecl(call_stmt);
-+ if (!poss_is_err_func)
-+ return false;
-+ if (dom_lhs != poss_is_err_cond)
-+ return false;
-+ if (strcmp(DECL_NAME_POINTER(poss_is_err_func), "IS_ERR"))
-+ return false;
-+
-+ is_err_arg = gimple_call_arg(call_stmt, 0);
-+ if (!is_err_arg)
-+ return false;
-+
-+ if (is_err_arg != rhs)
-+ return false;
-+
-+ return true;
-+}
-+
-+static void handle_local_var_initializers(void)
-+{
-+ tree var;
-+ unsigned int i;
-+
-+ FOR_EACH_LOCAL_DECL(cfun, i, var) {
-+ tree init = DECL_INITIAL(var);
-+ if (!init)
-+ continue;
-+ if (TREE_CODE(init) != CONSTRUCTOR)
-+ continue;
-+ check_bad_casts_in_constructor(var, init);
-+ }
-+}
-+
-+/*
-+ * iterate over all statements to find "bad" casts:
-+ * those where the address of the start of a structure is cast
-+ * to a pointer of a structure of a different type, or a
-+ * structure pointer type is cast to a different structure pointer type
-+ */
-+static unsigned int find_bad_casts(void)
-+{
-+ basic_block bb;
-+
-+ handle_local_var_initializers();
-+
-+ FOR_ALL_BB_FN(bb, cfun) {
-+ gimple_stmt_iterator gsi;
-+
-+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
-+ gimple stmt;
-+ const_tree lhs;
-+ const_tree lhs_type;
-+ const_tree rhs1;
-+ const_tree rhs_type;
-+ const_tree ptr_lhs_type;
-+ const_tree ptr_rhs_type;
-+ const_tree op0;
-+ const_tree op0_type;
-+ enum tree_code rhs_code;
-+
-+ stmt = gsi_stmt(gsi);
-+
-+#ifdef __DEBUG_PLUGIN
-+#ifdef __DEBUG_VERBOSE
-+ debug_gimple_stmt(stmt);
-+ debug_tree(gimple_get_lhs(stmt));
-+#endif
-+#endif
-+
-+ if (gimple_code(stmt) != GIMPLE_ASSIGN)
-+ continue;
-+
-+#ifdef __DEBUG_PLUGIN
-+#ifdef __DEBUG_VERBOSE
-+ debug_tree(gimple_assign_rhs1(stmt));
-+#endif
-+#endif
-+
-+ rhs_code = gimple_assign_rhs_code(stmt);
-+
-+ if (rhs_code != ADDR_EXPR && rhs_code != SSA_NAME)
-+ continue;
-+
-+ lhs = gimple_get_lhs(stmt);
-+ lhs_type = TREE_TYPE(lhs);
-+ rhs1 = gimple_assign_rhs1(stmt);
-+ rhs_type = TREE_TYPE(rhs1);
-+
-+ if (TREE_CODE(rhs_type) != POINTER_TYPE ||
-+ TREE_CODE(lhs_type) != POINTER_TYPE)
-+ continue;
-+
-+ ptr_lhs_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(lhs_type))));
-+ ptr_rhs_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(rhs_type))));
-+
-+ if (ptr_rhs_type == void_type_node)
-+ continue;
-+
-+ if (ptr_lhs_type == void_type_node)
-+ continue;
-+
-+ if (dominated_by_is_err(rhs1, bb))
-+ continue;
-+
-+ if (TREE_CODE(ptr_rhs_type) != RECORD_TYPE) {
-+#ifndef __DEBUG_PLUGIN
-+ if (lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(ptr_lhs_type)))
-+#endif
-+ inform(gimple_location(stmt), "found mismatched struct pointer types: %qT and %qT\n", ptr_lhs_type, ptr_rhs_type);
-+ continue;
-+ }
-+
-+ if (rhs_code == SSA_NAME && ptr_lhs_type == ptr_rhs_type)
-+ continue;
-+
-+ if (rhs_code == ADDR_EXPR) {
-+ op0 = TREE_OPERAND(rhs1, 0);
-+
-+ if (op0 == NULL_TREE)
-+ continue;
-+
-+ if (TREE_CODE(op0) != VAR_DECL)
-+ continue;
-+
-+ op0_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(op0))));
-+ if (op0_type == ptr_lhs_type)
-+ continue;
-+
-+#ifndef __DEBUG_PLUGIN
-+ if (lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(op0_type)))
-+#endif
-+ inform(gimple_location(stmt), "found mismatched struct pointer types: %qT and %qT\n", ptr_lhs_type, op0_type);
-+ } else {
-+ const_tree ssa_name_var = SSA_NAME_VAR(rhs1);
-+ /* skip bogus type casts introduced by container_of */
-+ if (ssa_name_var != NULL_TREE && DECL_NAME(ssa_name_var) &&
-+ !strcmp((const char *)DECL_NAME_POINTER(ssa_name_var), "__mptr"))
-+ continue;
-+#ifndef __DEBUG_PLUGIN
-+ if (lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(ptr_rhs_type)))
-+#endif
-+ inform(gimple_location(stmt), "found mismatched struct pointer types: %qT and %qT\n", ptr_lhs_type, ptr_rhs_type);
-+ }
-+
-+ }
-+ }
-+ return 0;
-+}
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+static const struct pass_data randomize_layout_bad_cast_data = {
-+#else
-+static struct gimple_opt_pass randomize_layout_bad_cast = {
-+ .pass = {
-+#endif
-+ .type = GIMPLE_PASS,
-+ .name = "randomize_layout_bad_cast",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 5000
-+#elif BUILDING_GCC_VERSION >= 4009
-+ .has_gate = false,
-+ .has_execute = true,
-+#else
-+ .gate = NULL,
-+ .execute = find_bad_casts,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = PROP_cfg,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0,
-+ .todo_flags_finish = TODO_dump_func
-+#if BUILDING_GCC_VERSION < 4009
-+ }
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
-+class randomize_layout_bad_cast : public gimple_opt_pass {
-+public:
-+ randomize_layout_bad_cast() : gimple_opt_pass(randomize_layout_bad_cast_data, g) {}
-+#if BUILDING_GCC_VERSION >= 5000
-+ virtual unsigned int execute(function *) { return find_bad_casts(); }
-+#else
-+ unsigned int execute() { return find_bad_casts(); }
-+#endif
-+};
-+}
-+#endif
-+
-+static struct opt_pass *make_randomize_layout_bad_cast(void)
-+{
-+#if BUILDING_GCC_VERSION >= 4009
-+ return new randomize_layout_bad_cast();
-+#else
-+ return &randomize_layout_bad_cast.pass;
-+#endif
-+}
-+
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
-+{
-+ int i;
-+ const char * const plugin_name = plugin_info->base_name;
-+ const int argc = plugin_info->argc;
-+ const struct plugin_argument * const argv = plugin_info->argv;
-+ bool enable = true;
-+ int obtained_seed = 0;
-+ struct register_pass_info randomize_layout_bad_cast_info;
-+
-+ randomize_layout_bad_cast_info.pass = make_randomize_layout_bad_cast();
-+ randomize_layout_bad_cast_info.reference_pass_name = "ssa";
-+ randomize_layout_bad_cast_info.ref_pass_instance_number = 1;
-+ randomize_layout_bad_cast_info.pos_op = PASS_POS_INSERT_AFTER;
-+
-+ if (!plugin_default_version_check(version, &gcc_version)) {
-+ error(G_("incompatible gcc/plugin versions"));
-+ return 1;
-+ }
-+
-+ if (strncmp(lang_hooks.name, "GNU C", 5) && !strncmp(lang_hooks.name, "GNU C+", 6)) {
-+ inform(UNKNOWN_LOCATION, G_("%s supports C only, not %s"), plugin_name, lang_hooks.name);
-+ enable = false;
-+ }
-+
-+ for (i = 0; i < argc; ++i) {
-+ if (!strcmp(argv[i].key, "disable")) {
-+ enable = false;
-+ continue;
-+ }
-+ if (!strcmp(argv[i].key, "performance-mode")) {
-+ performance_mode = 1;
-+ continue;
-+ }
-+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
-+ }
-+
-+ if (strlen(randstruct_seed) != 64) {
-+ error(G_("invalid seed value supplied for %s plugin"), plugin_name);
-+ return 1;
-+ }
-+ obtained_seed = sscanf(randstruct_seed, "%016llx%016llx%016llx%016llx",
-+ &shuffle_seed[0], &shuffle_seed[1], &shuffle_seed[2], &shuffle_seed[3]);
-+ if (obtained_seed != 4) {
-+ error(G_("Invalid seed supplied for %s plugin"), plugin_name);
-+ return 1;
-+ }
-+
-+ register_callback(plugin_name, PLUGIN_INFO, NULL, &randomize_layout_plugin_info);
-+ if (enable) {
-+ register_callback(plugin_name, PLUGIN_ALL_IPA_PASSES_START, check_global_variables, NULL);
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &randomize_layout_bad_cast_info);
-+ register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
-+ register_callback(plugin_name, PLUGIN_FINISH_DECL, randomize_layout_finish_decl, NULL);
-+ }
-+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
-+
-+ return 0;
-+}
-diff --git a/tools/gcc/size_overflow_plugin/.gitignore b/tools/gcc/size_overflow_plugin/.gitignore
-new file mode 100644
-index 0000000..92d3b0c
---- /dev/null
-+++ b/tools/gcc/size_overflow_plugin/.gitignore
-@@ -0,0 +1,2 @@
-+size_overflow_hash.h
-+size_overflow_hash_aux.h
-diff --git a/tools/gcc/size_overflow_plugin/Makefile b/tools/gcc/size_overflow_plugin/Makefile
-new file mode 100644
-index 0000000..1ae2ed5
---- /dev/null
-+++ b/tools/gcc/size_overflow_plugin/Makefile
-@@ -0,0 +1,20 @@
-+$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
-+always := $($(HOSTLIBS)-y)
-+
-+size_overflow_plugin-objs := $(patsubst $(srctree)/$(src)/%.c,%.o,$(wildcard $(srctree)/$(src)/*.c))
-+
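-+# make every plugin object depend on the two generated hash headers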
-+$(patsubst $(srctree)/$(src)/%.c,$(obj)/%.o,$(wildcard $(srctree)/$(src)/*.c)): $(objtree)/$(obj)/size_overflow_hash.h $(objtree)/$(obj)/size_overflow_hash_aux.h
-+
-+quiet_cmd_build_size_overflow_hash = GENHASH $@
-+ cmd_build_size_overflow_hash = \
-+ $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -s size_overflow_hash -d $< -o $@
-+$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE
-+ $(call if_changed,build_size_overflow_hash)
-+
-+quiet_cmd_build_size_overflow_hash_aux = GENHASH $@
-+ cmd_build_size_overflow_hash_aux = \
-+ $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -s size_overflow_hash_aux -d $< -o $@
-+$(objtree)/$(obj)/size_overflow_hash_aux.h: $(src)/size_overflow_hash_aux.data FORCE
-+ $(call if_changed,build_size_overflow_hash_aux)
-+
-+targets += size_overflow_hash.h size_overflow_hash_aux.h
-diff --git a/tools/gcc/size_overflow_plugin/generate_size_overflow_hash.sh b/tools/gcc/size_overflow_plugin/generate_size_overflow_hash.sh
-new file mode 100644
-index 0000000..12b1e3b
---- /dev/null
-+++ b/tools/gcc/size_overflow_plugin/generate_size_overflow_hash.sh
-@@ -0,0 +1,102 @@
-+#!/bin/bash
-+
-+# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c).
-+
-+header1="size_overflow_hash.h"
-+database="size_overflow_hash.data"
-+n=65536
-+hashtable_name="size_overflow_hash"
-+
-+usage() {
-+cat <<EOF
-+usage: $0 options
-+OPTIONS:
-+ -h|--help help
-+ -o header file
-+ -d database file
-+ -n hash array size
-+ -s name of the hash table
-+EOF
-+ return 0
-+}
-+
-+while true
-+do
-+ case "$1" in
-+ -h|--help) usage && exit 0;;
-+ -n) n=$2; shift 2;;
-+ -o) header1="$2"; shift 2;;
-+ -d) database="$2"; shift 2;;
-+ -s) hashtable_name="$2"; shift 2;;
-+ --) shift 1; break ;;
-+ *) break ;;
-+ esac
-+done
-+
-+create_defines() {
-+ for i in `seq 0 31`
-+ do
-+ echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1"
-+ done
-+ echo >> "$header1"
-+}
-+
-+create_structs() {
-+ rm -f "$header1"
-+
-+ create_defines
-+
-+ cat "$database" | while read data
-+ do
-+ data_array=($data)
-+ struct_hash_name="${data_array[0]}"
-+ funcn="${data_array[1]}"
-+ params="${data_array[2]}"
-+ next="${data_array[4]}"
-+
-+ echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1"
-+
-+ echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1"
-+ echo -en "\t.param\t= " >> "$header1"
-+ line=
-+ for param_num in ${params//-/ };
-+ do
-+ line="${line}PARAM"$param_num"|"
-+ done
-+
-+ echo -e "${line%?},\n};\n" >> "$header1"
-+ done
-+}
-+
-+create_headers() {
-+ echo "const struct size_overflow_hash * const $hashtable_name[$n] = {" >> "$header1"
-+}
-+
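-+# emit the hash table array entries, padding the gaps between occupied indices with NULL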
-+create_array_elements() {
-+ index=0
-+ grep -v "nohasharray" $database | sort -n -k 4 | while read data
-+ do
-+ data_array=($data)
-+ i="${data_array[3]}"
-+ hash="${data_array[0]}"
-+ while [[ $index -lt $i ]]
-+ do
-+ echo -e "\t["$index"]\t= NULL," >> "$header1"
-+ index=$(($index + 1))
-+ done
-+ index=$(($index + 1))
-+ echo -e "\t["$i"]\t= &"$hash"," >> "$header1"
-+ done
-+ echo '};' >> $header1
-+}
-+
-+size_overflow_plugin_dir=`dirname $header1`
-+if [ "$size_overflow_plugin_dir" != '.' ]; then
-+ mkdir -p "$size_overflow_plugin_dir" 2> /dev/null
-+fi
-+
-+create_structs
-+create_headers
-+create_array_elements
-+
-+exit 0
-diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c
-new file mode 100644
-index 0000000..495983ff
---- /dev/null
-+++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c
-@@ -0,0 +1,762 @@
-+/*
-+ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
-+ * Licensed under the GPL v2, or (at your option) v3
-+ *
-+ * Homepage:
-+ * https://github.com/ephox-gcc-plugins
-+ * http://www.grsecurity.net/~ephox/overflow_plugin/
-+ *
-+ * Documentation:
-+ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
-+ *
-+ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
-+ * with double integer precision (DImode/TImode for 32/64 bit integer types).
-+ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
-+ *
-+ * Usage:
-+ * $ make
-+ * $ make run
-+ */
-+
-+#include "gcc-common.h"
-+#include "size_overflow.h"
-+
-+static void search_size_overflow_attribute(gimple_set *visited, tree lhs);
-+static enum mark search_intentional(gimple_set *visited, const_tree lhs);
-+
-+// data for the size_overflow asm stmt
-+struct asm_data {
-+ gimple def_stmt;
-+ tree input;
-+ tree output;
-+};
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+static VEC(tree, gc) *create_asm_io_list(tree string, tree io)
-+#else
-+static vec<tree, va_gc> *create_asm_io_list(tree string, tree io)
-+#endif
-+{
-+ tree list;
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC(tree, gc) *vec_list = NULL;
-+#else
-+ vec<tree, va_gc> *vec_list = NULL;
-+#endif
-+
-+ list = build_tree_list(NULL_TREE, string);
-+ list = chainon(NULL_TREE, build_tree_list(list, io));
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC_safe_push(tree, gc, vec_list, list);
-+#else
-+ vec_safe_push(vec_list, list);
-+#endif
-+ return vec_list;
-+}
-+
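-+// Build the size_overflow asm stmt from the constraint strings and asm_data, insert it
-+// after asm_data->def_stmt and make it the defining stmt of the output (if there is one).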
-+static void create_asm_stmt(const char *str, tree str_input, tree str_output, struct asm_data *asm_data)
-+{
-+ gasm *asm_stmt;
-+ gimple_stmt_iterator gsi;
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC(tree, gc) *input, *output = NULL;
-+#else
-+ vec<tree, va_gc> *input, *output = NULL;
-+#endif
-+
-+ input = create_asm_io_list(str_input, asm_data->input);
-+
-+ if (asm_data->output)
-+ output = create_asm_io_list(str_output, asm_data->output);
-+
-+ asm_stmt = as_a_gasm(gimple_build_asm_vec(str, input, output, NULL, NULL));
-+ gsi = gsi_for_stmt(asm_data->def_stmt);
-+ gsi_insert_after(&gsi, asm_stmt, GSI_NEW_STMT);
-+
-+ if (asm_data->output)
-+ SSA_NAME_DEF_STMT(asm_data->output) = asm_stmt;
-+}
-+
-+static void replace_call_lhs(const struct asm_data *asm_data)
-+{
-+ gimple_set_lhs(asm_data->def_stmt, asm_data->input);
-+ update_stmt(asm_data->def_stmt);
-+ SSA_NAME_DEF_STMT(asm_data->input) = asm_data->def_stmt;
-+}
-+
-+static enum mark search_intentional_phi(gimple_set *visited, const_tree result)
-+{
-+ enum mark cur_fndecl_attr;
-+ gphi *phi = as_a_gphi(get_def_stmt(result));
-+ unsigned int i, n = gimple_phi_num_args(phi);
-+
-+ pointer_set_insert(visited, (gimple)phi);
-+ for (i = 0; i < n; i++) {
-+ tree arg = gimple_phi_arg_def(phi, i);
-+
-+ cur_fndecl_attr = search_intentional(visited, arg);
-+ if (cur_fndecl_attr != MARK_NO)
-+ return cur_fndecl_attr;
-+ }
-+ return MARK_NO;
-+}
-+
-+static enum mark search_intentional_binary(gimple_set *visited, const_tree lhs)
-+{
-+ enum mark cur_fndecl_attr;
-+ const_tree rhs1, rhs2;
-+ gassign *def_stmt = as_a_gassign(get_def_stmt(lhs));
-+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs2 = gimple_assign_rhs2(def_stmt);
-+
-+ cur_fndecl_attr = search_intentional(visited, rhs1);
-+ if (cur_fndecl_attr != MARK_NO)
-+ return cur_fndecl_attr;
-+ return search_intentional(visited, rhs2);
-+}
-+
-+// Look up the intentional_overflow attribute on the caller and the callee functions.
-+static enum mark search_intentional(gimple_set *visited, const_tree lhs)
-+{
-+ const_gimple def_stmt;
-+
-+ if (TREE_CODE(lhs) != SSA_NAME)
-+ return get_intentional_attr_type(lhs);
-+
-+ def_stmt = get_def_stmt(lhs);
-+ if (!def_stmt)
-+ return MARK_NO;
-+
-+ if (pointer_set_contains(visited, def_stmt))
-+ return MARK_NO;
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_NOP:
-+ return search_intentional(visited, SSA_NAME_VAR(lhs));
-+ case GIMPLE_ASM:
-+ if (is_size_overflow_intentional_asm_turn_off(as_a_const_gasm(def_stmt)))
-+ return MARK_TURN_OFF;
-+ return MARK_NO;
-+ case GIMPLE_CALL:
-+ return MARK_NO;
-+ case GIMPLE_PHI:
-+ return search_intentional_phi(visited, lhs);
-+ case GIMPLE_ASSIGN:
-+ switch (gimple_num_ops(def_stmt)) {
-+ case 2:
-+ return search_intentional(visited, gimple_assign_rhs1(as_a_const_gassign(def_stmt)));
-+ case 3:
-+ return search_intentional_binary(visited, lhs);
-+ }
-+ case GIMPLE_RETURN:
-+ return MARK_NO;
-+ default:
-+ debug_gimple_stmt((gimple)def_stmt);
-+ error("%s: unknown gimple code", __func__);
-+ gcc_unreachable();
-+ }
-+}
-+
-+// Check the intentional_overflow attribute and create the asm comment string for the size_overflow asm stmt.
-+static enum mark check_intentional_attribute_gimple(const_tree arg, const_gimple stmt, unsigned int argnum)
-+{
-+ const_tree fndecl;
-+ gimple_set *visited;
-+ enum mark cur_fndecl_attr, decl_attr = MARK_NO;
-+
-+ fndecl = get_interesting_orig_fndecl(stmt, argnum);
-+ if (is_end_intentional_intentional_attr(fndecl, argnum))
-+ decl_attr = MARK_NOT_INTENTIONAL;
-+ else if (is_yes_intentional_attr(fndecl, argnum))
-+ decl_attr = MARK_YES;
-+ else if (is_turn_off_intentional_attr(fndecl) || is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
-+ return MARK_TURN_OFF;
-+ }
-+
-+ visited = pointer_set_create();
-+ cur_fndecl_attr = search_intentional(visited, arg);
-+ pointer_set_destroy(visited);
-+
-+ switch (cur_fndecl_attr) {
-+ case MARK_NO:
-+ case MARK_TURN_OFF:
-+ return cur_fndecl_attr;
-+ default:
-+ print_missing_intentional(decl_attr, cur_fndecl_attr, fndecl, argnum);
-+ return MARK_YES;
-+ }
-+}
-+
-+static void check_missing_size_overflow_attribute(tree var)
-+{
-+ tree orig_fndecl;
-+ unsigned int num;
-+
-+ if (is_a_return_check(var))
-+ orig_fndecl = DECL_ORIGIN(var);
-+ else
-+ orig_fndecl = DECL_ORIGIN(current_function_decl);
-+
-+ num = get_function_num(var, orig_fndecl);
-+ if (num == CANNOT_FIND_ARG)
-+ return;
-+
-+ is_missing_function(orig_fndecl, num);
-+}
-+
-+static void search_size_overflow_attribute_phi(gimple_set *visited, const_tree result)
-+{
-+ gimple phi = get_def_stmt(result);
-+ unsigned int i, n = gimple_phi_num_args(phi);
-+
-+ pointer_set_insert(visited, phi);
-+ for (i = 0; i < n; i++) {
-+ tree arg = gimple_phi_arg_def(phi, i);
-+
-+ search_size_overflow_attribute(visited, arg);
-+ }
-+}
-+
-+static void search_size_overflow_attribute_binary(gimple_set *visited, const_tree lhs)
-+{
-+ const_gimple def_stmt = get_def_stmt(lhs);
-+ tree rhs1, rhs2;
-+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs2 = gimple_assign_rhs2(def_stmt);
-+
-+ search_size_overflow_attribute(visited, rhs1);
-+ search_size_overflow_attribute(visited, rhs2);
-+}
-+
-+static void search_size_overflow_attribute(gimple_set *visited, tree lhs)
-+{
-+ const_gimple def_stmt;
-+
-+ if (TREE_CODE(lhs) == PARM_DECL) {
-+ check_missing_size_overflow_attribute(lhs);
-+ return;
-+ }
-+
-+ def_stmt = get_def_stmt(lhs);
-+ if (!def_stmt)
-+ return;
-+
-+ if (pointer_set_insert(visited, def_stmt))
-+ return;
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_NOP:
-+ return search_size_overflow_attribute(visited, SSA_NAME_VAR(lhs));
-+ case GIMPLE_ASM:
-+ return;
-+ case GIMPLE_CALL: {
-+ tree fndecl = gimple_call_fndecl(def_stmt);
-+
-+ if (fndecl == NULL_TREE)
-+ return;
-+ check_missing_size_overflow_attribute(fndecl);
-+ return;
-+ }
-+ case GIMPLE_PHI:
-+ return search_size_overflow_attribute_phi(visited, lhs);
-+ case GIMPLE_ASSIGN:
-+ switch (gimple_num_ops(def_stmt)) {
-+ case 2:
-+ return search_size_overflow_attribute(visited, gimple_assign_rhs1(def_stmt));
-+ case 3:
-+ return search_size_overflow_attribute_binary(visited, lhs);
-+ }
-+ default:
-+ debug_gimple_stmt((gimple)def_stmt);
-+ error("%s: unknown gimple code", __func__);
-+ gcc_unreachable();
-+ }
-+}
-+
-+// Search for missing entries in the hash table (invoked from the gimple pass)
-+static void search_missing_size_overflow_attribute_gimple(const_gimple stmt, unsigned int num)
-+{
-+ tree fndecl = NULL_TREE;
-+ tree lhs;
-+ gimple_set *visited;
-+
-+ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl)))
-+ return;
-+
-+ if (num == 0) {
-+ gcc_assert(gimple_code(stmt) == GIMPLE_RETURN);
-+ lhs = gimple_return_retval(as_a_const_greturn(stmt));
-+ } else {
-+ const gcall *call = as_a_const_gcall(stmt);
-+
-+ gcc_assert(is_gimple_call(call));
-+ lhs = gimple_call_arg(call, num - 1);
-+ fndecl = gimple_call_fndecl(call);
-+ }
-+
-+ if (fndecl != NULL_TREE && is_turn_off_intentional_attr(DECL_ORIGIN(fndecl)))
-+ return;
-+
-+ visited = pointer_set_create();
-+ search_size_overflow_attribute(visited, lhs);
-+ pointer_set_destroy(visited);
-+}
-+
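-+// The marked value is defined by a phi: copy it into the new asm input with an assignment
-+// inserted before stmt, then create a fresh output ssa name and make it stmt's retval/argument.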
-+static void create_output_from_phi(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
-+{
-+ gimple_stmt_iterator gsi;
-+ gimple assign;
-+
-+ assign = gimple_build_assign(asm_data->input, asm_data->output);
-+ gsi = gsi_for_stmt(stmt);
-+ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
-+ asm_data->def_stmt = assign;
-+
-+ asm_data->output = create_new_var(TREE_TYPE(asm_data->output));
-+ asm_data->output = make_ssa_name(asm_data->output, stmt);
-+ if (gimple_code(stmt) == GIMPLE_RETURN)
-+ gimple_return_set_retval(as_a_greturn(stmt), asm_data->output);
-+ else
-+ gimple_call_set_arg(as_a_gcall(stmt), argnum - 1, asm_data->output);
-+ update_stmt(stmt);
-+}
-+
-+static char *create_asm_comment(unsigned int argnum, const_gimple stmt, const char *mark_str)
-+{
-+ const char *fn_name;
-+ char *asm_comment;
-+ unsigned int len;
-+
-+ if (argnum == 0)
-+ fn_name = DECL_NAME_POINTER(current_function_decl);
-+ else
-+ fn_name = DECL_NAME_POINTER(gimple_call_fndecl(stmt));
-+
-+ len = asprintf(&asm_comment, "%s %s %u", mark_str, fn_name, argnum);
-+ gcc_assert(len > 0);
-+
-+ return asm_comment;
-+}
-+
-+static const char *convert_mark_to_str(enum mark mark)
-+{
-+ switch (mark) {
-+ case MARK_NO:
-+ return OK_ASM_STR;
-+ case MARK_YES:
-+ case MARK_NOT_INTENTIONAL:
-+ return YES_ASM_STR;
-+ case MARK_TURN_OFF:
-+ return TURN_OFF_ASM_STR;
-+ }
-+
-+ gcc_unreachable();
-+}
-+
-+/* Create the input of the size_overflow asm stmt.
-+ * When the arg of the callee function is a parm_decl, it creates this kind of size_overflow asm stmt:
-+ * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
-+ * The input field in asm_data will be empty if there is no need for further size_overflow asm stmt insertion;
-+ * otherwise, create the input (for a phi stmt the output too) of the asm stmt.
-+ */
-+static void create_asm_input(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
-+{
-+ if (!asm_data->def_stmt) {
-+ asm_data->input = NULL_TREE;
-+ return;
-+ }
-+
-+ asm_data->input = create_new_var(TREE_TYPE(asm_data->output));
-+ asm_data->input = make_ssa_name(asm_data->input, asm_data->def_stmt);
-+
-+ switch (gimple_code(asm_data->def_stmt)) {
-+ case GIMPLE_ASSIGN:
-+ case GIMPLE_CALL:
-+ replace_call_lhs(asm_data);
-+ break;
-+ case GIMPLE_PHI:
-+ create_output_from_phi(stmt, argnum, asm_data);
-+ break;
-+ case GIMPLE_NOP: {
-+ enum mark mark;
-+ const char *mark_str;
-+ char *asm_comment;
-+
-+ mark = check_intentional_attribute_gimple(asm_data->output, stmt, argnum);
-+
-+ asm_data->input = asm_data->output;
-+ asm_data->output = NULL;
-+ asm_data->def_stmt = stmt;
-+
-+ mark_str = convert_mark_to_str(mark);
-+ asm_comment = create_asm_comment(argnum, stmt, mark_str);
-+
-+ create_asm_stmt(asm_comment, build_string(3, "rm"), NULL, asm_data);
-+ free(asm_comment);
-+ asm_data->input = NULL_TREE;
-+ break;
-+ }
-+ case GIMPLE_ASM:
-+ if (is_size_overflow_asm(as_a_const_gasm(asm_data->def_stmt))) {
-+ asm_data->input = NULL_TREE;
-+ break;
-+ }
-+ default:
-+ debug_gimple_stmt(asm_data->def_stmt);
-+ gcc_unreachable();
-+ }
-+}
-+
-+/* This is the gimple part of searching for a missing size_overflow attribute. If the intentional_overflow attribute type
-+ * is of the right kind, create the appropriate size_overflow asm stmts:
-+ * __asm__("# size_overflow" : "=rm" D.3344_8 : "0" cicus.4_16);
-+ * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
-+ */
-+static void create_size_overflow_asm(gimple stmt, tree output_node, unsigned int argnum)
-+{
-+ struct asm_data asm_data;
-+ const char *mark_str;
-+ char *asm_comment;
-+ enum mark mark;
-+
-+ if (is_gimple_constant(output_node))
-+ return;
-+
-+ asm_data.output = output_node;
-+ mark = check_intentional_attribute_gimple(asm_data.output, stmt, argnum);
-+ if (mark != MARK_TURN_OFF)
-+ search_missing_size_overflow_attribute_gimple(stmt, argnum);
-+
-+ asm_data.def_stmt = get_def_stmt(asm_data.output);
-+ if (gimple_code(asm_data.def_stmt) == GIMPLE_ASM && is_size_overflow_intentional_asm_turn_off(as_a_const_gasm(asm_data.def_stmt)))
-+ return;
-+
-+ create_asm_input(stmt, argnum, &asm_data);
-+ if (asm_data.input == NULL_TREE)
-+ return;
-+
-+ mark_str = convert_mark_to_str(mark);
-+ asm_comment = create_asm_comment(argnum, stmt, mark_str);
-+ create_asm_stmt(asm_comment, build_string(2, "0"), build_string(4, "=rm"), &asm_data);
-+ free(asm_comment);
-+}
-+
-+// Insert an asm stmt with "MARK_TURN_OFF", "MARK_YES" or "MARK_NOT_INTENTIONAL".
-+static bool create_mark_asm(gimple stmt, enum mark mark)
-+{
-+ struct asm_data asm_data;
-+ const char *asm_str;
-+
-+ switch (mark) {
-+ case MARK_TURN_OFF:
-+ asm_str = TURN_OFF_ASM_STR;
-+ break;
-+ case MARK_NOT_INTENTIONAL:
-+ case MARK_YES:
-+ asm_str = YES_ASM_STR;
-+ break;
-+ default:
-+ gcc_unreachable();
-+ }
-+
-+ asm_data.def_stmt = stmt;
-+ asm_data.output = gimple_call_lhs(stmt);
-+
-+ if (asm_data.output == NULL_TREE) {
-+ asm_data.input = gimple_call_arg(stmt, 0);
-+ if (is_gimple_constant(asm_data.input))
-+ return false;
-+ asm_data.output = NULL;
-+ create_asm_stmt(asm_str, build_string(3, "rm"), NULL, &asm_data);
-+ return true;
-+ }
-+
-+ create_asm_input(stmt, 0, &asm_data);
-+ gcc_assert(asm_data.input != NULL_TREE);
-+
-+ create_asm_stmt(asm_str, build_string(2, "0"), build_string(4, "=rm"), &asm_data);
-+ return true;
-+}
-+
-+static void walk_use_def_ptr(gimple_set *visited, const_tree lhs)
-+{
-+ gimple def_stmt;
-+
-+ def_stmt = get_def_stmt(lhs);
-+ if (!def_stmt)
-+ return;
-+
-+ if (pointer_set_insert(visited, def_stmt))
-+ return;
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_NOP:
-+ case GIMPLE_ASM:
-+ case GIMPLE_CALL:
-+ break;
-+ case GIMPLE_PHI: {
-+ gphi *phi = as_a_gphi(def_stmt);
-+ unsigned int i, n = gimple_phi_num_args(phi);
-+
-+ pointer_set_insert(visited, def_stmt);
-+
-+ for (i = 0; i < n; i++) {
-+ tree arg = gimple_phi_arg_def(phi, i);
-+
-+ walk_use_def_ptr(visited, arg);
-+ }
-+ break;
-+ }
-+ case GIMPLE_ASSIGN: {
-+ gassign *assign = as_a_gassign(def_stmt);
-+
-+ switch (gimple_num_ops(assign)) {
-+ case 2:
-+ walk_use_def_ptr(visited, gimple_assign_rhs1(assign));
-+ return;
-+ case 3:
-+ walk_use_def_ptr(visited, gimple_assign_rhs1(assign));
-+ walk_use_def_ptr(visited, gimple_assign_rhs2(assign));
-+ return;
-+ default:
-+ return;
-+ }
-+ }
-+ default:
-+ debug_gimple_stmt((gimple)def_stmt);
-+ error("%s: unknown gimple code", __func__);
-+ gcc_unreachable();
-+ }
-+}
-+
-+// Look for a ptr - ptr expression (e.g., cpuset_common_file_read() s - page)
-+static void insert_mark_not_intentional_asm_at_ptr(const_tree arg)
-+{
-+ gimple_set *visited;
-+
-+ visited = pointer_set_create();
-+ walk_use_def_ptr(visited, arg);
-+ pointer_set_destroy(visited);
-+}
-+
-+// Determine the return value and insert the asm stmt to mark the return stmt.
-+static void insert_asm_ret(greturn *stmt)
-+{
-+ tree ret;
-+
-+ ret = gimple_return_retval(stmt);
-+ create_size_overflow_asm(stmt, ret, 0);
-+}
-+
-+// Determine the correct arg index and arg and insert the asm stmt to mark the stmt.
-+static void insert_asm_arg(gcall *stmt, unsigned int orig_argnum)
-+{
-+ tree arg;
-+ unsigned int argnum;
-+
-+ argnum = get_correct_arg_count(orig_argnum, gimple_call_fndecl(stmt));
-+ gcc_assert(argnum != 0);
-+ if (argnum == CANNOT_FIND_ARG)
-+ return;
-+
-+ arg = gimple_call_arg(stmt, argnum - 1);
-+ gcc_assert(arg != NULL_TREE);
-+
-+ // skip all ptr - ptr expressions
-+ insert_mark_not_intentional_asm_at_ptr(arg);
-+
-+ create_size_overflow_asm(stmt, arg, argnum);
-+}
-+
-+// If a function arg or the return value is marked by the size_overflow attribute, set its index in the array.
-+static void set_argnum_attribute(const_tree attr, bool *argnums)
-+{
-+ unsigned int argnum;
-+ tree attr_value;
-+
-+ for (attr_value = TREE_VALUE(attr); attr_value; attr_value = TREE_CHAIN(attr_value)) {
-+ argnum = TREE_INT_CST_LOW(TREE_VALUE(attr_value));
-+ argnums[argnum] = true;
-+ }
-+}
-+
-+// If a function arg or the return value is in the hash table, set its index in the array.
-+static void set_argnum_hash(tree fndecl, bool *argnums)
-+{
-+ unsigned int num;
-+ const struct size_overflow_hash *hash;
-+
-+ hash = get_function_hash(DECL_ORIGIN(fndecl));
-+ if (!hash)
-+ return;
-+
-+ for (num = 0; num <= MAX_PARAM; num++) {
-+ if (!(hash->param & (1U << num)))
-+ continue;
-+
-+ argnums[num] = true;
-+ }
-+}
-+
-+static bool is_all_the_argnums_empty(bool *argnums)
-+{
-+ unsigned int i;
-+
-+ for (i = 0; i <= MAX_PARAM; i++)
-+ if (argnums[i])
-+ return false;
-+ return true;
-+}
-+
-+// Check whether the arguments or the return value of the function are in the hash table or are marked by the size_overflow attribute.
-+static void search_interesting_args(tree fndecl, bool *argnums)
-+{
-+ const_tree attr;
-+
-+ set_argnum_hash(fndecl, argnums);
-+ if (!is_all_the_argnums_empty(argnums))
-+ return;
-+
-+ attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
-+ if (attr && TREE_VALUE(attr))
-+ set_argnum_attribute(attr, argnums);
-+}
-+
-+/*
-+ * Look up the intentional_overflow attribute that turns off ipa-based duplication
-+ * on the callee function.
-+ */
-+static bool is_mark_turn_off_attribute(gcall *stmt)
-+{
-+ enum mark mark;
-+ const_tree fndecl = gimple_call_fndecl(stmt);
-+
-+ mark = get_intentional_attr_type(DECL_ORIGIN(fndecl));
-+ if (mark == MARK_TURN_OFF)
-+ return true;
-+ return false;
-+}
-+
-+// If any argument of the callee function is in the hash table or is marked by an attribute, mark the call stmt with an asm stmt
-+static void handle_interesting_function(gcall *stmt)
-+{
-+ unsigned int argnum;
-+ tree fndecl;
-+ bool orig_argnums[MAX_PARAM + 1] = {false};
-+
-+ if (gimple_call_num_args(stmt) == 0)
-+ return;
-+ fndecl = gimple_call_fndecl(stmt);
-+ if (fndecl == NULL_TREE)
-+ return;
-+ fndecl = DECL_ORIGIN(fndecl);
-+
-+ if (is_mark_turn_off_attribute(stmt)) {
-+ create_mark_asm(stmt, MARK_TURN_OFF);
-+ return;
-+ }
-+
-+ search_interesting_args(fndecl, orig_argnums);
-+
-+ for (argnum = 1; argnum < MAX_PARAM; argnum++)
-+ if (orig_argnums[argnum])
-+ insert_asm_arg(stmt, argnum);
-+}
-+
-+// If the return value of the caller function is in the hash table (its index is 0), mark the return stmt with an asm stmt
-+static void handle_interesting_ret(greturn *stmt)
-+{
-+ bool orig_argnums[MAX_PARAM + 1] = {false};
-+
-+ search_interesting_args(current_function_decl, orig_argnums);
-+
-+ if (orig_argnums[0])
-+ insert_asm_ret(stmt);
-+}
-+
-+// Iterate over all the stmts, searching for call and return stmts, and mark them if they're in the hash table
-+static unsigned int search_interesting_functions(void)
-+{
-+ basic_block bb;
-+
-+ FOR_ALL_BB_FN(bb, cfun) {
-+ gimple_stmt_iterator gsi;
-+
-+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
-+ gimple stmt = gsi_stmt(gsi);
-+
-+ if (gimple_code(stmt) == GIMPLE_ASM && is_size_overflow_asm(as_a_const_gasm(stmt)))
-+ continue;
-+
-+ if (is_gimple_call(stmt))
-+ handle_interesting_function(as_a_gcall(stmt));
-+ else if (gimple_code(stmt) == GIMPLE_RETURN)
-+ handle_interesting_ret(as_a_greturn(stmt));
-+ }
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * A lot of functions get inlined before the ipa passes, so after the build_ssa gimple pass
-+ * this pass inserts asm stmts to mark the interesting args
-+ * that the ipa pass will detect and insert the size overflow checks for.
-+ */
-+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
-+static const struct pass_data insert_size_overflow_asm_pass_data = {
-+#else
-+static struct gimple_opt_pass insert_size_overflow_asm_pass = {
-+ .pass = {
-+#endif
-+ .type = GIMPLE_PASS,
-+ .name = "insert_size_overflow_asm",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 5000
-+#elif BUILDING_GCC_VERSION == 4009
-+ .has_gate = false,
-+ .has_execute = true,
-+#else
-+ .gate = NULL,
-+ .execute = search_interesting_functions,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = PROP_cfg,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0,
-+ .todo_flags_finish = TODO_dump_func | TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
-+#if BUILDING_GCC_VERSION < 4009
-+ }
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+class insert_size_overflow_asm_pass : public gimple_opt_pass {
-+public:
-+ insert_size_overflow_asm_pass() : gimple_opt_pass(insert_size_overflow_asm_pass_data, g) {}
-+#if BUILDING_GCC_VERSION >= 5000
-+ virtual unsigned int execute(function *) { return search_interesting_functions(); }
-+#else
-+ unsigned int execute() { return search_interesting_functions(); }
-+#endif
-+};
-+}
-+
-+opt_pass *make_insert_size_overflow_asm_pass(void)
-+{
-+ return new insert_size_overflow_asm_pass();
-+}
-+#else
-+struct opt_pass *make_insert_size_overflow_asm_pass(void)
-+{
-+ return &insert_size_overflow_asm_pass.pass;
-+}
-+#endif
-diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c
-new file mode 100644
-index 0000000..0766e39
---- /dev/null
-+++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c
-@@ -0,0 +1,931 @@
-+/*
-+ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
-+ * Licensed under the GPL v2, or (at your option) v3
-+ *
-+ * Homepage:
-+ * https://github.com/ephox-gcc-plugins
-+ * http://www.grsecurity.net/~ephox/overflow_plugin/
-+ *
-+ * Documentation:
-+ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
-+ *
-+ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
-+ * with double integer precision (DImode/TImode for 32/64 bit integer types).
-+ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
-+ *
-+ * Usage:
-+ * $ make
-+ * $ make run
-+ */
-+
-+#include "gcc-common.h"
-+#include "size_overflow.h"
-+
-+#define MIN_CHECK true
-+#define MAX_CHECK false
-+
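-+// Pick the widened type used for the recomputation: QI->HI, HI->SI, SI->DI and DI->TI
-+// (DI stays DI when long is 32 bits), preserving the qualifiers of the original type.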
-+static tree get_size_overflow_type(struct visited *visited, const_gimple stmt, const_tree node)
-+{
-+ const_tree type;
-+ tree new_type;
-+
-+ gcc_assert(node != NULL_TREE);
-+
-+ type = TREE_TYPE(node);
-+
-+ if (pointer_set_contains(visited->my_stmts, stmt))
-+ return TREE_TYPE(node);
-+
-+ switch (TYPE_MODE(type)) {
-+ case QImode:
-+ new_type = size_overflow_type_HI;
-+ break;
-+ case HImode:
-+ new_type = size_overflow_type_SI;
-+ break;
-+ case SImode:
-+ new_type = size_overflow_type_DI;
-+ break;
-+ case DImode:
-+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
-+ new_type = TYPE_UNSIGNED(type) ? unsigned_intDI_type_node : intDI_type_node;
-+ else
-+ new_type = size_overflow_type_TI;
-+ break;
-+ case TImode:
-+ gcc_assert(!TYPE_UNSIGNED(type));
-+ new_type = size_overflow_type_TI;
-+ break;
-+ default:
-+ debug_tree((tree)node);
-+ error("%s: unsupported gcc configuration (%qE).", __func__, current_function_decl);
-+ gcc_unreachable();
-+ }
-+
-+ if (TYPE_QUALS(type) != 0)
-+ return build_qualified_type(new_type, TYPE_QUALS(type));
-+ return new_type;
-+}
-+
-+static tree cast_to_new_size_overflow_type(struct visited *visited, gimple stmt, tree rhs, tree size_overflow_type, bool before)
-+{
-+ gimple_stmt_iterator gsi;
-+ tree lhs;
-+ gimple new_stmt;
-+
-+ if (rhs == NULL_TREE)
-+ return NULL_TREE;
-+
-+ gsi = gsi_for_stmt(stmt);
-+ new_stmt = build_cast_stmt(visited, size_overflow_type, rhs, CREATE_NEW_VAR, &gsi, before, false);
-+ pointer_set_insert(visited->my_stmts, new_stmt);
-+
-+ lhs = get_lhs(new_stmt);
-+ gcc_assert(lhs != NULL_TREE);
-+ return lhs;
-+}
-+
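-+// Cast rhs1 to the widened type of oldstmt's result and insert the cast before or after
-+// oldstmt; for stmts that can throw internally the cast is placed in the fallthru block.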
-+tree create_assign(struct visited *visited, gimple oldstmt, tree rhs1, bool before)
-+{
-+ tree lhs, dst_type;
-+ gimple_stmt_iterator gsi;
-+
-+ if (rhs1 == NULL_TREE) {
-+ debug_gimple_stmt(oldstmt);
-+ error("%s: rhs1 is NULL_TREE", __func__);
-+ gcc_unreachable();
-+ }
-+
-+ switch (gimple_code(oldstmt)) {
-+ case GIMPLE_ASM:
-+ lhs = rhs1;
-+ break;
-+ case GIMPLE_CALL:
-+ case GIMPLE_ASSIGN:
-+ lhs = gimple_get_lhs(oldstmt);
-+ break;
-+ default:
-+ debug_gimple_stmt(oldstmt);
-+ gcc_unreachable();
-+ }
-+
-+ gsi = gsi_for_stmt(oldstmt);
-+ pointer_set_insert(visited->stmts, oldstmt);
-+ if (lookup_stmt_eh_lp(oldstmt) != 0) {
-+ basic_block next_bb, cur_bb;
-+ const_edge e;
-+
-+ gcc_assert(before == false);
-+ gcc_assert(stmt_can_throw_internal(oldstmt));
-+ gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
-+ gcc_assert(!gsi_end_p(gsi));
-+
-+ cur_bb = gimple_bb(oldstmt);
-+ next_bb = cur_bb->next_bb;
-+ e = find_edge(cur_bb, next_bb);
-+ gcc_assert(e != NULL);
-+ gcc_assert(e->flags & EDGE_FALLTHRU);
-+
-+ gsi = gsi_after_labels(next_bb);
-+ gcc_assert(!gsi_end_p(gsi));
-+
-+ before = true;
-+ oldstmt = gsi_stmt(gsi);
-+ }
-+
-+ dst_type = get_size_overflow_type(visited, oldstmt, lhs);
-+
-+ if (is_gimple_constant(rhs1))
-+ return cast_a_tree(dst_type, rhs1);
-+ return cast_to_new_size_overflow_type(visited, oldstmt, rhs1, dst_type, before);
-+}
-+
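-+// Duplicate oldstmt with a widened lhs and the already recomputed rhs operands (widening
-+// any missing ones first) and insert the copy right after the original stmt.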
-+tree dup_assign(struct visited *visited, gassign *oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3)
-+{
-+ gimple stmt;
-+ gimple_stmt_iterator gsi;
-+ tree size_overflow_type, new_var, lhs = gimple_assign_lhs(oldstmt);
-+
-+ if (pointer_set_contains(visited->my_stmts, oldstmt))
-+ return lhs;
-+
-+ if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
-+ rhs1 = gimple_assign_rhs1(oldstmt);
-+ rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT);
-+ }
-+ if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
-+ rhs2 = gimple_assign_rhs2(oldstmt);
-+ rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT);
-+ }
-+
-+ stmt = gimple_copy(oldstmt);
-+ gimple_set_location(stmt, gimple_location(oldstmt));
-+ pointer_set_insert(visited->my_stmts, stmt);
-+
-+ if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
-+ gimple_assign_set_rhs_code(stmt, MULT_EXPR);
-+
-+ size_overflow_type = get_size_overflow_type(visited, oldstmt, node);
-+
-+ new_var = create_new_var(size_overflow_type);
-+ new_var = make_ssa_name(new_var, stmt);
-+ gimple_assign_set_lhs(stmt, new_var);
-+
-+ if (rhs1 != NULL_TREE)
-+ gimple_assign_set_rhs1(stmt, rhs1);
-+
-+ if (rhs2 != NULL_TREE)
-+ gimple_assign_set_rhs2(stmt, rhs2);
-+#if BUILDING_GCC_VERSION >= 4006
-+ if (rhs3 != NULL_TREE)
-+ gimple_assign_set_rhs3(stmt, rhs3);
-+#endif
-+ gimple_set_vuse(stmt, gimple_vuse(oldstmt));
-+ gimple_set_vdef(stmt, gimple_vdef(oldstmt));
-+
-+ gsi = gsi_for_stmt(oldstmt);
-+ gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
-+ update_stmt(stmt);
-+ pointer_set_insert(visited->stmts, oldstmt);
-+ return gimple_assign_lhs(stmt);
-+}
-+
-+static tree cast_parm_decl(struct visited *visited, tree phi_ssa_name, tree arg, tree size_overflow_type, basic_block bb)
-+{
-+ gimple assign;
-+ gimple_stmt_iterator gsi;
-+ basic_block first_bb;
-+
-+ gcc_assert(SSA_NAME_IS_DEFAULT_DEF(arg));
-+
-+ if (bb->index == 0) {
-+ first_bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest;
-+ gcc_assert(dom_info_available_p(CDI_DOMINATORS));
-+ set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR_FOR_FN(cfun));
-+ bb = first_bb;
-+ }
-+
-+ gsi = gsi_after_labels(bb);
-+ assign = build_cast_stmt(visited, size_overflow_type, arg, phi_ssa_name, &gsi, BEFORE_STMT, false);
-+ pointer_set_insert(visited->my_stmts, assign);
-+
-+ return get_lhs(assign);
-+}
-+
-+static tree use_phi_ssa_name(struct visited *visited, tree ssa_name_var, tree new_arg)
-+{
-+ gimple_stmt_iterator gsi;
-+ gimple assign;
-+ gimple def_stmt = get_def_stmt(new_arg);
-+
-+ if (gimple_code(def_stmt) == GIMPLE_PHI) {
-+ gsi = gsi_after_labels(gimple_bb(def_stmt));
-+ assign = build_cast_stmt(visited, TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, BEFORE_STMT, true);
-+ } else {
-+ gsi = gsi_for_stmt(def_stmt);
-+ assign = build_cast_stmt(visited, TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, AFTER_STMT, true);
-+ }
-+
-+ pointer_set_insert(visited->my_stmts, assign);
-+ return get_lhs(assign);
-+}
-+
-+static tree cast_visited_phi_arg(struct visited *visited, tree ssa_name_var, tree arg, tree size_overflow_type)
-+{
-+ basic_block bb;
-+ gimple_stmt_iterator gsi;
-+ const_gimple def_stmt;
-+ gimple assign;
-+
-+ def_stmt = get_def_stmt(arg);
-+ bb = gimple_bb(def_stmt);
-+ gcc_assert(bb->index != 0);
-+ gsi = gsi_after_labels(bb);
-+
-+ assign = build_cast_stmt(visited, size_overflow_type, arg, ssa_name_var, &gsi, BEFORE_STMT, false);
-+ pointer_set_insert(visited->my_stmts, assign);
-+ return get_lhs(assign);
-+}
-+
-+static tree create_new_phi_arg(struct visited *visited, tree ssa_name_var, tree new_arg, gphi *oldstmt, unsigned int i)
-+{
-+ tree size_overflow_type, arg;
-+ const_gimple def_stmt;
-+
-+ if (new_arg != NULL_TREE && is_gimple_constant(new_arg))
-+ return new_arg;
-+
-+ arg = gimple_phi_arg_def(oldstmt, i);
-+ def_stmt = get_def_stmt(arg);
-+ gcc_assert(def_stmt != NULL);
-+ size_overflow_type = get_size_overflow_type(visited, oldstmt, arg);
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_PHI:
-+ return cast_visited_phi_arg(visited, ssa_name_var, arg, size_overflow_type);
-+ case GIMPLE_NOP: {
-+ basic_block bb;
-+
-+ bb = gimple_phi_arg_edge(as_a_gphi(oldstmt), i)->src;
-+ return cast_parm_decl(visited, ssa_name_var, arg, size_overflow_type, bb);
-+ }
-+ case GIMPLE_ASM: {
-+ gimple_stmt_iterator gsi;
-+ gimple assign, stmt = get_def_stmt(arg);
-+
-+ gsi = gsi_for_stmt(stmt);
-+ assign = build_cast_stmt(visited, size_overflow_type, arg, ssa_name_var, &gsi, AFTER_STMT, false);
-+ pointer_set_insert(visited->my_stmts, assign);
-+ return get_lhs(assign);
-+ }
-+ default:
-+ gcc_assert(new_arg != NULL_TREE);
-+ gcc_assert(types_compatible_p(TREE_TYPE(new_arg), size_overflow_type));
-+ return use_phi_ssa_name(visited, ssa_name_var, new_arg);
-+ }
-+}
-+
-+static gphi *overflow_create_phi_node(struct visited *visited, gphi *oldstmt, tree result)
-+{
-+ basic_block bb;
-+ gphi *phi;
-+ gimple_seq seq;
-+ gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
-+
-+ bb = gsi_bb(gsi);
-+
-+ if (result == NULL_TREE) {
-+ tree old_result = gimple_phi_result(oldstmt);
-+ tree size_overflow_type = get_size_overflow_type(visited, oldstmt, old_result);
-+
-+ result = create_new_var(size_overflow_type);
-+ }
-+
-+ phi = as_a_gphi(create_phi_node(result, bb));
-+ gimple_phi_set_result(phi, make_ssa_name(result, phi));
-+ seq = phi_nodes(bb);
-+ gsi = gsi_last(seq);
-+ gsi_remove(&gsi, false);
-+
-+ gsi = gsi_for_stmt(oldstmt);
-+ gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
-+ gimple_set_bb(phi, bb);
-+ return phi;
-+}
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+static tree create_new_phi_node(struct visited *visited, VEC(tree, heap) **args, tree ssa_name_var, gphi *oldstmt)
-+#else
-+static tree create_new_phi_node(struct visited *visited, vec<tree, va_heap, vl_embed> *&args, tree ssa_name_var, gphi *oldstmt)
-+#endif
-+{
-+ gphi *new_phi;
-+ unsigned int i;
-+ tree arg, result;
-+ location_t loc = gimple_location(oldstmt);
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ gcc_assert(!VEC_empty(tree, *args));
-+#else
-+ gcc_assert(!args->is_empty());
-+#endif
-+
-+ new_phi = overflow_create_phi_node(visited, oldstmt, ssa_name_var);
-+ result = gimple_phi_result(new_phi);
-+ ssa_name_var = SSA_NAME_VAR(result);
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ FOR_EACH_VEC_ELT(tree, *args, i, arg) {
-+#else
-+ FOR_EACH_VEC_SAFE_ELT(args, i, arg) {
-+#endif
-+ arg = create_new_phi_arg(visited, ssa_name_var, arg, oldstmt, i);
-+ add_phi_arg(new_phi, arg, gimple_phi_arg_edge(oldstmt, i), loc);
-+ }
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC_free(tree, heap, *args);
-+#else
-+ vec_free(args);
-+#endif
-+ update_stmt(new_phi);
-+ pointer_set_insert(visited->my_stmts, new_phi);
-+ return result;
-+}
-+
-+static tree handle_phi(struct visited *visited, struct cgraph_node *caller_node, tree orig_result)
-+{
-+ tree ssa_name_var = NULL_TREE;
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC(tree, heap) *args = NULL;
-+#else
-+ vec<tree, va_heap, vl_embed> *args = NULL;
-+#endif
-+ gphi *oldstmt = as_a_gphi(get_def_stmt(orig_result));
-+ unsigned int i, len = gimple_phi_num_args(oldstmt);
-+
-+ pointer_set_insert(visited->stmts, oldstmt);
-+ for (i = 0; i < len; i++) {
-+ tree arg, new_arg;
-+
-+ arg = gimple_phi_arg_def(oldstmt, i);
-+ new_arg = expand(visited, caller_node, arg);
-+
-+ if (ssa_name_var == NULL_TREE && new_arg != NULL_TREE)
-+ ssa_name_var = SSA_NAME_VAR(new_arg);
-+
-+ if (is_gimple_constant(arg)) {
-+ tree size_overflow_type = get_size_overflow_type(visited, oldstmt, arg);
-+
-+ new_arg = cast_a_tree(size_overflow_type, arg);
-+ }
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC_safe_push(tree, heap, args, new_arg);
-+#else
-+ vec_safe_push(args, new_arg);
-+#endif
-+ }
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ return create_new_phi_node(visited, &args, ssa_name_var, oldstmt);
-+#else
-+ return create_new_phi_node(visited, args, ssa_name_var, oldstmt);
-+#endif
-+}
-+
-+static tree create_cast_assign(struct visited *visited, gassign *stmt)
-+{
-+ tree rhs1 = gimple_assign_rhs1(stmt);
-+ tree lhs = gimple_assign_lhs(stmt);
-+ const_tree rhs1_type = TREE_TYPE(rhs1);
-+ const_tree lhs_type = TREE_TYPE(lhs);
-+
-+ if (TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+
-+ return create_assign(visited, stmt, rhs1, AFTER_STMT);
-+}
-+
-+static bool skip_lhs_cast_check(const gassign *stmt)
-+{
-+ const_tree rhs = gimple_assign_rhs1(stmt);
-+ const_gimple def_stmt = get_def_stmt(rhs);
-+
-+ // 3.8.2 kernel/futex_compat.c compat_exit_robust_list(): get_user() 64 ulong -> int (compat_long_t), int max
-+ if (gimple_code(def_stmt) == GIMPLE_ASM)
-+ return true;
-+
-+ if (is_const_plus_unsigned_signed_truncation(rhs))
-+ return true;
-+
-+ return false;
-+}
-+
-+static tree create_string_param(tree string)
-+{
-+ tree i_type, a_type;
-+ const int length = TREE_STRING_LENGTH(string);
-+
-+ gcc_assert(length > 0);
-+
-+ i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
-+ a_type = build_array_type(char_type_node, i_type);
-+
-+ TREE_TYPE(string) = a_type;
-+ TREE_CONSTANT(string) = 1;
-+ TREE_READONLY(string) = 1;
-+
-+ return build1(ADDR_EXPR, ptr_type_node, string);
-+}
-+
-+static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
-+{
-+ gcond *cond_stmt;
-+ gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
-+
-+ cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
-+ gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
-+ update_stmt(cond_stmt);
-+}
-+
-+static void insert_cond_result(struct cgraph_node *caller_node, basic_block bb_true, const_gimple stmt, const_tree arg, bool min)
-+{
-+ gcall *func_stmt;
-+ const_gimple def_stmt;
-+ const_tree loc_line;
-+ tree loc_file, ssa_name, current_func;
-+ expanded_location xloc;
-+ char *ssa_name_buf;
-+ int len;
-+ struct cgraph_edge *edge;
-+ struct cgraph_node *callee_node;
-+ int frequency;
-+ gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
-+
-+ def_stmt = get_def_stmt(arg);
-+ xloc = expand_location(gimple_location(def_stmt));
-+
-+ if (!gimple_has_location(def_stmt)) {
-+ xloc = expand_location(gimple_location(stmt));
-+ if (!gimple_has_location(stmt))
-+ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
-+ }
-+
-+ loc_line = build_int_cstu(unsigned_type_node, xloc.line);
-+
-+ loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
-+ loc_file = create_string_param(loc_file);
-+
-+ current_func = build_string(DECL_NAME_LENGTH(current_function_decl) + 1, DECL_NAME_POINTER(current_function_decl));
-+ current_func = create_string_param(current_func);
-+
-+ gcc_assert(DECL_NAME(SSA_NAME_VAR(arg)) != NULL);
-+ call_count++;
-+ len = asprintf(&ssa_name_buf, "%s_%u %s, count: %u\n", DECL_NAME_POINTER(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? "min" : "max", call_count);
-+ gcc_assert(len > 0);
-+ ssa_name = build_string(len + 1, ssa_name_buf);
-+ free(ssa_name_buf);
-+ ssa_name = create_string_param(ssa_name);
-+
-+ // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
-+ func_stmt = as_a_gcall(gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name));
-+ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
-+
-+ callee_node = cgraph_get_create_node(report_size_overflow_decl);
-+ frequency = compute_call_stmt_bb_frequency(current_function_decl, bb_true);
-+
-+ edge = cgraph_create_edge(caller_node, callee_node, func_stmt, bb_true->count, frequency, bb_true->loop_depth);
-+ gcc_assert(edge != NULL);
-+}
-+
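-+// Split the block at stmt and emit "if (arg <cond_code> type_value) report_size_overflow(...)"
-+// on the new true edge, keeping the dominator tree and loop structure up to date.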
-+static void insert_check_size_overflow(struct cgraph_node *caller_node, gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min)
-+{
-+ basic_block cond_bb, join_bb, bb_true;
-+ edge e;
-+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
-+
-+ cond_bb = gimple_bb(stmt);
-+ if (before)
-+ gsi_prev(&gsi);
-+ if (gsi_end_p(gsi))
-+ e = split_block_after_labels(cond_bb);
-+ else
-+ e = split_block(cond_bb, gsi_stmt(gsi));
-+ cond_bb = e->src;
-+ join_bb = e->dest;
-+ e->flags = EDGE_FALSE_VALUE;
-+ e->probability = REG_BR_PROB_BASE;
-+
-+ bb_true = create_empty_bb(cond_bb);
-+ make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
-+ make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
-+ make_edge(bb_true, join_bb, EDGE_FALLTHRU);
-+
-+ gcc_assert(dom_info_available_p(CDI_DOMINATORS));
-+ set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
-+ set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
-+
-+ if (current_loops != NULL) {
-+ gcc_assert(cond_bb->loop_father == join_bb->loop_father);
-+ add_bb_to_loop(bb_true, cond_bb->loop_father);
-+ }
-+
-+ insert_cond(cond_bb, arg, cond_code, type_value);
-+ insert_cond_result(caller_node, bb_true, stmt, arg, min);
-+
-+// print_the_code_insertions(stmt);
-+}
-+
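-+// Compare the recomputed value against TYPE_MAX (and usually TYPE_MIN) of the original type;
-+// pointer types and the const + unsigned->signed truncation special case are skipped.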
-+void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before)
-+{
-+ const_tree rhs_type = TREE_TYPE(rhs);
-+ tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min;
-+
-+ gcc_assert(rhs_type != NULL_TREE);
-+ if (TREE_CODE(rhs_type) == POINTER_TYPE)
-+ return;
-+
-+ gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
-+
-+ if (is_const_plus_unsigned_signed_truncation(rhs))
-+ return;
-+
-+ type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
-+ // typemax (-1) < typemin (0)
-+ if (TREE_OVERFLOW(type_max))
-+ return;
-+
-+ type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
-+
-+ cast_rhs_type = TREE_TYPE(cast_rhs);
-+ type_max_type = TREE_TYPE(type_max);
-+ gcc_assert(types_compatible_p(cast_rhs_type, type_max_type));
-+
-+ insert_check_size_overflow(caller_node, stmt, GT_EXPR, cast_rhs, type_max, before, MAX_CHECK);
-+
-+ // special case: get_size_overflow_type(), 32, u64->s
-+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && TYPE_UNSIGNED(size_overflow_type) && !TYPE_UNSIGNED(rhs_type))
-+ return;
-+
-+ type_min_type = TREE_TYPE(type_min);
-+ gcc_assert(types_compatible_p(type_max_type, type_min_type));
-+ insert_check_size_overflow(caller_node, stmt, LT_EXPR, cast_rhs, type_min, before, MIN_CHECK);
-+}
-+
-+static tree create_cast_overflow_check(struct visited *visited, struct cgraph_node *caller_node, tree new_rhs1, gassign *stmt)
-+{
-+ bool cast_lhs, cast_rhs;
-+ tree lhs = gimple_assign_lhs(stmt);
-+ tree rhs = gimple_assign_rhs1(stmt);
-+ const_tree lhs_type = TREE_TYPE(lhs);
-+ const_tree rhs_type = TREE_TYPE(rhs);
-+ enum machine_mode lhs_mode = TYPE_MODE(lhs_type);
-+ enum machine_mode rhs_mode = TYPE_MODE(rhs_type);
-+ unsigned int lhs_size = GET_MODE_BITSIZE(lhs_mode);
-+ unsigned int rhs_size = GET_MODE_BITSIZE(rhs_mode);
-+
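-+	// Decision tables for the cast rhs -> lhs: rows are lhs > / == / < rhs in bit size,
-+	// columns are indexed by TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type).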
-+ static bool check_lhs[3][4] = {
-+ // ss su us uu
-+ { false, true, true, false }, // lhs > rhs
-+ { false, false, false, false }, // lhs = rhs
-+ { true, true, true, true }, // lhs < rhs
-+ };
-+
-+ static bool check_rhs[3][4] = {
-+ // ss su us uu
-+ { true, false, true, true }, // lhs > rhs
-+ { true, false, true, true }, // lhs = rhs
-+ { true, false, true, true }, // lhs < rhs
-+ };
-+
-+ // skip lhs check on signed SI -> HI cast or signed SI -> QI cast !!!!
-+ if (rhs_mode == SImode && !TYPE_UNSIGNED(rhs_type) && (lhs_mode == HImode || lhs_mode == QImode))
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+
-+ if (lhs_size > rhs_size) {
-+ cast_lhs = check_lhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
-+ cast_rhs = check_rhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
-+ } else if (lhs_size == rhs_size) {
-+ cast_lhs = check_lhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
-+ cast_rhs = check_rhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
-+ } else {
-+ cast_lhs = check_lhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
-+ cast_rhs = check_rhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
-+ }
-+
-+ if (!cast_lhs && !cast_rhs)
-+ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
-+
-+ if (cast_lhs && !skip_lhs_cast_check(stmt))
-+ check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, lhs, BEFORE_STMT);
-+
-+ if (cast_rhs)
-+ check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, rhs, BEFORE_STMT);
-+
-+ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
-+}
-+
-+static tree handle_unary_rhs(struct visited *visited, struct cgraph_node *caller_node, gassign *stmt)
-+{
-+ enum tree_code rhs_code;
-+ tree rhs1, new_rhs1, lhs = gimple_assign_lhs(stmt);
-+
-+ if (pointer_set_contains(visited->my_stmts, stmt))
-+ return lhs;
-+
-+ rhs1 = gimple_assign_rhs1(stmt);
-+ if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+
-+ new_rhs1 = expand(visited, caller_node, rhs1);
-+
-+ if (new_rhs1 == NULL_TREE)
-+ return create_cast_assign(visited, stmt);
-+
-+ if (pointer_set_contains(visited->no_cast_check, stmt))
-+ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
-+
-+ rhs_code = gimple_assign_rhs_code(stmt);
-+ if (rhs_code == BIT_NOT_EXPR || rhs_code == NEGATE_EXPR) {
-+ tree size_overflow_type = get_size_overflow_type(visited, stmt, rhs1);
-+
-+ new_rhs1 = cast_to_new_size_overflow_type(visited, stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
-+ check_size_overflow(caller_node, stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT);
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+ }
-+
-+ if (!gimple_assign_cast_p(stmt))
-+ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
-+
-+ return create_cast_overflow_check(visited, caller_node, new_rhs1, stmt);
-+}
-+
-+static tree handle_unary_ops(struct visited *visited, struct cgraph_node *caller_node, gassign *stmt)
-+{
-+ tree rhs1, lhs = gimple_assign_lhs(stmt);
-+ gassign *def_stmt = as_a_gassign(get_def_stmt(lhs));
-+
-+ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP);
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+
-+ if (is_gimple_constant(rhs1))
-+ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
-+
-+ switch (TREE_CODE(rhs1)) {
-+ case SSA_NAME: {
-+ tree ret = handle_unary_rhs(visited, caller_node, def_stmt);
-+
-+ if (gimple_assign_cast_p(stmt))
-+ unsigned_signed_cast_intentional_overflow(visited, stmt);
-+ return ret;
-+ }
-+ case ARRAY_REF:
-+ case BIT_FIELD_REF:
-+ case ADDR_EXPR:
-+ case COMPONENT_REF:
-+ case INDIRECT_REF:
-+#if BUILDING_GCC_VERSION >= 4006
-+ case MEM_REF:
-+#endif
-+ case TARGET_MEM_REF:
-+ case VIEW_CONVERT_EXPR:
-+ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
-+ case PARM_DECL:
-+ case VAR_DECL:
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+
-+ default:
-+ debug_gimple_stmt(def_stmt);
-+ debug_tree(rhs1);
-+ gcc_unreachable();
-+ }
-+}
-+
-+static void __unused print_the_code_insertions(const_gimple stmt)
-+{
-+ location_t loc = gimple_location(stmt);
-+
-+ inform(loc, "Integer size_overflow check applied here.");
-+}
-+
-+static bool is_from_cast(const_tree node)
-+{
-+ gimple def_stmt = get_def_stmt(node);
-+
-+ if (!def_stmt)
-+ return false;
-+
-+ if (gimple_assign_cast_p(def_stmt))
-+ return true;
-+
-+ return false;
-+}
-+
-+// Skip duplication when there is a minus expr and the type of rhs1 or rhs2 is a pointer_type.
-+static bool is_a_ptr_minus(gassign *stmt)
-+{
-+ const_tree rhs1, rhs2, ptr1_rhs, ptr2_rhs;
-+
-+ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
-+ return false;
-+
-+ rhs1 = gimple_assign_rhs1(stmt);
-+ if (!is_from_cast(rhs1))
-+ return false;
-+
-+ rhs2 = gimple_assign_rhs2(stmt);
-+ if (!is_from_cast(rhs2))
-+ return false;
-+
-+ ptr1_rhs = gimple_assign_rhs1(get_def_stmt(rhs1));
-+ ptr2_rhs = gimple_assign_rhs1(get_def_stmt(rhs2));
-+
-+ if (TREE_CODE(TREE_TYPE(ptr1_rhs)) != POINTER_TYPE && TREE_CODE(TREE_TYPE(ptr2_rhs)) != POINTER_TYPE)
-+ return false;
-+
-+ return true;
-+}
-+
-+static tree handle_binary_ops(struct visited *visited, struct cgraph_node *caller_node, tree lhs)
-+{
-+ enum intentional_overflow_type res;
-+ tree rhs1, rhs2, new_lhs;
-+ gassign *def_stmt = as_a_gassign(get_def_stmt(lhs));
-+ tree new_rhs1 = NULL_TREE;
-+ tree new_rhs2 = NULL_TREE;
-+
-+ if (is_a_ptr_minus(def_stmt))
-+ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
-+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs2 = gimple_assign_rhs2(def_stmt);
-+
-+ /* no DImode/TImode division in the 32/64 bit kernel */
-+ switch (gimple_assign_rhs_code(def_stmt)) {
-+ case RDIV_EXPR:
-+ case TRUNC_DIV_EXPR:
-+ case CEIL_DIV_EXPR:
-+ case FLOOR_DIV_EXPR:
-+ case ROUND_DIV_EXPR:
-+ case TRUNC_MOD_EXPR:
-+ case CEIL_MOD_EXPR:
-+ case FLOOR_MOD_EXPR:
-+ case ROUND_MOD_EXPR:
-+ case EXACT_DIV_EXPR:
-+ case POINTER_PLUS_EXPR:
-+ case BIT_AND_EXPR:
-+ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
-+ default:
-+ break;
-+ }
-+
-+ new_lhs = handle_integer_truncation(visited, caller_node, lhs);
-+ if (new_lhs != NULL_TREE)
-+ return new_lhs;
-+
-+ if (TREE_CODE(rhs1) == SSA_NAME)
-+ new_rhs1 = expand(visited, caller_node, rhs1);
-+ if (TREE_CODE(rhs2) == SSA_NAME)
-+ new_rhs2 = expand(visited, caller_node, rhs2);
-+
-+ res = add_mul_intentional_overflow(def_stmt);
-+ if (res != NO_INTENTIONAL_OVERFLOW) {
-+ new_lhs = dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
-+ insert_cast_expr(visited, as_a_gassign(get_def_stmt(new_lhs)), res);
-+ return new_lhs;
-+ }
-+
-+ if (skip_expr_on_double_type(def_stmt)) {
-+ new_lhs = dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
-+ insert_cast_expr(visited, as_a_gassign(get_def_stmt(new_lhs)), NO_INTENTIONAL_OVERFLOW);
-+ return new_lhs;
-+ }
-+
-+ if (is_a_neg_overflow(def_stmt, rhs2))
-+ return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs1, NULL_TREE);
-+ if (is_a_neg_overflow(def_stmt, rhs1))
-+ return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs2, new_rhs2);
-+
-+
-+ if (is_a_constant_overflow(def_stmt, rhs2))
-+ return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, NULL_TREE);
-+ if (is_a_constant_overflow(def_stmt, rhs1))
-+ return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, new_rhs2);
-+
-+ // the const is between 0 and (signed) MAX
-+ if (is_gimple_constant(rhs1))
-+ new_rhs1 = create_assign(visited, def_stmt, rhs1, BEFORE_STMT);
-+ if (is_gimple_constant(rhs2))
-+ new_rhs2 = create_assign(visited, def_stmt, rhs2, BEFORE_STMT);
-+
-+ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
-+}
-+
-+#if BUILDING_GCC_VERSION >= 4006
-+static tree get_new_rhs(struct visited *visited, struct cgraph_node *caller_node, tree size_overflow_type, tree rhs)
-+{
-+ if (is_gimple_constant(rhs))
-+ return cast_a_tree(size_overflow_type, rhs);
-+ if (TREE_CODE(rhs) != SSA_NAME)
-+ return NULL_TREE;
-+ return expand(visited, caller_node, rhs);
-+}
-+
-+static tree handle_ternary_ops(struct visited *visited, struct cgraph_node *caller_node, tree lhs)
-+{
-+ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
-+ gassign *def_stmt = as_a_gassign(get_def_stmt(lhs));
-+
-+ size_overflow_type = get_size_overflow_type(visited, def_stmt, lhs);
-+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs2 = gimple_assign_rhs2(def_stmt);
-+ rhs3 = gimple_assign_rhs3(def_stmt);
-+ new_rhs1 = get_new_rhs(visited, caller_node, size_overflow_type, rhs1);
-+ new_rhs2 = get_new_rhs(visited, caller_node, size_overflow_type, rhs2);
-+ new_rhs3 = get_new_rhs(visited, caller_node, size_overflow_type, rhs3);
-+
-+ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3);
-+}
-+#endif
-+
-+static tree get_my_stmt_lhs(struct visited *visited, gimple stmt)
-+{
-+ gimple_stmt_iterator gsi;
-+ gimple next_stmt = NULL;
-+
-+ gsi = gsi_for_stmt(stmt);
-+
-+ do {
-+ gsi_next(&gsi);
-+ next_stmt = gsi_stmt(gsi);
-+
-+ if (gimple_code(stmt) == GIMPLE_PHI && !pointer_set_contains(visited->my_stmts, next_stmt))
-+ return NULL_TREE;
-+
-+ if (pointer_set_contains(visited->my_stmts, next_stmt) && !pointer_set_contains(visited->skip_expr_casts, next_stmt))
-+ break;
-+
-+ gcc_assert(pointer_set_contains(visited->my_stmts, next_stmt));
-+ } while (!gsi_end_p(gsi));
-+
-+ gcc_assert(next_stmt);
-+ return get_lhs(next_stmt);
-+}
-+
-+static tree expand_visited(struct visited *visited, gimple def_stmt)
-+{
-+ gimple_stmt_iterator gsi;
-+ enum gimple_code code = gimple_code(def_stmt);
-+
-+ if (code == GIMPLE_ASM)
-+ return NULL_TREE;
-+
-+ gsi = gsi_for_stmt(def_stmt);
-+ gsi_next(&gsi);
-+
-+ if (gimple_code(def_stmt) == GIMPLE_PHI && gsi_end_p(gsi))
-+ return NULL_TREE;
-+ return get_my_stmt_lhs(visited, def_stmt);
-+}
-+
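-+// Recursively walk the use-def chain of lhs, recompute every stmt on it with the widened
-+// type and return the recomputed ssa name (NULL_TREE when there is nothing to duplicate).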
-+tree expand(struct visited *visited, struct cgraph_node *caller_node, tree lhs)
-+{
-+ gimple def_stmt;
-+
-+ def_stmt = get_def_stmt(lhs);
-+
-+ if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
-+ return NULL_TREE;
-+
-+ if (pointer_set_contains(visited->my_stmts, def_stmt))
-+ return lhs;
-+
-+ if (pointer_set_contains(visited->stmts, def_stmt))
-+ return expand_visited(visited, def_stmt);
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_PHI:
-+ return handle_phi(visited, caller_node, lhs);
-+ case GIMPLE_CALL:
-+ case GIMPLE_ASM:
-+ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
-+ case GIMPLE_ASSIGN:
-+ switch (gimple_num_ops(def_stmt)) {
-+ case 2:
-+ return handle_unary_ops(visited, caller_node, as_a_gassign(def_stmt));
-+ case 3:
-+ return handle_binary_ops(visited, caller_node, lhs);
-+#if BUILDING_GCC_VERSION >= 4006
-+ case 4:
-+ return handle_ternary_ops(visited, caller_node, lhs);
-+#endif
-+ }
-+ default:
-+ debug_gimple_stmt(def_stmt);
-+ error("%s: unknown gimple code", __func__);
-+ gcc_unreachable();
-+ }
-+}
-+
-diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c
-new file mode 100644
-index 0000000..e1e6e19
---- /dev/null
-+++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c
-@@ -0,0 +1,1157 @@
-+/*
-+ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
-+ * Licensed under the GPL v2, or (at your option) v3
-+ *
-+ * Homepage:
-+ * http://www.grsecurity.net/~ephox/overflow_plugin/
-+ * https://github.com/ephox-gcc-plugins
-+ *
-+ * Documentation:
-+ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
-+ *
-+ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
-+ * with double integer precision (DImode/TImode for 32/64 bit integer types).
-+ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
-+ *
-+ * Usage:
-+ * $ make
-+ * $ make run
-+ */
-+
-+#include "gcc-common.h"
-+#include "size_overflow.h"
-+
-+#define VEC_LEN 128
-+#define RET_CHECK NULL_TREE
-+#define WRONG_NODE 32
-+#define NOT_INTENTIONAL_ASM NULL
-+
-+unsigned int call_count;
-+
-+static void set_conditions(gimple_set *visited, bool *interesting_conditions, const_tree lhs);
-+static void walk_use_def(gimple_set *visited, struct interesting_node *cur_node, tree lhs);
-+
-+struct visited_fns {
-+ struct visited_fns *next;
-+ const_tree fndecl;
-+ unsigned int num;
-+ const_gimple first_stmt;
-+};
-+
-+struct next_cgraph_node {
-+ struct next_cgraph_node *next;
-+ struct cgraph_node *current_function;
-+ tree callee_fndecl;
-+ unsigned int num;
-+};
-+
-+// Don't want to duplicate entries in next_cgraph_node
-+static bool is_in_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, const_tree fndecl, unsigned int num)
-+{
-+ const_tree new_callee_fndecl;
-+ struct next_cgraph_node *cur_node;
-+
-+ if (fndecl == RET_CHECK)
-+ new_callee_fndecl = NODE_DECL(node);
-+ else
-+ new_callee_fndecl = fndecl;
-+
-+ for (cur_node = head; cur_node; cur_node = cur_node->next) {
-+ if (!operand_equal_p(NODE_DECL(cur_node->current_function), NODE_DECL(node), 0))
-+ continue;
-+ if (!operand_equal_p(cur_node->callee_fndecl, new_callee_fndecl, 0))
-+ continue;
-+ if (num == cur_node->num)
-+ return true;
-+ }
-+ return false;
-+}
-+
-+/* Add a next_cgraph_node into the list for handle_function().
-+ * handle_function() iterates over all the next cgraph nodes and
-+ * starts the overflow check insertion process.
-+ */
-+static struct next_cgraph_node *create_new_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, tree fndecl, unsigned int num)
-+{
-+ struct next_cgraph_node *new_node;
-+
-+ if (is_in_next_cgraph_node(head, node, fndecl, num))
-+ return head;
-+
-+ new_node = (struct next_cgraph_node *)xmalloc(sizeof(*new_node));
-+ new_node->current_function = node;
-+ new_node->next = NULL;
-+ new_node->num = num;
-+ if (fndecl == RET_CHECK)
-+ new_node->callee_fndecl = NODE_DECL(node);
-+ else
-+ new_node->callee_fndecl = fndecl;
-+
-+ if (!head)
-+ return new_node;
-+
-+ new_node->next = head;
-+ return new_node;
-+}
-+
-+static struct next_cgraph_node *create_new_next_cgraph_nodes(struct next_cgraph_node *head, struct cgraph_node *node, unsigned int num)
-+{
-+ struct cgraph_edge *e;
-+
-+ if (num == 0)
-+ return create_new_next_cgraph_node(head, node, RET_CHECK, num);
-+
-+ for (e = node->callers; e; e = e->next_caller) {
-+ tree fndecl = gimple_call_fndecl(e->call_stmt);
-+
-+ gcc_assert(fndecl != NULL_TREE);
-+ head = create_new_next_cgraph_node(head, e->caller, fndecl, num);
-+ }
-+
-+ return head;
-+}
-+
-+struct missing_functions {
-+ struct missing_functions *next;
-+ const_tree node;
-+ tree fndecl;
-+};
-+
-+static struct missing_functions *create_new_missing_function(struct missing_functions *missing_fn_head, tree node)
-+{
-+ struct missing_functions *new_function;
-+
-+ new_function = (struct missing_functions *)xmalloc(sizeof(*new_function));
-+ new_function->node = node;
-+ new_function->next = NULL;
-+
-+ if (TREE_CODE(node) == FUNCTION_DECL)
-+ new_function->fndecl = node;
-+ else
-+ new_function->fndecl = current_function_decl;
-+ gcc_assert(new_function->fndecl);
-+
-+ if (!missing_fn_head)
-+ return new_function;
-+
-+ new_function->next = missing_fn_head;
-+ return new_function;
-+}
-+
-+/* If the function is missing from the hash table and it is a static function,
-+ * then create a next_cgraph_node from it for handle_function().
-+ */
-+static struct next_cgraph_node *check_missing_overflow_attribute_and_create_next_node(struct next_cgraph_node *cnodes, struct missing_functions *missing_fn_head)
-+{
-+ unsigned int num;
-+ const_tree orig_fndecl;
-+ struct cgraph_node *next_node = NULL;
-+
-+ orig_fndecl = DECL_ORIGIN(missing_fn_head->fndecl);
-+
-+ num = get_function_num(missing_fn_head->node, orig_fndecl);
-+ if (num == CANNOT_FIND_ARG)
-+ return cnodes;
-+
-+ if (!is_missing_function(orig_fndecl, num))
-+ return cnodes;
-+
-+ next_node = cgraph_get_node(missing_fn_head->fndecl);
-+ if (next_node && next_node->local.local)
-+ cnodes = create_new_next_cgraph_nodes(cnodes, next_node, num);
-+ return cnodes;
-+}
-+
-+/* Search for missing size_overflow attributes on the last nodes in ipa and collect them
-+ * into the next_cgraph_node list. They will be the next interesting returns or callees.
-+ */
-+static struct next_cgraph_node *search_overflow_attribute(struct next_cgraph_node *cnodes, struct interesting_node *cur_node)
-+{
-+ unsigned int i;
-+ tree node;
-+ struct missing_functions *cur, *missing_fn_head = NULL;
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, node) {
-+#else
-+ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, node) {
-+#endif
-+ switch (TREE_CODE(node)) {
-+ case PARM_DECL:
-+ if (TREE_CODE(TREE_TYPE(node)) != INTEGER_TYPE)
-+ break;
-+ case FUNCTION_DECL:
-+ missing_fn_head = create_new_missing_function(missing_fn_head, node);
-+ break;
-+ default:
-+ break;
-+ }
-+ }
-+
-+ while (missing_fn_head) {
-+ cnodes = check_missing_overflow_attribute_and_create_next_node(cnodes, missing_fn_head);
-+
-+ cur = missing_fn_head->next;
-+ free(missing_fn_head);
-+ missing_fn_head = cur;
-+ }
-+
-+ return cnodes;
-+}
-+
-+static void walk_phi_set_conditions(gimple_set *visited, bool *interesting_conditions, const_tree result)
-+{
-+ gphi *phi = as_a_gphi(get_def_stmt(result));
-+ unsigned int i, n = gimple_phi_num_args(phi);
-+
-+ pointer_set_insert(visited, phi);
-+ for (i = 0; i < n; i++) {
-+ const_tree arg = gimple_phi_arg_def(phi, i);
-+
-+ set_conditions(visited, interesting_conditions, arg);
-+ }
-+}
-+
-+enum conditions {
-+ FROM_CONST, NOT_UNARY, CAST, RET, PHI
-+};
-+
-+// Search for constants, cast assignments and binary/ternary assignments
-+static void set_conditions(gimple_set *visited, bool *interesting_conditions, const_tree lhs)
-+{
-+ gimple def_stmt = get_def_stmt(lhs);
-+
-+ if (is_gimple_constant(lhs)) {
-+ interesting_conditions[FROM_CONST] = true;
-+ return;
-+ }
-+
-+ if (!def_stmt)
-+ return;
-+
-+ if (pointer_set_contains(visited, def_stmt))
-+ return;
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_CALL:
-+ if (lhs == gimple_call_lhs(as_a_const_gcall(def_stmt)))
-+ interesting_conditions[RET] = true;
-+ return;
-+ case GIMPLE_NOP:
-+ case GIMPLE_ASM:
-+ return;
-+ case GIMPLE_PHI:
-+ interesting_conditions[PHI] = true;
-+ return walk_phi_set_conditions(visited, interesting_conditions, lhs);
-+ case GIMPLE_ASSIGN: {
-+ gassign *assign = as_a_gassign(def_stmt);
-+
-+ if (gimple_num_ops(assign) == 2) {
-+ const_tree rhs = gimple_assign_rhs1(assign);
-+
-+ if (gimple_assign_cast_p(assign))
-+ interesting_conditions[CAST] = true;
-+
-+ return set_conditions(visited, interesting_conditions, rhs);
-+ } else {
-+ interesting_conditions[NOT_UNARY] = true;
-+ return;
-+ }
-+ }
-+ default:
-+ debug_gimple_stmt(def_stmt);
-+ gcc_unreachable();
-+ }
-+}
-+
-+// determine whether duplication will be necessary or not.
-+static void search_interesting_conditions(struct interesting_node *cur_node, bool *interesting_conditions)
-+{
-+ gimple_set *visited;
-+
-+ if (gimple_assign_cast_p(cur_node->first_stmt))
-+ interesting_conditions[CAST] = true;
-+ else if (is_gimple_assign(cur_node->first_stmt) && gimple_num_ops(cur_node->first_stmt) > 2)
-+ interesting_conditions[NOT_UNARY] = true;
-+
-+ visited = pointer_set_create();
-+ set_conditions(visited, interesting_conditions, cur_node->node);
-+ pointer_set_destroy(visited);
-+}
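The five conditions above map directly onto GIMPLE statement shapes; a minimal sketch (the SSA names are hypothetical and shown as a C comment for illustration only):

/*
 *   len_2 = foo(...);          lhs of a call            -> RET
 *   len_3 = (size_t) count_1;  unary cast assign        -> CAST, then recurse into count_1
 *   len_4 = a_5 * b_6;         binary/ternary assign    -> NOT_UNARY (recursion stops here)
 *   len_7 = PHI <0, x_8>       phi node                 -> PHI; the constant argument sets FROM_CONST
 */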
-+
-+// Remove the size_overflow asm stmt and create an assignment from the input and output of the asm
-+static void replace_size_overflow_asm_with_assign(gasm *asm_stmt, tree lhs, tree rhs)
-+{
-+ gassign *assign;
-+ gimple_stmt_iterator gsi;
-+
-+ // already removed
-+ if (gimple_bb(asm_stmt) == NULL)
-+ return;
-+ gsi = gsi_for_stmt(asm_stmt);
-+
-+ assign = gimple_build_assign(lhs, rhs);
-+ gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
-+ SSA_NAME_DEF_STMT(lhs) = assign;
-+
-+ gsi_remove(&gsi, true);
-+}
-+
-+/* Get the fndecl of an interesting stmt: it is the fndecl of the caller function if the interesting
-+ * stmt is a return, otherwise it is the fndecl of the callee function.
-+ */
-+const_tree get_interesting_orig_fndecl(const_gimple stmt, unsigned int argnum)
-+{
-+ const_tree fndecl;
-+
-+ if (argnum == 0)
-+ fndecl = current_function_decl;
-+ else
-+ fndecl = gimple_call_fndecl(stmt);
-+
-+ if (fndecl == NULL_TREE)
-+ return NULL_TREE;
-+
-+ return DECL_ORIGIN(fndecl);
-+}
-+
-+// e.g., 3.8.2, 64, arch/x86/ia32/ia32_signal.c copy_siginfo_from_user32(): compat_ptr() u32 max
-+static bool skip_asm(const_tree arg)
-+{
-+ gimple def_stmt = get_def_stmt(arg);
-+
-+ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
-+ return false;
-+
-+ def_stmt = get_def_stmt(gimple_assign_rhs1(as_a_gassign(def_stmt)));
-+ return def_stmt && gimple_code(def_stmt) == GIMPLE_ASM;
-+}
-+
-+static void walk_use_def_phi(gimple_set *visited, struct interesting_node *cur_node, tree result)
-+{
-+ gphi *phi = as_a_gphi(get_def_stmt(result));
-+ unsigned int i, n = gimple_phi_num_args(phi);
-+
-+ pointer_set_insert(visited, phi);
-+ for (i = 0; i < n; i++) {
-+ tree arg = gimple_phi_arg_def(phi, i);
-+
-+ walk_use_def(visited, cur_node, arg);
-+ }
-+}
-+
-+static void walk_use_def_binary(gimple_set *visited, struct interesting_node *cur_node, tree lhs)
-+{
-+ gassign *def_stmt = as_a_gassign(get_def_stmt(lhs));
-+ tree rhs1, rhs2;
-+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs2 = gimple_assign_rhs2(def_stmt);
-+
-+ walk_use_def(visited, cur_node, rhs1);
-+ walk_use_def(visited, cur_node, rhs2);
-+}
-+
-+static void insert_last_node(struct interesting_node *cur_node, tree node)
-+{
-+ unsigned int i;
-+ tree element;
-+ enum tree_code code;
-+
-+ gcc_assert(node != NULL_TREE);
-+
-+ if (is_gimple_constant(node))
-+ return;
-+
-+ code = TREE_CODE(node);
-+ if (code == VAR_DECL) {
-+ node = DECL_ORIGIN(node);
-+ code = TREE_CODE(node);
-+ }
-+
-+ if (code != PARM_DECL && code != FUNCTION_DECL && code != COMPONENT_REF)
-+ return;
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, element) {
-+#else
-+ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, element) {
-+#endif
-+ if (operand_equal_p(node, element, 0))
-+ return;
-+ }
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ gcc_assert(VEC_length(tree, cur_node->last_nodes) < VEC_LEN);
-+ VEC_safe_push(tree, gc, cur_node->last_nodes, node);
-+#else
-+ gcc_assert(cur_node->last_nodes->length() < VEC_LEN);
-+ vec_safe_push(cur_node->last_nodes, node);
-+#endif
-+}
-+
-+// a size_overflow asm stmt in the control flow doesn't stop the recursion
-+static void handle_asm_stmt(gimple_set *visited, struct interesting_node *cur_node, tree lhs, const gasm *stmt)
-+{
-+ if (gimple_code(stmt) != GIMPLE_ASM || !is_size_overflow_asm(stmt))
-+ walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
-+}
-+
-+/* collect the parm_decls and fndecls (for checking a missing size_overflow attribute (ret or arg) or intentional_overflow)
-+ * and component refs (for checking the intentional_overflow attribute).
-+ */
-+static void walk_use_def(gimple_set *visited, struct interesting_node *cur_node, tree lhs)
-+{
-+ const_gimple def_stmt;
-+
-+ if (TREE_CODE(lhs) != SSA_NAME) {
-+ insert_last_node(cur_node, lhs);
-+ return;
-+ }
-+
-+ def_stmt = get_def_stmt(lhs);
-+ if (!def_stmt)
-+ return;
-+
-+ if (pointer_set_insert(visited, def_stmt))
-+ return;
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_NOP:
-+ return walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
-+ case GIMPLE_ASM:
-+ return handle_asm_stmt(visited, cur_node, lhs, as_a_const_gasm(def_stmt));
-+ case GIMPLE_CALL: {
-+ tree fndecl = gimple_call_fndecl(as_a_const_gcall(def_stmt));
-+
-+ if (fndecl == NULL_TREE)
-+ return;
-+ insert_last_node(cur_node, fndecl);
-+ return;
-+ }
-+ case GIMPLE_PHI:
-+ return walk_use_def_phi(visited, cur_node, lhs);
-+ case GIMPLE_ASSIGN:
-+ switch (gimple_num_ops(def_stmt)) {
-+ case 2:
-+ return walk_use_def(visited, cur_node, gimple_assign_rhs1(as_a_const_gassign(def_stmt)));
-+ case 3:
-+ return walk_use_def_binary(visited, cur_node, lhs);
-+ }
-+ default:
-+ debug_gimple_stmt((gimple)def_stmt);
-+ error("%s: unknown gimple code", __func__);
-+ gcc_unreachable();
-+ }
-+}
-+
-+// Collect all the last nodes for checking the intentional_overflow and size_overflow attributes
-+static void set_last_nodes(struct interesting_node *cur_node)
-+{
-+ gimple_set *visited;
-+
-+ visited = pointer_set_create();
-+ walk_use_def(visited, cur_node, cur_node->node);
-+ pointer_set_destroy(visited);
-+}
-+
-+enum precond {
-+ NO_ATTRIBUTE_SEARCH, NO_CHECK_INSERT, NONE
-+};
-+
-+/* If there is a mark_turn_off intentional attribute on the caller or the callee then neither stmt duplication nor the missing size_overflow attribute check is done anywhere.
-+ * The missing size_overflow attribute check is only done when the intentional_overflow attribute is of the mark_no type.
-+ * Stmt duplication is unnecessary if there are no binary/ternary assignments or if the unary assignment isn't a cast.
-+ * It skips the possible error codes too.
-+ */
-+static enum precond check_preconditions(struct interesting_node *cur_node)
-+{
-+ bool interesting_conditions[5] = {false, false, false, false, false};
-+
-+ set_last_nodes(cur_node);
-+
-+ check_intentional_attribute_ipa(cur_node);
-+ if (cur_node->intentional_attr_decl == MARK_TURN_OFF || cur_node->intentional_attr_cur_fndecl == MARK_TURN_OFF)
-+ return NO_ATTRIBUTE_SEARCH;
-+
-+ search_interesting_conditions(cur_node, interesting_conditions);
-+
-+ // error code: a phi, unary assign (not cast) and returns only
-+ if (!interesting_conditions[NOT_UNARY] && interesting_conditions[PHI] && interesting_conditions[RET] && !interesting_conditions[CAST])
-+ return NO_ATTRIBUTE_SEARCH;
-+
-+ // error code: def_stmts trace back to a constant and there are no binary/ternary assignments
-+ if (interesting_conditions[CAST] && interesting_conditions[FROM_CONST] && !interesting_conditions[NOT_UNARY])
-+ return NO_ATTRIBUTE_SEARCH;
-+
-+ // unnecessary overflow check
-+ if (!interesting_conditions[CAST] && !interesting_conditions[NOT_UNARY])
-+ return NO_CHECK_INSERT;
-+
-+ if (cur_node->intentional_attr_cur_fndecl != MARK_NO)
-+ return NO_CHECK_INSERT;
-+
-+ return NONE;
-+}
-+
-+static tree cast_to_orig_type(struct visited *visited, gimple stmt, const_tree orig_node, tree new_node)
-+{
-+ const_gimple assign;
-+ tree orig_type = TREE_TYPE(orig_node);
-+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
-+
-+ assign = build_cast_stmt(visited, orig_type, new_node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+ return get_lhs(assign);
-+}
-+
-+static void change_orig_node(struct visited *visited, struct interesting_node *cur_node, tree new_node)
-+{
-+ void (*set_rhs)(gimple, tree);
-+ gimple stmt = cur_node->first_stmt;
-+ const_tree orig_node = cur_node->node;
-+
-+ switch (gimple_code(stmt)) {
-+ case GIMPLE_RETURN:
-+ gimple_return_set_retval(as_a_greturn(stmt), cast_to_orig_type(visited, stmt, orig_node, new_node));
-+ break;
-+ case GIMPLE_CALL:
-+ gimple_call_set_arg(as_a_gcall(stmt), cur_node->num - 1, cast_to_orig_type(visited, stmt, orig_node, new_node));
-+ break;
-+ case GIMPLE_ASSIGN:
-+ switch (cur_node->num) {
-+ case 1:
-+ set_rhs = &gimple_assign_set_rhs1;
-+ break;
-+ case 2:
-+ set_rhs = &gimple_assign_set_rhs2;
-+ break;
-+#if BUILDING_GCC_VERSION >= 4006
-+ case 3:
-+ set_rhs = &gimple_assign_set_rhs3;
-+ break;
-+#endif
-+ default:
-+ gcc_unreachable();
-+ }
-+
-+ set_rhs(as_a_gassign(stmt), cast_to_orig_type(visited, stmt, orig_node, new_node));
-+ break;
-+ default:
-+ debug_gimple_stmt(stmt);
-+ gcc_unreachable();
-+ }
-+
-+ update_stmt(stmt);
-+}
-+
-+static struct visited *create_visited(void)
-+{
-+ struct visited *new_node;
-+
-+ new_node = (struct visited *)xmalloc(sizeof(*new_node));
-+ new_node->stmts = pointer_set_create();
-+ new_node->my_stmts = pointer_set_create();
-+ new_node->skip_expr_casts = pointer_set_create();
-+ new_node->no_cast_check = pointer_set_create();
-+ return new_node;
-+}
-+
-+static void free_visited(struct visited *visited)
-+{
-+ pointer_set_destroy(visited->stmts);
-+ pointer_set_destroy(visited->my_stmts);
-+ pointer_set_destroy(visited->skip_expr_casts);
-+ pointer_set_destroy(visited->no_cast_check);
-+
-+ free(visited);
-+}
-+
-+/* This function calls the main recursion function (expand) that duplicates the stmts. Before that it checks the intentional_overflow attribute and asm stmts,
-+ * it decides whether the duplication is necessary or not and it searches for missing size_overflow attributes. After expand() it changes the orig node to the duplicated node
-+ * in the original stmt (first stmt) and it inserts the overflow check for the arg of the callee or for the return value.
-+ */
-+static struct next_cgraph_node *handle_interesting_stmt(struct visited *visited, struct next_cgraph_node *cnodes, struct interesting_node *cur_node, struct cgraph_node *caller_node)
-+{
-+ enum precond ret;
-+ tree new_node, orig_node = cur_node->node;
-+
-+ ret = check_preconditions(cur_node);
-+ if (ret == NO_ATTRIBUTE_SEARCH)
-+ return cnodes;
-+
-+ cnodes = search_overflow_attribute(cnodes, cur_node);
-+
-+ if (ret == NO_CHECK_INSERT)
-+ return cnodes;
-+
-+ new_node = expand(visited, caller_node, orig_node);
-+ if (new_node == NULL_TREE)
-+ return cnodes;
-+
-+ change_orig_node(visited, cur_node, new_node);
-+ check_size_overflow(caller_node, cur_node->first_stmt, TREE_TYPE(new_node), new_node, orig_node, BEFORE_STMT);
-+
-+ return cnodes;
-+}
-+
-+// Check whether the stmt is already in the collected interesting_node list (avoid duplicate entries).
-+static bool is_in_interesting_node(struct interesting_node *head, const_gimple first_stmt, const_tree node, unsigned int num)
-+{
-+ struct interesting_node *cur;
-+
-+ for (cur = head; cur; cur = cur->next) {
-+ if (!operand_equal_p(node, cur->node, 0))
-+ continue;
-+ if (num != cur->num)
-+ continue;
-+ if (first_stmt == cur->first_stmt)
-+ return true;
-+ }
-+ return false;
-+}
-+
-+/* Create an interesting node. The ipa pass starts to duplicate from these stmts.
-+ first_stmt: the call, assignment or ret stmt; change_orig_node() will change the original node (retval or function arg) in this stmt
-+ last_nodes: the last stmts in the recursion (they have no def_stmt). They are useful in the missing size_overflow attribute check and
-+ the intentional_overflow attribute check. They are collected by set_last_nodes().
-+ num: arg count of a call stmt or 0 when it is a ret
-+ node: the recursion starts from here, it is a call arg or a return value
-+ fndecl: the fndecl of the interesting node; when the node is an arg it is the fndecl of the callee function, otherwise it is the fndecl of the caller (current_function_decl) function.
-+ intentional_attr_decl: intentional_overflow attribute of the callee function
-+ intentional_attr_cur_fndecl: intentional_overflow attribute of the caller function
-+ intentional_mark_from_gimple: the intentional overflow type of size_overflow asm stmt from gimple if it exists
-+ */
-+static struct interesting_node *create_new_interesting_node(struct interesting_node *head, gimple first_stmt, tree node, unsigned int num, gasm *asm_stmt)
-+{
-+ struct interesting_node *new_node;
-+ tree fndecl;
-+ enum gimple_code code;
-+
-+ gcc_assert(node != NULL_TREE);
-+ code = gimple_code(first_stmt);
-+ gcc_assert(code == GIMPLE_CALL || code == GIMPLE_ASM || code == GIMPLE_ASSIGN || code == GIMPLE_RETURN);
-+
-+ if (num == CANNOT_FIND_ARG)
-+ return head;
-+
-+ if (skip_types(node))
-+ return head;
-+
-+ if (skip_asm(node))
-+ return head;
-+
-+ if (is_gimple_call(first_stmt))
-+ fndecl = gimple_call_fndecl(as_a_const_gcall(first_stmt));
-+ else
-+ fndecl = current_function_decl;
-+
-+ if (fndecl == NULL_TREE)
-+ return head;
-+
-+ if (is_in_interesting_node(head, first_stmt, node, num))
-+ return head;
-+
-+ new_node = (struct interesting_node *)xmalloc(sizeof(*new_node));
-+
-+ new_node->next = NULL;
-+ new_node->first_stmt = first_stmt;
-+#if BUILDING_GCC_VERSION <= 4007
-+ new_node->last_nodes = VEC_alloc(tree, gc, VEC_LEN);
-+#else
-+ vec_alloc(new_node->last_nodes, VEC_LEN);
-+#endif
-+ new_node->num = num;
-+ new_node->node = node;
-+ new_node->fndecl = fndecl;
-+ new_node->intentional_attr_decl = MARK_NO;
-+ new_node->intentional_attr_cur_fndecl = MARK_NO;
-+ new_node->intentional_mark_from_gimple = asm_stmt;
-+
-+ if (!head)
-+ return new_node;
-+
-+ new_node->next = head;
-+ return new_node;
-+}
-+
-+/* Check the ret stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
-+ * If the ret stmt is in the next cgraph node list then it's an interesting ret.
-+ */
-+static struct interesting_node *handle_stmt_by_cgraph_nodes_ret(struct interesting_node *head, greturn *stmt, struct next_cgraph_node *next_node)
-+{
-+ struct next_cgraph_node *cur_node;
-+ tree ret = gimple_return_retval(stmt);
-+
-+ if (ret == NULL_TREE)
-+ return head;
-+
-+ for (cur_node = next_node; cur_node; cur_node = cur_node->next) {
-+ if (!operand_equal_p(cur_node->callee_fndecl, DECL_ORIGIN(current_function_decl), 0))
-+ continue;
-+ if (cur_node->num == 0)
-+ head = create_new_interesting_node(head, stmt, ret, 0, NOT_INTENTIONAL_ASM);
-+ }
-+
-+ return head;
-+}
-+
-+/* Check the call stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
-+ * If the call stmt is in the next cgraph node list then it's an interesting call.
-+ */
-+static struct interesting_node *handle_stmt_by_cgraph_nodes_call(struct interesting_node *head, gcall *stmt, struct next_cgraph_node *next_node)
-+{
-+ unsigned int argnum;
-+ tree arg;
-+ const_tree fndecl;
-+ struct next_cgraph_node *cur_node;
-+
-+ fndecl = gimple_call_fndecl(stmt);
-+ if (fndecl == NULL_TREE)
-+ return head;
-+
-+ for (cur_node = next_node; cur_node; cur_node = cur_node->next) {
-+ if (!operand_equal_p(cur_node->callee_fndecl, fndecl, 0))
-+ continue;
-+ argnum = get_correct_arg_count(cur_node->num, fndecl);
-+ gcc_assert(argnum != CANNOT_FIND_ARG);
-+ if (argnum == 0)
-+ continue;
-+
-+ arg = gimple_call_arg(stmt, argnum - 1);
-+ head = create_new_interesting_node(head, stmt, arg, argnum, NOT_INTENTIONAL_ASM);
-+ }
-+
-+ return head;
-+}
-+
-+static unsigned int check_ops(const_tree orig_node, const_tree node, unsigned int ret_count)
-+{
-+ if (!operand_equal_p(orig_node, node, 0))
-+ return WRONG_NODE;
-+ if (skip_types(node))
-+ return WRONG_NODE;
-+ return ret_count;
-+}
-+
-+// Get the index of the rhs node in an assignment
-+static unsigned int get_assign_ops_count(const gassign *stmt, tree node)
-+{
-+ const_tree rhs1, rhs2;
-+ unsigned int ret;
-+
-+ gcc_assert(stmt);
-+ gcc_assert(is_gimple_assign(stmt));
-+
-+ rhs1 = gimple_assign_rhs1(stmt);
-+ gcc_assert(rhs1 != NULL_TREE);
-+
-+ switch (gimple_num_ops(stmt)) {
-+ case 2:
-+ return check_ops(node, rhs1, 1);
-+ case 3:
-+ ret = check_ops(node, rhs1, 1);
-+ if (ret != WRONG_NODE)
-+ return ret;
-+
-+ rhs2 = gimple_assign_rhs2(stmt);
-+ gcc_assert(rhs2 != NULL_TREE);
-+ return check_ops(node, rhs2, 2);
-+ default:
-+ gcc_unreachable();
-+ }
-+}
-+
-+// Find the correct arg number of a call stmt. It is needed when the interesting function is a cloned function.
-+static unsigned int find_arg_number_gimple(const_tree arg, const gcall *stmt)
-+{
-+ unsigned int i;
-+
-+ if (gimple_call_fndecl(stmt) == NULL_TREE)
-+ return CANNOT_FIND_ARG;
-+
-+ for (i = 0; i < gimple_call_num_args(stmt); i++) {
-+ tree node;
-+
-+ node = gimple_call_arg(stmt, i);
-+ if (!operand_equal_p(arg, node, 0))
-+ continue;
-+ if (!skip_types(node))
-+ return i + 1;
-+ }
-+
-+ return CANNOT_FIND_ARG;
-+}
-+
-+/* starting from the size_overflow asm stmt collect interesting stmts. They can be
-+ * any of return, call or assignment stmts (because of inlining).
-+ */
-+static struct interesting_node *get_interesting_ret_or_call(tree_set *visited, struct interesting_node *head, tree node, gasm *intentional_asm)
-+{
-+ use_operand_p use_p;
-+ imm_use_iterator imm_iter;
-+ unsigned int argnum;
-+
-+ gcc_assert(TREE_CODE(node) == SSA_NAME);
-+
-+ if (pointer_set_insert(visited, node))
-+ return head;
-+
-+ FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
-+ gimple stmt = USE_STMT(use_p);
-+
-+ if (stmt == NULL)
-+ return head;
-+ if (is_gimple_debug(stmt))
-+ continue;
-+
-+ switch (gimple_code(stmt)) {
-+ case GIMPLE_CALL:
-+ argnum = find_arg_number_gimple(node, as_a_gcall(stmt));
-+ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
-+ break;
-+ case GIMPLE_RETURN:
-+ head = create_new_interesting_node(head, stmt, node, 0, intentional_asm);
-+ break;
-+ case GIMPLE_ASSIGN:
-+ argnum = get_assign_ops_count(as_a_const_gassign(stmt), node);
-+ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
-+ break;
-+ case GIMPLE_PHI: {
-+ tree result = gimple_phi_result(as_a_gphi(stmt));
-+ head = get_interesting_ret_or_call(visited, head, result, intentional_asm);
-+ break;
-+ }
-+ case GIMPLE_ASM: {
-+ gasm *asm_stmt = as_a_gasm(stmt);
-+
-+ if (gimple_asm_noutputs(asm_stmt) != 0)
-+ break;
-+ if (!is_size_overflow_asm(asm_stmt))
-+ break;
-+ head = create_new_interesting_node(head, asm_stmt, node, 1, intentional_asm);
-+ break;
-+ }
-+ case GIMPLE_COND:
-+ case GIMPLE_SWITCH:
-+ break;
-+ default:
-+ debug_gimple_stmt(stmt);
-+ gcc_unreachable();
-+ break;
-+ }
-+ }
-+ return head;
-+}
-+
-+static void remove_size_overflow_asm(gimple stmt)
-+{
-+ gasm *asm_stmt;
-+ gimple_stmt_iterator gsi;
-+ tree input, output;
-+
-+ if (gimple_code(stmt) != GIMPLE_ASM)
-+ return;
-+
-+ asm_stmt = as_a_gasm(stmt);
-+ if (!is_size_overflow_asm(asm_stmt))
-+ return;
-+
-+ if (gimple_asm_noutputs(asm_stmt) == 0) {
-+ gsi = gsi_for_stmt(asm_stmt);
-+ ipa_remove_stmt_references(cgraph_get_create_node(current_function_decl), asm_stmt);
-+ gsi_remove(&gsi, true);
-+ return;
-+ }
-+
-+ input = gimple_asm_input_op(asm_stmt, 0);
-+ output = gimple_asm_output_op(asm_stmt, 0);
-+ replace_size_overflow_asm_with_assign(asm_stmt, TREE_VALUE(output), TREE_VALUE(input));
-+}
-+
-+/* handle the size_overflow asm stmts from the gimple pass and collect the interesting stmts.
-+ * If the asm stmt is a parm_decl kind (noutputs == 0) then remove it.
-+ * If it is a simple asm stmt then replace it with an assignment from the asm input to the asm output.
-+ */
-+static struct interesting_node *handle_stmt_by_size_overflow_asm(gasm *asm_stmt, struct interesting_node *head)
-+{
-+ const_tree output;
-+ tree_set *visited;
-+ gasm *intentional_asm = NOT_INTENTIONAL_ASM;
-+
-+ if (!is_size_overflow_asm(asm_stmt))
-+ return head;
-+
-+ if (is_size_overflow_intentional_asm_yes(asm_stmt) || is_size_overflow_intentional_asm_turn_off(asm_stmt))
-+ intentional_asm = asm_stmt;
-+
-+ gcc_assert(gimple_asm_ninputs(asm_stmt) == 1);
-+
-+ if (gimple_asm_noutputs(asm_stmt) == 0 && is_size_overflow_intentional_asm_turn_off(asm_stmt))
-+ return head;
-+
-+ if (gimple_asm_noutputs(asm_stmt) == 0) {
-+ const_tree input;
-+
-+ if (!is_size_overflow_intentional_asm_turn_off(asm_stmt))
-+ return head;
-+
-+ input = gimple_asm_input_op(asm_stmt, 0);
-+ remove_size_overflow_asm(asm_stmt);
-+ if (is_gimple_constant(TREE_VALUE(input)))
-+ return head;
-+ visited = tree_pointer_set_create();
-+ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(input), intentional_asm);
-+ pointer_set_destroy(visited);
-+ return head;
-+ }
-+
-+ if (!is_size_overflow_intentional_asm_yes(asm_stmt) && !is_size_overflow_intentional_asm_turn_off(asm_stmt))
-+ remove_size_overflow_asm(asm_stmt);
-+
-+ visited = tree_pointer_set_create();
-+ output = gimple_asm_output_op(asm_stmt, 0);
-+ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(output), intentional_asm);
-+ pointer_set_destroy(visited);
-+ return head;
-+}
-+
-+/* Iterate over all the stmts of a function and look for the size_overflow asm stmts (they were created in the gimple pass)
-+ * or a call stmt or a return stmt and store them in the interesting_node list
-+ */
-+static struct interesting_node *collect_interesting_stmts(struct next_cgraph_node *next_node)
-+{
-+ basic_block bb;
-+ struct interesting_node *head = NULL;
-+
-+ FOR_ALL_BB_FN(bb, cfun) {
-+ gimple_stmt_iterator gsi;
-+
-+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
-+ enum gimple_code code;
-+ gimple stmt = gsi_stmt(gsi);
-+
-+ code = gimple_code(stmt);
-+
-+ if (code == GIMPLE_ASM)
-+ head = handle_stmt_by_size_overflow_asm(as_a_gasm(stmt), head);
-+
-+ if (!next_node)
-+ continue;
-+ if (code == GIMPLE_CALL)
-+ head = handle_stmt_by_cgraph_nodes_call(head, as_a_gcall(stmt), next_node);
-+ if (code == GIMPLE_RETURN)
-+ head = handle_stmt_by_cgraph_nodes_ret(head, as_a_greturn(stmt), next_node);
-+ }
-+ }
-+ return head;
-+}
-+
-+static void free_interesting_node(struct interesting_node *head)
-+{
-+ struct interesting_node *cur;
-+
-+ while (head) {
-+ cur = head->next;
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC_free(tree, gc, head->last_nodes);
-+#else
-+ vec_free(head->last_nodes);
-+#endif
-+ free(head);
-+ head = cur;
-+ }
-+}
-+
-+static struct visited_fns *insert_visited_fns_function(struct visited_fns *head, struct interesting_node *cur_node)
-+{
-+ struct visited_fns *new_visited_fns;
-+
-+ new_visited_fns = (struct visited_fns *)xmalloc(sizeof(*new_visited_fns));
-+ new_visited_fns->fndecl = cur_node->fndecl;
-+ new_visited_fns->num = cur_node->num;
-+ new_visited_fns->first_stmt = cur_node->first_stmt;
-+ new_visited_fns->next = NULL;
-+
-+ if (!head)
-+ return new_visited_fns;
-+
-+ new_visited_fns->next = head;
-+ return new_visited_fns;
-+}
-+
-+/* Check whether the function has already been visited. If the fndecl, the arg count of the fndecl and the first_stmt (call or return) are the same then
-+ * it is an already visited (visited_fns) function.
-+ */
-+static bool is_visited_fns_function(struct visited_fns *head, struct interesting_node *cur_node)
-+{
-+ struct visited_fns *cur;
-+
-+ if (!head)
-+ return false;
-+
-+ for (cur = head; cur; cur = cur->next) {
-+ if (cur_node->first_stmt != cur->first_stmt)
-+ continue;
-+ if (!operand_equal_p(cur_node->fndecl, cur->fndecl, 0))
-+ continue;
-+ if (cur_node->num == cur->num)
-+ return true;
-+ }
-+ return false;
-+}
-+
-+static void free_next_cgraph_node(struct next_cgraph_node *head)
-+{
-+ struct next_cgraph_node *cur;
-+
-+ while (head) {
-+ cur = head->next;
-+ free(head);
-+ head = cur;
-+ }
-+}
-+
-+static void remove_all_size_overflow_asm(void)
-+{
-+ basic_block bb;
-+
-+ FOR_ALL_BB_FN(bb, cfun) {
-+ gimple_stmt_iterator si;
-+
-+ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
-+ remove_size_overflow_asm(gsi_stmt(si));
-+ }
-+}
-+
-+/* Main recursive walk of the ipa pass: iterate over the collected interesting stmts in a function
-+ * (they are interesting if they have an associated size_overflow asm stmt) and recursively walk
-+ * the newly collected interesting functions (they are interesting if there is control flow between
-+ * the interesting stmts and them).
-+ */
-+static struct visited_fns *handle_function(struct cgraph_node *node, struct next_cgraph_node *next_node, struct visited_fns *visited_fns)
-+{
-+ struct visited *visited;
-+ struct interesting_node *head, *cur_node;
-+ struct next_cgraph_node *cur_cnodes, *cnodes_head = NULL;
-+
-+ set_current_function_decl(NODE_DECL(node));
-+ call_count = 0;
-+
-+ head = collect_interesting_stmts(next_node);
-+
-+ visited = create_visited();
-+ for (cur_node = head; cur_node; cur_node = cur_node->next) {
-+ if (is_visited_fns_function(visited_fns, cur_node))
-+ continue;
-+ cnodes_head = handle_interesting_stmt(visited, cnodes_head, cur_node, node);
-+ visited_fns = insert_visited_fns_function(visited_fns, cur_node);
-+ }
-+
-+ free_visited(visited);
-+ free_interesting_node(head);
-+ remove_all_size_overflow_asm();
-+ unset_current_function_decl();
-+
-+ for (cur_cnodes = cnodes_head; cur_cnodes; cur_cnodes = cur_cnodes->next)
-+ visited_fns = handle_function(cur_cnodes->current_function, cur_cnodes, visited_fns);
-+
-+ free_next_cgraph_node(cnodes_head);
-+ return visited_fns;
-+}
-+
-+static void free_visited_fns(struct visited_fns *head)
-+{
-+ struct visited_fns *cur;
-+
-+ while (head) {
-+ cur = head->next;
-+ free(head);
-+ head = cur;
-+ }
-+}
-+
-+// Main entry point of the ipa pass: erases the plf flag of all stmts and iterates over all the functions
-+unsigned int search_function(void)
-+{
-+ struct cgraph_node *node;
-+ struct visited_fns *visited_fns = NULL;
-+
-+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
-+#if BUILDING_GCC_VERSION <= 4007
-+ gcc_assert(node->reachable);
-+#endif
-+
-+ visited_fns = handle_function(node, NULL, visited_fns);
-+ }
-+
-+ free_visited_fns(visited_fns);
-+ return 0;
-+}
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
-+static const struct pass_data insert_size_overflow_check_data = {
-+#else
-+static struct ipa_opt_pass_d insert_size_overflow_check = {
-+ .pass = {
-+#endif
-+ .type = SIMPLE_IPA_PASS,
-+ .name = "size_overflow",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 5000
-+#elif BUILDING_GCC_VERSION == 4009
-+ .has_gate = false,
-+ .has_execute = true,
-+#else
-+ .gate = NULL,
-+ .execute = search_function,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = 0,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0,
-+ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_ggc_collect | TODO_verify_flow | TODO_dump_cgraph | TODO_dump_func | TODO_update_ssa_no_phi,
-+#if BUILDING_GCC_VERSION < 4009
-+ },
-+ .generate_summary = NULL,
-+ .write_summary = NULL,
-+ .read_summary = NULL,
-+#if BUILDING_GCC_VERSION >= 4006
-+ .write_optimization_summary = NULL,
-+ .read_optimization_summary = NULL,
-+#endif
-+ .stmt_fixup = NULL,
-+ .function_transform_todo_flags_start = 0,
-+ .function_transform = NULL,
-+ .variable_transform = NULL,
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+class insert_size_overflow_check : public ipa_opt_pass_d {
-+public:
-+ insert_size_overflow_check() : ipa_opt_pass_d(insert_size_overflow_check_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {}
-+#if BUILDING_GCC_VERSION >= 5000
-+ virtual unsigned int execute(function *) { return search_function(); }
-+#else
-+ unsigned int execute() { return search_function(); }
-+#endif
-+};
-+}
-+
-+opt_pass *make_insert_size_overflow_check(void)
-+{
-+ return new insert_size_overflow_check();
-+}
-+#else
-+struct opt_pass *make_insert_size_overflow_check(void)
-+{
-+ return &insert_size_overflow_check.pass;
-+}
-+#endif
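For context, a minimal sketch of how make_insert_size_overflow_check() would typically be scheduled from a plugin's plugin_init(); the reference pass name, its instance number and the helper name are assumptions for illustration and are not taken from this patch:

#include "gcc-common.h"

/* hypothetical helper, called from plugin_init() */
static void register_size_overflow_ipa_pass(struct plugin_name_args *plugin_info)
{
	struct register_pass_info pass_info;

	pass_info.pass = make_insert_size_overflow_check();
	pass_info.reference_pass_name = "inline";	/* assumed anchor pass */
	pass_info.ref_pass_instance_number = 1;
	pass_info.pos_op = PASS_POS_INSERT_AFTER;

	register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);
}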
-diff --git a/tools/gcc/size_overflow_plugin/intentional_overflow.c b/tools/gcc/size_overflow_plugin/intentional_overflow.c
-new file mode 100644
-index 0000000..eb62680
---- /dev/null
-+++ b/tools/gcc/size_overflow_plugin/intentional_overflow.c
-@@ -0,0 +1,748 @@
-+/*
-+ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
-+ * Licensed under the GPL v2, or (at your option) v3
-+ *
-+ * Homepage:
-+ * http://www.grsecurity.net/~ephox/overflow_plugin/
-+ * https://github.com/ephox-gcc-plugins
-+ *
-+ * Documentation:
-+ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
-+ *
-+ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
-+ * with double integer precision (DImode/TImode for 32/64 bit integer types).
-+ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
-+ *
-+ * Usage:
-+ * $ make
-+ * $ make run
-+ */
-+
-+#include "gcc-common.h"
-+#include "size_overflow.h"
-+
-+/* Get the param of the intentional_overflow attribute.
-+ * * 0: MARK_NOT_INTENTIONAL
-+ * * 1..MAX_PARAM: MARK_YES
-+ * * -1: MARK_TURN_OFF
-+ */
-+static tree get_attribute_param(const_tree decl)
-+{
-+ const_tree attr;
-+
-+ if (decl == NULL_TREE)
-+ return NULL_TREE;
-+
-+ attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(decl));
-+ if (!attr || !TREE_VALUE(attr))
-+ return NULL_TREE;
-+
-+ return TREE_VALUE(attr);
-+}
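A minimal sketch of how the parameter values decoded above are spelled on declarations (hypothetical prototypes, for illustration only):

void ex_not_intentional(int cnt)      __attribute__((intentional_overflow(0)));   /* MARK_NOT_INTENTIONAL */
void ex_yes(unsigned int len, int x)  __attribute__((intentional_overflow(1)));   /* MARK_YES on argument 1 */
void ex_turn_off(unsigned long off)   __attribute__((intentional_overflow(-1)));  /* MARK_TURN_OFF */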
-+
-+// MARK_TURN_OFF
-+bool is_turn_off_intentional_attr(const_tree decl)
-+{
-+ const_tree param_head;
-+
-+ param_head = get_attribute_param(decl);
-+ if (param_head == NULL_TREE)
-+ return false;
-+
-+ if (tree_to_shwi(TREE_VALUE(param_head)) == -1)
-+ return true;
-+ return false;
-+}
-+
-+// MARK_NOT_INTENTIONAL
-+bool is_end_intentional_intentional_attr(const_tree decl, unsigned int argnum)
-+{
-+ const_tree param_head;
-+
-+ if (argnum == 0)
-+ return false;
-+
-+ param_head = get_attribute_param(decl);
-+ if (param_head == NULL_TREE)
-+ return false;
-+
-+ if (!TREE_INT_CST_LOW(TREE_VALUE(param_head)))
-+ return true;
-+ return false;
-+}
-+
-+// MARK_YES
-+bool is_yes_intentional_attr(const_tree decl, unsigned int argnum)
-+{
-+ tree param, param_head;
-+
-+ if (argnum == 0)
-+ return false;
-+
-+ param_head = get_attribute_param(decl);
-+ for (param = param_head; param; param = TREE_CHAIN(param))
-+ if (argnum == TREE_INT_CST_LOW(TREE_VALUE(param)))
-+ return true;
-+ return false;
-+}
-+
-+void print_missing_intentional(enum mark callee_attr, enum mark caller_attr, const_tree decl, unsigned int argnum)
-+{
-+ location_t loc;
-+
-+ if (caller_attr == MARK_NO || caller_attr == MARK_NOT_INTENTIONAL || caller_attr == MARK_TURN_OFF)
-+ return;
-+
-+ if (callee_attr == MARK_NOT_INTENTIONAL || callee_attr == MARK_YES)
-+ return;
-+
-+ loc = DECL_SOURCE_LOCATION(decl);
-+ inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", DECL_NAME_POINTER(decl), argnum);
-+}
-+
-+// Get the field decl of a component ref for intentional_overflow checking
-+static const_tree search_field_decl(const_tree comp_ref)
-+{
-+ const_tree field = NULL_TREE;
-+ unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref);
-+
-+ for (i = 0; i < len; i++) {
-+ field = TREE_OPERAND(comp_ref, i);
-+ if (TREE_CODE(field) == FIELD_DECL)
-+ break;
-+ }
-+ gcc_assert(TREE_CODE(field) == FIELD_DECL);
-+ return field;
-+}
-+
-+/* Get the type of the intentional_overflow attribute of a node
-+ * * MARK_TURN_OFF
-+ * * MARK_YES
-+ * * MARK_NO
-+ * * MARK_NOT_INTENTIONAL
-+ */
-+enum mark get_intentional_attr_type(const_tree node)
-+{
-+ const_tree cur_decl;
-+
-+ if (node == NULL_TREE)
-+ return MARK_NO;
-+
-+ switch (TREE_CODE(node)) {
-+ case COMPONENT_REF:
-+ cur_decl = search_field_decl(node);
-+ if (is_turn_off_intentional_attr(cur_decl))
-+ return MARK_TURN_OFF;
-+ if (is_end_intentional_intentional_attr(cur_decl, 1))
-+ return MARK_YES;
-+ break;
-+ case PARM_DECL: {
-+ unsigned int argnum;
-+
-+ cur_decl = DECL_ORIGIN(current_function_decl);
-+ argnum = find_arg_number_tree(node, cur_decl);
-+ if (argnum == CANNOT_FIND_ARG)
-+ return MARK_NO;
-+ if (is_yes_intentional_attr(cur_decl, argnum))
-+ return MARK_YES;
-+ if (is_end_intentional_intentional_attr(cur_decl, argnum))
-+ return MARK_NOT_INTENTIONAL;
-+ break;
-+ }
-+ case FUNCTION_DECL:
-+ if (is_turn_off_intentional_attr(DECL_ORIGIN(node)))
-+ return MARK_TURN_OFF;
-+ break;
-+ default:
-+ break;
-+ }
-+ return MARK_NO;
-+}
-+
-+// Search for the intentional_overflow attribute on the last nodes
-+static enum mark search_last_nodes_intentional(struct interesting_node *cur_node)
-+{
-+ unsigned int i;
-+ tree last_node;
-+ enum mark mark = MARK_NO;
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, last_node) {
-+#else
-+ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, last_node) {
-+#endif
-+ mark = get_intentional_attr_type(last_node);
-+ if (mark != MARK_NO)
-+ break;
-+ }
-+ return mark;
-+}
-+
-+/* Check the intentional kind of size_overflow asm stmt (created by the gimple pass) and
-+ * set the appropriate intentional_overflow type. Delete the asm stmt in the end.
-+ */
-+static bool is_intentional_attribute_from_gimple(struct interesting_node *cur_node)
-+{
-+ if (!cur_node->intentional_mark_from_gimple)
-+ return false;
-+
-+ if (is_size_overflow_intentional_asm_yes(cur_node->intentional_mark_from_gimple))
-+ cur_node->intentional_attr_cur_fndecl = MARK_YES;
-+ else
-+ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
-+
-+ // skip param decls
-+ if (gimple_asm_noutputs(cur_node->intentional_mark_from_gimple) == 0)
-+ return true;
-+ return true;
-+}
-+
-+/* Search for the intentional_overflow attribute on the caller and on the callee too.
-+ * >0/MARK_YES: no dup, search size_overflow and intentional_overflow attributes
-+ * 0/MARK_NOT_INTENTIONAL: no dup, search size_overflow attribute (int)
-+ * -1/MARK_TURN_OFF: no dup, no search, current_function_decl -> no dup
-+ */
-+void check_intentional_attribute_ipa(struct interesting_node *cur_node)
-+{
-+ const_tree fndecl;
-+
-+ if (is_intentional_attribute_from_gimple(cur_node))
-+ return;
-+
-+ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
-+ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
-+ return;
-+ }
-+
-+ if (gimple_code(cur_node->first_stmt) == GIMPLE_ASM) {
-+ cur_node->intentional_attr_cur_fndecl = MARK_NOT_INTENTIONAL;
-+ return;
-+ }
-+
-+ if (gimple_code(cur_node->first_stmt) == GIMPLE_ASSIGN)
-+ return;
-+
-+ fndecl = get_interesting_orig_fndecl(cur_node->first_stmt, cur_node->num);
-+ if (is_turn_off_intentional_attr(fndecl)) {
-+ cur_node->intentional_attr_decl = MARK_TURN_OFF;
-+ return;
-+ }
-+
-+ if (is_end_intentional_intentional_attr(fndecl, cur_node->num))
-+ cur_node->intentional_attr_decl = MARK_NOT_INTENTIONAL;
-+ else if (is_yes_intentional_attr(fndecl, cur_node->num))
-+ cur_node->intentional_attr_decl = MARK_YES;
-+
-+ cur_node->intentional_attr_cur_fndecl = search_last_nodes_intentional(cur_node);
-+ print_missing_intentional(cur_node->intentional_attr_decl, cur_node->intentional_attr_cur_fndecl, cur_node->fndecl, cur_node->num);
-+}
-+
-+bool is_a_cast_and_const_overflow(const_tree no_const_rhs)
-+{
-+ const_tree rhs1, lhs, rhs1_type, lhs_type;
-+ enum machine_mode lhs_mode, rhs_mode;
-+ const gassign *assign;
-+ gimple def_stmt = get_def_stmt(no_const_rhs);
-+
-+ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
-+ return false;
-+
-+ assign = as_a_const_gassign(def_stmt);
-+ rhs1 = gimple_assign_rhs1(assign);
-+ lhs = gimple_assign_lhs(assign);
-+ rhs1_type = TREE_TYPE(rhs1);
-+ lhs_type = TREE_TYPE(lhs);
-+ rhs_mode = TYPE_MODE(rhs1_type);
-+ lhs_mode = TYPE_MODE(lhs_type);
-+ if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode)
-+ return false;
-+
-+ return true;
-+}
-+
-+static unsigned int uses_num(tree node)
-+{
-+ imm_use_iterator imm_iter;
-+ use_operand_p use_p;
-+ unsigned int num = 0;
-+
-+ FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
-+ gimple use_stmt = USE_STMT(use_p);
-+
-+ if (use_stmt == NULL)
-+ return num;
-+ if (is_gimple_debug(use_stmt))
-+ continue;
-+ if (gimple_assign_cast_p(use_stmt) && is_size_overflow_type(gimple_assign_lhs(as_a_const_gassign(use_stmt))))
-+ continue;
-+ num++;
-+ }
-+ return num;
-+}
-+
-+static bool no_uses(tree node)
-+{
-+ return !uses_num(node);
-+}
-+
-+// 3.8.5 mm/page-writeback.c __ilog2_u64(): ret, uint + uintmax; uint -> int; int max
-+bool is_const_plus_unsigned_signed_truncation(const_tree lhs)
-+{
-+ tree rhs1, lhs_type, rhs_type, rhs2, not_const_rhs;
-+ gassign *assign;
-+ gimple def_stmt = get_def_stmt(lhs);
-+
-+ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
-+ return false;
-+
-+ assign = as_a_gassign(def_stmt);
-+ rhs1 = gimple_assign_rhs1(assign);
-+ rhs_type = TREE_TYPE(rhs1);
-+ lhs_type = TREE_TYPE(lhs);
-+ if (TYPE_UNSIGNED(lhs_type) || !TYPE_UNSIGNED(rhs_type))
-+ return false;
-+ if (TYPE_MODE(lhs_type) != TYPE_MODE(rhs_type))
-+ return false;
-+
-+ def_stmt = get_def_stmt(rhs1);
-+ if (!def_stmt || !is_gimple_assign(def_stmt) || gimple_num_ops(def_stmt) != 3)
-+ return false;
-+
-+ assign = as_a_gassign(def_stmt);
-+ if (gimple_assign_rhs_code(assign) != PLUS_EXPR)
-+ return false;
-+
-+ rhs1 = gimple_assign_rhs1(assign);
-+ rhs2 = gimple_assign_rhs2(assign);
-+ if (!is_gimple_constant(rhs1) && !is_gimple_constant(rhs2))
-+ return false;
-+
-+ if (is_gimple_constant(rhs2))
-+ not_const_rhs = rhs1;
-+ else
-+ not_const_rhs = rhs2;
-+
-+ return no_uses(not_const_rhs);
-+}
-+
-+static bool is_lt_signed_type_max(const_tree rhs)
-+{
-+ const_tree new_type, type_max, type = TREE_TYPE(rhs);
-+
-+ if (!TYPE_UNSIGNED(type))
-+ return true;
-+
-+ switch (TYPE_MODE(type)) {
-+ case QImode:
-+ new_type = intQI_type_node;
-+ break;
-+ case HImode:
-+ new_type = intHI_type_node;
-+ break;
-+ case SImode:
-+ new_type = intSI_type_node;
-+ break;
-+ case DImode:
-+ new_type = intDI_type_node;
-+ break;
-+ default:
-+ debug_tree((tree)type);
-+ gcc_unreachable();
-+ }
-+
-+ type_max = TYPE_MAX_VALUE(new_type);
-+ if (!tree_int_cst_lt(type_max, rhs))
-+ return true;
-+
-+ return false;
-+}
-+
-+static bool is_gt_zero(const_tree rhs)
-+{
-+ const_tree type = TREE_TYPE(rhs);
-+
-+ if (TYPE_UNSIGNED(type))
-+ return true;
-+
-+ if (!tree_int_cst_lt(rhs, integer_zero_node))
-+ return true;
-+
-+ return false;
-+}
-+
-+bool is_a_constant_overflow(const gassign *stmt, const_tree rhs)
-+{
-+ if (gimple_assign_rhs_code(stmt) == MIN_EXPR)
-+ return false;
-+ if (!is_gimple_constant(rhs))
-+ return false;
-+
-+ // If the const is between 0 and the max value of the signed type of the same bitsize then there is no intentional overflow
-+ if (is_lt_signed_type_max(rhs) && is_gt_zero(rhs))
-+ return false;
-+
-+ return true;
-+}
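On a 32-bit unsigned operation this means the predicate fires only for constants outside [0, INT_MAX], i.e. values that usually encode a wrapped negative number; a sketch with hypothetical SSA names:

/*
 *   n_2 = m_1 + 0xffffffff;   constant above the signed max of the same width -> true
 *   n_3 = m_1 + 100;          constant within [0, signed max]                 -> false
 */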
-+
-+static tree change_assign_rhs(struct visited *visited, gassign *stmt, const_tree orig_rhs, tree new_rhs)
-+{
-+ gimple assign;
-+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
-+ tree origtype = TREE_TYPE(orig_rhs);
-+
-+ gcc_assert(is_gimple_assign(stmt));
-+
-+ assign = build_cast_stmt(visited, origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+ pointer_set_insert(visited->my_stmts, assign);
-+ return get_lhs(assign);
-+}
-+
-+tree handle_intentional_overflow(struct visited *visited, struct cgraph_node *caller_node, bool check_overflow, gassign *stmt, tree change_rhs, tree new_rhs2)
-+{
-+ tree new_rhs, orig_rhs;
-+ void (*gimple_assign_set_rhs)(gimple, tree);
-+ tree rhs1 = gimple_assign_rhs1(stmt);
-+ tree rhs2 = gimple_assign_rhs2(stmt);
-+ tree lhs = gimple_assign_lhs(stmt);
-+
-+ if (!check_overflow)
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+
-+ if (change_rhs == NULL_TREE)
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+
-+ if (new_rhs2 == NULL_TREE) {
-+ orig_rhs = rhs1;
-+ gimple_assign_set_rhs = &gimple_assign_set_rhs1;
-+ } else {
-+ orig_rhs = rhs2;
-+ gimple_assign_set_rhs = &gimple_assign_set_rhs2;
-+ }
-+
-+ check_size_overflow(caller_node, stmt, TREE_TYPE(change_rhs), change_rhs, orig_rhs, BEFORE_STMT);
-+
-+ new_rhs = change_assign_rhs(visited, stmt, orig_rhs, change_rhs);
-+ gimple_assign_set_rhs(stmt, new_rhs);
-+ update_stmt(stmt);
-+
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+}
-+
-+static bool is_subtraction_special(struct visited *visited, const gassign *stmt)
-+{
-+ gimple def_stmt_1, def_stmt_2;
-+ const gassign *rhs1_def_stmt, *rhs2_def_stmt;
-+ const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1, rhs1_def_stmt_lhs, rhs2_def_stmt_lhs;
-+ enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode, rhs1_def_stmt_lhs_mode, rhs2_def_stmt_lhs_mode;
-+ const_tree rhs1 = gimple_assign_rhs1(stmt);
-+ const_tree rhs2 = gimple_assign_rhs2(stmt);
-+
-+ if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
-+ return false;
-+
-+ gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
-+
-+ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
-+ return false;
-+
-+ def_stmt_1 = get_def_stmt(rhs1);
-+ def_stmt_2 = get_def_stmt(rhs2);
-+ if (!gimple_assign_cast_p(def_stmt_1) || !gimple_assign_cast_p(def_stmt_2))
-+ return false;
-+
-+ rhs1_def_stmt = as_a_const_gassign(def_stmt_1);
-+ rhs2_def_stmt = as_a_const_gassign(def_stmt_2);
-+ rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
-+ rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
-+ rhs1_def_stmt_lhs = gimple_assign_lhs(rhs1_def_stmt);
-+ rhs2_def_stmt_lhs = gimple_assign_lhs(rhs2_def_stmt);
-+
-+ rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
-+ rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
-+ rhs1_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_lhs));
-+ rhs2_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_lhs));
-+ if (GET_MODE_BITSIZE(rhs1_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs1_def_stmt_lhs_mode))
-+ return false;
-+ if (GET_MODE_BITSIZE(rhs2_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs2_def_stmt_lhs_mode))
-+ return false;
-+
-+ pointer_set_insert(visited->no_cast_check, rhs1_def_stmt);
-+ pointer_set_insert(visited->no_cast_check, rhs2_def_stmt);
-+ return true;
-+}
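A sketch of the pattern recognized above (hypothetical SSA names): both operands of the MINUS_EXPR are truncating casts, so handle_integer_truncation() later redoes the subtraction on the wider, pre-cast values:

/*
 *   a_2 = (u32) a64_1;   truncating cast (64-bit source, 32-bit destination)
 *   b_4 = (u32) b64_3;
 *   d_5 = a_2 - b_4;     MINUS_EXPR on the truncated values -> special-cased
 */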
-+
-+static gassign *create_binary_assign(struct visited *visited, enum tree_code code, gassign *stmt, tree rhs1, tree rhs2)
-+{
-+ gassign *assign;
-+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
-+ tree type = TREE_TYPE(rhs1);
-+ tree lhs = create_new_var(type);
-+
-+ gcc_assert(types_compatible_p(type, TREE_TYPE(rhs2)));
-+ assign = as_a_gassign(gimple_build_assign_with_ops(code, lhs, rhs1, rhs2));
-+ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
-+
-+ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
-+ update_stmt(assign);
-+ pointer_set_insert(visited->my_stmts, assign);
-+ return assign;
-+}
-+
-+static tree cast_to_TI_type(struct visited *visited, gimple stmt, tree node)
-+{
-+ gimple_stmt_iterator gsi;
-+ gimple cast_stmt;
-+ tree type = TREE_TYPE(node);
-+
-+ if (types_compatible_p(type, intTI_type_node))
-+ return node;
-+
-+ gsi = gsi_for_stmt(stmt);
-+ cast_stmt = build_cast_stmt(visited, intTI_type_node, node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+ pointer_set_insert(visited->my_stmts, (gimple)cast_stmt);
-+ return get_lhs(cast_stmt);
-+}
-+
-+static tree get_def_stmt_rhs(const_tree var)
-+{
-+ tree rhs1, def_stmt_rhs1;
-+ gimple rhs1_def_stmt, def_stmt_rhs1_def_stmt, def_stmt;
-+
-+ def_stmt = get_def_stmt(var);
-+ if (!gimple_assign_cast_p(def_stmt))
-+ return NULL_TREE;
-+
-+ rhs1 = gimple_assign_rhs1(as_a_const_gassign(def_stmt));
-+ rhs1_def_stmt = get_def_stmt(rhs1);
-+ if (!gimple_assign_cast_p(rhs1_def_stmt))
-+ return rhs1;
-+
-+ def_stmt_rhs1 = gimple_assign_rhs1(as_a_const_gassign(rhs1_def_stmt));
-+ def_stmt_rhs1_def_stmt = get_def_stmt(def_stmt_rhs1);
-+
-+ switch (gimple_code(def_stmt_rhs1_def_stmt)) {
-+ case GIMPLE_CALL:
-+ case GIMPLE_NOP:
-+ case GIMPLE_ASM:
-+ case GIMPLE_PHI:
-+ return def_stmt_rhs1;
-+ case GIMPLE_ASSIGN:
-+ return rhs1;
-+ default:
-+ debug_gimple_stmt(def_stmt_rhs1_def_stmt);
-+ gcc_unreachable();
-+ }
-+}
-+
-+tree handle_integer_truncation(struct visited *visited, struct cgraph_node *caller_node, const_tree lhs)
-+{
-+ tree new_rhs1, new_rhs2;
-+ tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
-+ gassign *assign, *stmt = as_a_gassign(get_def_stmt(lhs));
-+ tree rhs1 = gimple_assign_rhs1(stmt);
-+ tree rhs2 = gimple_assign_rhs2(stmt);
-+
-+ if (!is_subtraction_special(visited, stmt))
-+ return NULL_TREE;
-+
-+ new_rhs1 = expand(visited, caller_node, rhs1);
-+ new_rhs2 = expand(visited, caller_node, rhs2);
-+
-+ new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs1);
-+ new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs2);
-+
-+ if (new_rhs1_def_stmt_rhs1 == NULL_TREE || new_rhs2_def_stmt_rhs1 == NULL_TREE)
-+ return NULL_TREE;
-+
-+ if (!types_compatible_p(TREE_TYPE(new_rhs1_def_stmt_rhs1), TREE_TYPE(new_rhs2_def_stmt_rhs1))) {
-+ new_rhs1_def_stmt_rhs1 = cast_to_TI_type(visited, stmt, new_rhs1_def_stmt_rhs1);
-+ new_rhs2_def_stmt_rhs1 = cast_to_TI_type(visited, stmt, new_rhs2_def_stmt_rhs1);
-+ }
-+
-+ assign = create_binary_assign(visited, MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
-+ new_lhs = gimple_assign_lhs(assign);
-+ check_size_overflow(caller_node, assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT);
-+
-+ return dup_assign(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
-+}
-+
-+bool is_a_neg_overflow(const_gimple stmt, const_tree rhs)
-+{
-+ const_gimple def_stmt;
-+
-+ if (TREE_CODE(rhs) != SSA_NAME)
-+ return false;
-+
-+ if (gimple_assign_rhs_code(stmt) != PLUS_EXPR)
-+ return false;
-+
-+ def_stmt = get_def_stmt(rhs);
-+ if (!is_gimple_assign(def_stmt) || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR)
-+ return false;
-+
-+ return true;
-+}
-+
-+/* e.g., drivers/acpi/acpica/utids.c acpi_ut_execute_CID()
-+ * (count - 1) * sizeof(struct acpi_pnp_device_id_list) -> (count + 0xffffffff) * 16
-+ * 0xffffffff * 16 > signed max -> truncate
-+ */
-+static bool look_for_mult_and_add(const_gimple stmt)
-+{
-+ const_tree res;
-+ tree rhs1, rhs2, def_rhs1, def_rhs2, const_rhs, def_const_rhs;
-+ const_gimple def_stmt;
-+ const gassign *assign, *def_assign;
-+
-+ if (!stmt || gimple_code(stmt) == GIMPLE_NOP)
-+ return false;
-+ if (!is_gimple_assign(stmt))
-+ return false;
-+ if (gimple_assign_rhs_code(stmt) != MULT_EXPR)
-+ return false;
-+
-+ assign = as_a_const_gassign(stmt);
-+ rhs1 = gimple_assign_rhs1(assign);
-+ rhs2 = gimple_assign_rhs2(assign);
-+ if (is_gimple_constant(rhs1)) {
-+ const_rhs = rhs1;
-+ def_stmt = get_def_stmt(rhs2);
-+ } else if (is_gimple_constant(rhs2)) {
-+ const_rhs = rhs2;
-+ def_stmt = get_def_stmt(rhs1);
-+ } else
-+ return false;
-+
-+ if (!is_gimple_assign(def_stmt))
-+ return false;
-+
-+ if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR && gimple_assign_rhs_code(def_stmt) != MINUS_EXPR)
-+ return false;
-+
-+ def_assign = as_a_const_gassign(def_stmt);
-+ def_rhs1 = gimple_assign_rhs1(def_assign);
-+ def_rhs2 = gimple_assign_rhs2(def_assign);
-+ if (is_gimple_constant(def_rhs1))
-+ def_const_rhs = def_rhs1;
-+ else if (is_gimple_constant(def_rhs2))
-+ def_const_rhs = def_rhs2;
-+ else
-+ return false;
-+
-+ res = fold_binary_loc(gimple_location(def_assign), MULT_EXPR, TREE_TYPE(const_rhs), const_rhs, def_const_rhs);
-+ if (is_lt_signed_type_max(res) && is_gt_zero(res))
-+ return false;
-+ return true;
-+}
-+
-+enum intentional_overflow_type add_mul_intentional_overflow(const gassign *stmt)
-+{
-+ const_gimple def_stmt_1, def_stmt_2;
-+ const_tree rhs1, rhs2;
-+ bool add_mul_rhs1, add_mul_rhs2;
-+
-+ rhs1 = gimple_assign_rhs1(stmt);
-+ def_stmt_1 = get_def_stmt(rhs1);
-+ add_mul_rhs1 = look_for_mult_and_add(def_stmt_1);
-+
-+ rhs2 = gimple_assign_rhs2(stmt);
-+ def_stmt_2 = get_def_stmt(rhs2);
-+ add_mul_rhs2 = look_for_mult_and_add(def_stmt_2);
-+
-+ if (add_mul_rhs1)
-+ return RHS1_INTENTIONAL_OVERFLOW;
-+ if (add_mul_rhs2)
-+ return RHS2_INTENTIONAL_OVERFLOW;
-+ return NO_INTENTIONAL_OVERFLOW;
-+}
-+
-+static gimple get_dup_stmt(struct visited *visited, gimple stmt)
-+{
-+ gimple my_stmt;
-+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
-+
-+ gsi_next(&gsi);
-+ my_stmt = gsi_stmt(gsi);
-+
-+ gcc_assert(pointer_set_contains(visited->my_stmts, my_stmt));
-+ gcc_assert(gimple_assign_rhs_code(stmt) == gimple_assign_rhs_code(my_stmt));
-+
-+ return my_stmt;
-+}
-+
-+/* unsigned type -> unary or binary assign (rhs1 or rhs2 is constant)
-+ * unsigned type cast to signed type, unsigned type: no more uses
-+ * e.g., lib/vsprintf.c:simple_strtol()
-+ * _10 = (unsigned long int) _9
-+ * _11 = -_10;
-+ * _12 = (long int) _11; (_11_ no more uses)
-+ */
-+static bool is_call_or_cast(gimple stmt)
-+{
-+ return gimple_assign_cast_p(stmt) || is_gimple_call(stmt);
-+}
-+
-+static bool is_unsigned_cast_or_call_def_stmt(const_tree node)
-+{
-+ const_tree rhs;
-+ gimple def_stmt;
-+
-+ if (node == NULL_TREE)
-+ return true;
-+ if (is_gimple_constant(node))
-+ return true;
-+
-+ def_stmt = get_def_stmt(node);
-+ if (!def_stmt)
-+ return false;
-+
-+ if (is_call_or_cast(def_stmt))
-+ return true;
-+
-+ if (!is_gimple_assign(def_stmt) || gimple_num_ops(def_stmt) != 2)
-+ return false;
-+ rhs = gimple_assign_rhs1(as_a_const_gassign(def_stmt));
-+ def_stmt = get_def_stmt(rhs);
-+ if (!def_stmt)
-+ return false;
-+ return is_call_or_cast(def_stmt);
-+}
-+
-+void unsigned_signed_cast_intentional_overflow(struct visited *visited, gassign *stmt)
-+{
-+ unsigned int use_num;
-+ gassign *so_stmt;
-+ const_gimple def_stmt;
-+ const_tree rhs1, rhs2;
-+ tree rhs = gimple_assign_rhs1(stmt);
-+ tree lhs_type = TREE_TYPE(gimple_assign_lhs(stmt));
-+ const_tree rhs_type = TREE_TYPE(rhs);
-+
-+ if (!(TYPE_UNSIGNED(rhs_type) && !TYPE_UNSIGNED(lhs_type)))
-+ return;
-+ if (GET_MODE_BITSIZE(TYPE_MODE(rhs_type)) != GET_MODE_BITSIZE(TYPE_MODE(lhs_type)))
-+ return;
-+ use_num = uses_num(rhs);
-+ if (use_num != 1)
-+ return;
-+
-+ def_stmt = get_def_stmt(rhs);
-+ if (!def_stmt)
-+ return;
-+ if (!is_gimple_assign(def_stmt))
-+ return;
-+
-+ rhs1 = gimple_assign_rhs1(as_a_const_gassign(def_stmt));
-+ if (!is_unsigned_cast_or_call_def_stmt(rhs1))
-+ return;
-+
-+ rhs2 = gimple_assign_rhs2(as_a_const_gassign(def_stmt));
-+ if (!is_unsigned_cast_or_call_def_stmt(rhs2))
-+ return;
-+ if (gimple_num_ops(def_stmt) == 3 && !is_gimple_constant(rhs1) && !is_gimple_constant(rhs2))
-+ return;
-+
-+ so_stmt = as_a_gassign(get_dup_stmt(visited, stmt));
-+ create_up_and_down_cast(visited, so_stmt, lhs_type, gimple_assign_rhs1(so_stmt));
-+}
-+
-diff --git a/tools/gcc/size_overflow_plugin/misc.c b/tools/gcc/size_overflow_plugin/misc.c
-new file mode 100644
-index 0000000..253b4a8b
---- /dev/null
-+++ b/tools/gcc/size_overflow_plugin/misc.c
-@@ -0,0 +1,219 @@
-+/*
-+ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
-+ * Licensed under the GPL v2, or (at your option) v3
-+ *
-+ * Homepage:
-+ * https://github.com/ephox-gcc-plugins
-+ * http://www.grsecurity.net/~ephox/overflow_plugin/
-+ *
-+ * Documentation:
-+ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
-+ *
-+ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
-+ * with double integer precision (DImode/TImode for 32/64 bit integer types).
-+ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
-+ *
-+ * Usage:
-+ * $ make
-+ * $ make run
-+ */
-+
-+#include "gcc-common.h"
-+#include "size_overflow.h"
-+
-+void set_current_function_decl(tree fndecl)
-+{
-+ gcc_assert(fndecl != NULL_TREE);
-+
-+ push_cfun(DECL_STRUCT_FUNCTION(fndecl));
-+ calculate_dominance_info(CDI_DOMINATORS);
-+ current_function_decl = fndecl;
-+}
-+
-+void unset_current_function_decl(void)
-+{
-+ free_dominance_info(CDI_DOMINATORS);
-+ pop_cfun();
-+ current_function_decl = NULL_TREE;
-+}
-+
-+tree get_lhs(const_gimple stmt)
-+{
-+ switch (gimple_code(stmt)) {
-+ case GIMPLE_ASSIGN:
-+ case GIMPLE_CALL:
-+ return gimple_get_lhs(as_a_const_gassign(stmt));
-+ case GIMPLE_PHI:
-+ return gimple_phi_result(as_a_const_gphi(stmt));
-+ default:
-+ debug_gimple_stmt((gimple)stmt);
-+ gcc_unreachable();
-+ }
-+}
-+
-+static bool is_bool(const_tree node)
-+{
-+ const_tree type;
-+
-+ if (node == NULL_TREE)
-+ return false;
-+
-+ type = TREE_TYPE(node);
-+ if (!INTEGRAL_TYPE_P(type))
-+ return false;
-+ if (TREE_CODE(type) == BOOLEAN_TYPE)
-+ return true;
-+ if (TYPE_PRECISION(type) == 1)
-+ return true;
-+ return false;
-+}
-+
-+bool skip_types(const_tree var)
-+{
-+ tree type;
-+ enum tree_code code;
-+
-+ if (is_gimple_constant(var))
-+ return true;
-+
-+ switch (TREE_CODE(var)) {
-+ case ADDR_EXPR:
-+#if BUILDING_GCC_VERSION >= 4006
-+ case MEM_REF:
-+#endif
-+ case ARRAY_REF:
-+ case BIT_FIELD_REF:
-+ case INDIRECT_REF:
-+ case TARGET_MEM_REF:
-+ case COMPONENT_REF:
-+ case VAR_DECL:
-+ case VIEW_CONVERT_EXPR:
-+ return true;
-+ default:
-+ break;
-+ }
-+
-+ code = TREE_CODE(var);
-+ gcc_assert(code == SSA_NAME || code == PARM_DECL);
-+
-+ type = TREE_TYPE(var);
-+ switch (TREE_CODE(type)) {
-+ case INTEGER_TYPE:
-+ case ENUMERAL_TYPE:
-+ return false;
-+ case BOOLEAN_TYPE:
-+ return is_bool(var);
-+ default:
-+ return true;
-+ }
-+}
-+
-+gimple get_def_stmt(const_tree node)
-+{
-+ gcc_assert(node != NULL_TREE);
-+
-+ if (skip_types(node))
-+ return NULL;
-+
-+ if (TREE_CODE(node) != SSA_NAME)
-+ return NULL;
-+ return SSA_NAME_DEF_STMT(node);
-+}
-+
-+tree create_new_var(tree type)
-+{
-+ tree new_var = create_tmp_var(type, "cicus");
-+
-+ add_referenced_var(new_var);
-+ return new_var;
-+}
-+
-+static bool skip_cast(tree dst_type, const_tree rhs, bool force)
-+{
-+ const_gimple def_stmt = get_def_stmt(rhs);
-+
-+ if (force)
-+ return false;
-+
-+ if (is_gimple_constant(rhs))
-+ return false;
-+
-+ if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
-+ return false;
-+
-+ if (!types_compatible_p(dst_type, TREE_TYPE(rhs)))
-+ return false;
-+
-+ // DI type can be on 32 bit (from create_assign) but overflow type stays DI
-+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
-+ return false;
-+
-+ return true;
-+}
-+
-+tree cast_a_tree(tree type, tree var)
-+{
-+ gcc_assert(type != NULL_TREE);
-+ gcc_assert(var != NULL_TREE);
-+ gcc_assert(fold_convertible_p(type, var));
-+
-+ return fold_convert(type, var);
-+}
-+
-+gimple build_cast_stmt(struct visited *visited, tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force)
-+{
-+ gimple def_stmt;
-+ gassign *assign;
-+
-+ gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE);
-+ gcc_assert(!is_gimple_constant(rhs));
-+ if (gsi_end_p(*gsi) && before == AFTER_STMT)
-+ gcc_unreachable();
-+
-+ def_stmt = get_def_stmt(rhs);
-+ if (def_stmt && gimple_code(def_stmt) != GIMPLE_NOP && skip_cast(dst_type, rhs, force) && pointer_set_contains(visited->my_stmts, def_stmt))
-+ return def_stmt;
-+
-+ if (lhs == CREATE_NEW_VAR)
-+ lhs = create_new_var(dst_type);
-+
-+ assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs));
-+
-+ if (!gsi_end_p(*gsi)) {
-+ location_t loc = gimple_location(gsi_stmt(*gsi));
-+ gimple_set_location(assign, loc);
-+ }
-+
-+ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
-+
-+ if (before)
-+ gsi_insert_before(gsi, assign, GSI_NEW_STMT);
-+ else
-+ gsi_insert_after(gsi, assign, GSI_NEW_STMT);
-+ update_stmt(assign);
-+ return assign;
-+}
-+
-+bool is_size_overflow_type(const_tree var)
-+{
-+ const char *name;
-+ const_tree type_name, type;
-+
-+ if (var == NULL_TREE)
-+ return false;
-+
-+ type = TREE_TYPE(var);
-+ type_name = TYPE_NAME(type);
-+ if (type_name == NULL_TREE)
-+ return false;
-+
-+ if (DECL_P(type_name))
-+ name = DECL_NAME_POINTER(type_name);
-+ else
-+ name = IDENTIFIER_POINTER(type_name);
-+
-+ if (!strncmp(name, "size_overflow_type", 18))
-+ return true;
-+ return false;
-+}
-+
-diff --git a/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c b/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c
-new file mode 100644
-index 0000000..de5999d
---- /dev/null
-+++ b/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c
-@@ -0,0 +1,139 @@
-+/*
-+ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
-+ * Licensed under the GPL v2, or (at your option) v3
-+ *
-+ * Homepage:
-+ * https://github.com/ephox-gcc-plugins
-+ * http://www.grsecurity.net/~ephox/overflow_plugin/
-+ *
-+ * Documentation:
-+ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
-+ *
-+ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
-+ * with double integer precision (DImode/TImode for 32/64 bit integer types).
-+ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
-+ *
-+ * Usage:
-+ * $ make
-+ * $ make run
-+ */
-+
-+#include "gcc-common.h"
-+#include "size_overflow.h"
-+
-+bool skip_expr_on_double_type(const gassign *stmt)
-+{
-+ enum tree_code code = gimple_assign_rhs_code(stmt);
-+
-+ switch (code) {
-+ case RSHIFT_EXPR:
-+ case TRUNC_DIV_EXPR:
-+ case CEIL_DIV_EXPR:
-+ case FLOOR_DIV_EXPR:
-+ case ROUND_DIV_EXPR:
-+ case EXACT_DIV_EXPR:
-+ case RDIV_EXPR:
-+ case TRUNC_MOD_EXPR:
-+ case CEIL_MOD_EXPR:
-+ case FLOOR_MOD_EXPR:
-+ case ROUND_MOD_EXPR:
-+ return true;
-+ default:
-+ return false;
-+ }
-+}
-+
-+void create_up_and_down_cast(struct visited *visited, gassign *use_stmt, tree orig_type, tree rhs)
-+{
-+ const_tree orig_rhs1;
-+ tree down_lhs, new_lhs, dup_type = TREE_TYPE(rhs);
-+ const_gimple down_cast, up_cast;
-+ gimple_stmt_iterator gsi = gsi_for_stmt(use_stmt);
-+
-+ down_cast = build_cast_stmt(visited, orig_type, rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+ down_lhs = get_lhs(down_cast);
-+
-+ gsi = gsi_for_stmt(use_stmt);
-+ up_cast = build_cast_stmt(visited, dup_type, down_lhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+ new_lhs = get_lhs(up_cast);
-+
-+ orig_rhs1 = gimple_assign_rhs1(use_stmt);
-+ if (operand_equal_p(orig_rhs1, rhs, 0))
-+ gimple_assign_set_rhs1(use_stmt, new_lhs);
-+ else
-+ gimple_assign_set_rhs2(use_stmt, new_lhs);
-+ update_stmt(use_stmt);
-+
-+ pointer_set_insert(visited->my_stmts, up_cast);
-+ pointer_set_insert(visited->my_stmts, down_cast);
-+ pointer_set_insert(visited->skip_expr_casts, up_cast);
-+ pointer_set_insert(visited->skip_expr_casts, down_cast);
-+}
-+
-+static tree get_proper_unsigned_half_type(const_tree node)
-+{
-+ tree new_type, type;
-+
-+ gcc_assert(is_size_overflow_type(node));
-+
-+ type = TREE_TYPE(node);
-+ switch (TYPE_MODE(type)) {
-+ case HImode:
-+ new_type = unsigned_intQI_type_node;
-+ break;
-+ case SImode:
-+ new_type = unsigned_intHI_type_node;
-+ break;
-+ case DImode:
-+ new_type = unsigned_intSI_type_node;
-+ break;
-+ case TImode:
-+ new_type = unsigned_intDI_type_node;
-+ break;
-+ default:
-+ gcc_unreachable();
-+ }
-+
-+ if (TYPE_QUALS(type) != 0)
-+ return build_qualified_type(new_type, TYPE_QUALS(type));
-+ return new_type;
-+}
-+
-+static void insert_cast_rhs(struct visited *visited, gassign *stmt, tree rhs)
-+{
-+ tree type;
-+
-+ if (rhs == NULL_TREE)
-+ return;
-+ if (!is_size_overflow_type(rhs))
-+ return;
-+
-+ type = get_proper_unsigned_half_type(rhs);
-+ if (is_gimple_constant(rhs))
-+ return;
-+ create_up_and_down_cast(visited, stmt, type, rhs);
-+}
-+
-+static void insert_cast(struct visited *visited, gassign *stmt, tree rhs)
-+{
-+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && !is_size_overflow_type(rhs))
-+ return;
-+ gcc_assert(is_size_overflow_type(rhs));
-+ insert_cast_rhs(visited, stmt, rhs);
-+}
-+
-+void insert_cast_expr(struct visited *visited, gassign *stmt, enum intentional_overflow_type type)
-+{
-+ tree rhs1, rhs2;
-+
-+ if (type == NO_INTENTIONAL_OVERFLOW || type == RHS1_INTENTIONAL_OVERFLOW) {
-+ rhs1 = gimple_assign_rhs1(stmt);
-+ insert_cast(visited, stmt, rhs1);
-+ }
-+
-+ if (type == NO_INTENTIONAL_OVERFLOW || type == RHS2_INTENTIONAL_OVERFLOW) {
-+ rhs2 = gimple_assign_rhs2(stmt);
-+ insert_cast(visited, stmt, rhs2);
-+ }
-+}
-+
-diff --git a/tools/gcc/size_overflow_plugin/size_overflow.h b/tools/gcc/size_overflow_plugin/size_overflow.h
-new file mode 100644
-index 0000000..20732b1
---- /dev/null
-+++ b/tools/gcc/size_overflow_plugin/size_overflow.h
-@@ -0,0 +1,183 @@
-+#ifndef SIZE_OVERFLOW_H
-+#define SIZE_OVERFLOW_H
-+
-+#define CREATE_NEW_VAR NULL_TREE
-+#define CANNOT_FIND_ARG 32
-+#define MAX_PARAM 31
-+#define BEFORE_STMT true
-+#define AFTER_STMT false
-+
-+#define TURN_OFF_ASM_STR "# size_overflow MARK_TURN_OFF "
-+#define YES_ASM_STR "# size_overflow MARK_YES "
-+#define OK_ASM_STR "# size_overflow "
-+
-+enum mark {
-+ MARK_NO, MARK_YES, MARK_NOT_INTENTIONAL, MARK_TURN_OFF
-+};
-+
-+enum intentional_overflow_type {
-+ NO_INTENTIONAL_OVERFLOW, RHS1_INTENTIONAL_OVERFLOW, RHS2_INTENTIONAL_OVERFLOW
-+};
-+
-+
-+#if BUILDING_GCC_VERSION >= 5000
-+typedef struct hash_set<const_gimple> gimple_set;
-+
-+static inline bool pointer_set_insert(gimple_set *visited, const_gimple stmt)
-+{
-+ return visited->add(stmt);
-+}
-+
-+static inline bool pointer_set_contains(gimple_set *visited, const_gimple stmt)
-+{
-+ return visited->contains(stmt);
-+}
-+
-+static inline gimple_set* pointer_set_create(void)
-+{
-+ return new hash_set<const_gimple>;
-+}
-+
-+static inline void pointer_set_destroy(gimple_set *visited)
-+{
-+ delete visited;
-+}
-+
-+typedef struct hash_set<tree> tree_set;
-+
-+static inline bool pointer_set_insert(tree_set *visited, tree node)
-+{
-+ return visited->add(node);
-+}
-+
-+static inline bool pointer_set_contains(tree_set *visited, tree node)
-+{
-+ return visited->contains(node);
-+}
-+
-+static inline tree_set *tree_pointer_set_create(void)
-+{
-+ return new hash_set<tree>;
-+}
-+
-+static inline void pointer_set_destroy(tree_set *visited)
-+{
-+ delete visited;
-+}
-+#else
-+typedef struct pointer_set_t gimple_set;
-+typedef struct pointer_set_t tree_set;
-+
-+static inline tree_set *tree_pointer_set_create(void)
-+{
-+ return pointer_set_create();
-+}
-+#endif
-+
-+struct visited {
-+ gimple_set *stmts;
-+ gimple_set *my_stmts;
-+ gimple_set *skip_expr_casts;
-+ gimple_set *no_cast_check;
-+};
-+
-+// size_overflow_plugin.c
-+extern GTY(()) tree report_size_overflow_decl;
-+extern GTY(()) tree size_overflow_type_HI;
-+extern GTY(()) tree size_overflow_type_SI;
-+extern GTY(()) tree size_overflow_type_DI;
-+extern GTY(()) tree size_overflow_type_TI;
-+
-+
-+// size_overflow_plugin_hash.c
-+struct size_overflow_hash {
-+ const struct size_overflow_hash * const next;
-+ const char * const name;
-+ const unsigned int param;
-+};
-+
-+struct interesting_node {
-+ struct interesting_node *next;
-+ gimple first_stmt;
-+ const_tree fndecl;
-+ tree node;
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC(tree, gc) *last_nodes;
-+#else
-+ vec<tree, va_gc> *last_nodes;
-+#endif
-+ unsigned int num;
-+ enum mark intentional_attr_decl;
-+ enum mark intentional_attr_cur_fndecl;
-+ gasm *intentional_mark_from_gimple;
-+};
-+
-+extern bool is_size_overflow_asm(const gasm *stmt);
-+extern unsigned int get_function_num(const_tree node, const_tree orig_fndecl);
-+extern unsigned int get_correct_arg_count(unsigned int argnum, const_tree fndecl);
-+extern bool is_missing_function(const_tree orig_fndecl, unsigned int num);
-+extern bool is_a_return_check(const_tree node);
-+extern const struct size_overflow_hash *get_function_hash(const_tree fndecl);
-+extern unsigned int find_arg_number_tree(const_tree arg, const_tree func);
-+
-+
-+// size_overflow_debug.c
-+extern struct opt_pass *make_dump_pass(void);
-+
-+
-+// intentional_overflow.c
-+extern enum mark get_intentional_attr_type(const_tree node);
-+extern bool is_size_overflow_intentional_asm_yes(const gasm *stmt);
-+extern bool is_size_overflow_intentional_asm_turn_off(const gasm *stmt);
-+extern bool is_end_intentional_intentional_attr(const_tree decl, unsigned int argnum);
-+extern bool is_yes_intentional_attr(const_tree decl, unsigned int argnum);
-+extern bool is_turn_off_intentional_attr(const_tree decl);
-+extern void print_missing_intentional(enum mark callee_attr, enum mark caller_attr, const_tree decl, unsigned int argnum);
-+extern void check_intentional_attribute_ipa(struct interesting_node *cur_node);
-+extern bool is_a_cast_and_const_overflow(const_tree no_const_rhs);
-+extern bool is_const_plus_unsigned_signed_truncation(const_tree lhs);
-+extern bool is_a_constant_overflow(const gassign *stmt, const_tree rhs);
-+extern tree handle_intentional_overflow(struct visited *visited, struct cgraph_node *caller_node, bool check_overflow, gassign *stmt, tree change_rhs, tree new_rhs2);
-+extern tree handle_integer_truncation(struct visited *visited, struct cgraph_node *caller_node, const_tree lhs);
-+extern bool is_a_neg_overflow(const_gimple stmt, const_tree rhs);
-+extern enum intentional_overflow_type add_mul_intentional_overflow(const gassign *def_stmt);
-+extern void unsigned_signed_cast_intentional_overflow(struct visited *visited, gassign *stmt);
-+
-+
-+// insert_size_overflow_check_ipa.c
-+extern unsigned int search_function(void);
-+extern unsigned int call_count;
-+extern struct opt_pass *make_insert_size_overflow_check(void);
-+extern const_tree get_interesting_orig_fndecl(const_gimple stmt, unsigned int argnum);
-+
-+
-+// insert_size_overflow_asm.c
-+extern struct opt_pass *make_insert_size_overflow_asm_pass(void);
-+
-+
-+// misc.c
-+extern void set_current_function_decl(tree fndecl);
-+extern void unset_current_function_decl(void);
-+extern tree get_lhs(const_gimple stmt);
-+extern gimple get_def_stmt(const_tree node);
-+extern tree create_new_var(tree type);
-+extern gimple build_cast_stmt(struct visited *visited, tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force);
-+extern bool skip_types(const_tree var);
-+extern tree cast_a_tree(tree type, tree var);
-+extern bool is_size_overflow_type(const_tree var);
-+
-+
-+// insert_size_overflow_check_core.c
-+extern tree expand(struct visited *visited, struct cgraph_node *caller_node, tree lhs);
-+extern void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before);
-+extern tree dup_assign(struct visited *visited, gassign *oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
-+extern tree create_assign(struct visited *visited, gimple oldstmt, tree rhs1, bool before);
-+
-+
-+// remove_unnecessary_dup.c
-+extern struct opt_pass *make_remove_unnecessary_dup_pass(void);
-+extern void insert_cast_expr(struct visited *visited, gassign *stmt, enum intentional_overflow_type type);
-+extern bool skip_expr_on_double_type(const gassign *stmt);
-+extern void create_up_and_down_cast(struct visited *visited, gassign *use_stmt, tree orig_type, tree rhs);
-+
-+#endif
-diff --git a/tools/gcc/size_overflow_plugin/size_overflow_debug.c b/tools/gcc/size_overflow_plugin/size_overflow_debug.c
-new file mode 100644
-index 0000000..176c32f
---- /dev/null
-+++ b/tools/gcc/size_overflow_plugin/size_overflow_debug.c
-@@ -0,0 +1,123 @@
-+/*
-+ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
-+ * Licensed under the GPL v2, or (at your option) v3
-+ *
-+ * Homepage:
-+ * https://github.com/ephox-gcc-plugins
-+ * http://www.grsecurity.net/~ephox/overflow_plugin/
-+ *
-+ * Documentation:
-+ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
-+ *
-+ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
-+ * with double integer precision (DImode/TImode for 32/64 bit integer types).
-+ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
-+ *
-+ * Usage:
-+ * $ make
-+ * $ make run
-+ */
-+
-+#include "gcc-common.h"
-+
-+static unsigned int __unused dump_functions(void)
-+{
-+ struct cgraph_node *node;
-+
-+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
-+ basic_block bb;
-+
-+ push_cfun(DECL_STRUCT_FUNCTION(NODE_DECL(node)));
-+ current_function_decl = NODE_DECL(node);
-+
-+ fprintf(stderr, "-----------------------------------------\n%s\n-----------------------------------------\n", DECL_NAME_POINTER(current_function_decl));
-+
-+ FOR_ALL_BB_FN(bb, cfun) {
-+ gimple_stmt_iterator si;
-+
-+ fprintf(stderr, "<bb %u>:\n", bb->index);
-+ for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
-+ debug_gimple_stmt(gsi_stmt(si));
-+ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
-+ debug_gimple_stmt(gsi_stmt(si));
-+ fprintf(stderr, "\n");
-+ }
-+
-+ fprintf(stderr, "-------------------------------------------------------------------------\n");
-+
-+ pop_cfun();
-+ current_function_decl = NULL_TREE;
-+ }
-+
-+ fprintf(stderr, "###############################################################################\n");
-+
-+ return 0;
-+}
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
-+static const struct pass_data dump_pass_data = {
-+#else
-+static struct ipa_opt_pass_d dump_pass = {
-+ .pass = {
-+#endif
-+ .type = SIMPLE_IPA_PASS,
-+ .name = "dump",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 5000
-+#elif BUILDING_GCC_VERSION == 4009
-+ .has_gate = false,
-+ .has_execute = true,
-+#else
-+ .gate = NULL,
-+ .execute = dump_functions,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = 0,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0,
-+ .todo_flags_finish = 0,
-+#if BUILDING_GCC_VERSION < 4009
-+ },
-+ .generate_summary = NULL,
-+ .write_summary = NULL,
-+ .read_summary = NULL,
-+#if BUILDING_GCC_VERSION >= 4006
-+ .write_optimization_summary = NULL,
-+ .read_optimization_summary = NULL,
-+#endif
-+ .stmt_fixup = NULL,
-+ .function_transform_todo_flags_start = 0,
-+ .function_transform = NULL,
-+ .variable_transform = NULL,
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+class dump_pass : public ipa_opt_pass_d {
-+public:
-+ dump_pass() : ipa_opt_pass_d(dump_pass_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {}
-+#if BUILDING_GCC_VERSION >= 5000
-+ virtual unsigned int execute(function *) { return dump_functions(); }
-+#else
-+ unsigned int execute() { return dump_functions(); }
-+#endif
-+};
-+}
-+
-+opt_pass *make_dump_pass(void)
-+{
-+ return new dump_pass();
-+}
-+#else
-+struct opt_pass *make_dump_pass(void)
-+{
-+ return &dump_pass.pass;
-+}
-+#endif
-diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
-new file mode 100644
-index 0000000..cd3c18f
---- /dev/null
-+++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
-@@ -0,0 +1,5138 @@
-+intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
-+storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
-+compat_sock_setsockopt_23 compat_sock_setsockopt 5 23 NULL
-+carl9170_alloc_27 carl9170_alloc 1 27 NULL
-+sel_read_policyvers_55 sel_read_policyvers 3 55 NULL nohasharray
-+padzero_55 padzero 1 55 &sel_read_policyvers_55
-+cfg80211_disconnected_57 cfg80211_disconnected 4 57 NULL
-+vis_data_count_prim_sec_64 vis_data_count_prim_sec 0 64 NULL
-+__skb_to_sgvec_72 __skb_to_sgvec 0 72 NULL
-+snd_korg1212_copy_to_92 snd_korg1212_copy_to 6 92 NULL
-+load_msg_95 load_msg 2 95 NULL
-+ipath_verbs_send_117 ipath_verbs_send 5-3 117 NULL
-+init_q_132 init_q 4 132 NULL
-+memstick_alloc_host_142 memstick_alloc_host 1 142 NULL
-+ext4_ext_get_actual_len_153 ext4_ext_get_actual_len 0 153 NULL nohasharray
-+tracing_trace_options_write_153 tracing_trace_options_write 3 153 &ext4_ext_get_actual_len_153
-+iscsi_session_setup_196 iscsi_session_setup 4-5 196 NULL
-+device_add_bin_attributes_205 device_add_bin_attributes 0 205 NULL
-+store_cpufv_215 store_cpufv 4 215 NULL
-+tcp_skb_seglen_221 tcp_skb_seglen 0 221 NULL
-+proc_scsi_write_proc_267 proc_scsi_write_proc 3 267 NULL
-+generic_file_direct_write_291 generic_file_direct_write 0 291 NULL
-+read_file_war_stats_292 read_file_war_stats 3 292 NULL
-+platform_device_add_data_310 platform_device_add_data 3 310 NULL
-+iwl_dbgfs_tx_statistics_read_314 iwl_dbgfs_tx_statistics_read 3 314 NULL nohasharray
-+dn_setsockopt_314 dn_setsockopt 5 314 &iwl_dbgfs_tx_statistics_read_314
-+ath9k_wmi_cmd_327 ath9k_wmi_cmd 4 327 NULL
-+map_urb_for_dma_332 map_urb_for_dma 0 332 NULL
-+cmtp_send_interopmsg_376 cmtp_send_interopmsg 7 376 NULL
-+sysfs_create_dir_398 sysfs_create_dir 0 398 NULL
-+btmrvl_txdnldready_read_413 btmrvl_txdnldready_read 3 413 NULL
-+lbs_rdmac_read_418 lbs_rdmac_read 3 418 NULL
-+snd_ca0106_ptr_read_467 snd_ca0106_ptr_read 0 467 NULL
-+_alloc_get_attr_desc_470 _alloc_get_attr_desc 2 470 NULL
-+pidlist_resize_496 pidlist_resize 2 496 NULL
-+iwl_dbgfs_protection_mode_write_502 iwl_dbgfs_protection_mode_write 3 502 NULL
-+smp_send_cmd_512 smp_send_cmd 3 512 NULL
-+ocfs2_validate_meta_ecc_bhs_527 ocfs2_validate_meta_ecc_bhs 0 527 NULL
-+ipv6_skip_exthdr_536 ipv6_skip_exthdr 0-2 536 NULL
-+iwl_dbgfs_wowlan_sram_read_540 iwl_dbgfs_wowlan_sram_read 3 540 NULL
-+dle_count_543 dle_count 0 543 NULL
-+devres_alloc_551 devres_alloc 2 551 NULL
-+lpfc_nlp_state_name_556 lpfc_nlp_state_name 2 556 NULL
-+snd_aw2_saa7146_get_hw_ptr_playback_558 snd_aw2_saa7146_get_hw_ptr_playback 0 558 NULL
-+start_isoc_chain_565 start_isoc_chain 2 565 NULL nohasharray
-+dev_hard_header_565 dev_hard_header 0 565 &start_isoc_chain_565
-+compat_sys_preadv_583 compat_sys_preadv 3 583 NULL
-+ni_gpct_device_construct_610 ni_gpct_device_construct 5 610 NULL
-+sysfs_acpi_set_625 sysfs_acpi_set 3 625 NULL
-+viafb_dfpl_proc_write_627 viafb_dfpl_proc_write 3 627 NULL
-+unlink_queued_645 unlink_queued 3-4 645 NULL
-+iwl_legacy_dbgfs_force_reset_read_649 iwl_legacy_dbgfs_force_reset_read 3 649 NULL
-+dtim_interval_read_654 dtim_interval_read 3 654 NULL
-+ceph_copy_user_to_page_vector_656 ceph_copy_user_to_page_vector 4-3 656 NULL
-+xfrm_aevent_msgsize_674 xfrm_aevent_msgsize 0 674 NULL
-+rtl8169_try_rx_copy_705 rtl8169_try_rx_copy 3 705 NULL
-+sctp_setsockopt_peer_addr_params_734 sctp_setsockopt_peer_addr_params 3 734 NULL
-+ddp_set_map_751 ddp_set_map 4 751 NULL
-+dvb_video_write_754 dvb_video_write 3 754 NULL
-+iwl_read_targ_mem_772 iwl_read_targ_mem 0 772 NULL
-+jbd2_journal_dirty_metadata_784 jbd2_journal_dirty_metadata 0 784 NULL
-+if_writecmd_815 if_writecmd 2 815 NULL
-+aac_change_queue_depth_825 aac_change_queue_depth 2 825 NULL
-+read_fifo_826 read_fifo 3 826 NULL
-+o2net_send_message_vec_879 o2net_send_message_vec 4 879 NULL nohasharray
-+iwl_dbgfs_fh_reg_read_879 iwl_dbgfs_fh_reg_read 3 879 &o2net_send_message_vec_879
-+snd_pcm_action_single_905 snd_pcm_action_single 0 905 NULL
-+btmrvl_hsstate_read_920 btmrvl_hsstate_read 3 920 NULL
-+v4l2_ctrl_handler_init_928 v4l2_ctrl_handler_init 2 928 NULL
-+carl9170_cmd_buf_950 carl9170_cmd_buf 3 950 NULL
-+__nodes_weight_956 __nodes_weight 2-0 956 NULL
-+sys_msgrcv_959 sys_msgrcv 3 959 NULL
-+hdlcdev_rx_997 hdlcdev_rx 3 997 NULL
-+free_ind_block_999 free_ind_block 0 999 NULL
-+readreg_1017 readreg 0-1 1017 NULL
-+pohmelfs_name_alloc_1036 pohmelfs_name_alloc 1 1036 NULL
-+gigaset_initdriver_1060 gigaset_initdriver 2 1060 NULL
-+Read_hfc16_1070 Read_hfc16 0 1070 NULL
-+mce_request_packet_1073 mce_request_packet 3 1073 NULL
-+agp_create_memory_1075 agp_create_memory 1 1075 NULL
-+_scsih_adjust_queue_depth_1083 _scsih_adjust_queue_depth 2 1083 NULL
-+llc_mac_hdr_init_1094 llc_mac_hdr_init 0 1094 NULL nohasharray
-+inode_ref_info_1094 inode_ref_info 0 1094 &llc_mac_hdr_init_1094
-+__arch_hweight8_1105 __arch_hweight8 0 1105 NULL
-+__btrfs_cow_block_1125 __btrfs_cow_block 0 1125 NULL
-+i2400m_rx_ctl_1157 i2400m_rx_ctl 4 1157 NULL
-+pfkey_xfrm_policy2msg_size_1176 pfkey_xfrm_policy2msg_size 0 1176 NULL
-+ipc_alloc_1192 ipc_alloc 1 1192 NULL
-+ib_create_send_mad_1196 ib_create_send_mad 5 1196 NULL
-+i2400m_rx_ctl_ack_1199 i2400m_rx_ctl_ack 3 1199 NULL
-+i2cdev_read_1206 i2cdev_read 3 1206 NULL
-+ipw_packet_received_skb_1230 ipw_packet_received_skb 2 1230 NULL
-+acpi_battery_write_alarm_1240 acpi_battery_write_alarm 3 1240 NULL
-+ocfs2_extend_file_1266 ocfs2_extend_file 3 1266 NULL
-+ioctl_private_iw_point_1273 ioctl_private_iw_point 7 1273 NULL
-+ffs_1322 ffs 0 1322 NULL
-+push_node_left_1327 push_node_left 0 1327 NULL
-+carl9170_rx_stream_1334 carl9170_rx_stream 3 1334 NULL
-+btrfs_submit_compressed_write_1347 btrfs_submit_compressed_write 5 1347 NULL
-+snd_pcm_lib_write1_1358 snd_pcm_lib_write1 0-3 1358 NULL
-+ipx_sendmsg_1362 ipx_sendmsg 4 1362 NULL
-+ocfs2_prepare_inode_for_write_1372 ocfs2_prepare_inode_for_write 3 1372 NULL
-+sctp_setsockopt_initmsg_1383 sctp_setsockopt_initmsg 3 1383 NULL
-+do_msgsnd_1387 do_msgsnd 4 1387 NULL
-+file_read_actor_1401 file_read_actor 4 1401 NULL
-+hci_si_event_1404 hci_si_event 3 1404 NULL
-+init_rs_internal_1436 init_rs_internal 1 1436 NULL
-+stack_max_size_read_1445 stack_max_size_read 3 1445 NULL
-+tx_queue_len_read_1463 tx_queue_len_read 3 1463 NULL
-+xprt_alloc_1475 xprt_alloc 2 1475 NULL
-+sta_num_ps_buf_frames_read_1488 sta_num_ps_buf_frames_read 3 1488 NULL
-+fpregs_set_1497 fpregs_set 4 1497 NULL
-+tomoyo_round2_1518 tomoyo_round2 0 1518 NULL
-+ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime_1589 ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime 3 1589 NULL
-+ipath_ht_handle_hwerrors_1592 ipath_ht_handle_hwerrors 3 1592 NULL
-+fc_frame_alloc_1596 fc_frame_alloc 2 1596 NULL
-+packet_buffer_init_1607 packet_buffer_init 2 1607 NULL
-+i915_gem_execbuffer_wait_for_flips_1612 i915_gem_execbuffer_wait_for_flips 0 1612 NULL
-+btmrvl_hscmd_read_1614 btmrvl_hscmd_read 3 1614 NULL
-+v9fs_fid_xattr_get_1618 v9fs_fid_xattr_get 0 1618 NULL
-+bluetooth_proc_write_1630 bluetooth_proc_write 3 1630 NULL
-+btmrvl_hsmode_read_1647 btmrvl_hsmode_read 3 1647 NULL
-+ikconfig_read_current_1658 ikconfig_read_current 3 1658 NULL
-+configfs_read_file_1683 configfs_read_file 3 1683 NULL
-+pdu_write_u_1710 pdu_write_u 3 1710 NULL
-+coda_psdev_write_1711 coda_psdev_write 3 1711 NULL
-+wl1271_rx_handle_data_1714 wl1271_rx_handle_data 3 1714 NULL
-+internal_create_group_1733 internal_create_group 0 1733 NULL
-+ieee80211_new_mesh_header_1761 ieee80211_new_mesh_header 0 1761 NULL
-+cosa_write_1774 cosa_write 3 1774 NULL
-+__nodelist_scnprintf_1815 __nodelist_scnprintf 2-0 1815 NULL
-+hidp_queue_report_1881 hidp_queue_report 3 1881 NULL
-+sb_issue_zeroout_1884 sb_issue_zeroout 0 1884 NULL
-+iwl_legacy_dbgfs_rxon_flags_read_1894 iwl_legacy_dbgfs_rxon_flags_read 3 1894 NULL
-+ext3_fiemap_1936 ext3_fiemap 4 1936 NULL
-+ieee80211_if_fmt_dot11MeshConfirmTimeout_1945 ieee80211_if_fmt_dot11MeshConfirmTimeout 3 1945 NULL
-+ivtv_v4l2_read_1964 ivtv_v4l2_read 3 1964 NULL
-+store_iwmct_log_level_fw_1974 store_iwmct_log_level_fw 4 1974 NULL
-+sel_read_avc_hash_stats_1984 sel_read_avc_hash_stats 3 1984 NULL
-+xfs_trans_count_vecs_1991 xfs_trans_count_vecs 0 1991 NULL nohasharray
-+gpio_power_write_1991 gpio_power_write 3 1991 &xfs_trans_count_vecs_1991
-+__alloc_bootmem_node_1992 __alloc_bootmem_node 2 1992 NULL
-+ocfs2_global_qinit_alloc_2018 ocfs2_global_qinit_alloc 0 2018 NULL
-+write_flush_pipefs_2021 write_flush_pipefs 3 2021 NULL
-+BcmCopySection_2035 BcmCopySection 0-5 2035 NULL
-+ath6kl_fwlog_mask_read_2050 ath6kl_fwlog_mask_read 3 2050 NULL
-+ocfs2_expand_inline_dir_2063 ocfs2_expand_inline_dir 3 2063 NULL
-+subbuf_read_actor_2071 subbuf_read_actor 3 2071 NULL
-+__generic_copy_from_user_intel_2073 __generic_copy_from_user_intel 0-3 2073 NULL
-+diva_set_driver_dbg_mask_2077 diva_set_driver_dbg_mask 0 2077 NULL
-+iwl_dbgfs_current_sleep_command_read_2081 iwl_dbgfs_current_sleep_command_read 3 2081 NULL
-+idetape_chrdev_read_2097 idetape_chrdev_read 3 2097 NULL
-+audit_expand_2098 audit_expand 2-0 2098 NULL
-+iwl_dbgfs_log_event_read_2107 iwl_dbgfs_log_event_read 3 2107 NULL
-+ecryptfs_encrypt_and_encode_filename_2109 ecryptfs_encrypt_and_encode_filename 6 2109 NULL
-+enable_read_2117 enable_read 3 2117 NULL
-+pcf50633_write_block_2124 pcf50633_write_block 3 2124 NULL
-+check_load_and_stores_2143 check_load_and_stores 2 2143 NULL
-+mlx4_init_icm_table_2151 mlx4_init_icm_table 5-4 2151 NULL
-+iov_iter_count_2152 iov_iter_count 0 2152 NULL
-+__copy_to_user_ll_2157 __copy_to_user_ll 0-3 2157 NULL
-+_ore_get_io_state_2166 _ore_get_io_state 3-4-5 2166 NULL
-+picolcd_debug_reset_write_2195 picolcd_debug_reset_write 3 2195 NULL
-+u32_array_read_2219 u32_array_read 3 2219 NULL
-+vhci_write_2224 vhci_write 3 2224 NULL
-+__ocfs2_journal_access_2241 __ocfs2_journal_access 0 2241 NULL
-+ieee80211_if_read_dot11MeshHWMPRannInterval_2249 ieee80211_if_read_dot11MeshHWMPRannInterval 3 2249 NULL
-+netlbl_secattr_catmap_walk_2255 netlbl_secattr_catmap_walk 0-2 2255 NULL
-+sel_write_avc_cache_threshold_2256 sel_write_avc_cache_threshold 3 2256 NULL
-+do_update_counters_2259 do_update_counters 4 2259 NULL
-+kvm_clear_guest_page_2308 kvm_clear_guest_page 4 2308 NULL
-+picolcd_fb_write_2318 picolcd_fb_write 3 2318 NULL
-+__erst_read_to_erange_2341 __erst_read_to_erange 0 2341 NULL
-+create_subvol_2347 create_subvol 4 2347 NULL
-+zr364xx_read_2354 zr364xx_read 3 2354 NULL
-+viafb_iga2_odev_proc_write_2363 viafb_iga2_odev_proc_write 3 2363 NULL
-+rose_recvmsg_2368 rose_recvmsg 4 2368 NULL
-+rts51x_read_ppbuf_2389 rts51x_read_ppbuf 3 2389 NULL
-+rxpipe_rx_prep_beacon_drop_read_2403 rxpipe_rx_prep_beacon_drop_read 3 2403 NULL
-+isdn_v110_open_2418 isdn_v110_open 3 2418 NULL
-+hfcpci_empty_fifo_2427 hfcpci_empty_fifo 4 2427 NULL
-+tty_buffer_find_2443 tty_buffer_find 2 2443 NULL
-+__sock_recvmsg_2467 __sock_recvmsg 0 2467 NULL
-+b43legacy_debugfs_read_2473 b43legacy_debugfs_read 3 2473 NULL
-+xfrm_spdinfo_msgsize_2474 xfrm_spdinfo_msgsize 0 2474 NULL
-+fc_fcp_send_data_2479 fc_fcp_send_data 4-3 2479 NULL
-+update_pmkid_2481 update_pmkid 4 2481 NULL
-+wiphy_new_2482 wiphy_new 2 2482 NULL
-+squashfs_read_fragment_index_table_2506 squashfs_read_fragment_index_table 4 2506 NULL
-+dm_write_2513 dm_write 3 2513 NULL
-+v9fs_cached_file_read_2514 v9fs_cached_file_read 3 2514 NULL
-+ext4_get_inode_loc_2516 ext4_get_inode_loc 0 2516 NULL
-+gspca_dev_probe_2570 gspca_dev_probe 4 2570 NULL
-+pcm_sanity_check_2574 pcm_sanity_check 0 2574 NULL
-+store_pwm1_enable_2577 store_pwm1_enable 4 2577 NULL
-+smk_write_logging_2618 smk_write_logging 3 2618 NULL
-+nlmsg_msg_size_2623 nlmsg_msg_size 0-1 2623 NULL
-+iwl4965_ucode_general_stats_read_2639 iwl4965_ucode_general_stats_read 3 2639 NULL
-+lro_gen_skb_2644 lro_gen_skb 6 2644 NULL
-+ffs_ep0_read_2672 ffs_ep0_read 3 2672 NULL
-+oti6858_write_2692 oti6858_write 4 2692 NULL
-+memcpy_fromiovecend_2707 memcpy_fromiovecend 3-4 2707 NULL
-+hid_report_raw_event_2762 hid_report_raw_event 4 2762 NULL
-+mon_bin_ioctl_2771 mon_bin_ioctl 3 2771 NULL
-+usbatm_pdu_length_2786 usbatm_pdu_length 0-1 2786 NULL
-+device_add_attrs_2789 device_add_attrs 0 2789 NULL
-+iwl_dbgfs_clear_ucode_statistics_write_2804 iwl_dbgfs_clear_ucode_statistics_write 3 2804 NULL
-+sel_read_enforce_2828 sel_read_enforce 3 2828 NULL
-+wait_for_avail_2847 wait_for_avail 0 2847 NULL
-+move_addr_to_user_2868 move_addr_to_user 2 2868 NULL
-+nla_padlen_2883 nla_padlen 1 2883 NULL
-+cmm_write_2896 cmm_write 3 2896 NULL
-+count_esp_combs_2926 count_esp_combs 0 2926 NULL
-+nes_read_indexed_2946 nes_read_indexed 0 2946 NULL
-+tm6000_i2c_recv_regs16_2949 tm6000_i2c_recv_regs16 5 2949 NULL
-+ppp_cp_event_2965 ppp_cp_event 6 2965 NULL
-+p9_nr_pages_2992 p9_nr_pages 0-2 2992 NULL
-+store_cardr_2997 store_cardr 4 2997 NULL
-+depth_write_3021 depth_write 3 3021 NULL
-+snd_azf3328_codec_inl_3022 snd_azf3328_codec_inl 0 3022 NULL
-+xfrm_dst_alloc_copy_3034 xfrm_dst_alloc_copy 3 3034 NULL
-+iwl_dbgfs_sleep_level_override_read_3038 iwl_dbgfs_sleep_level_override_read 3 3038 NULL nohasharray
-+lpfc_idiag_mbxacc_write_3038 lpfc_idiag_mbxacc_write 3 3038 &iwl_dbgfs_sleep_level_override_read_3038
-+nr_free_buffer_pages_3044 nr_free_buffer_pages 0 3044 NULL
-+calculate_min_size_3053 calculate_min_size 0 3053 NULL
-+__blk_end_bidi_request_3070 __blk_end_bidi_request 3-4 3070 NULL
-+dac960_user_command_proc_write_3071 dac960_user_command_proc_write 3 3071 NULL
-+rb_alloc_3102 rb_alloc 1 3102 NULL
-+simple_write_to_buffer_3122 simple_write_to_buffer 5-2 3122 NULL
-+fill_write_buffer_3142 fill_write_buffer 3 3142 NULL
-+b1_get_slice_3145 b1_get_slice 0 3145 NULL
-+CIFSSMBSetPosixACL_3154 CIFSSMBSetPosixACL 5 3154 NULL
-+compat_sys_migrate_pages_3157 compat_sys_migrate_pages 2 3157 NULL
-+encrypted_instantiate_3168 encrypted_instantiate 3 3168 NULL
-+uv_num_possible_blades_3177 uv_num_possible_blades 0 3177 NULL
-+compat_do_ip6t_set_ctl_3184 compat_do_ip6t_set_ctl 4 3184 NULL
-+alloc_context_3194 alloc_context 1 3194 NULL nohasharray
-+ep_aio_read_3194 ep_aio_read 3 3194 &alloc_context_3194
-+codec_reg_write_file_3204 codec_reg_write_file 3 3204 NULL
-+ath6kl_mgmt_tx_3230 ath6kl_mgmt_tx 9 3230 NULL
-+btrfs_next_leaf_3232 btrfs_next_leaf 0 3232 NULL
-+kimage_crash_alloc_3233 kimage_crash_alloc 3 3233 NULL
-+write_adapter_mem_3234 write_adapter_mem 3 3234 NULL
-+ext3_xattr_find_entry_3237 ext3_xattr_find_entry 0 3237 NULL
-+key_key_read_3241 key_key_read 3 3241 NULL
-+__ilog2_u64_3284 __ilog2_u64 0 3284 NULL
-+iwl_legacy_dbgfs_traffic_log_write_3296 iwl_legacy_dbgfs_traffic_log_write 3 3296 NULL
-+arvo_sysfs_write_3311 arvo_sysfs_write 6 3311 NULL
-+__iovec_copy_from_user_inatomic_3314 __iovec_copy_from_user_inatomic 4-3-0 3314 NULL
-+i915_gem_gtt_bind_object_3319 i915_gem_gtt_bind_object 0 3319 NULL
-+compat_sys_setsockopt_3326 compat_sys_setsockopt 5 3326 NULL
-+de600_read_byte_3332 de600_read_byte 0 3332 NULL
-+sctp_make_init_ack_3335 sctp_make_init_ack 4 3335 NULL
-+sysfs_create_group_3339 sysfs_create_group 0 3339 NULL
-+noack_write_3343 noack_write 3 3343 NULL
-+gsm_control_rls_3353 gsm_control_rls 3 3353 NULL
-+scnprintf_3360 scnprintf 0-2 3360 NULL
-+ReadByteAmd7930_3365 ReadByteAmd7930 0 3365 NULL
-+send_stream_3397 send_stream 4 3397 NULL
-+isdn_readbchan_3401 isdn_readbchan 0-5 3401 NULL
-+pci_add_cap_save_buffer_3426 pci_add_cap_save_buffer 3 3426 NULL
-+crystalhd_create_dio_pool_3427 crystalhd_create_dio_pool 2 3427 NULL
-+pipe_iov_copy_to_user_3447 pipe_iov_copy_to_user 3 3447 NULL
-+s3fb_ddc_read_3451 s3fb_ddc_read 0 3451 NULL
-+softsynth_write_3455 softsynth_write 3 3455 NULL
-+jffs2_acl_setxattr_3464 jffs2_acl_setxattr 4 3464 NULL nohasharray
-+snd_pcm_lib_readv_transfer_3464 snd_pcm_lib_readv_transfer 5-4-2 3464 &jffs2_acl_setxattr_3464
-+alloc_skb_fclone_3467 alloc_skb_fclone 1 3467 NULL
-+security_context_to_sid_default_3492 security_context_to_sid_default 2 3492 NULL
-+xfrm_migrate_msgsize_3496 xfrm_migrate_msgsize 1-0 3496 NULL
-+ieee80211_wx_set_gen_ie_rsl_3521 ieee80211_wx_set_gen_ie_rsl 3 3521 NULL
-+btrfs_dir_name_len_3549 btrfs_dir_name_len 0 3549 NULL
-+b43legacy_read16_3561 b43legacy_read16 0 3561 NULL
-+get_interface_3562 get_interface 0 3562 NULL
-+alloc_smp_resp_3566 alloc_smp_resp 1 3566 NULL
-+evtchn_read_3569 evtchn_read 3 3569 NULL
-+vc_resize_3585 vc_resize 3-2 3585 NULL
-+compat_sys_semtimedop_3606 compat_sys_semtimedop 3 3606 NULL
-+sctp_getsockopt_events_3607 sctp_getsockopt_events 2 3607 NULL
-+aligned_kmalloc_3628 aligned_kmalloc 1 3628 NULL
-+cm_copy_private_data_3649 cm_copy_private_data 2 3649 NULL
-+i915_compat_ioctl_3656 i915_compat_ioctl 2 3656 NULL
-+btmrvl_psmode_write_3703 btmrvl_psmode_write 3 3703 NULL nohasharray
-+snd_m3_assp_read_3703 snd_m3_assp_read 0 3703 &btmrvl_psmode_write_3703
-+ci_ll_write_3740 ci_ll_write 4 3740 NULL
-+ping_sendmsg_3782 ping_sendmsg 4 3782 NULL
-+sctp_setsockopt_auth_key_3793 sctp_setsockopt_auth_key 3 3793 NULL
-+ncp_file_write_3813 ncp_file_write 3 3813 NULL
-+llc_ui_recvmsg_3826 llc_ui_recvmsg 4 3826 NULL
-+read_file_tx_chainmask_3829 read_file_tx_chainmask 3 3829 NULL
-+__buf_prepare_3846 __buf_prepare 0 3846 NULL
-+ubi_eba_read_leb_3847 ubi_eba_read_leb 0 3847 NULL
-+smk_read_onlycap_3855 smk_read_onlycap 3 3855 NULL
-+get_fd_set_3866 get_fd_set 1 3866 NULL
-+apei_res_sub_3873 apei_res_sub 0 3873 NULL
-+garp_attr_create_3883 garp_attr_create 3 3883 NULL
-+uea_send_modem_cmd_3888 uea_send_modem_cmd 3 3888 NULL
-+nvram_write_3894 nvram_write 3 3894 NULL
-+comedi_buf_read_n_available_3899 comedi_buf_read_n_available 0 3899 NULL
-+vcs_write_3910 vcs_write 3 3910 NULL
-+pm860x_read_device_3958 pm860x_read_device 3 3958 NULL
-+i915_gem_object_get_fence_3981 i915_gem_object_get_fence 0 3981 NULL
-+do_add_counters_3992 do_add_counters 3 3992 NULL
-+userspace_status_4004 userspace_status 4 4004 NULL
-+xfs_check_block_4005 xfs_check_block 4 4005 NULL nohasharray
-+mei_write_4005 mei_write 3 4005 &xfs_check_block_4005
-+snd_hdsp_capture_copy_4011 snd_hdsp_capture_copy 5 4011 NULL
-+i915_gem_object_unbind_4016 i915_gem_object_unbind 0 4016 NULL
-+blk_end_request_4024 blk_end_request 3 4024 NULL
-+ext4_xattr_find_entry_4025 ext4_xattr_find_entry 0 4025 NULL
-+b1_get_word_4035 b1_get_word 0 4035 NULL
-+i915_gpu_idle_4062 i915_gpu_idle 0 4062 NULL
-+get_dmabuf_4065 get_dmabuf 2 4065 NULL
-+sctp_make_asconf_4078 sctp_make_asconf 3 4078 NULL
-+fbcon_do_set_font_4079 fbcon_do_set_font 2-3 4079 NULL
-+ab8500_address_write_4099 ab8500_address_write 3 4099 NULL
-+tm6000_read_4151 tm6000_read 3 4151 NULL
-+mpt_raid_phys_disk_get_num_paths_4155 mpt_raid_phys_disk_get_num_paths 0 4155 NULL
-+msg_bits_4158 msg_bits 0-3-4 4158 NULL
-+get_alua_req_4166 get_alua_req 3 4166 NULL
-+blk_dropped_read_4168 blk_dropped_read 3 4168 NULL
-+read_file_bool_4180 read_file_bool 3 4180 NULL
-+f1x_determine_channel_4202 f1x_determine_channel 2 4202 NULL
-+_osd_req_list_objects_4204 _osd_req_list_objects 6 4204 NULL
-+__snd_gf1_read_addr_4210 __snd_gf1_read_addr 0 4210 NULL
-+dvb_ringbuffer_pkt_read_user_4303 dvb_ringbuffer_pkt_read_user 2-3-5 4303 NULL
-+ath6kl_wmi_tcmd_test_report_rx_4314 ath6kl_wmi_tcmd_test_report_rx 3 4314 NULL
-+count_strings_4315 count_strings 0 4315 NULL
-+snd_rawmidi_kernel_read_4328 snd_rawmidi_kernel_read 3 4328 NULL
-+lookup_string_4365 lookup_string 0 4365 NULL nohasharray
-+__copy_from_user_inatomic_4365 __copy_from_user_inatomic 0-3 4365 &lookup_string_4365
-+sys_setdomainname_4373 sys_setdomainname 2 4373 NULL
-+irda_sendmsg_4388 irda_sendmsg 4 4388 NULL
-+cxacru_cm_get_array_4412 cxacru_cm_get_array 4 4412 NULL nohasharray
-+access_process_vm_4412 access_process_vm 0 4412 &cxacru_cm_get_array_4412
-+libfc_vport_create_4415 libfc_vport_create 2 4415 NULL
-+do_pages_stat_4437 do_pages_stat 2 4437 NULL
-+memparse_4444 memparse 0 4444 NULL
-+dn_alloc_send_pskb_4465 dn_alloc_send_pskb 2 4465 NULL
-+at76_set_card_command_4471 at76_set_card_command 4 4471 NULL
-+recv_control_msg_4476 recv_control_msg 5 4476 NULL
-+snd_seq_expand_var_event_4481 snd_seq_expand_var_event 5-0 4481 NULL
-+sys_semtimedop_4486 sys_semtimedop 3 4486 NULL
-+udp_sendmsg_4492 udp_sendmsg 4 4492 NULL
-+vmbus_establish_gpadl_4495 vmbus_establish_gpadl 3 4495 NULL
-+l1oip_socket_parse_4507 l1oip_socket_parse 4 4507 NULL
-+sys_llistxattr_4532 sys_llistxattr 3 4532 NULL
-+Read_4560 Read 0 4560 NULL
-+btrfs_file_extent_inline_item_len_4575 btrfs_file_extent_inline_item_len 0 4575 NULL
-+bch_alloc_4593 bch_alloc 1 4593 NULL
-+rbd_create_rw_ops_4605 rbd_create_rw_ops 2 4605 NULL
-+iwl_dbgfs_tx_queue_read_4635 iwl_dbgfs_tx_queue_read 3 4635 NULL
-+virtqueue_add_buf_gfp_4662 virtqueue_add_buf_gfp 4-3 4662 NULL
-+map_addr_4666 map_addr 6 4666 NULL
-+skb_add_data_nocache_4682 skb_add_data_nocache 4 4682 NULL
-+cx18_read_pos_4683 cx18_read_pos 3 4683 NULL
-+short_retry_limit_read_4687 short_retry_limit_read 3 4687 NULL
-+kone_receive_4690 kone_receive 4 4690 NULL
-+round_pipe_size_4701 round_pipe_size 0 4701 NULL
-+cxgbi_alloc_big_mem_4707 cxgbi_alloc_big_mem 1 4707 NULL
-+trusted_instantiate_4710 trusted_instantiate 3 4710 NULL
-+btmrvl_gpiogap_read_4718 btmrvl_gpiogap_read 3 4718 NULL
-+ati_create_gatt_pages_4722 ati_create_gatt_pages 1 4722 NULL nohasharray
-+show_header_4722 show_header 3 4722 &ati_create_gatt_pages_4722
-+ip6_ufo_append_data_4780 ip6_ufo_append_data 5-6-7 4780 NULL
-+ncp__vol2io_4804 ncp__vol2io 5 4804 NULL
-+__iio_allocate_sw_ring_buffer_4843 __iio_allocate_sw_ring_buffer 3-2 4843 NULL
-+gigaset_if_receive_4861 gigaset_if_receive 3 4861 NULL
-+key_tx_spec_read_4862 key_tx_spec_read 3 4862 NULL
-+ocfs2_defrag_extent_4873 ocfs2_defrag_extent 3-2 4873 NULL
-+hid_register_field_4874 hid_register_field 2-3 4874 NULL
-+vga_arb_read_4886 vga_arb_read 3 4886 NULL
-+sys_ipc_4889 sys_ipc 3 4889 NULL
-+del_ptr_4894 del_ptr 0 4894 NULL
-+sys_process_vm_writev_4928 sys_process_vm_writev 3-5 4928 NULL
-+ieee80211_if_fmt_ave_beacon_4941 ieee80211_if_fmt_ave_beacon 3 4941 NULL
-+devm_kzalloc_4966 devm_kzalloc 2 4966 NULL
-+compat_rawv6_setsockopt_4967 compat_rawv6_setsockopt 5 4967 NULL
-+skb_network_header_len_4971 skb_network_header_len 0 4971 NULL
-+do_mincore_5018 do_mincore 0-2-1 5018 NULL
-+mtd_device_parse_register_5024 mtd_device_parse_register 5 5024 NULL
-+__ip_select_ident_5046 __ip_select_ident 2 5046 NULL
-+ocfs2_check_range_for_holes_5066 ocfs2_check_range_for_holes 2-3 5066 NULL
-+__kmalloc_track_caller_5071 __kmalloc_track_caller 1 5071 NULL
-+snd_mixart_BA1_read_5082 snd_mixart_BA1_read 5 5082 NULL
-+snd_emu10k1_ptr20_read_5087 snd_emu10k1_ptr20_read 0 5087 NULL
-+get_random_bytes_5091 get_random_bytes 2 5091 NULL nohasharray
-+blk_rq_sectors_5091 blk_rq_sectors 0 5091 &get_random_bytes_5091 nohasharray
-+kfifo_copy_from_user_5091 kfifo_copy_from_user 3-4-0 5091 &blk_rq_sectors_5091
-+sound_write_5102 sound_write 3 5102 NULL
-+qib_7220_handle_hwerrors_5142 qib_7220_handle_hwerrors 3 5142 NULL
-+__uwb_addr_print_5161 __uwb_addr_print 2 5161 NULL
-+iwl_dbgfs_status_read_5171 iwl_dbgfs_status_read 3 5171 NULL
-+acpi_pcc_get_sqty_5176 acpi_pcc_get_sqty 0 5176 NULL
-+pipe_set_size_5204 pipe_set_size 2 5204 NULL
-+ppp_cp_parse_cr_5214 ppp_cp_parse_cr 4 5214 NULL
-+isdn_ppp_skb_push_5236 isdn_ppp_skb_push 2 5236 NULL
-+usb_descriptor_fillbuf_5302 usb_descriptor_fillbuf 0 5302 NULL
-+r592_write_fifo_pio_5315 r592_write_fifo_pio 3 5315 NULL
-+pwr_elp_enter_read_5324 pwr_elp_enter_read 3 5324 NULL
-+ad714x_i2c_read_5345 ad714x_i2c_read 4 5345 NULL
-+ps_pspoll_utilization_read_5361 ps_pspoll_utilization_read 3 5361 NULL
-+cciss_allocate_sg_chain_blocks_5368 cciss_allocate_sg_chain_blocks 3-2 5368 NULL
-+xfs_efd_init_5463 xfs_efd_init 3 5463 NULL
-+xfs_efi_init_5476 xfs_efi_init 2 5476 NULL
-+cifs_security_flags_proc_write_5484 cifs_security_flags_proc_write 3 5484 NULL
-+tty_write_5494 tty_write 3 5494 NULL
-+tomoyo_update_domain_5498 tomoyo_update_domain 2 5498 NULL nohasharray
-+ieee80211_if_fmt_last_beacon_5498 ieee80211_if_fmt_last_beacon 3 5498 &tomoyo_update_domain_5498
-+__max_nr_grant_frames_5505 __max_nr_grant_frames 0 5505 NULL
-+spidev_message_5518 spidev_message 3 5518 NULL
-+sctp_make_op_error_space_5528 sctp_make_op_error_space 3 5528 NULL
-+ieee80211_if_fmt_auto_open_plinks_5534 ieee80211_if_fmt_auto_open_plinks 3 5534 NULL
-+brcmu_pkt_buf_get_skb_5556 brcmu_pkt_buf_get_skb 1 5556 NULL
-+le_readq_5557 le_readq 0 5557 NULL
-+inw_5558 inw 0 5558 NULL
-+fir16_create_5574 fir16_create 3 5574 NULL
-+bioset_create_5580 bioset_create 1 5580 NULL
-+inet_sk_attr_size_5584 inet_sk_attr_size 0 5584 NULL
-+do_msgrcv_5590 do_msgrcv 4 5590 NULL
-+hidp_output_raw_report_5629 hidp_output_raw_report 3 5629 NULL
-+parse_arg_5657 parse_arg 2 5657 NULL
-+ext4_xattr_get_5661 ext4_xattr_get 0 5661 NULL
-+posix_clock_register_5662 posix_clock_register 2 5662 NULL
-+get_arg_5694 get_arg 3 5694 NULL
-+vmw_kms_readback_5727 vmw_kms_readback 6 5727 NULL
-+rts51x_transfer_data_partial_5735 rts51x_transfer_data_partial 6 5735 NULL
-+get_packet_5747 get_packet 3 5747 NULL
-+sctp_setsockopt_autoclose_5775 sctp_setsockopt_autoclose 3 5775 NULL
-+mlx4_alloc_resize_buf_5778 mlx4_alloc_resize_buf 3 5778 NULL
-+compat_sys_writev_5784 compat_sys_writev 3 5784 NULL
-+__vxge_hw_blockpool_malloc_5786 __vxge_hw_blockpool_malloc 2 5786 NULL
-+skb_copy_datagram_iovec_5806 skb_copy_datagram_iovec 2-4 5806 NULL
-+ceph_x_encrypt_buflen_5829 ceph_x_encrypt_buflen 0-1 5829 NULL
-+ceph_msg_new_5846 ceph_msg_new 2 5846 NULL
-+ixgb_check_copybreak_5847 ixgb_check_copybreak 3 5847 NULL
-+setup_req_5848 setup_req 3 5848 NULL
-+rx_q_entry_to_length_5855 rx_q_entry_to_length 0-1 5855 NULL
-+compat_sys_move_pages_5861 compat_sys_move_pages 2 5861 NULL
-+config_buf_5862 config_buf 0 5862 NULL
-+ext4_ext_correct_indexes_5865 ext4_ext_correct_indexes 0 5865 NULL
-+port_show_regs_5904 port_show_regs 3 5904 NULL
-+uhci_debug_read_5911 uhci_debug_read 3 5911 NULL
-+lbs_highsnr_read_5931 lbs_highsnr_read 3 5931 NULL
-+edac_device_alloc_ctl_info_5941 edac_device_alloc_ctl_info 1 5941 NULL
-+tipc_subseq_alloc_5957 tipc_subseq_alloc 1 5957 NULL
-+__apu_get_register_5967 __apu_get_register 0 5967 NULL
-+ieee80211_if_fmt_rc_rateidx_mask_5ghz_5971 ieee80211_if_fmt_rc_rateidx_mask_5ghz 3 5971 NULL
-+device_add_attributes_6058 device_add_attributes 0 6058 NULL
-+sctp_setsockopt_connectx_6073 sctp_setsockopt_connectx 3 6073 NULL nohasharray
-+send_video_command_6073 send_video_command 4 6073 &sctp_setsockopt_connectx_6073
-+ipmi_addr_length_6110 ipmi_addr_length 0 6110 NULL
-+dfs_global_file_write_6112 dfs_global_file_write 3 6112 NULL
-+netfs_trans_alloc_6136 netfs_trans_alloc 2-4 6136 NULL
-+ivtv_copy_buf_to_user_6159 ivtv_copy_buf_to_user 4 6159 NULL
-+wl1251_cmd_template_set_6172 wl1251_cmd_template_set 4 6172 NULL
-+i915_gem_execbuffer_move_to_gpu_6197 i915_gem_execbuffer_move_to_gpu 0 6197 NULL
-+nfc_alloc_skb_6216 nfc_alloc_skb 1 6216 NULL
-+v4l2_ctrl_new_std_menu_6221 v4l2_ctrl_new_std_menu 4 6221 NULL
-+mqueue_read_file_6228 mqueue_read_file 3 6228 NULL
-+f_hidg_read_6238 f_hidg_read 3 6238 NULL
-+fbcon_prepare_logo_6246 fbcon_prepare_logo 5 6246 NULL
-+snd_hda_override_conn_list_6282 snd_hda_override_conn_list 0 6282 NULL nohasharray
-+xenbus_file_write_6282 xenbus_file_write 3 6282 &snd_hda_override_conn_list_6282
-+iwl4965_rs_sta_dbgfs_stats_table_read_6289 iwl4965_rs_sta_dbgfs_stats_table_read 3 6289 NULL
-+set_local_name_6310 set_local_name 4 6310 NULL
-+hfa384x_inw_6329 hfa384x_inw 0 6329 NULL
-+_proc_do_string_6376 _proc_do_string 2 6376 NULL
-+osd_req_read_sg_kern_6378 osd_req_read_sg_kern 5 6378 NULL
-+BcmFlash2xBulkRead_6395 BcmFlash2xBulkRead 0 6395 NULL
-+bt_skb_alloc_6404 bt_skb_alloc 1 6404 NULL
-+l2up_create_6430 l2up_create 3 6430 NULL
-+ipr_change_queue_depth_6431 ipr_change_queue_depth 2 6431 NULL
-+__alloc_bootmem_node_nopanic_6432 __alloc_bootmem_node_nopanic 2 6432 NULL
-+ceph_sync_write_6466 ceph_sync_write 3 6466 NULL
-+ieee80211_if_fmt_dot11MeshMaxRetries_6476 ieee80211_if_fmt_dot11MeshMaxRetries 3 6476 NULL
-+cipso_v4_map_lvl_hton_6490 cipso_v4_map_lvl_hton 0 6490 NULL
-+dbg_intr_buf_6501 dbg_intr_buf 2 6501 NULL
-+ttm_get_pages_6504 ttm_get_pages 4 6504 NULL
-+mei_read_6507 mei_read 3 6507 NULL
-+read_file_disable_ani_6536 read_file_disable_ani 3 6536 NULL
-+rndis_set_oid_6547 rndis_set_oid 4 6547 NULL
-+wdm_read_6549 wdm_read 3 6549 NULL
-+fb_alloc_cmap_6554 fb_alloc_cmap 2 6554 NULL
-+bt_skb_send_alloc_6581 bt_skb_send_alloc 2 6581 NULL
-+ecryptfs_filldir_6622 ecryptfs_filldir 3 6622 NULL
-+dn_alloc_skb_6631 dn_alloc_skb 2 6631 NULL
-+virtscsi_alloc_tgt_6643 virtscsi_alloc_tgt 2 6643 NULL
-+process_rcvd_data_6679 process_rcvd_data 3 6679 NULL
-+iwl_dbgfs_clear_traffic_statistics_write_6681 iwl_dbgfs_clear_traffic_statistics_write 3 6681 NULL
-+ql_process_mac_rx_skb_6689 ql_process_mac_rx_skb 4 6689 NULL
-+ieee80211_build_preq_ies_6691 ieee80211_build_preq_ies 0 6691 NULL
-+btrfs_lookup_csums_range_6696 btrfs_lookup_csums_range 2-3 6696 NULL
-+ps_pspoll_max_apturn_read_6699 ps_pspoll_max_apturn_read 3 6699 NULL
-+mpeg_read_6708 mpeg_read 3 6708 NULL
-+ibmpex_query_sensor_count_6709 ibmpex_query_sensor_count 0 6709 NULL
-+video_proc_write_6724 video_proc_write 3 6724 NULL
-+posix_acl_xattr_count_6725 posix_acl_xattr_count 0-1 6725 NULL nohasharray
-+rts51x_transfer_data_rcc_6725 rts51x_transfer_data_rcc 4 6725 &posix_acl_xattr_count_6725
-+rds_rdma_pages_6735 rds_rdma_pages 0 6735 NULL
-+device_queue_depth_6771 device_queue_depth 0 6771 NULL
-+kobject_add_varg_6781 kobject_add_varg 0 6781 NULL
-+iwl_dbgfs_channels_read_6784 iwl_dbgfs_channels_read 3 6784 NULL
-+ieee80211_if_read_6785 ieee80211_if_read 3 6785 NULL
-+hdlcdrv_register_6792 hdlcdrv_register 2 6792 NULL
-+lbs_rdrf_write_6826 lbs_rdrf_write 3 6826 NULL
-+calc_pages_for_6838 calc_pages_for 0-1-2 6838 NULL
-+mon_bin_read_6841 mon_bin_read 3 6841 NULL
-+snd_cs4281_BA0_read_6847 snd_cs4281_BA0_read 5 6847 NULL
-+ip_select_ident_segs_6862 ip_select_ident_segs 3 6862 NULL
-+ieee80211_if_fmt_path_refresh_time_6888 ieee80211_if_fmt_path_refresh_time 3 6888 NULL nohasharray
-+raw_seticmpfilter_6888 raw_seticmpfilter 3 6888 &ieee80211_if_fmt_path_refresh_time_6888
-+dlmfs_file_write_6892 dlmfs_file_write 3 6892 NULL
-+proc_sessionid_read_6911 proc_sessionid_read 3 6911 NULL nohasharray
-+spi_show_regs_6911 spi_show_regs 3 6911 &proc_sessionid_read_6911
-+__kfifo_dma_in_finish_r_6913 __kfifo_dma_in_finish_r 2-3 6913 NULL
-+ieee80211_rx_mgmt_probe_resp_6918 ieee80211_rx_mgmt_probe_resp 3 6918 NULL
-+ieee80211_send_probe_req_6924 ieee80211_send_probe_req 6-4 6924 NULL
-+cache_do_downcall_6926 cache_do_downcall 3 6926 NULL
-+ipath_verbs_send_dma_6929 ipath_verbs_send_dma 6 6929 NULL
-+qsfp_cks_6945 qsfp_cks 2-0 6945 NULL
-+ab3100_get_register_page_interruptible_6951 ab3100_get_register_page_interruptible 4 6951 NULL
-+tg3_nvram_write_block_unbuffered_6955 tg3_nvram_write_block_unbuffered 3 6955 NULL nohasharray
-+dn_ifaddr_nlmsg_size_6955 dn_ifaddr_nlmsg_size 0 6955 &tg3_nvram_write_block_unbuffered_6955
-+pch_uart_hal_read_6961 pch_uart_hal_read 0 6961 NULL
-+request_key_async_6990 request_key_async 4 6990 NULL
-+r871x_set_wpa_ie_7000 r871x_set_wpa_ie 3 7000 NULL
-+cipso_v4_gentag_enum_7006 cipso_v4_gentag_enum 0 7006 NULL
-+tracing_cpumask_read_7010 tracing_cpumask_read 3 7010 NULL
-+ld_usb_write_7022 ld_usb_write 3 7022 NULL
-+wimax_msg_7030 wimax_msg 4 7030 NULL
-+ipath_get_base_info_7043 ipath_get_base_info 3 7043 NULL
-+snd_pcm_oss_bytes_7051 snd_pcm_oss_bytes 2 7051 NULL
-+sctp_make_op_error_7057 sctp_make_op_error 6-5 7057 NULL
-+hci_sock_recvmsg_7072 hci_sock_recvmsg 4 7072 NULL
-+event_enable_read_7074 event_enable_read 3 7074 NULL
-+beacon_interval_read_7091 beacon_interval_read 3 7091 NULL
-+qib_format_hwerrors_7133 qib_format_hwerrors 5 7133 NULL
-+send_mpa_reject_7135 send_mpa_reject 3 7135 NULL
-+utf16_strsize_7203 utf16_strsize 0 7203 NULL nohasharray
-+__alloc_objio_seg_7203 __alloc_objio_seg 1 7203 &utf16_strsize_7203
-+sys32_ipc_7238 sys32_ipc 3 7238 NULL
-+hdlc_loop_7255 hdlc_loop 0 7255 NULL
-+f_midi_start_ep_7270 f_midi_start_ep 0 7270 NULL
-+get_string_7302 get_string 0 7302 NULL
-+ieee80211_compatible_rates_7318 ieee80211_compatible_rates 0 7318 NULL
-+wait_on_sync_kiocb_7327 wait_on_sync_kiocb 0 7327 NULL
-+mgmt_control_7349 mgmt_control 3 7349 NULL
-+t1_get_slice_7350 t1_get_slice 0 7350 NULL
-+ieee80211_if_read_dot11MeshHWMPactivePathTimeout_7368 ieee80211_if_read_dot11MeshHWMPactivePathTimeout 3 7368 NULL
-+hweight_long_7388 hweight_long 0-1 7388 NULL
-+sl_change_mtu_7396 sl_change_mtu 2 7396 NULL
-+readb_7401 readb 0 7401 NULL
-+drm_property_create_blob_7414 drm_property_create_blob 2 7414 NULL
-+kvm_pv_mmu_op_7436 kvm_pv_mmu_op 3-2 7436 NULL
-+ip_options_get_alloc_7448 ip_options_get_alloc 1 7448 NULL
-+rt2x00debug_read_queue_stats_7455 rt2x00debug_read_queue_stats 3 7455 NULL
-+ms_rw_multi_sector_7459 ms_rw_multi_sector 3-4 7459 NULL
-+__mutex_lock_common_7469 __mutex_lock_common 0 7469 NULL
-+garp_request_join_7471 garp_request_join 4 7471 NULL
-+compat_sys_msgrcv_7482 compat_sys_msgrcv 2 7482 NULL
-+get_stats_7483 get_stats 0 7483 NULL
-+snd_pcm_lib_read1_7491 snd_pcm_lib_read1 0-3 7491 NULL
-+ahash_instance_headroom_7509 ahash_instance_headroom 0 7509 NULL nohasharray
-+sdhci_alloc_host_7509 sdhci_alloc_host 2 7509 &ahash_instance_headroom_7509
-+ext4_ext_insert_extent_7576 ext4_ext_insert_extent 0 7576 NULL
-+groups_alloc_7614 groups_alloc 1 7614 NULL nohasharray
-+create_dir_7614 create_dir 0 7614 &groups_alloc_7614
-+cpumask_first_7648 cpumask_first 0 7648 NULL
-+set_connectable_7649 set_connectable 4 7649 NULL
-+acpi_ex_allocate_name_string_7685 acpi_ex_allocate_name_string 2-1 7685 NULL nohasharray
-+skb_copy_expand_7685 skb_copy_expand 2-3 7685 &acpi_ex_allocate_name_string_7685
-+acpi_ns_get_pathname_length_7699 acpi_ns_get_pathname_length 0 7699 NULL
-+dev_write_7708 dev_write 3 7708 NULL
-+pci_raw_set_power_state_7729 pci_raw_set_power_state 0 7729 NULL
-+manip_pkt_7741 manip_pkt 3 7741 NULL
-+vxge_device_register_7752 vxge_device_register 4 7752 NULL
-+pohmelfs_path_length_7758 pohmelfs_path_length 0 7758 NULL
-+osdv2_attr_list_elem_size_7763 osdv2_attr_list_elem_size 0-1 7763 NULL
-+ubi_io_read_vid_hdr_7766 ubi_io_read_vid_hdr 0 7766 NULL
-+paths_from_inode_7774 paths_from_inode 0 7774 NULL
-+alloc_candev_7776 alloc_candev 1-2 7776 NULL
-+dfs_global_file_read_7787 dfs_global_file_read 3 7787 NULL
-+bnx2_nvram_write_7790 bnx2_nvram_write 4-2 7790 NULL
-+diva_os_copy_from_user_7792 diva_os_copy_from_user 4 7792 NULL
-+config_desc_7878 config_desc 0 7878 NULL
-+dvb_dmxdev_read_sec_7892 dvb_dmxdev_read_sec 4 7892 NULL
-+xd_read_data_from_ppb_7897 xd_read_data_from_ppb 4 7897 NULL
-+xfs_trans_get_efi_7898 xfs_trans_get_efi 2 7898 NULL
-+gfs2_tune_get_i_7903 gfs2_tune_get_i 0 7903 NULL
-+libfc_host_alloc_7917 libfc_host_alloc 2 7917 NULL
-+do_surface_dirty_sou_7920 do_surface_dirty_sou 7 7920 NULL
-+f_hidg_write_7932 f_hidg_write 3 7932 NULL
-+smk_write_load_self_7958 smk_write_load_self 3 7958 NULL
-+sys_mbind_7990 sys_mbind 5 7990 NULL
-+sep_lock_user_pages_8000 sep_lock_user_pages 2-3 8000 NULL
-+vcs_read_8017 vcs_read 3 8017 NULL
-+normalize_up_8037 normalize_up 0-2-1 8037 NULL
-+vhost_add_used_and_signal_n_8038 vhost_add_used_and_signal_n 4 8038 NULL
-+iser_rcv_completion_8048 iser_rcv_completion 2 8048 NULL
-+ms_read_multiple_pages_8052 ms_read_multiple_pages 5-4 8052 NULL
-+leb_read_lock_8070 leb_read_lock 0 8070 NULL
-+ext4_ext_map_blocks_8078 ext4_ext_map_blocks 0 8078 NULL
-+venus_lookup_8121 venus_lookup 4 8121 NULL
-+ieee80211_if_fmt_num_buffered_multicast_8127 ieee80211_if_fmt_num_buffered_multicast 3 8127 NULL
-+CalcCalPLL_8136 CalcCalPLL 0 8136 NULL
-+ext_sd_execute_write_data_8175 ext_sd_execute_write_data 9-11 8175 NULL
-+__sk_mem_schedule_8185 __sk_mem_schedule 2 8185 NULL
-+ieee80211_if_fmt_dot11MeshHoldingTimeout_8187 ieee80211_if_fmt_dot11MeshHoldingTimeout 3 8187 NULL
-+__nf_nat_mangle_tcp_packet_8190 __nf_nat_mangle_tcp_packet 5-7 8190 NULL
-+recent_mt_proc_write_8206 recent_mt_proc_write 3 8206 NULL
-+rt2x00debug_write_bbp_8212 rt2x00debug_write_bbp 3 8212 NULL
-+ad7879_spi_multi_read_8218 ad7879_spi_multi_read 3 8218 NULL
-+play_iframe_8219 play_iframe 3 8219 NULL
-+sctp_ssnmap_size_8228 sctp_ssnmap_size 0-1-2 8228 NULL
-+check_xattr_ref_inode_8244 check_xattr_ref_inode 0 8244 NULL
-+add_rx_skb_8257 add_rx_skb 3 8257 NULL
-+t3_init_l2t_8261 t3_init_l2t 1 8261 NULL
-+init_cdev_8274 init_cdev 1 8274 NULL
-+qib_decode_7220_err_8315 qib_decode_7220_err 3 8315 NULL
-+construct_key_and_link_8321 construct_key_and_link 4 8321 NULL
-+ipwireless_send_packet_8328 ipwireless_send_packet 4 8328 NULL
-+__c4iw_init_resource_fifo_8334 __c4iw_init_resource_fifo 3 8334 NULL
-+tracing_entries_read_8345 tracing_entries_read 3 8345 NULL
-+ping_getfrag_8360 ping_getfrag 4-3 8360 NULL
-+ath6kl_lrssi_roam_write_8362 ath6kl_lrssi_roam_write 3 8362 NULL
-+xdi_copy_from_user_8395 xdi_copy_from_user 4 8395 NULL
-+zd_rf_scnprint_id_8406 zd_rf_scnprint_id 0-3 8406 NULL
-+uvc_v4l2_ioctl_8411 uvc_v4l2_ioctl 2 8411 NULL
-+snd_usb_ctl_msg_8436 snd_usb_ctl_msg 8 8436 NULL
-+generic_bin_search_8440 generic_bin_search 0 8440 NULL
-+afs_cell_lookup_8482 afs_cell_lookup 2 8482 NULL
-+fore200e_chunk_alloc_8501 fore200e_chunk_alloc 4-3 8501 NULL
-+dev_config_8506 dev_config 3 8506 NULL
-+ACL_to_cifs_posix_8509 ACL_to_cifs_posix 3 8509 NULL
-+utf16_strnlen_8513 utf16_strnlen 0 8513 NULL
-+snd_malloc_sgbuf_pages_8532 snd_malloc_sgbuf_pages 2 8532 NULL
-+ocfs2_read_virt_blocks_8538 ocfs2_read_virt_blocks 2-3 8538 NULL
-+profile_remove_8556 profile_remove 3 8556 NULL
-+cache_slow_downcall_8570 cache_slow_downcall 2 8570 NULL
-+isr_dma0_done_read_8574 isr_dma0_done_read 3 8574 NULL
-+tower_write_8580 tower_write 3 8580 NULL
-+rtllib_MFIE_rate_len_8606 rtllib_MFIE_rate_len 0 8606 NULL
-+shash_setkey_unaligned_8620 shash_setkey_unaligned 3 8620 NULL
-+it821x_firmware_command_8628 it821x_firmware_command 3 8628 NULL
-+scsi_dma_map_8632 scsi_dma_map 0 8632 NULL
-+fuse_send_write_pages_8636 fuse_send_write_pages 0 8636 NULL
-+nf_nat_mangle_tcp_packet_8643 nf_nat_mangle_tcp_packet 5-7 8643 NULL
-+generic_acl_set_8658 generic_acl_set 4 8658 NULL
-+ath6kl_tm_rx_report_event_8660 ath6kl_tm_rx_report_event 3 8660 NULL
-+lbs_bcnmiss_read_8678 lbs_bcnmiss_read 3 8678 NULL
-+skb_frag_size_8695 skb_frag_size 0 8695 NULL
-+arcfb_write_8702 arcfb_write 3 8702 NULL
-+i_size_read_8703 i_size_read 0 8703 NULL nohasharray
-+init_header_8703 init_header 0 8703 &i_size_read_8703
-+cifs_writedata_alloc_8710 cifs_writedata_alloc 1 8710 NULL
-+ctrl_out_8712 ctrl_out 3-5 8712 NULL
-+tracing_max_lat_write_8728 tracing_max_lat_write 3 8728 NULL
-+jffs2_acl_count_8729 jffs2_acl_count 0-1 8729 NULL
-+em28xx_init_isoc_8755 em28xx_init_isoc 3-2-4-0 8755 NULL
-+yurex_write_8761 yurex_write 3 8761 NULL
-+joydev_compat_ioctl_8765 joydev_compat_ioctl 2 8765 NULL
-+kstrtoint_from_user_8778 kstrtoint_from_user 2 8778 NULL
-+__bitmap_weight_8796 __bitmap_weight 0-2 8796 NULL
-+cpuset_common_file_read_8800 cpuset_common_file_read 5 8800 NULL
-+intel_ring_begin_8808 intel_ring_begin 0 8808 NULL
-+metronomefb_write_8823 metronomefb_write 3 8823 NULL
-+get_queue_depth_8833 get_queue_depth 0 8833 NULL
-+dvb_ringbuffer_pkt_next_8834 dvb_ringbuffer_pkt_next 0-2 8834 NULL
-+usb_ep_queue_8839 usb_ep_queue 0 8839 NULL
-+wa_nep_queue_8858 wa_nep_queue 2 8858 NULL
-+iwl_dbgfs_debug_level_write_8871 iwl_dbgfs_debug_level_write 3 8871 NULL
-+compressed_bio_size_8887 compressed_bio_size 0-2 8887 NULL
-+ab3100_get_set_reg_8890 ab3100_get_set_reg 3 8890 NULL nohasharray
-+tracing_max_lat_read_8890 tracing_max_lat_read 3 8890 &ab3100_get_set_reg_8890
-+sdio_max_byte_size_8907 sdio_max_byte_size 0 8907 NULL
-+sysfs_merge_group_8917 sysfs_merge_group 0 8917 NULL
-+write_file_ani_8918 write_file_ani 3 8918 NULL
-+layout_commit_8926 layout_commit 3 8926 NULL
-+adjust_priv_size_8935 adjust_priv_size 0-1 8935 NULL
-+driver_stats_read_8944 driver_stats_read 3 8944 NULL
-+read_file_tgt_stats_8959 read_file_tgt_stats 3 8959 NULL
-+usb_allocate_stream_buffers_8964 usb_allocate_stream_buffers 3 8964 NULL
-+qib_qsfp_dump_8966 qib_qsfp_dump 0-3 8966 NULL
-+venus_mkdir_8967 venus_mkdir 4 8967 NULL
-+seq_open_net_8968 seq_open_net 4 8968 NULL nohasharray
-+vol_cdev_read_8968 vol_cdev_read 3 8968 &seq_open_net_8968
-+bio_integrity_get_tag_8974 bio_integrity_get_tag 3 8974 NULL
-+snd_emu10k1_ptr_read_9026 snd_emu10k1_ptr_read 0-2 9026 NULL
-+fd_ioctl_9028 fd_ioctl 3 9028 NULL
-+nla_put_9042 nla_put 3 9042 NULL
-+snd_emu10k1_synth_copy_from_user_9061 snd_emu10k1_synth_copy_from_user 3-5 9061 NULL
-+snd_gus_dram_peek_9062 snd_gus_dram_peek 4 9062 NULL
-+fib_info_hash_alloc_9075 fib_info_hash_alloc 1 9075 NULL
-+create_queues_9088 create_queues 2-3 9088 NULL
-+ftdi_prepare_write_buffer_9093 ftdi_prepare_write_buffer 3 9093 NULL
-+caif_stream_sendmsg_9110 caif_stream_sendmsg 4 9110 NULL
-+pmcraid_change_queue_depth_9116 pmcraid_change_queue_depth 2 9116 NULL
-+brcmf_sdbrcm_send_buf_9129 brcmf_sdbrcm_send_buf 6 9129 NULL
-+apei_resources_merge_9149 apei_resources_merge 0 9149 NULL
-+dbg_command_buf_9165 dbg_command_buf 2 9165 NULL
-+isr_irqs_read_9181 isr_irqs_read 3 9181 NULL
-+altera_swap_ir_9194 altera_swap_ir 2 9194 NULL nohasharray
-+alloc_group_attrs_9194 alloc_group_attrs 2 9194 &altera_swap_ir_9194
-+sep_prepare_input_output_dma_table_9200 sep_prepare_input_output_dma_table 4-3-2 9200 NULL
-+snd_m3_get_pointer_9206 snd_m3_get_pointer 0 9206 NULL
-+l2cap_create_connless_pdu_9222 l2cap_create_connless_pdu 3 9222 NULL
-+sctp_getsockopt_delayed_ack_9232 sctp_getsockopt_delayed_ack 2 9232 NULL
-+ext4_mark_iloc_dirty_9239 ext4_mark_iloc_dirty 0 9239 NULL
-+schedule_erase_9240 schedule_erase 0 9240 NULL
-+cmtp_add_msgpart_9252 cmtp_add_msgpart 4 9252 NULL
-+ocfs2_clear_ext_refcount_9256 ocfs2_clear_ext_refcount 4 9256 NULL
-+tcf_csum_ipv4_icmp_9258 tcf_csum_ipv4_icmp 3 9258 NULL
-+btrfs_search_slot_9264 btrfs_search_slot 0 9264 NULL
-+sparse_early_usemaps_alloc_node_9269 sparse_early_usemaps_alloc_node 4 9269 NULL
-+hdpvr_read_9273 hdpvr_read 3 9273 NULL
-+iwl_dbgfs_stations_read_9309 iwl_dbgfs_stations_read 3 9309 NULL
-+ceph_sync_setxattr_9310 ceph_sync_setxattr 4 9310 NULL
-+sk_rmem_schedule_9331 sk_rmem_schedule 2 9331 NULL
-+ocfs2_orphan_for_truncate_9342 ocfs2_orphan_for_truncate 4 9342 NULL
-+get_request_type_9393 get_request_type 0 9393 NULL
-+read_9397 read 3 9397 NULL
-+set_gpio_9412 set_gpio 0 9412 NULL
-+bm_realloc_pages_9431 bm_realloc_pages 2 9431 NULL
-+ffs_ep0_write_9438 ffs_ep0_write 3 9438 NULL
-+kmalloc_array_9444 kmalloc_array 1-2 9444 NULL
-+ieee80211_if_fmt_fwded_unicast_9454 ieee80211_if_fmt_fwded_unicast 3 9454 NULL
-+mcs_unwrap_mir_9455 mcs_unwrap_mir 3 9455 NULL
-+ext3_xattr_set_acl_9467 ext3_xattr_set_acl 4 9467 NULL
-+agp_generic_alloc_user_9470 agp_generic_alloc_user 1 9470 NULL
-+rbd_coll_end_req_9472 rbd_coll_end_req 3 9472 NULL
-+__alloc_preds_9492 __alloc_preds 2 9492 NULL
-+sock_recvmsg_9500 sock_recvmsg 0 9500 NULL
-+lbs_threshold_write_9502 lbs_threshold_write 5 9502 NULL
-+lp_write_9511 lp_write 3 9511 NULL
-+mext_calc_swap_extents_9517 mext_calc_swap_extents 4 9517 NULL
-+scsi_tgt_kspace_exec_9522 scsi_tgt_kspace_exec 8 9522 NULL
-+read_file_dma_9530 read_file_dma 3 9530 NULL
-+nlmsg_parse_9536 nlmsg_parse 2 9536 NULL
-+pohmelfs_send_readpages_9537 pohmelfs_send_readpages 3 9537 NULL
-+audit_log_n_untrustedstring_9548 audit_log_n_untrustedstring 3 9548 NULL
-+fw_node_create_9559 fw_node_create 2 9559 NULL
-+kobj_map_9566 kobj_map 2-3 9566 NULL
-+biovec_create_pools_9575 biovec_create_pools 2 9575 NULL
-+ieee80211_tdls_mgmt_9581 ieee80211_tdls_mgmt 8 9581 NULL
-+do_sync_9604 do_sync 1 9604 NULL
-+snd_emu10k1_fx8010_read_9605 snd_emu10k1_fx8010_read 5-6 9605 NULL
-+saa7164_buffer_alloc_user_9627 saa7164_buffer_alloc_user 2 9627 NULL
-+acpi_ex_insert_into_field_9638 acpi_ex_insert_into_field 3 9638 NULL
-+compat_sys_keyctl_9639 compat_sys_keyctl 4 9639 NULL
-+ocfs2_xattr_get_rec_9652 ocfs2_xattr_get_rec 0 9652 NULL
-+queue_received_packet_9657 queue_received_packet 5 9657 NULL
-+snd_opl4_mem_proc_write_9670 snd_opl4_mem_proc_write 5 9670 NULL
-+dns_query_9676 dns_query 3-0 9676 NULL nohasharray
-+ks8842_read16_9676 ks8842_read16 0 9676 &dns_query_9676
-+qib_7322_handle_hwerrors_9678 qib_7322_handle_hwerrors 3 9678 NULL
-+__erst_read_from_storage_9690 __erst_read_from_storage 0 9690 NULL
-+is_hole_9694 is_hole 2 9694 NULL
-+vx_transfer_end_9701 vx_transfer_end 0 9701 NULL
-+ieee80211_if_read_aid_9705 ieee80211_if_read_aid 3 9705 NULL
-+ddb_input_read_9743 ddb_input_read 3-0 9743 NULL
-+do_sigpending_9766 do_sigpending 2 9766 NULL
-+__blk_queue_init_tags_9778 __blk_queue_init_tags 2 9778 NULL
-+snd_mem_proc_write_9786 snd_mem_proc_write 3 9786 NULL
-+parse_uac2_sample_rate_range_9801 parse_uac2_sample_rate_range 0 9801 NULL
-+tpm_data_in_9802 tpm_data_in 0 9802 NULL
-+ttm_bo_fbdev_io_9805 ttm_bo_fbdev_io 4 9805 NULL
-+ieee80211_if_read_state_9813 ieee80211_if_read_state 3 9813 NULL nohasharray
-+udpv6_recvmsg_9813 udpv6_recvmsg 4 9813 &ieee80211_if_read_state_9813
-+cfg80211_send_deauth_9862 cfg80211_send_deauth 3 9862 NULL
-+get_blk_table_len_9863 get_blk_table_len 0 9863 NULL
-+pmcraid_alloc_sglist_9864 pmcraid_alloc_sglist 1 9864 NULL
-+snd_midi_event_new_9893 snd_midi_event_new 1 9893 NULL nohasharray
-+bm_register_write_9893 bm_register_write 3 9893 &snd_midi_event_new_9893
-+snd_gf1_pcm_playback_copy_9895 snd_gf1_pcm_playback_copy 3-5 9895 NULL
-+iwm_rx_packet_alloc_9898 iwm_rx_packet_alloc 3 9898 NULL
-+receive_DataRequest_9904 receive_DataRequest 3 9904 NULL
-+ext4_map_blocks_9916 ext4_map_blocks 0 9916 NULL
-+root_nfs_parse_options_9937 root_nfs_parse_options 3 9937 NULL
-+read_file_misc_9948 read_file_misc 3 9948 NULL
-+set_rxd_buffer_pointer_9950 set_rxd_buffer_pointer 8 9950 NULL
-+csum_partial_copy_fromiovecend_9957 csum_partial_copy_fromiovecend 3-4 9957 NULL
-+btrfs_add_link_9973 btrfs_add_link 5 9973 NULL
-+gameport_read_9983 gameport_read 0 9983 NULL
-+nfs_readdata_alloc_9990 nfs_readdata_alloc 1 9990 NULL
-+kovaplus_send_10009 kovaplus_send 4 10009 NULL
-+aat2870_dump_reg_10019 aat2870_dump_reg 0 10019 NULL
-+handle_request_10024 handle_request 9 10024 NULL
-+rbd_coll_end_req_index_10041 rbd_coll_end_req_index 5 10041 NULL
-+userpolicy_type_attrsize_10067 userpolicy_type_attrsize 0 10067 NULL
-+cifs_llseek_10091 cifs_llseek 2 10091 NULL
-+get_elem_size_10110 get_elem_size 0-2 10110 NULL
-+aes_decrypt_packets_read_10155 aes_decrypt_packets_read 3 10155 NULL
-+rx_out_of_mem_read_10157 rx_out_of_mem_read 3 10157 NULL
-+asd_store_update_bios_10165 asd_store_update_bios 4 10165 NULL
-+kstrtol_from_user_10168 kstrtol_from_user 2 10168 NULL
-+proc_pid_attr_read_10173 proc_pid_attr_read 3 10173 NULL
-+jffs2_user_setxattr_10182 jffs2_user_setxattr 4 10182 NULL
-+cciss_proc_write_10259 cciss_proc_write 3 10259 NULL
-+snd_pcm_lib_preallocate_pages1_10273 snd_pcm_lib_preallocate_pages1 2 10273 NULL
-+snd_rme9652_capture_copy_10287 snd_rme9652_capture_copy 5 10287 NULL
-+read_emulate_10310 read_emulate 2-4 10310 NULL
-+ttm_object_device_init_10321 ttm_object_device_init 2 10321 NULL
-+tun_sendmsg_10337 tun_sendmsg 4 10337 NULL
-+em28xx_read_reg_req_len_10340 em28xx_read_reg_req_len 0 10340 NULL
-+ufx_alloc_urb_list_10349 ufx_alloc_urb_list 3 10349 NULL
-+whci_add_cap_10350 whci_add_cap 0 10350 NULL
-+dbAllocAny_10354 dbAllocAny 0 10354 NULL
-+ms_write_multiple_pages_10362 ms_write_multiple_pages 6-5 10362 NULL
-+sta_ht_capa_read_10366 sta_ht_capa_read 3 10366 NULL
-+ecryptfs_decode_and_decrypt_filename_10379 ecryptfs_decode_and_decrypt_filename 5 10379 NULL
-+do_compat_pselect_10398 do_compat_pselect 1 10398 NULL
-+event_phy_transmit_error_read_10471 event_phy_transmit_error_read 3 10471 NULL
-+qib_alloc_fast_reg_page_list_10507 qib_alloc_fast_reg_page_list 2 10507 NULL
-+rbd_get_segment_10511 rbd_get_segment 0-3-4 10511 NULL nohasharray
-+sel_write_disable_10511 sel_write_disable 3 10511 &rbd_get_segment_10511
-+osd_req_write_sg_kern_10514 osd_req_write_sg_kern 5 10514 NULL
-+rds_message_alloc_10517 rds_message_alloc 1 10517 NULL
-+ocfs2_add_refcounted_extent_10526 ocfs2_add_refcounted_extent 6 10526 NULL
-+snd_pcm_lib_read_10536 snd_pcm_lib_read 0-3 10536 NULL nohasharray
-+kstrtouint_from_user_10536 kstrtouint_from_user 2 10536 &snd_pcm_lib_read_10536
-+bcm_ioctl_fw_download_10548 bcm_ioctl_fw_download 0 10548 NULL
-+i915_write_fence_reg_10551 i915_write_fence_reg 0 10551 NULL
-+otp_read_10594 otp_read 2-4-5 10594 NULL
-+supply_map_read_file_10608 supply_map_read_file 3 10608 NULL
-+ima_show_htable_violations_10619 ima_show_htable_violations 3 10619 NULL
-+cxgb3_get_cpl_reply_skb_10620 cxgb3_get_cpl_reply_skb 2 10620 NULL
-+write_file_rx_chainmask_10636 write_file_rx_chainmask 3 10636 NULL
-+__qbuf_mmap_10642 __qbuf_mmap 0 10642 NULL
-+br_nlmsg_size_10645 br_nlmsg_size 0 10645 NULL
-+ubi_io_write_vid_hdr_10660 ubi_io_write_vid_hdr 0 10660 NULL
-+efx_max_tx_len_10662 efx_max_tx_len 0-2 10662 NULL
-+ni65_alloc_mem_10664 ni65_alloc_mem 3 10664 NULL
-+parport_write_10669 parport_write 0 10669 NULL
-+tcp_push_10680 tcp_push 3 10680 NULL
-+edge_write_10692 edge_write 4 10692 NULL
-+selinux_inode_setxattr_10708 selinux_inode_setxattr 4 10708 NULL nohasharray
-+inl_10708 inl 0 10708 &selinux_inode_setxattr_10708
-+shash_async_setkey_10720 shash_async_setkey 3 10720 NULL nohasharray
-+pvr2_ioread_read_10720 pvr2_ioread_read 3 10720 &shash_async_setkey_10720
-+__iscsi_complete_pdu_10726 __iscsi_complete_pdu 4 10726 NULL
-+spi_sync_10731 spi_sync 0 10731 NULL
-+sctp_getsockopt_maxseg_10737 sctp_getsockopt_maxseg 2 10737 NULL nohasharray
-+apu_get_register_10737 apu_get_register 0 10737 &sctp_getsockopt_maxseg_10737
-+compat_sys_msgsnd_10738 compat_sys_msgsnd 2 10738 NULL
-+ttm_ref_object_add_10748 ttm_ref_object_add 0 10748 NULL
-+vhost_add_used_n_10760 vhost_add_used_n 3 10760 NULL
-+kvm_read_guest_atomic_10765 kvm_read_guest_atomic 4 10765 NULL
-+posix_acl_to_xattr_10767 posix_acl_to_xattr 0 10767 NULL
-+loopback_bytepos_update_10776 loopback_bytepos_update 2 10776 NULL
-+i915_gem_wait_for_error_10791 i915_gem_wait_for_error 0 10791 NULL
-+sys_bind_10799 sys_bind 3 10799 NULL
-+diva_set_trace_filter_10820 diva_set_trace_filter 0-1 10820 NULL
-+send_command_10832 send_command 4 10832 NULL
-+lbs_sleepparams_read_10840 lbs_sleepparams_read 3 10840 NULL
-+fuse_conn_max_background_read_10855 fuse_conn_max_background_read 3 10855 NULL
-+ol_chunk_blocks_10864 ol_chunk_blocks 0 10864 NULL
-+snd_pcm_oss_write1_10872 snd_pcm_oss_write1 3 10872 NULL
-+drm_ht_insert_item_10877 drm_ht_insert_item 0 10877 NULL
-+get_scq_10897 get_scq 2 10897 NULL
-+cgroup_write_string_10900 cgroup_write_string 5 10900 NULL
-+tifm_alloc_adapter_10903 tifm_alloc_adapter 1 10903 NULL
-+__copy_from_user_10918 __copy_from_user 3-0 10918 NULL
-+kobject_add_10919 kobject_add 0 10919 NULL
-+iwl_calib_set_10944 iwl_calib_set 3 10944 NULL
-+bm_entry_read_10976 bm_entry_read 3 10976 NULL
-+sched_autogroup_write_10984 sched_autogroup_write 3 10984 NULL
-+xfrm_hash_alloc_10997 xfrm_hash_alloc 1 10997 NULL
-+tda10048_writeregbulk_11050 tda10048_writeregbulk 4 11050 NULL
-+carl9170_handle_mpdu_11056 carl9170_handle_mpdu 3 11056 NULL
-+tcp_send_mss_11079 tcp_send_mss 0 11079 NULL
-+count_argc_11083 count_argc 0 11083 NULL
-+kvm_write_guest_cached_11106 kvm_write_guest_cached 4 11106 NULL
-+tw_change_queue_depth_11116 tw_change_queue_depth 2 11116 NULL
-+page_offset_11120 page_offset 0 11120 NULL
-+tracing_buffers_read_11124 tracing_buffers_read 3 11124 NULL
-+alloc_alien_cache_11127 alloc_alien_cache 2 11127 NULL
-+ioat2_alloc_ring_11172 ioat2_alloc_ring 2 11172 NULL nohasharray
-+snd_gf1_pcm_playback_silence_11172 snd_gf1_pcm_playback_silence 3-4 11172 &ioat2_alloc_ring_11172
-+__swab16p_11220 __swab16p 0 11220 NULL
-+hugetlbfs_read_11268 hugetlbfs_read 3 11268 NULL
-+ext4_xattr_check_names_11314 ext4_xattr_check_names 0 11314 NULL
-+construct_key_11329 construct_key 3 11329 NULL nohasharray
-+__kfifo_out_peek_11329 __kfifo_out_peek 0-3 11329 &construct_key_11329
-+next_segment_11330 next_segment 0-2-1 11330 NULL
-+i915_max_freq_write_11350 i915_max_freq_write 3 11350 NULL
-+sel_write_create_11353 sel_write_create 3 11353 NULL
-+drm_vblank_init_11362 drm_vblank_init 2 11362 NULL
-+qib_get_base_info_11369 qib_get_base_info 3 11369 NULL
-+dev_irnet_write_11398 dev_irnet_write 3 11398 NULL
-+___alloc_bootmem_11410 ___alloc_bootmem 1 11410 NULL
-+str_to_user_11411 str_to_user 2 11411 NULL
-+trace_options_read_11419 trace_options_read 3 11419 NULL
-+xd_read_multiple_pages_11422 xd_read_multiple_pages 5-4 11422 NULL
-+bttv_read_11432 bttv_read 3 11432 NULL
-+pci_set_power_state_11479 pci_set_power_state 0 11479 NULL nohasharray
-+sca3000_read_first_n_hw_rb_11479 sca3000_read_first_n_hw_rb 2 11479 &pci_set_power_state_11479
-+sd_do_mode_sense_11507 sd_do_mode_sense 5 11507 NULL
-+kmem_zalloc_11510 kmem_zalloc 1 11510 NULL
-+skb_cow_data_11565 skb_cow_data 0-2 11565 NULL
-+mlx4_init_cmpt_table_11569 mlx4_init_cmpt_table 3 11569 NULL
-+lpfc_idiag_ctlacc_write_11576 lpfc_idiag_ctlacc_write 3 11576 NULL nohasharray
-+rts51x_write_ppbuf_11576 rts51x_write_ppbuf 3 11576 &lpfc_idiag_ctlacc_write_11576
-+oprofilefs_ulong_to_user_11582 oprofilefs_ulong_to_user 3 11582 NULL
-+snd_pcm_action_11589 snd_pcm_action 0 11589 NULL
-+fw_device_op_ioctl_11595 fw_device_op_ioctl 2 11595 NULL
-+hycapi_rx_capipkt_11602 hycapi_rx_capipkt 3 11602 NULL
-+sisusb_send_bridge_packet_11649 sisusb_send_bridge_packet 2 11649 NULL
-+nla_total_size_11658 nla_total_size 0-1 11658 NULL
-+ide_queue_pc_tail_11673 ide_queue_pc_tail 5 11673 NULL
-+btrfs_alloc_delayed_item_11678 btrfs_alloc_delayed_item 1 11678 NULL
-+iwm_ntf_calib_res_11686 iwm_ntf_calib_res 3 11686 NULL
-+sctp_setsockopt_hmac_ident_11687 sctp_setsockopt_hmac_ident 3 11687 NULL
-+split_11691 split 2 11691 NULL
-+snd_ctl_elem_user_tlv_11695 snd_ctl_elem_user_tlv 3 11695 NULL
-+blk_rq_cur_bytes_11723 blk_rq_cur_bytes 0 11723 NULL
-+i2c_master_recv_11734 i2c_master_recv 0-3 11734 NULL
-+tcf_csum_ipv6_icmp_11738 tcf_csum_ipv6_icmp 4 11738 NULL
-+nfsd4_get_drc_mem_11748 nfsd4_get_drc_mem 0-1-2 11748 NULL
-+iwl_dbgfs_qos_read_11753 iwl_dbgfs_qos_read 3 11753 NULL
-+rd_regl_11767 rd_regl 0 11767 NULL
-+ps_pspoll_timeouts_read_11776 ps_pspoll_timeouts_read 3 11776 NULL
-+pcpu_fc_alloc_11818 pcpu_fc_alloc 2 11818 NULL
-+umc_device_register_11824 umc_device_register 0 11824 NULL
-+zerocopy_sg_from_iovec_11828 zerocopy_sg_from_iovec 3 11828 NULL
-+sctp_setsockopt_maxseg_11829 sctp_setsockopt_maxseg 3 11829 NULL
-+rts51x_read_status_11830 rts51x_read_status 4 11830 NULL
-+shmem_xattr_set_11843 shmem_xattr_set 4 11843 NULL
-+unix_stream_connect_11844 unix_stream_connect 3 11844 NULL
-+ecryptfs_copy_filename_11868 ecryptfs_copy_filename 4 11868 NULL
-+l2cap_chan_send_11878 l2cap_chan_send 3 11878 NULL
-+_l2_alloc_skb_11883 _l2_alloc_skb 1 11883 NULL
-+xstateregs_get_11906 xstateregs_get 4 11906 NULL
-+ti_write_11916 ti_write 4 11916 NULL
-+kmalloc_slab_11917 kmalloc_slab 1 11917 NULL
-+fs_devrw_entry_11924 fs_devrw_entry 3 11924 NULL
-+bitmap_remap_11929 bitmap_remap 5 11929 NULL
-+atomic_sub_return_11939 atomic_sub_return 0-1 11939 NULL
-+dccp_feat_clone_sp_val_11942 dccp_feat_clone_sp_val 3 11942 NULL
-+kvm_set_msr_common_11953 kvm_set_msr_common 3 11953 NULL
-+f1x_swap_interleaved_region_11970 f1x_swap_interleaved_region 0-2 11970 NULL
-+split_node_11976 split_node 0 11976 NULL
-+BeceemFlashBulkRead_11979 BeceemFlashBulkRead 0 11979 NULL
-+atmel_read16_11981 atmel_read16 0 11981 NULL
-+ftdi_elan_total_command_size_12045 ftdi_elan_total_command_size 0 12045 NULL
-+pyra_send_12061 pyra_send 4 12061 NULL
-+ptc_proc_write_12076 ptc_proc_write 3 12076 NULL
-+i915_gem_object_pin_12083 i915_gem_object_pin 0 12083 NULL
-+alloc_bulk_urbs_generic_12127 alloc_bulk_urbs_generic 5 12127 NULL
-+xfs_handle_to_dentry_12135 xfs_handle_to_dentry 3 12135 NULL
-+rawv6_seticmpfilter_12137 rawv6_seticmpfilter 5 12137 NULL
-+generic_file_llseek_12139 generic_file_llseek 2 12139 NULL
-+iwl4965_ucode_tx_stats_read_12143 iwl4965_ucode_tx_stats_read 3 12143 NULL
-+rawsock_recvmsg_12144 rawsock_recvmsg 4 12144 NULL
-+btmrvl_sdio_host_to_card_12152 btmrvl_sdio_host_to_card 3 12152 NULL
-+vmbus_open_12154 vmbus_open 2-3 12154 NULL
-+tt_update_changes_12155 tt_update_changes 3 12155 NULL
-+ddp_make_gl_12179 ddp_make_gl 1 12179 NULL
-+compat_do_arpt_set_ctl_12184 compat_do_arpt_set_ctl 4 12184 NULL
-+ip_generic_getfrag_12187 ip_generic_getfrag 3-4 12187 NULL
-+pair_device_12188 pair_device 4 12188 NULL
-+qt2160_read_block_12198 qt2160_read_block 4 12198 NULL
-+bl_is_sector_init_12199 bl_is_sector_init 2 12199 NULL
-+receive_copy_12216 receive_copy 3 12216 NULL
-+snd_pcm_kernel_ioctl_12219 snd_pcm_kernel_ioctl 0 12219 NULL
-+aat2870_reg_read_file_12221 aat2870_reg_read_file 3 12221 NULL
-+ib_uverbs_unmarshall_recv_12251 ib_uverbs_unmarshall_recv 5 12251 NULL
-+ath_descdma_setup_12257 ath_descdma_setup 5 12257 NULL
-+shash_compat_setkey_12267 shash_compat_setkey 3 12267 NULL
-+add_sctp_bind_addr_12269 add_sctp_bind_addr 3 12269 NULL
-+roccat_common_send_12284 roccat_common_send 4 12284 NULL
-+note_last_dentry_12285 note_last_dentry 3 12285 NULL
-+roundup_to_multiple_of_64_12288 roundup_to_multiple_of_64 0-1 12288 NULL
-+iwm_notif_send_12295 iwm_notif_send 6 12295 NULL
-+__einj_error_trigger_12304 __einj_error_trigger 0 12304 NULL
-+bt_sock_recvmsg_12316 bt_sock_recvmsg 4 12316 NULL
-+alloc_trace_probe_12323 alloc_trace_probe 6 12323 NULL
-+tipc_msg_build_12326 tipc_msg_build 4 12326 NULL
-+pcbit_writecmd_12332 pcbit_writecmd 2 12332 NULL
-+mptctl_ioctl_12355 mptctl_ioctl 2 12355 NULL
-+receive_packet_12367 receive_packet 2 12367 NULL
-+xfs_iext_inline_to_direct_12384 xfs_iext_inline_to_direct 2 12384 NULL
-+btrfs_file_extent_ram_bytes_12391 btrfs_file_extent_ram_bytes 0 12391 NULL nohasharray
-+populate_dir_12391 populate_dir 0 12391 &btrfs_file_extent_ram_bytes_12391
-+gfs2_llseek_12464 gfs2_llseek 2 12464 NULL
-+skb_do_copy_data_nocache_12465 skb_do_copy_data_nocache 5 12465 NULL
-+x25_sendmsg_12487 x25_sendmsg 4 12487 NULL
-+rtllib_auth_challenge_12493 rtllib_auth_challenge 3 12493 NULL
-+nfs_readdir_make_qstr_12509 nfs_readdir_make_qstr 3 12509 NULL
-+qib_alloc_fast_reg_mr_12526 qib_alloc_fast_reg_mr 2 12526 NULL
-+iwl_legacy_dbgfs_rx_statistics_read_12545 iwl_legacy_dbgfs_rx_statistics_read 3 12545 NULL
-+WriteRegs_12569 WriteRegs 0 12569 NULL
-+ceph_osdc_wait_request_12572 ceph_osdc_wait_request 0 12572 NULL
-+hvc_alloc_12579 hvc_alloc 4 12579 NULL
-+pcpu_extend_area_map_12589 pcpu_extend_area_map 2 12589 NULL
-+vhci_put_user_12604 vhci_put_user 4 12604 NULL
-+fc_fcp_frame_alloc_12624 fc_fcp_frame_alloc 2 12624 NULL
-+pn_sendmsg_12640 pn_sendmsg 4 12640 NULL
-+nr_recvmsg_12649 nr_recvmsg 4 12649 NULL
-+ocfs2_read_block_12659 ocfs2_read_block 0 12659 NULL
-+trusted_update_12664 trusted_update 3 12664 NULL
-+sel_read_class_12669 sel_read_class 3 12669 NULL nohasharray
-+sparse_mem_maps_populate_node_12669 sparse_mem_maps_populate_node 4 12669 &sel_read_class_12669
-+ieee80211_if_read_num_buffered_multicast_12716 ieee80211_if_read_num_buffered_multicast 3 12716 NULL
-+inet6_prefix_nlmsg_size_12722 inet6_prefix_nlmsg_size 0 12722 NULL
-+key_rx_spec_read_12736 key_rx_spec_read 3 12736 NULL
-+ieee80211_if_read_dot11MeshMaxRetries_12756 ieee80211_if_read_dot11MeshMaxRetries 3 12756 NULL
-+listxattr_12769 listxattr 3 12769 NULL
-+sctp_ssnmap_init_12772 sctp_ssnmap_init 2-3 12772 NULL
-+ip_ufo_append_data_12775 ip_ufo_append_data 6-7-8 12775 NULL
-+platform_create_bundle_12785 platform_create_bundle 4-6 12785 NULL
-+scsi_adjust_queue_depth_12802 scsi_adjust_queue_depth 3 12802 NULL
-+xfs_inumbers_fmt_12817 xfs_inumbers_fmt 3 12817 NULL
-+TSS_authhmac_12839 TSS_authhmac 3 12839 NULL
-+spidev_sync_12842 spidev_sync 0 12842 NULL
-+spidev_ioctl_12846 spidev_ioctl 2 12846 NULL
-+get_leb_cnt_12892 get_leb_cnt 0-2 12892 NULL
-+get_virtual_node_size_12908 get_virtual_node_size 0 12908 NULL
-+rds_pages_in_vec_12922 rds_pages_in_vec 0 12922 NULL
-+free_tind_blocks_12926 free_tind_blocks 0 12926 NULL
-+iwl_legacy_dbgfs_sram_write_12932 iwl_legacy_dbgfs_sram_write 3 12932 NULL
-+do_inode_permission_12946 do_inode_permission 0 12946 NULL
-+bcsp_prepare_pkt_12961 bcsp_prepare_pkt 3 12961 NULL
-+bm_status_write_12964 bm_status_write 3 12964 NULL
-+sctp_make_chunk_12986 sctp_make_chunk 4 12986 NULL
-+TransmitTcb_12989 TransmitTcb 4 12989 NULL
-+__get_extent_inline_ref_13021 __get_extent_inline_ref 0 13021 NULL
-+subsystem_filter_write_13022 subsystem_filter_write 3 13022 NULL
-+generic_segment_checks_13041 generic_segment_checks 0 13041 NULL
-+ocfs2_write_begin_13045 ocfs2_write_begin 3-4 13045 NULL
-+ctnetlink_timestamp_size_13060 ctnetlink_timestamp_size 0 13060 NULL nohasharray
-+__dn_setsockopt_13060 __dn_setsockopt 5 13060 &ctnetlink_timestamp_size_13060
-+sandybridge_write_fence_reg_13080 sandybridge_write_fence_reg 0 13080 NULL
-+xattr_getsecurity_13090 xattr_getsecurity 0 13090 NULL
-+blk_rq_map_sg_13092 blk_rq_map_sg 0 13092 NULL
-+snd_rme96_playback_copy_13111 snd_rme96_playback_copy 5 13111 NULL
-+snd_pcm_lib_preallocate_pages_for_all_13112 snd_pcm_lib_preallocate_pages_for_all 4 13112 NULL
-+bfad_debugfs_read_13119 bfad_debugfs_read 3 13119 NULL
-+ip_make_skb_13129 ip_make_skb 5-6 13129 NULL
-+blk_update_request_13146 blk_update_request 3 13146 NULL
-+caif_stream_recvmsg_13173 caif_stream_recvmsg 4 13173 NULL
-+pwr_disable_ps_read_13176 pwr_disable_ps_read 3 13176 NULL
-+comedi_read_13199 comedi_read 3 13199 NULL
-+mmc_ext_csd_read_13205 mmc_ext_csd_read 3 13205 NULL
-+svm_msrpm_offset_13220 svm_msrpm_offset 0-1 13220 NULL
-+wait_events_13243 wait_events 0 13243 NULL
-+asix_read_cmd_13245 asix_read_cmd 5 13245 NULL
-+snd_emu10k1_fx8010_tram_setup_13248 snd_emu10k1_fx8010_tram_setup 2 13248 NULL
-+fw_download_code_13249 fw_download_code 3 13249 NULL
-+init_tid_tabs_13252 init_tid_tabs 2-3-4 13252 NULL
-+hostap_80211_get_hdrlen_13255 hostap_80211_get_hdrlen 0 13255 NULL
-+bio_integrity_trim_13259 bio_integrity_trim 3 13259 NULL
-+simple_attr_write_13260 simple_attr_write 3 13260 NULL
-+smctr_process_rx_packet_13270 smctr_process_rx_packet 2 13270 NULL
-+carl9170_rx_13272 carl9170_rx 3 13272 NULL
-+pmcraid_notify_aen_13274 pmcraid_notify_aen 3 13274 NULL
-+lpfc_idiag_mbxacc_get_setup_13282 lpfc_idiag_mbxacc_get_setup 0 13282 NULL
-+platform_device_add_resources_13289 platform_device_add_resources 3 13289 NULL
-+nf_nat_mangle_udp_packet_13321 nf_nat_mangle_udp_packet 5-7 13321 NULL
-+us122l_ctl_msg_13330 us122l_ctl_msg 8 13330 NULL
-+kvm_read_nested_guest_page_13337 kvm_read_nested_guest_page 5 13337 NULL
-+iso_sched_alloc_13377 iso_sched_alloc 1 13377 NULL nohasharray
-+wep_key_not_found_read_13377 wep_key_not_found_read 3 13377 &iso_sched_alloc_13377
-+BcmSetActiveSection_13389 BcmSetActiveSection 0 13389 NULL
-+sky2_receive_13407 sky2_receive 2 13407 NULL
-+encrypted_update_13414 encrypted_update 3 13414 NULL
-+netxen_alloc_sds_rings_13417 netxen_alloc_sds_rings 2 13417 NULL nohasharray
-+i915_gem_execbuffer_sync_rings_13417 i915_gem_execbuffer_sync_rings 0 13417 &netxen_alloc_sds_rings_13417
-+keyring_read_13438 keyring_read 3 13438 NULL
-+sctp_setsockopt_peer_primary_addr_13440 sctp_setsockopt_peer_primary_addr 3 13440 NULL
-+ath6kl_cfg80211_connect_event_13443 ath6kl_cfg80211_connect_event 7-8-9 13443 NULL
-+ocfs2_align_bytes_to_blocks_13512 ocfs2_align_bytes_to_blocks 2 13512 NULL
-+core_status_13515 core_status 4 13515 NULL
-+sctp_tsnmap_mark_13527 sctp_tsnmap_mark 2 13527 NULL
-+bm_init_13529 bm_init 2 13529 NULL
-+usb_hcd_link_urb_to_ep_13560 usb_hcd_link_urb_to_ep 0 13560 NULL
-+read_file_antenna_13574 read_file_antenna 3 13574 NULL
-+cache_write_13589 cache_write 3 13589 NULL
-+mpt_lan_receive_post_turbo_13592 mpt_lan_receive_post_turbo 2 13592 NULL
-+irias_new_octseq_value_13596 irias_new_octseq_value 2 13596 NULL
-+Rd_Indx_13602 Rd_Indx 3-2 13602 NULL
-+wm8994_bulk_write_13615 wm8994_bulk_write 3 13615 NULL
-+pmcraid_get_minor_13619 pmcraid_get_minor 0 13619 NULL
-+iio_device_add_event_sysfs_13627 iio_device_add_event_sysfs 0 13627 NULL
-+packet_snd_13634 packet_snd 3 13634 NULL
-+__qbuf_userptr_13636 __qbuf_userptr 0 13636 NULL
-+blk_msg_write_13655 blk_msg_write 3 13655 NULL
-+nfs_idmap_lookup_id_13665 nfs_idmap_lookup_id 2 13665 NULL
-+cache_downcall_13666 cache_downcall 3 13666 NULL
-+ext3_xattr_list_entries_13682 ext3_xattr_list_entries 0 13682 NULL
-+usb_get_string_13693 usb_get_string 0 13693 NULL
-+cfg80211_testmode_alloc_event_skb_13739 cfg80211_testmode_alloc_event_skb 2 13739 NULL
-+audit_unpack_string_13748 audit_unpack_string 3 13748 NULL
-+fb_sys_read_13778 fb_sys_read 3 13778 NULL
-+CalcMainPLL_13811 CalcMainPLL 0 13811 NULL
-+bat_ogm_aggregate_new_13813 bat_ogm_aggregate_new 2 13813 NULL
-+random_read_13815 random_read 3 13815 NULL
-+mutex_lock_interruptible_nested_13817 mutex_lock_interruptible_nested 0 13817 NULL
-+mtd_do_readoob_13850 mtd_do_readoob 4 13850 NULL
-+evdev_ioctl_compat_13851 evdev_ioctl_compat 2 13851 NULL
-+compat_ip_setsockopt_13870 compat_ip_setsockopt 5 13870 NULL
-+snd_pcm_aio_read_13900 snd_pcm_aio_read 3 13900 NULL
-+qla2x00_get_ctx_sp_13912 qla2x00_get_ctx_sp 3 13912 NULL
-+ext3_xattr_block_get_13936 ext3_xattr_block_get 0 13936 NULL
-+ocfs2_xa_value_truncate_13940 ocfs2_xa_value_truncate 2 13940 NULL
-+iwl_dbgfs_protection_mode_read_13943 iwl_dbgfs_protection_mode_read 3 13943 NULL
-+ieee80211_if_read_min_discovery_timeout_13946 ieee80211_if_read_min_discovery_timeout 3 13946 NULL
-+lpfc_idiag_queacc_read_13950 lpfc_idiag_queacc_read 3 13950 NULL
-+snd_pcm_plug_slave_size_13967 snd_pcm_plug_slave_size 0-2 13967 NULL
-+qcam_read_13977 qcam_read 3 13977 NULL
-+dsp_read_13980 dsp_read 2 13980 NULL
-+dvb_demux_read_13981 dvb_demux_read 3 13981 NULL
-+ieee80211_bss_info_update_13991 ieee80211_bss_info_update 4 13991 NULL
-+create_files_14003 create_files 0 14003 NULL
-+sddr09_write_data_14014 sddr09_write_data 3 14014 NULL
-+btrfs_get_blocks_direct_14016 btrfs_get_blocks_direct 2 14016 NULL
-+_rtl92s_firmware_downloadcode_14021 _rtl92s_firmware_downloadcode 3 14021 NULL
-+dvb_usercopy_14036 dvb_usercopy 2 14036 NULL
-+read_def_modal_eeprom_14041 read_def_modal_eeprom 3 14041 NULL
-+ieee80211_if_fmt_aid_14055 ieee80211_if_fmt_aid 3 14055 NULL
-+utf8_to_utf16le_14057 utf8_to_utf16le 0 14057 NULL
-+sta_agg_status_read_14058 sta_agg_status_read 3 14058 NULL
-+do_tcp_sendpages_14083 do_tcp_sendpages 3-4 14083 NULL
-+do_proc_readlink_14096 do_proc_readlink 3 14096 NULL
-+compat_sys_pselect6_14105 compat_sys_pselect6 1 14105 NULL
-+nlmsg_len_14115 nlmsg_len 0 14115 NULL
-+gsm_dlci_data_14155 gsm_dlci_data 3 14155 NULL
-+print_input_mask_14168 print_input_mask 3-0 14168 NULL
-+ocfs2_xattr_value_truncate_14183 ocfs2_xattr_value_truncate 3 14183 NULL
-+datafab_read_data_14186 datafab_read_data 4 14186 NULL
-+tcp_manip_pkt_14202 tcp_manip_pkt 2 14202 NULL
-+alloc_async_14208 alloc_async 1 14208 NULL
-+ath6kl_regread_write_14220 ath6kl_regread_write 3 14220 NULL
-+sys_kexec_load_14222 sys_kexec_load 2 14222 NULL
-+dma_declare_coherent_memory_14244 dma_declare_coherent_memory 4 14244 NULL
-+snd_soc_hw_bulk_write_raw_14245 snd_soc_hw_bulk_write_raw 4 14245 NULL
-+ext4_journal_restart_14251 ext4_journal_restart 0 14251 NULL
-+ath6kl_connect_event_14267 ath6kl_connect_event 7-8-9 14267 NULL
-+add_numbered_child_14273 add_numbered_child 5 14273 NULL
-+OS_mem_token_alloc_14276 OS_mem_token_alloc 1 14276 NULL
-+em28xx_i2c_eeprom_14280 em28xx_i2c_eeprom 3 14280 NULL
-+snd_seq_oss_readq_new_14283 snd_seq_oss_readq_new 2 14283 NULL
-+audit_send_reply_14292 audit_send_reply 7 14292 NULL
-+rr_status_14293 rr_status 5 14293 NULL
-+read_default_ldt_14302 read_default_ldt 2 14302 NULL
-+i915_gem_object_finish_gpu_14312 i915_gem_object_finish_gpu 0 14312 NULL
-+oo_objects_14319 oo_objects 0 14319 NULL
-+iwl_legacy_dbgfs_interrupt_read_14324 iwl_legacy_dbgfs_interrupt_read 3 14324 NULL
-+p9_client_zc_rpc_14345 p9_client_zc_rpc 7 14345 NULL
-+ep_aio_rwtail_14347 ep_aio_rwtail 6 14347 NULL
-+snd_pcm_lib_readv_14363 snd_pcm_lib_readv 3-0 14363 NULL
-+ath6kl_regdump_read_14393 ath6kl_regdump_read 3 14393 NULL
-+smk_write_onlycap_14400 smk_write_onlycap 3 14400 NULL
-+mtd_concat_create_14416 mtd_concat_create 2 14416 NULL
-+get_kcore_size_14425 get_kcore_size 0 14425 NULL
-+block_size_14443 block_size 0 14443 NULL
-+snd_emu10k1_proc_spdif_status_14457 snd_emu10k1_proc_spdif_status 4-5 14457 NULL
-+udplite_getfrag_14479 udplite_getfrag 3-4 14479 NULL
-+ieee80211_if_read_dot11MeshGateAnnouncementProtocol_14486 ieee80211_if_read_dot11MeshGateAnnouncementProtocol 3 14486 NULL
-+cmd_complete_14502 cmd_complete 5 14502 NULL
-+ocfs2_debug_read_14507 ocfs2_debug_read 3 14507 NULL
-+dataflash_read_user_otp_14536 dataflash_read_user_otp 3-2 14536 NULL nohasharray
-+ep0_write_14536 ep0_write 3 14536 &dataflash_read_user_otp_14536 nohasharray
-+prepare_data_14536 prepare_data 3 14536 &ep0_write_14536
-+l2cap_send_cmd_14548 l2cap_send_cmd 4 14548 NULL
-+picolcd_debug_eeprom_read_14549 picolcd_debug_eeprom_read 3 14549 NULL
-+nfqnl_mangle_14583 nfqnl_mangle 2 14583 NULL
-+idmap_pipe_downcall_14591 idmap_pipe_downcall 3 14591 NULL
-+dbJoin_14644 dbJoin 0 14644 NULL
-+profile_replace_14652 profile_replace 3 14652 NULL
-+min_bytes_needed_14675 min_bytes_needed 0 14675 NULL
-+ieee80211_if_fmt_rc_rateidx_mask_2ghz_14683 ieee80211_if_fmt_rc_rateidx_mask_2ghz 3 14683 NULL
-+u_audio_playback_14709 u_audio_playback 3 14709 NULL
-+vfd_write_14717 vfd_write 3 14717 NULL
-+__blk_end_request_14729 __blk_end_request 3 14729 NULL
-+rh_urb_enqueue_14733 rh_urb_enqueue 0 14733 NULL
-+store_camera_14751 store_camera 4 14751 NULL
-+sta_dev_read_14782 sta_dev_read 3 14782 NULL
-+keys_proc_write_14792 keys_proc_write 3 14792 NULL nohasharray
-+cp_tm1217_read_14792 cp_tm1217_read 3 14792 &keys_proc_write_14792
-+ext4_kvmalloc_14796 ext4_kvmalloc 1 14796 NULL
-+__kfifo_in_14797 __kfifo_in 3-0 14797 NULL
-+nfs_parse_server_name_14800 nfs_parse_server_name 2 14800 NULL
-+snd_als300_gcr_read_14801 snd_als300_gcr_read 0 14801 NULL nohasharray
-+hpet_readl_14801 hpet_readl 0 14801 &snd_als300_gcr_read_14801
-+__i2400ms_rx_get_size_14826 __i2400ms_rx_get_size 0 14826 NULL
-+__mutex_fastpath_lock_retval_14844 __mutex_fastpath_lock_retval 0 14844 NULL
-+__krealloc_14857 __krealloc 2 14857 NULL nohasharray
-+lcd_write_14857 lcd_write 3 14857 &__krealloc_14857
-+get_user_cpu_mask_14861 get_user_cpu_mask 2 14861 NULL
-+acpi_os_allocate_14892 acpi_os_allocate 1 14892 NULL
-+krealloc_14908 krealloc 2 14908 NULL
-+__arch_hweight64_14923 __arch_hweight64 0 14923 NULL
-+store_sys_wmi_14934 store_sys_wmi 4 14934 NULL
-+ocfs2_expand_nonsparse_inode_14936 ocfs2_expand_nonsparse_inode 3-4 14936 NULL
-+queue_cnt_14951 queue_cnt 0 14951 NULL
-+unix_dgram_recvmsg_14952 unix_dgram_recvmsg 4 14952 NULL
-+videobuf_read_stream_14956 videobuf_read_stream 3 14956 NULL
-+help_14971 help 4 14971 NULL
-+mce_flush_rx_buffer_14976 mce_flush_rx_buffer 2 14976 NULL
-+setkey_14987 setkey 3 14987 NULL
-+store_touchpad_15003 store_touchpad 4 15003 NULL
-+blk_integrity_tuple_size_15027 blk_integrity_tuple_size 0 15027 NULL
-+store_lslvl_15059 store_lslvl 4 15059 NULL
-+nfs4_write_cached_acl_15070 nfs4_write_cached_acl 4 15070 NULL
-+ntfs_copy_from_user_15072 ntfs_copy_from_user 3-5-0 15072 NULL
-+pppoe_recvmsg_15073 pppoe_recvmsg 4 15073 NULL
-+hex_dump_to_buffer_15121 hex_dump_to_buffer 6 15121 NULL
-+start_port_15124 start_port 0 15124 NULL
-+ipwireless_ppp_mru_15153 ipwireless_ppp_mru 0 15153 NULL
-+iscsi_create_endpoint_15193 iscsi_create_endpoint 1 15193 NULL
-+bfad_debugfs_write_regrd_15218 bfad_debugfs_write_regrd 3 15218 NULL
-+nlmsg_total_size_15230 nlmsg_total_size 0-1 15230 NULL
-+variax_alloc_sysex_buffer_15237 variax_alloc_sysex_buffer 3 15237 NULL
-+iwl_dbgfs_sram_write_15239 iwl_dbgfs_sram_write 3 15239 NULL
-+simple_strtol_15273 simple_strtol 0 15273 NULL
-+fw_realloc_buffer_15280 fw_realloc_buffer 2 15280 NULL
-+sys_connect_15291 sys_connect 3 15291 NULL
-+fcoe_ctlr_send_keep_alive_15308 fcoe_ctlr_send_keep_alive 3 15308 NULL
-+__ocfs2_remove_xattr_range_15330 __ocfs2_remove_xattr_range 4-3-5 15330 NULL
-+ioread16_15342 ioread16 0 15342 NULL
-+alloc_ring_15345 alloc_ring 2-4 15345 NULL
-+acpi_ut_create_string_object_15360 acpi_ut_create_string_object 1 15360 NULL
-+compat_sys_process_vm_readv_15374 compat_sys_process_vm_readv 3-5 15374 NULL
-+alloc_fddidev_15382 alloc_fddidev 1 15382 NULL
-+get_modalias_15406 get_modalias 2 15406 NULL
-+__videobuf_copy_to_user_15423 __videobuf_copy_to_user 4-0 15423 NULL
-+tcp_mtu_to_mss_15438 tcp_mtu_to_mss 0-2 15438 NULL
-+hpsa_change_queue_depth_15449 hpsa_change_queue_depth 2 15449 NULL
-+iwl_legacy_dbgfs_wd_timeout_write_15478 iwl_legacy_dbgfs_wd_timeout_write 3 15478 NULL
-+zd_chip_is_zd1211b_15518 zd_chip_is_zd1211b 0 15518 NULL
-+ifx_spi_write_15531 ifx_spi_write 3 15531 NULL
-+p9_check_zc_errors_15534 p9_check_zc_errors 4 15534 NULL
-+ql_process_mac_rx_page_15543 ql_process_mac_rx_page 4 15543 NULL
-+xfrm_state_mtu_15548 xfrm_state_mtu 0-2 15548 NULL
-+mlx4_buf_alloc_15572 mlx4_buf_alloc 2 15572 NULL
-+persistent_status_15574 persistent_status 4 15574 NULL
-+bnx2fc_process_unsol_compl_15576 bnx2fc_process_unsol_compl 2 15576 NULL
-+vme_user_write_15587 vme_user_write 3 15587 NULL
-+ocfs2_truncate_rec_15595 ocfs2_truncate_rec 7 15595 NULL
-+get_event_length_15598 get_event_length 0 15598 NULL
-+compat_fillonedir_15620 compat_fillonedir 3 15620 NULL
-+dsp_cmx_send_member_15625 dsp_cmx_send_member 2 15625 NULL
-+proc_loginuid_read_15631 proc_loginuid_read 3 15631 NULL
-+tomoyo_scan_bprm_15642 tomoyo_scan_bprm 2-4 15642 NULL
-+joydev_handle_JSIOCSBTNMAP_15643 joydev_handle_JSIOCSBTNMAP 3 15643 NULL
-+xsd_read_15653 xsd_read 3 15653 NULL
-+unix_bind_15668 unix_bind 3 15668 NULL
-+dm_read_15674 dm_read 3 15674 NULL
-+i915_gem_object_set_to_cpu_domain_15705 i915_gem_object_set_to_cpu_domain 0 15705 NULL
-+inet6_if_nlmsg_size_15711 inet6_if_nlmsg_size 0 15711 NULL
-+ocfs2_split_tree_15716 ocfs2_split_tree 5 15716 NULL
-+HiSax_readstatus_15752 HiSax_readstatus 2 15752 NULL
-+sk_wmem_schedule_15759 sk_wmem_schedule 2 15759 NULL
-+smk_read_direct_15803 smk_read_direct 3 15803 NULL
-+gnttab_expand_15817 gnttab_expand 1 15817 NULL
-+afs_proc_rootcell_write_15822 afs_proc_rootcell_write 3 15822 NULL
-+table_size_15851 table_size 0-1-2 15851 NULL
-+ubi_io_write_15870 ubi_io_write 0 15870 NULL nohasharray
-+media_entity_init_15870 media_entity_init 2-4 15870 &ubi_io_write_15870
-+ddr_init_15874 ddr_init 0 15874 NULL
-+__mptctl_ioctl_15875 __mptctl_ioctl 2 15875 NULL
-+nfs_map_group_to_gid_15892 nfs_map_group_to_gid 3 15892 NULL
-+native_read_msr_15905 native_read_msr 0 15905 NULL
-+parse_audio_stream_data_15937 parse_audio_stream_data 3 15937 NULL
-+power_read_15939 power_read 3 15939 NULL
-+lpfc_idiag_drbacc_read_15948 lpfc_idiag_drbacc_read 3 15948 NULL nohasharray
-+i2c_write_15948 i2c_write 0 15948 &lpfc_idiag_drbacc_read_15948
-+snd_pcm_lib_read_transfer_15952 snd_pcm_lib_read_transfer 5-2-4 15952 NULL
-+calculate_max_size_15977 calculate_max_size 0 15977 NULL
-+get_entry_16003 get_entry 4 16003 NULL
-+viafb_vt1636_proc_write_16018 viafb_vt1636_proc_write 3 16018 NULL
-+got_frame_16028 got_frame 2 16028 NULL
-+dccp_recvmsg_16056 dccp_recvmsg 4 16056 NULL
-+snd_sgbuf_aligned_pages_16063 snd_sgbuf_aligned_pages 0-1 16063 NULL
-+isr_tx_exch_complete_read_16103 isr_tx_exch_complete_read 3 16103 NULL
-+isr_hw_pm_mode_changes_read_16110 isr_hw_pm_mode_changes_read 3 16110 NULL nohasharray
-+dma_tx_requested_read_16110 dma_tx_requested_read 3 16110 &isr_hw_pm_mode_changes_read_16110
-+rd_mem_16117 rd_mem 0 16117 NULL
-+snd_dma_pointer_16126 snd_dma_pointer 0-2 16126 NULL
-+compat_sys_select_16131 compat_sys_select 1 16131 NULL
-+fsm_init_16134 fsm_init 2 16134 NULL
-+hysdn_rx_netpkt_16136 hysdn_rx_netpkt 3 16136 NULL
-+ext4_xattr_block_get_16148 ext4_xattr_block_get 0 16148 NULL
-+cipso_v4_map_cat_rng_hton_16203 cipso_v4_map_cat_rng_hton 0 16203 NULL
-+create_table_16213 create_table 2 16213 NULL
-+atomic_read_file_16227 atomic_read_file 3 16227 NULL
-+BcmGetSectionValStartOffset_16235 BcmGetSectionValStartOffset 0 16235 NULL
-+btrfs_dev_extent_chunk_offset_16247 btrfs_dev_extent_chunk_offset 0 16247 NULL
-+mark_written_sectors_16262 mark_written_sectors 2 16262 NULL
-+reiserfs_acl_count_16265 reiserfs_acl_count 0-1 16265 NULL
-+ocfs2_xattr_bucket_value_truncate_16279 ocfs2_xattr_bucket_value_truncate 4 16279 NULL
-+nand_bch_init_16280 nand_bch_init 3-2 16280 NULL nohasharray
-+drbd_setsockopt_16280 drbd_setsockopt 5 16280 &nand_bch_init_16280
-+account_16283 account 0-4-2 16283 NULL
-+jumpshot_read_data_16287 jumpshot_read_data 4 16287 NULL
-+stk_allocate_buffers_16291 stk_allocate_buffers 2 16291 NULL
-+rsc_mgr_init_16299 rsc_mgr_init 3 16299 NULL
-+sst_allocate_decode_buf_16349 sst_allocate_decode_buf 3 16349 NULL
-+total_ps_buffered_read_16365 total_ps_buffered_read 3 16365 NULL
-+iscsi_tcp_conn_setup_16376 iscsi_tcp_conn_setup 2 16376 NULL
-+nl80211_send_unprot_deauth_16378 nl80211_send_unprot_deauth 4 16378 NULL
-+scsi_nl_send_vendor_msg_16394 scsi_nl_send_vendor_msg 5 16394 NULL
-+alloc_trdev_16399 alloc_trdev 1 16399 NULL
-+ieee80211_if_read_tsf_16420 ieee80211_if_read_tsf 3 16420 NULL
-+rxrpc_server_keyring_16431 rxrpc_server_keyring 3 16431 NULL
-+calculate_inocache_hashsize_16449 calculate_inocache_hashsize 0-1 16449 NULL
-+netlink_change_ngroups_16457 netlink_change_ngroups 2 16457 NULL
-+sock_wmalloc_16472 sock_wmalloc 2 16472 NULL
-+ab8500_val_write_16473 ab8500_val_write 3 16473 NULL
-+tracing_readme_read_16493 tracing_readme_read 3 16493 NULL
-+start_this_handle_16519 start_this_handle 0 16519 NULL
-+snd_interval_max_16529 snd_interval_max 0 16529 NULL
-+lpfc_debugfs_read_16566 lpfc_debugfs_read 3 16566 NULL
-+agp_allocate_memory_wrap_16576 agp_allocate_memory_wrap 1 16576 NULL
-+__cfg80211_testmode_alloc_skb_16611 __cfg80211_testmode_alloc_skb 2 16611 NULL
-+packet_recv_error_16669 packet_recv_error 3 16669 NULL
-+dlm_new_lockspace_16688 dlm_new_lockspace 2 16688 NULL
-+calc_layout_16690 calc_layout 4 16690 NULL
-+em28xx_v4l2_read_16701 em28xx_v4l2_read 3 16701 NULL
-+iscsi_recv_pdu_16755 iscsi_recv_pdu 4 16755 NULL
-+arcmsr_adjust_disk_queue_depth_16756 arcmsr_adjust_disk_queue_depth 2 16756 NULL
-+blk_rq_map_user_iov_16772 blk_rq_map_user_iov 5 16772 NULL
-+i2o_parm_issue_16790 i2o_parm_issue 0 16790 NULL
-+get_server_iovec_16804 get_server_iovec 2 16804 NULL
-+tipc_send2name_16809 tipc_send2name 6 16809 NULL
-+drm_malloc_ab_16831 drm_malloc_ab 1-2 16831 NULL nohasharray
-+mled_proc_write_16831 mled_proc_write 3 16831 &drm_malloc_ab_16831
-+scsi_mode_sense_16835 scsi_mode_sense 5 16835 NULL
-+hfsplus_min_io_size_16859 hfsplus_min_io_size 0 16859 NULL
-+alloc_idx_lebs_16872 alloc_idx_lebs 2 16872 NULL
-+carl9170_debugfs_ampdu_state_read_16873 carl9170_debugfs_ampdu_state_read 3 16873 NULL
-+st_write_16874 st_write 3 16874 NULL
-+__kfifo_peek_n_16877 __kfifo_peek_n 0 16877 NULL
-+ext4_ext_zeroout_16895 ext4_ext_zeroout 0 16895 NULL
-+mwifiex_update_curr_bss_params_16908 mwifiex_update_curr_bss_params 5 16908 NULL
-+ivtv_v4l2_ioctl_16915 ivtv_v4l2_ioctl 2 16915 NULL
-+snd_gf1_mem_proc_dump_16926 snd_gf1_mem_proc_dump 5 16926 NULL nohasharray
-+psb_unlocked_ioctl_16926 psb_unlocked_ioctl 2 16926 &snd_gf1_mem_proc_dump_16926
-+paranoid_check_vid_hdr_16932 paranoid_check_vid_hdr 0 16932 NULL
-+ip_append_data_16942 ip_append_data 5-6 16942 NULL
-+_sp2d_alloc_16944 _sp2d_alloc 1-2-3 16944 NULL
-+squashfs_read_table_16945 squashfs_read_table 3 16945 NULL
-+cfg80211_send_unprot_disassoc_16951 cfg80211_send_unprot_disassoc 3 16951 NULL
-+wrm_16966 wrm 0 16966 NULL
-+keyctl_instantiate_key_iov_16969 keyctl_instantiate_key_iov 3 16969 NULL
-+ceph_read_dir_17005 ceph_read_dir 3 17005 NULL
-+copy_counters_to_user_17027 copy_counters_to_user 5 17027 NULL nohasharray
-+iwm_if_alloc_17027 iwm_if_alloc 1 17027 &copy_counters_to_user_17027
-+jffs2_trusted_setxattr_17048 jffs2_trusted_setxattr 4 17048 NULL
-+__arch_hweight32_17060 __arch_hweight32 0 17060 NULL
-+sddr55_read_data_17072 sddr55_read_data 4 17072 NULL
-+dvb_dvr_read_17073 dvb_dvr_read 3 17073 NULL
-+simple_transaction_read_17076 simple_transaction_read 3 17076 NULL
-+carl9170_debugfs_mem_usage_read_17084 carl9170_debugfs_mem_usage_read 3 17084 NULL
-+entry_length_17093 entry_length 0 17093 NULL
-+sys_preadv_17100 sys_preadv 3 17100 NULL
-+write_mem_17114 write_mem 3 17114 NULL
-+pvr2_hdw_state_report_17121 pvr2_hdw_state_report 3 17121 NULL
-+mwifiex_get_common_rates_17131 mwifiex_get_common_rates 3 17131 NULL
-+wrmaltWithLock_17139 wrmaltWithLock 0 17139 NULL
-+jumpshot_write_data_17151 jumpshot_write_data 4 17151 NULL
-+befs_nls2utf_17163 befs_nls2utf 3 17163 NULL
-+pm860x_page_bulk_read_17174 pm860x_page_bulk_read 3 17174 NULL
-+access_remote_vm_17189 access_remote_vm 0 17189 NULL nohasharray
-+iwl_dbgfs_txfifo_flush_write_17189 iwl_dbgfs_txfifo_flush_write 3 17189 &access_remote_vm_17189
-+iscsit_find_cmd_from_itt_or_dump_17194 iscsit_find_cmd_from_itt_or_dump 3 17194 NULL nohasharray
-+driver_state_read_17194 driver_state_read 3 17194 &iscsit_find_cmd_from_itt_or_dump_17194
-+dn_recvmsg_17213 dn_recvmsg 4 17213 NULL
-+ms_rw_17220 ms_rw 3-4 17220 NULL
-+__be16_to_cpup_17261 __be16_to_cpup 0 17261 NULL
-+alloc_ep_17269 alloc_ep 1 17269 NULL
-+pg_read_17276 pg_read 3 17276 NULL
-+raw_recvmsg_17277 raw_recvmsg 4 17277 NULL
-+neigh_hash_grow_17283 neigh_hash_grow 2 17283 NULL
-+minstrel_stats_read_17290 minstrel_stats_read 3 17290 NULL
-+skb_pad_17302 skb_pad 2 17302 NULL
-+mb_cache_create_17307 mb_cache_create 2 17307 NULL
-+iwm_umac_set_config_var_17320 iwm_umac_set_config_var 4 17320 NULL
-+ata_host_alloc_pinfo_17325 ata_host_alloc_pinfo 3 17325 NULL
-+lpfc_debugfs_dif_err_write_17424 lpfc_debugfs_dif_err_write 3 17424 NULL
-+compat_sys_ppoll_17430 compat_sys_ppoll 2 17430 NULL
-+sta_connected_time_read_17435 sta_connected_time_read 3 17435 NULL
-+snd_hammerfall_get_buffer_17441 snd_hammerfall_get_buffer 3 17441 NULL
-+nla_get_u32_17455 nla_get_u32 0 17455 NULL
-+__ref_totlen_17461 __ref_totlen 0 17461 NULL nohasharray
-+__send_request_17461 __send_request 0 17461 &__ref_totlen_17461
-+probe_kernel_write_17481 probe_kernel_write 3 17481 NULL
-+TSS_rawhmac_17486 TSS_rawhmac 3 17486 NULL
-+lbs_highrssi_write_17515 lbs_highrssi_write 3 17515 NULL
-+restore_i387_fxsave_17528 restore_i387_fxsave 2 17528 NULL
-+__cfg80211_roamed_17529 __cfg80211_roamed 5-7 17529 NULL
-+__copy_to_user_17551 __copy_to_user 3-0 17551 NULL
-+copy_from_user_17559 copy_from_user 3-0 17559 NULL
-+acpi_ut_create_package_object_17594 acpi_ut_create_package_object 1 17594 NULL
-+neigh_hash_alloc_17595 neigh_hash_alloc 1 17595 NULL
-+rts51x_write_mem_17598 rts51x_write_mem 4 17598 NULL
-+brcmf_process_nvram_vars_17601 brcmf_process_nvram_vars 0 17601 NULL nohasharray
-+iwl_dump_nic_event_log_17601 iwl_dump_nic_event_log 0 17601 &brcmf_process_nvram_vars_17601
-+__inode_info_17603 __inode_info 0 17603 NULL
-+osst_execute_17607 osst_execute 7-6 17607 NULL
-+ocfs2_mark_extent_written_17615 ocfs2_mark_extent_written 6 17615 NULL
-+dma_map_page_17628 dma_map_page 0 17628 NULL
-+packet_setsockopt_17662 packet_setsockopt 5 17662 NULL nohasharray
-+ubi_io_read_data_17662 ubi_io_read_data 0 17662 &packet_setsockopt_17662
-+dsp_tone_hw_message_17678 dsp_tone_hw_message 3 17678 NULL
-+pwr_enable_ps_read_17686 pwr_enable_ps_read 3 17686 NULL
-+venus_rename_17707 venus_rename 4-5 17707 NULL
-+intel_wait_ring_buffer_17727 intel_wait_ring_buffer 0 17727 NULL
-+exofs_read_lookup_dev_table_17733 exofs_read_lookup_dev_table 3 17733 NULL
-+sctpprobe_read_17741 sctpprobe_read 3 17741 NULL
-+gnet_stats_copy_app_17821 gnet_stats_copy_app 3 17821 NULL
-+cipso_v4_gentag_rbm_17836 cipso_v4_gentag_rbm 0 17836 NULL
-+count_leafs_17842 count_leafs 0 17842 NULL
-+tcp_left_out_17860 tcp_left_out 0 17860 NULL
-+sisusb_send_bulk_msg_17864 sisusb_send_bulk_msg 3 17864 NULL
-+alloc_sja1000dev_17868 alloc_sja1000dev 1 17868 NULL
-+ray_cs_essid_proc_write_17875 ray_cs_essid_proc_write 3 17875 NULL
-+orinoco_set_key_17878 orinoco_set_key 5-7 17878 NULL
-+init_per_cpu_17880 init_per_cpu 1 17880 NULL
-+ieee80211_if_fmt_dot11MeshMaxPeerLinks_17883 ieee80211_if_fmt_dot11MeshMaxPeerLinks 3 17883 NULL
-+compat_sys_pwritev_17886 compat_sys_pwritev 3 17886 NULL
-+ieee80211_if_fmt_dot11MeshHWMPRootMode_17890 ieee80211_if_fmt_dot11MeshHWMPRootMode 3 17890 NULL
-+ocfs2_clusters_to_blocks_17896 ocfs2_clusters_to_blocks 0-2 17896 NULL
-+dccp_feat_register_sp_17914 dccp_feat_register_sp 5 17914 NULL
-+xfs_buf_associate_memory_17915 xfs_buf_associate_memory 3 17915 NULL
-+srp_iu_pool_alloc_17920 srp_iu_pool_alloc 2 17920 NULL
-+scsi_bufflen_17933 scsi_bufflen 0 17933 NULL
-+beacon_interval_write_17952 beacon_interval_write 3 17952 NULL
-+calc_nr_buckets_17976 calc_nr_buckets 0 17976 NULL
-+smk_write_cipso_17989 smk_write_cipso 3 17989 NULL
-+pvr2_v4l2_read_18006 pvr2_v4l2_read 3 18006 NULL
-+alloc_rx_desc_ring_18016 alloc_rx_desc_ring 2 18016 NULL
-+fill_read_18019 fill_read 0 18019 NULL
-+cryptd_alloc_instance_18048 cryptd_alloc_instance 2-3 18048 NULL
-+ddebug_proc_write_18055 ddebug_proc_write 3 18055 NULL
-+fpregs_get_18066 fpregs_get 4 18066 NULL
-+packet_came_18072 packet_came 3 18072 NULL
-+kvm_read_guest_page_18074 kvm_read_guest_page 5 18074 NULL
-+netlink_kernel_create_18110 netlink_kernel_create 3 18110 NULL
-+dfs_file_read_18116 dfs_file_read 3 18116 NULL
-+svc_getnl_18120 svc_getnl 0 18120 NULL
-+selinux_inode_setsecurity_18148 selinux_inode_setsecurity 4 18148 NULL
-+_has_tag_18169 _has_tag 2 18169 NULL
-+pccard_store_cis_18176 pccard_store_cis 6 18176 NULL
-+cfpkt_create_18197 cfpkt_create 1 18197 NULL
-+orinoco_add_extscan_result_18207 orinoco_add_extscan_result 3 18207 NULL
-+gsm_control_message_18209 gsm_control_message 4 18209 NULL
-+do_ipv6_setsockopt_18215 do_ipv6_setsockopt 5 18215 NULL
-+koneplus_send_18226 koneplus_send 4 18226 NULL
-+gnttab_alloc_grant_references_18240 gnttab_alloc_grant_references 1 18240 NULL
-+rfcomm_sock_setsockopt_18254 rfcomm_sock_setsockopt 5 18254 NULL
-+__sysfs_add_one_18258 __sysfs_add_one 0 18258 NULL
-+qdisc_class_hash_alloc_18262 qdisc_class_hash_alloc 1 18262 NULL
-+gfs2_alloc_sort_buffer_18275 gfs2_alloc_sort_buffer 1 18275 NULL
-+alloc_ring_18278 alloc_ring 2-4 18278 NULL
-+mmc_send_bus_test_18285 mmc_send_bus_test 4 18285 NULL
-+um_idi_write_18293 um_idi_write 3 18293 NULL
-+ip6ip6_err_18308 ip6ip6_err 5 18308 NULL
-+vga_r_18310 vga_r 0 18310 NULL
-+alloc_and_copy_string_18321 alloc_and_copy_string 2 18321 NULL
-+ecryptfs_send_message_18322 ecryptfs_send_message 2 18322 NULL
-+bio_integrity_advance_18324 bio_integrity_advance 2 18324 NULL
-+lcd_proc_write_18351 lcd_proc_write 3 18351 NULL
-+pwr_power_save_off_read_18355 pwr_power_save_off_read 3 18355 NULL
-+xlbd_reserve_minors_18365 xlbd_reserve_minors 1-2 18365 NULL
-+ep_io_18367 ep_io 0 18367 NULL
-+crystalhd_user_data_18407 crystalhd_user_data 3 18407 NULL
-+snd_hda_get_connections_18437 snd_hda_get_connections 0 18437 NULL
-+fuse_perform_write_18457 fuse_perform_write 4 18457 NULL
-+regset_tls_set_18459 regset_tls_set 4 18459 NULL
-+udpv6_setsockopt_18487 udpv6_setsockopt 5 18487 NULL nohasharray
-+write_file_tx_chainmask_18487 write_file_tx_chainmask 3 18487 &udpv6_setsockopt_18487
-+__copy_user_zeroing_intel_18510 __copy_user_zeroing_intel 0-3 18510 NULL
-+snd_vx_inb_18514 snd_vx_inb 0 18514 NULL
-+snd_gus_dram_poke_18525 snd_gus_dram_poke 4 18525 NULL
-+seq_copy_in_user_18543 seq_copy_in_user 3 18543 NULL
-+sas_change_queue_depth_18555 sas_change_queue_depth 2 18555 NULL
-+vb2_streamon_18562 vb2_streamon 0 18562 NULL
-+debug_output_18575 debug_output 3 18575 NULL
-+__netdev_alloc_skb_18595 __netdev_alloc_skb 2 18595 NULL
-+filemap_fdatawait_range_18600 filemap_fdatawait_range 0 18600 NULL nohasharray
-+slabinfo_write_18600 slabinfo_write 3 18600 &filemap_fdatawait_range_18600
-+iowarrior_write_18604 iowarrior_write 3 18604 NULL
-+from_buffer_18625 from_buffer 3 18625 NULL
-+f1x_map_sysaddr_to_csrow_18628 f1x_map_sysaddr_to_csrow 2 18628 NULL
-+cfg80211_send_rx_assoc_18638 cfg80211_send_rx_assoc 3 18638 NULL
-+snd_pcm_oss_write3_18657 snd_pcm_oss_write3 0-3 18657 NULL
-+xfs_iext_insert_18667 xfs_iext_insert 3 18667 NULL nohasharray
-+edge_tty_recv_18667 edge_tty_recv 4 18667 &xfs_iext_insert_18667
-+iwl_dbgfs_rx_handlers_read_18708 iwl_dbgfs_rx_handlers_read 3 18708 NULL
-+ceph_alloc_page_vector_18710 ceph_alloc_page_vector 1 18710 NULL
-+ocfs2_trim_extent_18711 ocfs2_trim_extent 3-4 18711 NULL
-+blk_rq_bytes_18715 blk_rq_bytes 0 18715 NULL
-+snd_als4k_gcr_read_addr_18741 snd_als4k_gcr_read_addr 0 18741 NULL
-+o2hb_debug_create_18744 o2hb_debug_create 4 18744 NULL
-+__erst_read_to_erange_from_nvram_18748 __erst_read_to_erange_from_nvram 0 18748 NULL
-+wep_packets_read_18751 wep_packets_read 3 18751 NULL
-+read_file_dump_nfcal_18766 read_file_dump_nfcal 3 18766 NULL
-+ffs_epfile_read_18775 ffs_epfile_read 3 18775 NULL
-+alloc_fcdev_18780 alloc_fcdev 1 18780 NULL
-+sd_write_data_18803 sd_write_data 9 18803 NULL
-+ieee80211_auth_challenge_18810 ieee80211_auth_challenge 3 18810 NULL
-+iio_allocate_device_18821 iio_allocate_device 1 18821 NULL
-+sys_modify_ldt_18824 sys_modify_ldt 3 18824 NULL
-+mtf_test_write_18844 mtf_test_write 3 18844 NULL
-+drm_ht_create_18853 drm_ht_create 2 18853 NULL
-+sctp_setsockopt_events_18862 sctp_setsockopt_events 3 18862 NULL
-+ieee80211_if_read_element_ttl_18869 ieee80211_if_read_element_ttl 3 18869 NULL
-+xlog_find_verify_log_record_18870 xlog_find_verify_log_record 2 18870 NULL
-+ceph_setxattr_18913 ceph_setxattr 4 18913 NULL
-+snapshot_write_next_18937 snapshot_write_next 0 18937 NULL
-+sctp_tsnmap_num_gabs_18952 sctp_tsnmap_num_gabs 0 18952 NULL
-+fdb_nlmsg_size_18957 fdb_nlmsg_size 0 18957 NULL
-+__nla_reserve_18974 __nla_reserve 3 18974 NULL
-+alc_auto_create_extra_outs_18975 alc_auto_create_extra_outs 2 18975 NULL
-+layout_in_gaps_19006 layout_in_gaps 2 19006 NULL
-+huge_page_size_19008 huge_page_size 0 19008 NULL
-+revalidate_19043 revalidate 2 19043 NULL
-+drm_fb_helper_init_19044 drm_fb_helper_init 3-4 19044 NULL
-+afs_vnode_store_data_19048 afs_vnode_store_data 2-3-4-5 19048 NULL
-+create_gpadl_header_19064 create_gpadl_header 2 19064 NULL
-+ieee80211_key_alloc_19065 ieee80211_key_alloc 3 19065 NULL
-+copy_and_check_19089 copy_and_check 3 19089 NULL
-+sys_process_vm_readv_19090 sys_process_vm_readv 3-5 19090 NULL
-+sta_last_seq_ctrl_read_19106 sta_last_seq_ctrl_read 3 19106 NULL
-+cifs_readv_from_socket_19109 cifs_readv_from_socket 3 19109 NULL
-+skb_gro_offset_19123 skb_gro_offset 0 19123 NULL
-+snd_als4k_iobase_readl_19136 snd_als4k_iobase_readl 0 19136 NULL
-+alloc_irdadev_19140 alloc_irdadev 1 19140 NULL
-+iwl_dbgfs_reply_tx_error_read_19205 iwl_dbgfs_reply_tx_error_read 3 19205 NULL
-+vmw_unlocked_ioctl_19212 vmw_unlocked_ioctl 2 19212 NULL
-+__copy_to_user_inatomic_19214 __copy_to_user_inatomic 3-0 19214 NULL
-+dev_counters_read_19216 dev_counters_read 3 19216 NULL
-+snd_mask_max_19224 snd_mask_max 0 19224 NULL
-+qc_capture_19298 qc_capture 3 19298 NULL
-+ocfs2_prepare_inode_for_refcount_19303 ocfs2_prepare_inode_for_refcount 3-4 19303 NULL
-+event_tx_stuck_read_19305 event_tx_stuck_read 3 19305 NULL
-+debug_read_19322 debug_read 3 19322 NULL
-+cfg80211_inform_bss_19332 cfg80211_inform_bss 8 19332 NULL nohasharray
-+lbs_host_sleep_write_19332 lbs_host_sleep_write 3 19332 &cfg80211_inform_bss_19332
-+firmware_data_write_19360 firmware_data_write 6-5 19360 NULL
-+read_zero_19366 read_zero 3 19366 NULL
-+interpret_user_input_19393 interpret_user_input 2 19393 NULL
-+get_n_events_by_type_19401 get_n_events_by_type 0 19401 NULL
-+pep_recvmsg_19402 pep_recvmsg 4 19402 NULL
-+dvbdmx_write_19423 dvbdmx_write 3 19423 NULL
-+xfrm_alg_auth_len_19454 xfrm_alg_auth_len 0 19454 NULL
-+gnet_stats_copy_19458 gnet_stats_copy 4 19458 NULL
-+sky2_read16_19475 sky2_read16 0 19475 NULL
-+refill_pool_19477 refill_pool 2 19477 NULL
-+efivar_create_sysfs_entry_19485 efivar_create_sysfs_entry 2 19485 NULL
-+__read_status_pciv2_19492 __read_status_pciv2 0 19492 NULL
-+kstrtoll_from_user_19500 kstrtoll_from_user 2 19500 NULL
-+v4l2_event_subscribe_19510 v4l2_event_subscribe 3 19510 NULL
-+skb_realloc_headroom_19516 skb_realloc_headroom 2 19516 NULL
-+atm_alloc_charge_19517 atm_alloc_charge 2 19517 NULL nohasharray
-+dev_alloc_skb_19517 dev_alloc_skb 1 19517 &atm_alloc_charge_19517
-+apei_exec_pre_map_gars_19529 apei_exec_pre_map_gars 0 19529 NULL
-+ocfs2_control_message_19564 ocfs2_control_message 3 19564 NULL
-+ieee80211_if_read_tkip_mic_test_19565 ieee80211_if_read_tkip_mic_test 3 19565 NULL
-+nfsd_read_19568 nfsd_read 5 19568 NULL
-+cgroup_read_s64_19570 cgroup_read_s64 5 19570 NULL
-+bm_status_read_19583 bm_status_read 3 19583 NULL
-+load_xattr_datum_19594 load_xattr_datum 0 19594 NULL
-+buffRdbkVerify_19644 buffRdbkVerify 0 19644 NULL
-+LoadBitmap_19658 LoadBitmap 2 19658 NULL
-+rbd_snap_add_19678 rbd_snap_add 4 19678 NULL
-+delay_status_19685 delay_status 4 19685 NULL
-+read_reg_19723 read_reg 0 19723 NULL
-+memcpy_toiovecend_19736 memcpy_toiovecend 4-3 19736 NULL
-+snd_es1968_get_dma_ptr_19747 snd_es1968_get_dma_ptr 0 19747 NULL
-+p9_client_read_19750 p9_client_read 5-0 19750 NULL
-+pnpbios_proc_write_19758 pnpbios_proc_write 3 19758 NULL
-+jffs2_acl_from_medium_19762 jffs2_acl_from_medium 2 19762 NULL
-+__set_print_fmt_19776 __set_print_fmt 0 19776 NULL
-+saa7146_vmalloc_build_pgtable_19780 saa7146_vmalloc_build_pgtable 2 19780 NULL
-+madgemc_sifreadw_19811 madgemc_sifreadw 0 19811 NULL
-+irda_setsockopt_19824 irda_setsockopt 5 19824 NULL
-+vfs_getxattr_19832 vfs_getxattr 0 19832 NULL
-+security_context_to_sid_19839 security_context_to_sid 2 19839 NULL
-+cfg80211_mlme_register_mgmt_19852 cfg80211_mlme_register_mgmt 5 19852 NULL
-+__nla_put_19857 __nla_put 3 19857 NULL
-+aes_decrypt_interrupt_read_19910 aes_decrypt_interrupt_read 3 19910 NULL
-+ps_upsd_max_apturn_read_19918 ps_upsd_max_apturn_read 3 19918 NULL
-+cgroup_task_count_19930 cgroup_task_count 0 19930 NULL
-+iwl_dbgfs_rx_queue_read_19943 iwl_dbgfs_rx_queue_read 3 19943 NULL
-+ax25_send_frame_19964 ax25_send_frame 2 19964 NULL
-+attach_hdlc_protocol_19986 attach_hdlc_protocol 3 19986 NULL
-+ip_send_reply_19987 ip_send_reply 5 19987 NULL
-+diva_um_idi_read_20003 diva_um_idi_read 0 20003 NULL
-+alloc_ieee80211_20063 alloc_ieee80211 1 20063 NULL
-+rawv6_sendmsg_20080 rawv6_sendmsg 4 20080 NULL
-+fuse_conn_limit_read_20084 fuse_conn_limit_read 3 20084 NULL
-+aat2870_reg_write_file_20086 aat2870_reg_write_file 3 20086 NULL
-+qla2x00_adjust_sdev_qdepth_up_20097 qla2x00_adjust_sdev_qdepth_up 2 20097 NULL
-+root_nfs_copy_20111 root_nfs_copy 3 20111 NULL
-+hptiop_adjust_disk_queue_depth_20122 hptiop_adjust_disk_queue_depth 2 20122 NULL
-+kmem_cache_create_20124 kmem_cache_create 3 20124 NULL
-+tomoyo_commit_ok_20167 tomoyo_commit_ok 2 20167 NULL
-+read_flush_pipefs_20171 read_flush_pipefs 3 20171 NULL
-+wep_addr_key_count_read_20174 wep_addr_key_count_read 3 20174 NULL
-+create_trace_probe_20175 create_trace_probe 1 20175 NULL
-+crystalhd_map_dio_20181 crystalhd_map_dio 3 20181 NULL
-+ext4_llseek_20183 ext4_llseek 2 20183 NULL
-+pvr2_ctrl_value_to_sym_20229 pvr2_ctrl_value_to_sym 5 20229 NULL
-+rose_sendmsg_20249 rose_sendmsg 4 20249 NULL
-+tm6000_i2c_send_regs_20250 tm6000_i2c_send_regs 5 20250 NULL
-+_rtl92s_get_h2c_cmdlen_20312 _rtl92s_get_h2c_cmdlen 0 20312 NULL
-+vx_send_msg_nolock_20322 vx_send_msg_nolock 0 20322 NULL
-+snd_cs4281_BA1_read_20323 snd_cs4281_BA1_read 5 20323 NULL
-+ip_idents_reserve_20328 ip_idents_reserve 2 20328 NULL
-+gfs2_glock_nq_m_20347 gfs2_glock_nq_m 1 20347 NULL
-+snd_nm256_readl_20394 snd_nm256_readl 0 20394 NULL
-+__kfifo_from_user_20399 __kfifo_from_user 3 20399 NULL
-+interface_rx_20404 interface_rx 4 20404 NULL
-+find_skb_20431 find_skb 2 20431 NULL
-+fmc_send_cmd_20435 fmc_send_cmd 5 20435 NULL
-+tcp_fragment_20436 tcp_fragment 3 20436 NULL
-+nfs3_setxattr_20458 nfs3_setxattr 4 20458 NULL
-+ip_vs_icmp_xmit_v6_20464 ip_vs_icmp_xmit_v6 4 20464 NULL
-+compat_ipv6_setsockopt_20468 compat_ipv6_setsockopt 5 20468 NULL
-+read_buf_20469 read_buf 2 20469 NULL
-+hidraw_report_event_20503 hidraw_report_event 3 20503 NULL
-+xfs_iext_realloc_direct_20521 xfs_iext_realloc_direct 2 20521 NULL
-+drbd_bm_resize_20522 drbd_bm_resize 2 20522 NULL
-+amd_create_gatt_pages_20537 amd_create_gatt_pages 1 20537 NULL
-+venus_create_20555 venus_create 4 20555 NULL
-+crypto_ahash_reqsize_20569 crypto_ahash_reqsize 0 20569 NULL
-+i915_max_freq_read_20581 i915_max_freq_read 3 20581 NULL
-+lirc_write_20604 lirc_write 3 20604 NULL
-+qib_qsfp_write_20614 qib_qsfp_write 0-2-4 20614 NULL
-+regcache_lzo_block_count_20628 regcache_lzo_block_count 0 20628 NULL
-+snd_pcm_oss_prepare_20641 snd_pcm_oss_prepare 0 20641 NULL
-+kfifo_copy_to_user_20646 kfifo_copy_to_user 3-4 20646 NULL
-+cpulist_scnprintf_20648 cpulist_scnprintf 2-0 20648 NULL
-+ceph_osdc_new_request_20654 ceph_osdc_new_request 15-4 20654 NULL
-+snd_hdsp_playback_copy_20676 snd_hdsp_playback_copy 5 20676 NULL
-+dvb_dmxdev_buffer_read_20682 dvb_dmxdev_buffer_read 0-4 20682 NULL
-+cpumask_size_20683 cpumask_size 0 20683 NULL
-+read_file_tgt_int_stats_20697 read_file_tgt_int_stats 3 20697 NULL
-+__maestro_read_20700 __maestro_read 0 20700 NULL
-+cipso_v4_gentag_rng_20703 cipso_v4_gentag_rng 0 20703 NULL
-+pcpu_page_first_chunk_20712 pcpu_page_first_chunk 1 20712 NULL
-+ocfs2_read_xattr_bucket_20722 ocfs2_read_xattr_bucket 0 20722 NULL
-+security_context_to_sid_force_20724 security_context_to_sid_force 2 20724 NULL
-+vring_add_indirect_20737 vring_add_indirect 3-4 20737 NULL
-+fb_prepare_logo_20743 fb_prepare_logo 0 20743 NULL
-+vol_cdev_direct_write_20751 vol_cdev_direct_write 3 20751 NULL
-+ocfs2_align_bytes_to_clusters_20754 ocfs2_align_bytes_to_clusters 2 20754 NULL
-+ubi_io_read_20767 ubi_io_read 0 20767 NULL
-+fb_alloc_cmap_gfp_20792 fb_alloc_cmap_gfp 2 20792 NULL
-+iwl_dbgfs_rxon_flags_read_20795 iwl_dbgfs_rxon_flags_read 3 20795 NULL
-+sys_sendto_20809 sys_sendto 6 20809 NULL
-+ext4_convert_unwritten_extents_endio_20812 ext4_convert_unwritten_extents_endio 0 20812 NULL
-+strndup_user_20819 strndup_user 2 20819 NULL
-+iwl_legacy_dbgfs_qos_read_20825 iwl_legacy_dbgfs_qos_read 3 20825 NULL
-+wl1271_format_buffer_20834 wl1271_format_buffer 2 20834 NULL
-+uvc_alloc_entity_20836 uvc_alloc_entity 3-4 20836 NULL
-+p9_tag_alloc_20845 p9_tag_alloc 3 20845 NULL
-+snd_pcm_capture_avail_20867 snd_pcm_capture_avail 0 20867 NULL
-+ocfs2_bmap_20874 ocfs2_bmap 2 20874 NULL
-+iwl3945_ucode_tx_stats_read_20879 iwl3945_ucode_tx_stats_read 3 20879 NULL
-+rb_simple_write_20890 rb_simple_write 3 20890 NULL
-+sisusb_send_packet_20891 sisusb_send_packet 2 20891 NULL
-+key_icverrors_read_20895 key_icverrors_read 3 20895 NULL
-+compat_sys_readv_20911 compat_sys_readv 3 20911 NULL
-+ixj_write_20912 ixj_write 3 20912 NULL
-+lbs_rdbbp_write_20918 lbs_rdbbp_write 3 20918 NULL
-+htable_bits_20933 htable_bits 0 20933 NULL
-+check_eofblocks_fl_20942 check_eofblocks_fl 0 20942 NULL
-+altera_set_ir_post_20948 altera_set_ir_post 2 20948 NULL
-+snd_rme9652_playback_copy_20970 snd_rme9652_playback_copy 5 20970 NULL
-+brcmf_tx_frame_20978 brcmf_tx_frame 3 20978 NULL
-+alg_setsockopt_20985 alg_setsockopt 5 20985 NULL
-+qib_verbs_send_20999 qib_verbs_send 5-3 20999 NULL
-+ocfs2_free_clusters_21001 ocfs2_free_clusters 4 21001 NULL
-+btrfs_inode_ref_name_len_21024 btrfs_inode_ref_name_len 0 21024 NULL
-+snd_pcm_lib_preallocate_pages_21031 snd_pcm_lib_preallocate_pages 4 21031 NULL
-+lbs_threshold_read_21046 lbs_threshold_read 5 21046 NULL
-+proc_fault_inject_write_21058 proc_fault_inject_write 3 21058 NULL
-+rose_create_facilities_21067 rose_create_facilities 0 21067 NULL
-+event_calibration_read_21083 event_calibration_read 3 21083 NULL
-+__cfg80211_send_disassoc_21096 __cfg80211_send_disassoc 3 21096 NULL
-+ath6kl_send_go_probe_resp_21113 ath6kl_send_go_probe_resp 3 21113 NULL
-+i2400m_rx_trace_21127 i2400m_rx_trace 3 21127 NULL
-+new_skb_21148 new_skb 1 21148 NULL
-+cx18_v4l2_read_21196 cx18_v4l2_read 3 21196 NULL
-+ipc_rcu_alloc_21208 ipc_rcu_alloc 1 21208 NULL
-+_ocfs2_free_clusters_21220 _ocfs2_free_clusters 4 21220 NULL
-+get_numpages_21227 get_numpages 0-1-2 21227 NULL
-+input_ff_create_21240 input_ff_create 2 21240 NULL
-+cfg80211_notify_new_peer_candidate_21242 cfg80211_notify_new_peer_candidate 4 21242 NULL
-+sock_alloc_send_pskb_21246 sock_alloc_send_pskb 2 21246 NULL
-+ocfs2_blocks_for_bytes_21268 ocfs2_blocks_for_bytes 0-2 21268 NULL
-+store_bluetooth_21320 store_bluetooth 4 21320 NULL
-+get_zeroed_page_21322 get_zeroed_page 0 21322 NULL
-+ftrace_profile_read_21327 ftrace_profile_read 3 21327 NULL
-+iwl_legacy_tx_queue_init_21332 iwl_legacy_tx_queue_init 3 21332 NULL
-+alloc_orinocodev_21371 alloc_orinocodev 1 21371 NULL
-+split_leaf_21378 split_leaf 0 21378 NULL
-+video_ioctl2_21380 video_ioctl2 2 21380 NULL
-+diva_get_driver_dbg_mask_21399 diva_get_driver_dbg_mask 0 21399 NULL
-+snd_m3_inw_21406 snd_m3_inw 0 21406 NULL
-+snapshot_read_next_21426 snapshot_read_next 0 21426 NULL
-+tcp_bound_to_half_wnd_21429 tcp_bound_to_half_wnd 0-2 21429 NULL
-+tracing_saved_cmdlines_read_21434 tracing_saved_cmdlines_read 3 21434 NULL
-+concat_writev_21451 concat_writev 3 21451 NULL
-+ReadISAR_21453 ReadISAR 0 21453 NULL
-+read_file_xmit_21487 read_file_xmit 3 21487 NULL
-+mmc_alloc_sg_21504 mmc_alloc_sg 1 21504 NULL
-+btrfs_file_aio_write_21520 btrfs_file_aio_write 4 21520 NULL
-+cipso_v4_map_cat_enum_hton_21540 cipso_v4_map_cat_enum_hton 0 21540 NULL
-+rxrpc_send_data_21553 rxrpc_send_data 5 21553 NULL
-+snd_es18xx_mixer_read_21586 snd_es18xx_mixer_read 0 21586 NULL
-+ocfs2_acl_from_xattr_21604 ocfs2_acl_from_xattr 2 21604 NULL
-+ndisc_addr_option_pad_21630 ndisc_addr_option_pad 0 21630 NULL
-+__jfs_getxattr_21631 __jfs_getxattr 0 21631 NULL
-+carl9170_rx_copy_data_21656 carl9170_rx_copy_data 2 21656 NULL
-+atalk_sendmsg_21677 atalk_sendmsg 4 21677 NULL
-+ocfs2_xattr_get_nolock_21678 ocfs2_xattr_get_nolock 0 21678 NULL
-+rtllib_alloc_txb_21687 rtllib_alloc_txb 1-2 21687 NULL
-+evdev_ioctl_handler_21705 evdev_ioctl_handler 2 21705 NULL
-+drm_sman_init_21710 drm_sman_init 2-4-3 21710 NULL
-+mthca_alloc_init_21754 mthca_alloc_init 2 21754 NULL
-+l2down_create_21755 l2down_create 4 21755 NULL
-+usbat_flash_read_data_21762 usbat_flash_read_data 4 21762 NULL
-+gen_pool_add_21776 gen_pool_add 3 21776 NULL
-+xfs_da_grow_inode_int_21785 xfs_da_grow_inode_int 3 21785 NULL
-+kmalloc_order_trace_21788 kmalloc_order_trace 1 21788 NULL
-+libipw_get_hdrlen_21792 libipw_get_hdrlen 0 21792 NULL
-+dvb_generic_ioctl_21810 dvb_generic_ioctl 2 21810 NULL
-+lpfc_idiag_extacc_avail_get_21865 lpfc_idiag_extacc_avail_get 0-3 21865 NULL
-+tcp_cookie_size_check_21873 tcp_cookie_size_check 0-1 21873 NULL nohasharray
-+sisusbcon_bmove_21873 sisusbcon_bmove 6-5-7 21873 &tcp_cookie_size_check_21873
-+dbAllocCtl_21911 dbAllocCtl 0 21911 NULL
-+qsfp_1_read_21915 qsfp_1_read 3 21915 NULL
-+rbd_req_read_21952 rbd_req_read 4-5 21952 NULL
-+rxpipe_descr_host_int_trig_rx_data_read_22001 rxpipe_descr_host_int_trig_rx_data_read 3 22001 NULL
-+ti_recv_22027 ti_recv 4 22027 NULL
-+zd_usb_read_fw_22049 zd_usb_read_fw 4 22049 NULL
-+ieee80211_if_fmt_dropped_frames_ttl_22054 ieee80211_if_fmt_dropped_frames_ttl 3 22054 NULL
-+iwl_legacy_dbgfs_clear_ucode_statistics_write_22072 iwl_legacy_dbgfs_clear_ucode_statistics_write 3 22072 NULL
-+btrfs_reloc_clone_csums_22077 btrfs_reloc_clone_csums 2-3 22077 NULL
-+mem_rw_22085 mem_rw 3 22085 NULL
-+rt2x00debug_read_crypto_stats_22109 rt2x00debug_read_crypto_stats 3 22109 NULL
-+snd_hda_codec_read_22130 snd_hda_codec_read 0 22130 NULL
-+__kfifo_alloc_22173 __kfifo_alloc 2-3 22173 NULL
-+snd_soc_lzo_block_count_22210 snd_soc_lzo_block_count 0 22210 NULL
-+bio_chain_clone_22227 bio_chain_clone 4 22227 NULL nohasharray
-+rfcomm_sock_recvmsg_22227 rfcomm_sock_recvmsg 4 22227 &bio_chain_clone_22227
-+mem_write_22232 mem_write 3 22232 NULL
-+p9_virtio_zc_request_22240 p9_virtio_zc_request 6-5 22240 NULL
-+compat_process_vm_rw_22254 compat_process_vm_rw 3-5 22254 NULL
-+__btrfs_direct_write_22273 __btrfs_direct_write 4 22273 NULL
-+__tun_chr_ioctl_22300 __tun_chr_ioctl 4 22300 NULL
-+mesh_table_alloc_22305 mesh_table_alloc 1 22305 NULL
-+udpv6_sendmsg_22316 udpv6_sendmsg 4 22316 NULL
-+atomic_read_22342 atomic_read 0 22342 NULL
-+snd_pcm_alsa_frames_22363 snd_pcm_alsa_frames 2 22363 NULL
-+evdev_ioctl_22371 evdev_ioctl 2 22371 NULL
-+btmrvl_psmode_read_22395 btmrvl_psmode_read 3 22395 NULL
-+alloc_private_22399 alloc_private 2 22399 NULL
-+zoran_write_22404 zoran_write 3 22404 NULL
-+queue_reply_22416 queue_reply 3 22416 NULL
-+__set_enter_print_fmt_22431 __set_enter_print_fmt 0 22431 NULL
-+queue_max_segments_22441 queue_max_segments 0 22441 NULL
-+handle_received_packet_22457 handle_received_packet 3 22457 NULL
-+rt6_nlmsg_size_22473 rt6_nlmsg_size 0 22473 NULL
-+ecryptfs_write_22488 ecryptfs_write 4-3 22488 NULL
-+cache_write_procfs_22491 cache_write_procfs 3 22491 NULL
-+mutex_lock_interruptible_22505 mutex_lock_interruptible 0 22505 NULL
-+pskb_may_pull_22546 pskb_may_pull 2 22546 NULL
-+ocfs2_read_extent_block_22550 ocfs2_read_extent_block 0 22550 NULL
-+agp_alloc_page_array_22554 agp_alloc_page_array 1 22554 NULL
-+dbFindCtl_22587 dbFindCtl 0 22587 NULL
-+snapshot_read_22601 snapshot_read 3 22601 NULL
-+sctp_setsockopt_connectx_old_22631 sctp_setsockopt_connectx_old 3 22631 NULL
-+ide_core_cp_entry_22636 ide_core_cp_entry 3 22636 NULL
-+pwr_wake_on_timer_exp_read_22640 pwr_wake_on_timer_exp_read 3 22640 NULL
-+sysfs_attr_ns_22645 sysfs_attr_ns 0 22645 NULL
-+l2tp_ip_recvmsg_22681 l2tp_ip_recvmsg 4 22681 NULL
-+ocfs2_get_block_22687 ocfs2_get_block 2 22687 NULL
-+sys_ppoll_22688 sys_ppoll 2 22688 NULL
-+alloc_libipw_22708 alloc_libipw 1 22708 NULL
-+brcmf_sdbrcm_read_control_22721 brcmf_sdbrcm_read_control 3 22721 NULL
-+aa_features_read_22730 aa_features_read 3 22730 NULL
-+cx18_copy_buf_to_user_22735 cx18_copy_buf_to_user 4-0 22735 NULL
-+ax25_output_22736 ax25_output 2 22736 NULL
-+ceph_decode_32_22738 ceph_decode_32 0 22738 NULL
-+print_frame_22769 print_frame 0 22769 NULL
-+ftrace_arch_read_dyn_info_22773 ftrace_arch_read_dyn_info 0 22773 NULL
-+__generic_copy_to_user_intel_22806 __generic_copy_to_user_intel 0-3 22806 NULL
-+can_nocow_odirect_22854 can_nocow_odirect 4-3 22854 NULL nohasharray
-+read_file_rcstat_22854 read_file_rcstat 3 22854 &can_nocow_odirect_22854
-+create_attr_set_22861 create_attr_set 1 22861 NULL
-+usblp_new_writeurb_22894 usblp_new_writeurb 2 22894 NULL
-+mdc800_device_read_22896 mdc800_device_read 3 22896 NULL
-+virtqueue_add_buf_22924 virtqueue_add_buf 3-4 22924 NULL
-+xstateregs_set_22932 xstateregs_set 4 22932 NULL
-+pcpu_mem_zalloc_22948 pcpu_mem_zalloc 1 22948 NULL
-+alloc_sglist_22960 alloc_sglist 1-2-3 22960 NULL
-+caif_seqpkt_sendmsg_22961 caif_seqpkt_sendmsg 4 22961 NULL
-+vme_get_size_22964 vme_get_size 0 22964 NULL
-+usb_get_langid_22983 usb_get_langid 0 22983 NULL
-+remote_settings_file_write_22987 remote_settings_file_write 3 22987 NULL
-+viafb_dvp0_proc_write_23023 viafb_dvp0_proc_write 3 23023 NULL
-+st_status_23032 st_status 5 23032 NULL
-+reiserfs_add_entry_23062 reiserfs_add_entry 4 23062 NULL nohasharray
-+unix_seqpacket_recvmsg_23062 unix_seqpacket_recvmsg 4 23062 &reiserfs_add_entry_23062
-+vivi_read_23073 vivi_read 3 23073 NULL
-+kvm_mmu_gva_to_gpa_write_23075 kvm_mmu_gva_to_gpa_write 0 23075 NULL
-+raw_sendmsg_23078 raw_sendmsg 4 23078 NULL
-+isr_tx_procs_read_23084 isr_tx_procs_read 3 23084 NULL
-+rt2x00debug_write_eeprom_23091 rt2x00debug_write_eeprom 3 23091 NULL
-+ntfs_ucstonls_23097 ntfs_ucstonls 3-5 23097 NULL
-+pipe_iov_copy_from_user_23102 pipe_iov_copy_from_user 3 23102 NULL
-+dgram_recvmsg_23104 dgram_recvmsg 4 23104 NULL
-+mwl8k_cmd_set_beacon_23110 mwl8k_cmd_set_beacon 4 23110 NULL
-+nl80211_send_rx_auth_23111 nl80211_send_rx_auth 4 23111 NULL
-+__clear_user_23118 __clear_user 0-2 23118 NULL
-+drm_mode_create_tv_properties_23122 drm_mode_create_tv_properties 2 23122 NULL nohasharray
-+iwl_legacy_dbgfs_interrupt_write_23122 iwl_legacy_dbgfs_interrupt_write 3 23122 &drm_mode_create_tv_properties_23122
-+ata_scsi_change_queue_depth_23126 ata_scsi_change_queue_depth 2 23126 NULL
-+cfg80211_rx_mgmt_23138 cfg80211_rx_mgmt 4 23138 NULL nohasharray
-+em28xx_write_regs_req_23138 em28xx_write_regs_req 0 23138 &cfg80211_rx_mgmt_23138
-+read_file_ani_23161 read_file_ani 3 23161 NULL
-+usblp_write_23178 usblp_write 3 23178 NULL
-+gss_pipe_downcall_23182 gss_pipe_downcall 3 23182 NULL
-+ieee80211_get_mesh_hdrlen_23183 ieee80211_get_mesh_hdrlen 0 23183 NULL
-+tty_buffer_request_room_23228 tty_buffer_request_room 2-0 23228 NULL
-+xlog_get_bp_23229 xlog_get_bp 2 23229 NULL nohasharray
-+__read_status_pci_23229 __read_status_pci 0 23229 &xlog_get_bp_23229
-+__kmalloc_23231 __kmalloc 1 23231 NULL
-+rxrpc_client_sendmsg_23236 rxrpc_client_sendmsg 5 23236 NULL
-+sctp_recvmsg_23265 sctp_recvmsg 4 23265 NULL nohasharray
-+ad799x_single_channel_from_ring_23265 ad799x_single_channel_from_ring 2 23265 &sctp_recvmsg_23265
-+uwb_dev_addr_print_23282 uwb_dev_addr_print 2 23282 NULL
-+diva_get_trace_filter_23286 diva_get_trace_filter 0 23286 NULL
-+i2cdev_write_23310 i2cdev_write 3 23310 NULL
-+nl_pid_hash_zalloc_23314 nl_pid_hash_zalloc 1 23314 NULL
-+page_readlink_23346 page_readlink 3 23346 NULL
-+get_dst_timing_23358 get_dst_timing 0 23358 NULL
-+ip_nat_sdp_media_23386 ip_nat_sdp_media 8 23386 NULL
-+iscsi_change_queue_depth_23416 iscsi_change_queue_depth 2 23416 NULL
-+vga_mm_r_23419 vga_mm_r 0 23419 NULL
-+ulog_alloc_skb_23427 ulog_alloc_skb 1 23427 NULL
-+__cxio_init_resource_fifo_23447 __cxio_init_resource_fifo 3 23447 NULL nohasharray
-+ocfs2_zero_tail_23447 ocfs2_zero_tail 3 23447 &__cxio_init_resource_fifo_23447
-+hidraw_send_report_23449 hidraw_send_report 3 23449 NULL
-+dn_nsp_send_disc_23469 dn_nsp_send_disc 2 23469 NULL
-+__ata_change_queue_depth_23484 __ata_change_queue_depth 3 23484 NULL
-+linear_conf_23485 linear_conf 2 23485 NULL
-+si4713_send_command_23493 si4713_send_command 6 23493 NULL
-+event_filter_read_23494 event_filter_read 3 23494 NULL
-+ext4_remove_blocks_23497 ext4_remove_blocks 0 23497 NULL
-+write_led_23517 write_led 2 23517 NULL
-+__fill_vb2_buffer_23521 __fill_vb2_buffer 0 23521 NULL
-+ima_show_measurements_count_23536 ima_show_measurements_count 3 23536 NULL
-+tcp_current_mss_23552 tcp_current_mss 0 23552 NULL
-+tcp_match_skb_to_sack_23568 tcp_match_skb_to_sack 4-3 23568 NULL
-+venus_symlink_23570 venus_symlink 6-4 23570 NULL
-+iwl_dbgfs_interrupt_read_23574 iwl_dbgfs_interrupt_read 3 23574 NULL
-+l2cap_parse_conf_req_23575 l2cap_parse_conf_req 0 23575 NULL
-+xfpregs_get_23586 xfpregs_get 4 23586 NULL
-+cifs_spnego_key_instantiate_23588 cifs_spnego_key_instantiate 3 23588 NULL
-+snd_interval_min_23590 snd_interval_min 0 23590 NULL
-+cfpkt_create_pfx_23594 cfpkt_create_pfx 1-2 23594 NULL
-+_alloc_cdb_cont_23609 _alloc_cdb_cont 2 23609 NULL
-+islpci_mgt_transaction_23610 islpci_mgt_transaction 5 23610 NULL
-+ocfs2_journal_access_23616 ocfs2_journal_access 0 23616 NULL
-+__i2400mu_send_barker_23652 __i2400mu_send_barker 3 23652 NULL
-+sInW_23663 sInW 0 23663 NULL
-+nftl_partscan_23688 nftl_partscan 0 23688 NULL
-+cx18_read_23699 cx18_read 3 23699 NULL
-+sock_alloc_send_skb_23720 sock_alloc_send_skb 2 23720 NULL
-+pack_sg_list_p_23739 pack_sg_list_p 0-2 23739 NULL
-+__kfifo_max_r_23768 __kfifo_max_r 0-2-1 23768 NULL
-+tt_save_orig_buffer_23779 tt_save_orig_buffer 4 23779 NULL
-+security_inode_getxattr_23781 security_inode_getxattr 0 23781 NULL
-+rx_path_reset_read_23801 rx_path_reset_read 3 23801 NULL
-+__earlyonly_bootmem_alloc_23824 __earlyonly_bootmem_alloc 2 23824 NULL
-+xfs_dir2_leaf_getdents_23841 xfs_dir2_leaf_getdents 3 23841 NULL
-+iwl_dbgfs_nvm_read_23845 iwl_dbgfs_nvm_read 3 23845 NULL
-+p54_init_common_23850 p54_init_common 1 23850 NULL
-+ocfs2_xattr_get_clusters_23857 ocfs2_xattr_get_clusters 0 23857 NULL
-+ieee80211_if_read_dot11MeshMaxPeerLinks_23878 ieee80211_if_read_dot11MeshMaxPeerLinks 3 23878 NULL
-+ieee80211_if_read_channel_type_23884 ieee80211_if_read_channel_type 3 23884 NULL
-+iwch_reject_cr_23901 iwch_reject_cr 3 23901 NULL
-+device_create_bin_file_23914 device_create_bin_file 0 23914 NULL
-+ipath_reg_phys_mr_23918 ipath_reg_phys_mr 3 23918 NULL
-+i915_gem_object_bind_to_gtt_23921 i915_gem_object_bind_to_gtt 0 23921 NULL
-+kvm_read_guest_23928 kvm_read_guest 4-2 23928 NULL
-+__alloc_skb_23940 __alloc_skb 1 23940 NULL
-+cifs_setxattr_23957 cifs_setxattr 4 23957 NULL
-+ixj_enhanced_write_23973 ixj_enhanced_write 3 23973 NULL
-+sddr55_write_data_23983 sddr55_write_data 4 23983 NULL
-+zd_usb_iowrite16v_async_23984 zd_usb_iowrite16v_async 3 23984 NULL
-+brcmf_sdcard_recv_buf_24006 brcmf_sdcard_recv_buf 6 24006 NULL
-+cxgb_alloc_mem_24007 cxgb_alloc_mem 1 24007 NULL
-+ocfs2_mark_extent_refcounted_24035 ocfs2_mark_extent_refcounted 6 24035 NULL
-+afs_cell_alloc_24052 afs_cell_alloc 2 24052 NULL
-+blkcipher_copy_iv_24075 blkcipher_copy_iv 3 24075 NULL
-+request_key_auth_read_24109 request_key_auth_read 3 24109 NULL
-+iwl_legacy_dbgfs_stations_read_24121 iwl_legacy_dbgfs_stations_read 3 24121 NULL
-+mpu401_read_24126 mpu401_read 0-3 24126 NULL
-+_picolcd_flash_write_24134 _picolcd_flash_write 4 24134 NULL
-+irnet_ctrl_write_24139 irnet_ctrl_write 3 24139 NULL
-+UpdateReg_24148 UpdateReg 0 24148 NULL
-+adu_read_24177 adu_read 3 24177 NULL
-+safe_prepare_write_buffer_24187 safe_prepare_write_buffer 3 24187 NULL
-+shrink_tnc_24190 shrink_tnc 0 24190 NULL
-+get_order_24203 get_order 0 24203 NULL
-+ieee80211_if_read_dot11MeshHWMPpreqMinInterval_24208 ieee80211_if_read_dot11MeshHWMPpreqMinInterval 3 24208 NULL
-+tcpprobe_sprint_24222 tcpprobe_sprint 2-0 24222 NULL
-+pcpu_embed_first_chunk_24224 pcpu_embed_first_chunk 3-2-1 24224 NULL
-+pci_num_vf_24235 pci_num_vf 0 24235 NULL
-+sel_read_bool_24236 sel_read_bool 3 24236 NULL
-+esp6_get_mtu_24264 esp6_get_mtu 0-2 24264 NULL
-+calculate_sizes_24273 calculate_sizes 2 24273 NULL
-+msg_size_24288 msg_size 0 24288 NULL
-+gserial_connect_24302 gserial_connect 0 24302 NULL
-+btmrvl_pscmd_read_24308 btmrvl_pscmd_read 3 24308 NULL
-+ocfs2_direct_IO_get_blocks_24333 ocfs2_direct_IO_get_blocks 2 24333 NULL
-+kzalloc_node_24352 kzalloc_node 1 24352 NULL
-+qla2x00_handle_queue_full_24365 qla2x00_handle_queue_full 2 24365 NULL
-+cfi_read_pri_24366 cfi_read_pri 3 24366 NULL
-+btrfs_item_size_nr_24367 btrfs_item_size_nr 0 24367 NULL
-+igetword_24373 igetword 0 24373 NULL
-+max_io_len_24384 max_io_len 0-1 24384 NULL
-+getxattr_24398 getxattr 4 24398 NULL nohasharray
-+pvr2_v4l2_ioctl_24398 pvr2_v4l2_ioctl 2 24398 &getxattr_24398
-+blk_update_bidi_request_24415 blk_update_bidi_request 3-4 24415 NULL
-+b43_debugfs_read_24425 b43_debugfs_read 3 24425 NULL
-+xenbus_file_read_24427 xenbus_file_read 3 24427 NULL
-+ieee80211_rx_mgmt_beacon_24430 ieee80211_rx_mgmt_beacon 3 24430 NULL
-+evdev_do_ioctl_24459 evdev_do_ioctl 2 24459 NULL
-+lbs_highsnr_write_24460 lbs_highsnr_write 3 24460 NULL
-+ocfs2_write_cluster_by_desc_24466 ocfs2_write_cluster_by_desc 6-5 24466 NULL nohasharray
-+skb_copy_and_csum_datagram_iovec_24466 skb_copy_and_csum_datagram_iovec 2 24466 &ocfs2_write_cluster_by_desc_24466
-+pd_video_read_24510 pd_video_read 3 24510 NULL
-+request_key_with_auxdata_24515 request_key_with_auxdata 4 24515 NULL
-+named_prepare_buf_24532 named_prepare_buf 2 24532 NULL
-+rtnl_port_size_24537 rtnl_port_size 0 24537 NULL
-+write_cache_pages_24562 write_cache_pages 0 24562 NULL
-+printer_set_config_24568 printer_set_config 0 24568 NULL
-+netlbl_domhsh_init_24576 netlbl_domhsh_init 1 24576 NULL
-+ath6kl_wmi_startscan_cmd_24580 ath6kl_wmi_startscan_cmd 7 24580 NULL
-+udf_compute_nr_groups_24594 udf_compute_nr_groups 0 24594 NULL
-+ip6addrlbl_msgsize_24595 ip6addrlbl_msgsize 0 24595 NULL
-+count_preds_24600 count_preds 0 24600 NULL
-+alloc_wr_24635 alloc_wr 1-2 24635 NULL
-+context_alloc_24645 context_alloc 3 24645 NULL
-+blk_rq_err_bytes_24650 blk_rq_err_bytes 0 24650 NULL
-+datafab_write_data_24696 datafab_write_data 4 24696 NULL
-+simple_attr_read_24738 simple_attr_read 3 24738 NULL
-+qla2x00_change_queue_depth_24742 qla2x00_change_queue_depth 2 24742 NULL
-+ath_rxbuf_alloc_24745 ath_rxbuf_alloc 2 24745 NULL
-+get_dma_residue_24749 get_dma_residue 0 24749 NULL
-+kgdb_hex2mem_24755 kgdb_hex2mem 3 24755 NULL
-+nfsd4_sanitize_slot_size_24756 nfsd4_sanitize_slot_size 0-1 24756 NULL
-+mI_alloc_skb_24770 mI_alloc_skb 1 24770 NULL
-+i915_cache_sharing_read_24775 i915_cache_sharing_read 3 24775 NULL
-+ocfs2_read_blocks_24777 ocfs2_read_blocks 0 24777 NULL
-+skb_make_writable_24783 skb_make_writable 2 24783 NULL
-+datablob_hmac_verify_24786 datablob_hmac_verify 4 24786 NULL
-+cache_read_24790 cache_read 3 24790 NULL
-+user_regset_copyout_24796 user_regset_copyout 7 24796 NULL
-+unpack_str_24798 unpack_str 0 24798 NULL
-+kvm_read_guest_virt_helper_24804 kvm_read_guest_virt_helper 3-1 24804 NULL
-+ath6kl_fwlog_mask_write_24810 ath6kl_fwlog_mask_write 3 24810 NULL
-+net2272_read_24825 net2272_read 0 24825 NULL
-+snd_als4k_gcr_read_24840 snd_als4k_gcr_read 0 24840 NULL
-+snd_pcm_lib_buffer_bytes_24865 snd_pcm_lib_buffer_bytes 0 24865 NULL
-+pnp_alloc_24869 pnp_alloc 1 24869 NULL nohasharray
-+put_data_to_circ_buf_24869 put_data_to_circ_buf 3 24869 &pnp_alloc_24869
-+bnx2fc_cmd_mgr_alloc_24873 bnx2fc_cmd_mgr_alloc 3-2 24873 NULL
-+queues_read_24877 queues_read 3 24877 NULL
-+iwm_rx_handle_24899 iwm_rx_handle 3 24899 NULL
-+codec_list_read_file_24910 codec_list_read_file 3 24910 NULL
-+ocfs2_fiemap_24949 ocfs2_fiemap 3-4 24949 NULL
-+packet_sendmsg_24954 packet_sendmsg 4 24954 NULL
-+sys_rt_sigpending_24961 sys_rt_sigpending 2 24961 NULL
-+llc_ui_sendmsg_24987 llc_ui_sendmsg 4 24987 NULL
-+key_conf_hw_key_idx_read_25003 key_conf_hw_key_idx_read 3 25003 NULL
-+iwl3945_ucode_general_stats_read_25009 iwl3945_ucode_general_stats_read 3 25009 NULL
-+ni_660x_num_counters_25031 ni_660x_num_counters 0 25031 NULL
-+gs_buf_alloc_25067 gs_buf_alloc 2 25067 NULL
-+cxio_hal_init_rhdl_resource_25104 cxio_hal_init_rhdl_resource 1 25104 NULL
-+ubifs_dir_llseek_25106 ubifs_dir_llseek 2 25106 NULL nohasharray
-+snd_rawmidi_kernel_write_25106 snd_rawmidi_kernel_write 3 25106 &ubifs_dir_llseek_25106
-+oom_adjust_read_25127 oom_adjust_read 3 25127 NULL
-+sys_fgetxattr_25166 sys_fgetxattr 4 25166 NULL
-+sethdraddr_25167 sethdraddr 0 25167 NULL nohasharray
-+ipath_init_qp_table_25167 ipath_init_qp_table 2 25167 &sethdraddr_25167
-+sctp_getsockopt_local_addrs_25178 sctp_getsockopt_local_addrs 2 25178 NULL
-+ks8851_rdreg32_25187 ks8851_rdreg32 0 25187 NULL
-+mon_stat_read_25238 mon_stat_read 3 25238 NULL
-+tcf_csum_ipv6_udp_25241 tcf_csum_ipv6_udp 4 25241 NULL
-+compat_rw_copy_check_uvector_25242 compat_rw_copy_check_uvector 0-3 25242 NULL
-+snd_pcm_start_25273 snd_pcm_start 0 25273 NULL
-+crypto_alloc_instance2_25277 crypto_alloc_instance2 3 25277 NULL
-+vfs_writev_25278 vfs_writev 3 25278 NULL
-+l2tp_session_create_25286 l2tp_session_create 1 25286 NULL
-+ceph_calc_object_layout_25305 ceph_calc_object_layout 0 25305 NULL
-+ath9k_debugfs_read_buf_25316 ath9k_debugfs_read_buf 3 25316 NULL
-+rng_buffer_size_25348 rng_buffer_size 0 25348 NULL
-+i915_gem_execbuffer_relocate_slow_25355 i915_gem_execbuffer_relocate_slow 7-0 25355 NULL
-+unix_mkname_25368 unix_mkname 0-2 25368 NULL
-+sel_read_mls_25369 sel_read_mls 3 25369 NULL
-+rh_queue_status_25378 rh_queue_status 0 25378 NULL
-+ThermometerRead_25393 ThermometerRead 0 25393 NULL
-+et61x251_read_25420 et61x251_read 3 25420 NULL
-+dai_list_read_file_25421 dai_list_read_file 3 25421 NULL
-+generic_file_buffered_write_25464 generic_file_buffered_write 4 25464 NULL
-+ipath_decode_err_25468 ipath_decode_err 3 25468 NULL
-+crypto_hash_digestsize_25469 crypto_hash_digestsize 0 25469 NULL
-+ivtv_buf_copy_from_user_25502 ivtv_buf_copy_from_user 4-0 25502 NULL
-+snd_pcm_plugin_build_25505 snd_pcm_plugin_build 5 25505 NULL
-+ext3_get_inode_loc_25542 ext3_get_inode_loc 0 25542 NULL
-+ieee80211_if_read_path_refresh_time_25545 ieee80211_if_read_path_refresh_time 3 25545 NULL
-+c4iw_init_resource_fifo_random_25547 c4iw_init_resource_fifo_random 3 25547 NULL
-+wimax_addr_scnprint_25548 wimax_addr_scnprint 2 25548 NULL
-+taskstats_packet_size_25553 taskstats_packet_size 0 25553 NULL
-+ht_print_chan_25556 ht_print_chan 0 25556 NULL
-+skb_tailroom_25567 skb_tailroom 0 25567 NULL
-+realloc_packet_buffer_25569 realloc_packet_buffer 2 25569 NULL
-+ping_recvmsg_25597 ping_recvmsg 4 25597 NULL
-+__devres_alloc_25598 __devres_alloc 2 25598 NULL
-+ddp_ppod_write_idata_25610 ddp_ppod_write_idata 5 25610 NULL
-+copy_user_generic_25611 copy_user_generic 0 25611 NULL
-+proc_coredump_filter_write_25625 proc_coredump_filter_write 3 25625 NULL
-+befs_utf2nls_25628 befs_utf2nls 3 25628 NULL nohasharray
-+__get_user_pages_25628 __get_user_pages 0 25628 &befs_utf2nls_25628
-+aircable_prepare_write_buffer_25669 aircable_prepare_write_buffer 3 25669 NULL
-+lpfc_idiag_cmd_get_25672 lpfc_idiag_cmd_get 2 25672 NULL
-+sta_inactive_ms_read_25690 sta_inactive_ms_read 3 25690 NULL
-+ibmasm_new_command_25714 ibmasm_new_command 2 25714 NULL
-+rx_queue_entry_next_25715 rx_queue_entry_next 0 25715 NULL
-+sel_write_context_25726 sel_write_context 3 25726 NULL nohasharray
-+__alloc_bootmem_low_node_25726 __alloc_bootmem_low_node 2 25726 &sel_write_context_25726
-+mcs_unwrap_fir_25733 mcs_unwrap_fir 3 25733 NULL
-+cxgbi_device_portmap_create_25747 cxgbi_device_portmap_create 3 25747 NULL
-+event_rx_pool_read_25792 event_rx_pool_read 3 25792 NULL
-+sg_read_25799 sg_read 3 25799 NULL
-+sys32_rt_sigpending_25814 sys32_rt_sigpending 2 25814 NULL
-+system_enable_read_25815 system_enable_read 3 25815 NULL
-+realloc_buffer_25816 realloc_buffer 2 25816 NULL
-+pwr_missing_bcns_read_25824 pwr_missing_bcns_read 3 25824 NULL
-+parport_read_25855 parport_read 0 25855 NULL
-+xfs_dir2_sf_hdr_size_25858 xfs_dir2_sf_hdr_size 0 25858 NULL
-+ath6kl_regread_read_25884 ath6kl_regread_read 3 25884 NULL
-+run_delalloc_nocow_25896 run_delalloc_nocow 3-4 25896 NULL
-+sisusbcon_scroll_area_25899 sisusbcon_scroll_area 4-3 25899 NULL
-+lpfc_change_queue_depth_25905 lpfc_change_queue_depth 2 25905 NULL
-+do_jffs2_setxattr_25910 do_jffs2_setxattr 5 25910 NULL
-+rcname_read_25919 rcname_read 3 25919 NULL
-+_get_word_25929 _get_word 0 25929 NULL
-+snd_es1938_capture_copy_25930 snd_es1938_capture_copy 5 25930 NULL
-+key_flags_read_25931 key_flags_read 3 25931 NULL
-+copy_play_buf_25932 copy_play_buf 3 25932 NULL
-+udp_setsockopt_25985 udp_setsockopt 5 25985 NULL
-+xfs_xattr_acl_set_26028 xfs_xattr_acl_set 4 26028 NULL
-+mptscsih_change_queue_depth_26036 mptscsih_change_queue_depth 2 26036 NULL
-+selinux_inode_post_setxattr_26037 selinux_inode_post_setxattr 4 26037 NULL
-+keyctl_update_key_26061 keyctl_update_key 3 26061 NULL
-+__strnlen_user_26117 __strnlen_user 0-2 26117 NULL nohasharray
-+intel_wrap_ring_buffer_26117 intel_wrap_ring_buffer 0 26117 &__strnlen_user_26117
-+user_instantiate_26131 user_instantiate 3 26131 NULL
-+skb_cow_26138 skb_cow 2 26138 NULL
-+copy_oldmem_page_26164 copy_oldmem_page 3 26164 NULL
-+gfs2_xattr_acl_get_26166 gfs2_xattr_acl_get 0 26166 NULL
-+disk_devt_26180 disk_devt 0 26180 NULL
-+get_registers_26187 get_registers 3 26187 NULL
-+ieee80211_if_fmt_dot11MeshTTL_26198 ieee80211_if_fmt_dot11MeshTTL 3 26198 NULL
-+xfs_idata_realloc_26199 xfs_idata_realloc 2 26199 NULL
-+mce_write_26201 mce_write 3 26201 NULL
-+mwifiex_regrdwr_write_26225 mwifiex_regrdwr_write 3 26225 NULL nohasharray
-+store_sys_hwmon_26225 store_sys_hwmon 3 26225 &mwifiex_regrdwr_write_26225
-+_scsih_change_queue_depth_26230 _scsih_change_queue_depth 2 26230 NULL
-+cxio_num_stags_26233 cxio_num_stags 0 26233 NULL nohasharray
-+rxrpc_recvmsg_26233 rxrpc_recvmsg 4 26233 &cxio_num_stags_26233
-+bio_split_26235 bio_split 2 26235 NULL
-+crypto_ctxsize_26278 crypto_ctxsize 0 26278 NULL
-+apei_resources_request_26279 apei_resources_request 0 26279 NULL
-+snd_pcm_plug_client_channels_buf_26309 snd_pcm_plug_client_channels_buf 0-3 26309 NULL
-+tled_proc_write_26315 tled_proc_write 3 26315 NULL
-+pwr_wake_on_host_read_26321 pwr_wake_on_host_read 3 26321 NULL
-+tcp_sacktag_walk_26339 tcp_sacktag_walk 5-6 26339 NULL
-+snd_vx_check_reg_bit_26344 snd_vx_check_reg_bit 0 26344 NULL
-+ocfs2_duplicate_clusters_by_page_26357 ocfs2_duplicate_clusters_by_page 6-3-5 26357 NULL
-+dup_to_netobj_26363 dup_to_netobj 3 26363 NULL
-+invalidate_inode_pages2_range_26403 invalidate_inode_pages2_range 0 26403 NULL
-+ntty_write_26404 ntty_write 3 26404 NULL
-+tcp_shift_skb_data_26405 tcp_shift_skb_data 5 26405 NULL
-+iwl_legacy_dbgfs_sram_read_26419 iwl_legacy_dbgfs_sram_read 3 26419 NULL
-+__vb2_get_done_vb_26426 __vb2_get_done_vb 0 26426 NULL
-+pagemap_read_26441 pagemap_read 3 26441 NULL
-+tower_read_26461 tower_read 3 26461 NULL
-+ib_alloc_device_26483 ib_alloc_device 1 26483 NULL
-+ulong_write_file_26485 ulong_write_file 3 26485 NULL
-+dvb_ca_en50221_io_ioctl_26490 dvb_ca_en50221_io_ioctl 2 26490 NULL
-+l2cap_build_conf_req_26513 l2cap_build_conf_req 0 26513 NULL
-+__vhost_add_used_n_26554 __vhost_add_used_n 3 26554 NULL
-+rts51x_read_mem_26577 rts51x_read_mem 4 26577 NULL
-+pwr_fix_tsf_ps_read_26627 pwr_fix_tsf_ps_read 3 26627 NULL
-+drm_ht_find_item_26637 drm_ht_find_item 0 26637 NULL
-+irq_alloc_generic_chip_26650 irq_alloc_generic_chip 2 26650 NULL nohasharray
-+inb_p_26650 inb_p 0 26650 &irq_alloc_generic_chip_26650
-+usb_reset_device_26661 usb_reset_device 0 26661 NULL
-+cipso_v4_map_cat_rbm_hton_26680 cipso_v4_map_cat_rbm_hton 0 26680 NULL
-+__alloc_pred_stack_26687 __alloc_pred_stack 2 26687 NULL
-+rtllib_authentication_req_26713 rtllib_authentication_req 3 26713 NULL
-+bos_desc_26752 bos_desc 0 26752 NULL
-+srp_ring_alloc_26760 srp_ring_alloc 2 26760 NULL
-+snd_hda_get_raw_connections_26762 snd_hda_get_raw_connections 0 26762 NULL
-+dma_map_single_attrs_26779 dma_map_single_attrs 0 26779 NULL
-+qlcnic_alloc_sds_rings_26795 qlcnic_alloc_sds_rings 2 26795 NULL
-+cipso_v4_genopt_26812 cipso_v4_genopt 0 26812 NULL
-+smk_write_load_26829 smk_write_load 3 26829 NULL
-+scnprint_id_26842 scnprint_id 3-0 26842 NULL
-+ecryptfs_miscdev_write_26847 ecryptfs_miscdev_write 3 26847 NULL
-+svc_print_xprts_26881 svc_print_xprts 0 26881 NULL
-+ms_read_bytes_26894 ms_read_bytes 6 26894 NULL
-+ctnetlink_counters_size_26898 ctnetlink_counters_size 0 26898 NULL
-+slhc_uncompress_26905 slhc_uncompress 0-3 26905 NULL
-+x25_asy_change_mtu_26928 x25_asy_change_mtu 2 26928 NULL
-+scsi_tgt_copy_sense_26933 scsi_tgt_copy_sense 3 26933 NULL
-+sctp_setsockopt_adaptation_layer_26935 sctp_setsockopt_adaptation_layer 3 26935 NULL nohasharray
-+pwr_ps_enter_read_26935 pwr_ps_enter_read 3 26935 &sctp_setsockopt_adaptation_layer_26935
-+hecubafb_write_26942 hecubafb_write 3 26942 NULL
-+extract_entropy_user_26952 extract_entropy_user 3 26952 NULL
-+__videobuf_alloc_vb_27062 __videobuf_alloc_vb 1 27062 NULL
-+snd_pcm_lib_period_bytes_27071 snd_pcm_lib_period_bytes 0 27071 NULL
-+paravirt_read_msr_27077 paravirt_read_msr 0 27077 NULL
-+alloc_fdmem_27083 alloc_fdmem 1 27083 NULL
-+find_first_bit_27088 find_first_bit 0 27088 NULL
-+btmrvl_hscmd_write_27089 btmrvl_hscmd_write 3 27089 NULL
-+__devcgroup_inode_permission_27108 __devcgroup_inode_permission 0 27108 NULL
-+__ext4_handle_dirty_metadata_27137 __ext4_handle_dirty_metadata 0 27137 NULL
-+drbd_get_capacity_27141 drbd_get_capacity 0 27141 NULL
-+pms_capture_27142 pms_capture 4 27142 NULL
-+btmrvl_hscfgcmd_write_27143 btmrvl_hscfgcmd_write 3 27143 NULL
-+i2400m_net_rx_27170 i2400m_net_rx 5 27170 NULL
-+ieee80211_if_read_rc_rateidx_mask_5ghz_27183 ieee80211_if_read_rc_rateidx_mask_5ghz 3 27183 NULL
-+write_kmem_27225 write_kmem 3 27225 NULL
-+dbAllocAG_27228 dbAllocAG 0 27228 NULL
-+rxrpc_request_key_27235 rxrpc_request_key 3 27235 NULL
-+cfpkt_add_trail_27260 cfpkt_add_trail 3 27260 NULL
-+nlmsg_new_27263 nlmsg_new 1 27263 NULL
-+usb_submit_urb_27278 usb_submit_urb 0 27278 NULL
-+hpi_read_reg_27302 hpi_read_reg 0 27302 NULL
-+copy_from_buf_27308 copy_from_buf 4-2 27308 NULL
-+ath6kl_wmi_test_cmd_27312 ath6kl_wmi_test_cmd 3 27312 NULL
-+ocfs2_blocks_to_clusters_27327 ocfs2_blocks_to_clusters 0-2 27327 NULL
-+snd_pcm_oss_write2_27332 snd_pcm_oss_write2 3-0 27332 NULL
-+afs_cell_create_27346 afs_cell_create 2 27346 NULL
-+iwl_dbgfs_csr_write_27363 iwl_dbgfs_csr_write 3 27363 NULL
-+pcbit_stat_27364 pcbit_stat 2 27364 NULL
-+if_nlmsg_size_27404 if_nlmsg_size 0 27404 NULL
-+seq_read_27411 seq_read 3 27411 NULL
-+ieee80211_if_read_smps_27416 ieee80211_if_read_smps 3 27416 NULL
-+cypress_write_27423 cypress_write 4 27423 NULL
-+pack_sg_list_27425 pack_sg_list 0-2 27425 NULL
-+sddr09_read_data_27447 sddr09_read_data 3 27447 NULL
-+hcd_buffer_alloc_27495 hcd_buffer_alloc 2 27495 NULL
-+ip_set_get_h32_27498 ip_set_get_h32 0 27498 NULL
-+garmin_read_process_27509 garmin_read_process 3 27509 NULL
-+xfs_buf_read_uncached_27519 xfs_buf_read_uncached 4 27519 NULL
-+ib_copy_to_udata_27525 ib_copy_to_udata 3 27525 NULL
-+intel_gtt_map_memory_27539 intel_gtt_map_memory 0 27539 NULL
-+snd_sonicvibes_getdmaa_27552 snd_sonicvibes_getdmaa 0 27552 NULL
-+libipw_alloc_txb_27579 libipw_alloc_txb 1-3-2 27579 NULL
-+tipc_cfg_reply_alloc_27606 tipc_cfg_reply_alloc 1 27606 NULL
-+iwl4965_rs_sta_dbgfs_rate_scale_data_read_27619 iwl4965_rs_sta_dbgfs_rate_scale_data_read 3 27619 NULL
-+ocfs2_xattr_ibody_get_27642 ocfs2_xattr_ibody_get 0 27642 NULL nohasharray
-+nl80211_send_connect_result_27642 nl80211_send_connect_result 5-7 27642 &ocfs2_xattr_ibody_get_27642 nohasharray
-+read_flush_procfs_27642 read_flush_procfs 3 27642 &nl80211_send_connect_result_27642
-+add_new_gdb_27643 add_new_gdb 3 27643 NULL
-+ieee80211_build_probe_req_27660 ieee80211_build_probe_req 7-5 27660 NULL
-+cdrom_read_cdda_old_27664 cdrom_read_cdda_old 4 27664 NULL
-+qword_get_27670 qword_get 0 27670 NULL
-+ocfs2_extend_dir_27695 ocfs2_extend_dir 4 27695 NULL
-+l2cap_sar_segment_sdu_27701 l2cap_sar_segment_sdu 3 27701 NULL
-+evm_write_key_27715 evm_write_key 3 27715 NULL
-+ieee80211_if_fmt_dot11MeshGateAnnouncementProtocol_27722 ieee80211_if_fmt_dot11MeshGateAnnouncementProtocol 3 27722 NULL
-+pstore_write_27724 pstore_write 3 27724 NULL nohasharray
-+iwl_dbgfs_traffic_log_write_27724 iwl_dbgfs_traffic_log_write 3 27724 &pstore_write_27724 nohasharray
-+reg_w_buf_27724 reg_w_buf 3 27724 &iwl_dbgfs_traffic_log_write_27724
-+xfs_dir2_block_sfsize_27727 xfs_dir2_block_sfsize 0 27727 NULL
-+kcalloc_27770 kcalloc 1-2 27770 NULL
-+ttm_object_file_init_27804 ttm_object_file_init 2 27804 NULL
-+hpt374_read_freq_27828 hpt374_read_freq 0 27828 NULL
-+sys_listxattr_27833 sys_listxattr 3 27833 NULL nohasharray
-+init_header_complete_27833 init_header_complete 0 27833 &sys_listxattr_27833
-+read_profile_27859 read_profile 3 27859 NULL
-+sky2_pci_read16_27863 sky2_pci_read16 0 27863 NULL
-+mangle_packet_27864 mangle_packet 6-8 27864 NULL
-+paranoid_check_ec_hdr_27872 paranoid_check_ec_hdr 0 27872 NULL
-+unix_seqpacket_sendmsg_27893 unix_seqpacket_sendmsg 4 27893 NULL
-+check_mapped_name_27943 check_mapped_name 3 27943 NULL
-+sctp_make_abort_violation_27959 sctp_make_abort_violation 4 27959 NULL
-+tracing_clock_write_27961 tracing_clock_write 3 27961 NULL
-+device_register_27972 device_register 0 27972 NULL nohasharray
-+mic_rx_pkts_read_27972 mic_rx_pkts_read 3 27972 &device_register_27972
-+snd_rawmidi_write_28008 snd_rawmidi_write 3 28008 NULL
-+get_packet_pg_28023 get_packet_pg 4 28023 NULL
-+raid_status_28025 raid_status 4 28025 NULL
-+sctp_setsockopt_maxburst_28041 sctp_setsockopt_maxburst 3 28041 NULL
-+cx231xx_init_vbi_isoc_28053 cx231xx_init_vbi_isoc 3-2-4 28053 NULL
-+init_rs_non_canonical_28059 init_rs_non_canonical 1 28059 NULL
-+lpfc_idiag_mbxacc_read_28061 lpfc_idiag_mbxacc_read 3 28061 NULL
-+GetRecvByte_28082 GetRecvByte 0 28082 NULL
-+mmc_test_alloc_mem_28102 mmc_test_alloc_mem 3-2 28102 NULL
-+vgacon_adjust_height_28124 vgacon_adjust_height 2 28124 NULL
-+video_read_28148 video_read 3 28148 NULL
-+snd_midi_channel_alloc_set_28153 snd_midi_channel_alloc_set 1 28153 NULL
-+stats_dot11FCSErrorCount_read_28154 stats_dot11FCSErrorCount_read 3 28154 NULL
-+vread_28173 vread 0-3 28173 NULL
-+c4iw_reject_cr_28174 c4iw_reject_cr 3 28174 NULL
-+macvtap_get_user_28185 macvtap_get_user 4 28185 NULL
-+line6_alloc_sysex_buffer_28225 line6_alloc_sysex_buffer 4 28225 NULL
-+amd_nb_num_28228 amd_nb_num 0 28228 NULL
-+usemap_size_28281 usemap_size 0 28281 NULL
-+dma_map_sg_attrs_28289 dma_map_sg_attrs 0 28289 NULL
-+kstrtos16_from_user_28300 kstrtos16_from_user 2 28300 NULL
-+__hidp_send_ctrl_message_28303 __hidp_send_ctrl_message 4 28303 NULL
-+nouveau_compat_ioctl_28305 nouveau_compat_ioctl 2 28305 NULL
-+snd_pcm_oss_read_28317 snd_pcm_oss_read 3 28317 NULL
-+bm_entry_write_28338 bm_entry_write 3 28338 NULL
-+tcp_copy_to_iovec_28344 tcp_copy_to_iovec 3 28344 NULL
-+snapshot_write_28351 snapshot_write 3 28351 NULL
-+orig_node_del_if_28371 orig_node_del_if 2 28371 NULL
-+sys_writev_28384 sys_writev 3 28384 NULL
-+dlmfs_file_read_28385 dlmfs_file_read 3 28385 NULL
-+subdev_ioctl_28417 subdev_ioctl 2 28417 NULL
-+snd_emu10k1_efx_read_28452 snd_emu10k1_efx_read 2 28452 NULL
-+alloc_irq_cpu_rmap_28459 alloc_irq_cpu_rmap 1 28459 NULL
-+ocfs2_backup_super_blkno_28484 ocfs2_backup_super_blkno 0-2 28484 NULL
-+max_response_pages_28492 max_response_pages 0 28492 NULL
-+i2400m_tx_stats_read_28527 i2400m_tx_stats_read 3 28527 NULL
-+capinc_tty_write_28539 capinc_tty_write 3 28539 NULL
-+sel_read_policycap_28544 sel_read_policycap 3 28544 NULL
-+mptctl_getiocinfo_28545 mptctl_getiocinfo 2 28545 NULL nohasharray
-+run_delalloc_range_28545 run_delalloc_range 3-4 28545 &mptctl_getiocinfo_28545
-+sysfs_create_bin_file_28551 sysfs_create_bin_file 0 28551 NULL
-+b43legacy_debugfs_write_28556 b43legacy_debugfs_write 3 28556 NULL
-+cfg80211_send_rx_auth_28580 cfg80211_send_rx_auth 3 28580 NULL
-+oxygen_read32_28582 oxygen_read32 0 28582 NULL
-+ocfs2_read_dir_block_28587 ocfs2_read_dir_block 2 28587 NULL
-+extract_entropy_28604 extract_entropy 5-3 28604 NULL
-+kfifo_unused_28612 kfifo_unused 0 28612 NULL
-+snd_nm256_capture_copy_28622 snd_nm256_capture_copy 5-3 28622 NULL
-+_set_range_28627 _set_range 3 28627 NULL
-+setup_usemap_28636 setup_usemap 3-4 28636 NULL
-+qib_handle_6120_hwerrors_28642 qib_handle_6120_hwerrors 3 28642 NULL
-+read_nic_io_byte_28654 read_nic_io_byte 0 28654 NULL
-+btrfs_previous_item_28667 btrfs_previous_item 0 28667 NULL
-+blk_queue_resize_tags_28670 blk_queue_resize_tags 2 28670 NULL
-+posix_acl_from_xattr_28675 posix_acl_from_xattr 2 28675 NULL
-+__dev_alloc_skb_28681 __dev_alloc_skb 1 28681 NULL
-+nl80211_send_new_peer_candidate_28692 nl80211_send_new_peer_candidate 5 28692 NULL
-+balance_level_28707 balance_level 0 28707 NULL
-+spi_execute_28736 spi_execute 5 28736 NULL
-+snd_pcm_aio_write_28738 snd_pcm_aio_write 3 28738 NULL
-+cxio_init_resource_fifo_28764 cxio_init_resource_fifo 3 28764 NULL
-+rpc_pipe_generic_upcall_28766 rpc_pipe_generic_upcall 4 28766 NULL
-+atomic_inc_return_unchecked_28778 atomic_inc_return_unchecked 0 28778 NULL
-+ath6kl_get_num_reg_28780 ath6kl_get_num_reg 0 28780 NULL
-+dvb_net_sec_callback_28786 dvb_net_sec_callback 2 28786 NULL
-+sel_write_member_28800 sel_write_member 3 28800 NULL
-+cgroup_file_read_28804 cgroup_file_read 3 28804 NULL
-+iwl_dbgfs_rxon_filter_flags_read_28832 iwl_dbgfs_rxon_filter_flags_read 3 28832 NULL
-+vp_request_msix_vectors_28849 vp_request_msix_vectors 2 28849 NULL
-+paranoid_check_peb_vid_hdr_28866 paranoid_check_peb_vid_hdr 0 28866 NULL
-+ipv6_renew_options_28867 ipv6_renew_options 5 28867 NULL
-+max_io_len_target_boundary_28879 max_io_len_target_boundary 0-1 28879 NULL
-+iwl3945_sta_dbgfs_stats_table_read_28882 iwl3945_sta_dbgfs_stats_table_read 3 28882 NULL
-+packet_sendmsg_spkt_28885 packet_sendmsg_spkt 4 28885 NULL
-+ps_upsd_timeouts_read_28924 ps_upsd_timeouts_read 3 28924 NULL
-+iwl_dbgfs_sleep_level_override_write_28925 iwl_dbgfs_sleep_level_override_write 3 28925 NULL
-+push_rx_28939 push_rx 3 28939 NULL
-+alloc_sched_domains_28972 alloc_sched_domains 1 28972 NULL
-+hci_sock_setsockopt_28993 hci_sock_setsockopt 5 28993 NULL
-+bin_uuid_28999 bin_uuid 3 28999 NULL
-+rxrpc_sendmsg_29049 rxrpc_sendmsg 4 29049 NULL nohasharray
-+ProcessGetHostMibs_29049 ProcessGetHostMibs 0 29049 &rxrpc_sendmsg_29049
-+tso_fragment_29050 tso_fragment 3 29050 NULL
-+split_bvec_29058 split_bvec 5 29058 NULL
-+iso_packets_buffer_init_29061 iso_packets_buffer_init 3-4 29061 NULL
-+lpfc_idiag_extacc_drivr_get_29067 lpfc_idiag_extacc_drivr_get 0-3 29067 NULL
-+ieee80211_probereq_get_29069 ieee80211_probereq_get 4-6 29069 NULL
-+mark_extents_written_29082 mark_extents_written 2-3 29082 NULL
-+iwl_dbgfs_log_event_write_29088 iwl_dbgfs_log_event_write 3 29088 NULL
-+isdn_ppp_write_29109 isdn_ppp_write 4 29109 NULL
-+rbd_req_sync_op_29115 rbd_req_sync_op 10-9 29115 NULL
-+snprintf_29125 snprintf 0 29125 NULL
-+iov_shorten_29130 iov_shorten 0 29130 NULL
-+proc_scsi_write_29142 proc_scsi_write 3 29142 NULL
-+reshape_ring_29147 reshape_ring 2 29147 NULL
-+wusb_prf_256_29203 wusb_prf_256 7 29203 NULL
-+do_shrinker_shrink_29208 do_shrinker_shrink 0 29208 NULL
-+security_socket_recvmsg_29224 security_socket_recvmsg 0 29224 NULL nohasharray
-+iwl_dbgfs_temperature_read_29224 iwl_dbgfs_temperature_read 3 29224 &security_socket_recvmsg_29224
-+security_context_to_sid_core_29248 security_context_to_sid_core 2 29248 NULL
-+prism2_set_genericelement_29277 prism2_set_genericelement 3 29277 NULL
-+ext4_fiemap_29296 ext4_fiemap 4 29296 NULL
-+sn9c102_read_29305 sn9c102_read 3 29305 NULL
-+__alloc_ei_netdev_29338 __alloc_ei_netdev 1 29338 NULL
-+l2cap_sock_setsockopt_old_29346 l2cap_sock_setsockopt_old 4 29346 NULL
-+alloc_and_copy_ftrace_hash_29368 alloc_and_copy_ftrace_hash 1 29368 NULL
-+mempool_create_29437 mempool_create 1 29437 NULL
-+crypto_ahash_alignmask_29445 crypto_ahash_alignmask 0 29445 NULL
-+p9_client_prepare_req_29448 p9_client_prepare_req 3 29448 NULL
-+validate_scan_freqs_29462 validate_scan_freqs 0 29462 NULL
-+do_register_entry_29478 do_register_entry 4 29478 NULL
-+simple_strtoul_29480 simple_strtoul 0 29480 NULL
-+btmrvl_pscmd_write_29504 btmrvl_pscmd_write 3 29504 NULL
-+btrfs_file_extent_disk_bytenr_29505 btrfs_file_extent_disk_bytenr 0 29505 NULL
-+write_file_regidx_29517 write_file_regidx 3 29517 NULL
-+atk_debugfs_ggrp_read_29522 atk_debugfs_ggrp_read 3 29522 NULL
-+idetape_queue_rw_tail_29562 idetape_queue_rw_tail 3 29562 NULL
-+leaf_dealloc_29566 leaf_dealloc 3 29566 NULL
-+kvm_read_guest_virt_system_29569 kvm_read_guest_virt_system 4-2 29569 NULL
-+lbs_lowsnr_read_29571 lbs_lowsnr_read 3 29571 NULL
-+iwl_dbgfs_missed_beacon_write_29586 iwl_dbgfs_missed_beacon_write 3 29586 NULL
-+pvr2_hdw_report_unlocked_29589 pvr2_hdw_report_unlocked 4-0 29589 NULL
-+slots_per_page_29601 slots_per_page 0 29601 NULL
-+nla_get_u16_29624 nla_get_u16 0 29624 NULL
-+sctp_make_abort_user_29654 sctp_make_abort_user 3 29654 NULL
-+br_send_bpdu_29669 br_send_bpdu 3 29669 NULL
-+new_lockspace_29674 new_lockspace 2 29674 NULL
-+sisusb_write_mem_bulk_29678 sisusb_write_mem_bulk 4 29678 NULL
-+jbd2_journal_restart_29692 jbd2_journal_restart 0 29692 NULL
-+sd_alloc_ctl_entry_29708 sd_alloc_ctl_entry 1 29708 NULL
-+probes_write_29711 probes_write 3 29711 NULL
-+emi62_writememory_29731 emi62_writememory 4 29731 NULL
-+read_cis_cache_29735 read_cis_cache 4 29735 NULL
-+cxio_hal_init_resource_29771 cxio_hal_init_resource 7-6-2 29771 NULL nohasharray
-+ip_vs_conn_fill_param_sync_29771 ip_vs_conn_fill_param_sync 6 29771 &cxio_hal_init_resource_29771
-+cifs_ucs2_bytes_29790 cifs_ucs2_bytes 0 29790 NULL
-+dbAlloc_29794 dbAlloc 0 29794 NULL
-+tcp_sendpage_29829 tcp_sendpage 4-3 29829 NULL
-+__probe_kernel_write_29842 __probe_kernel_write 3 29842 NULL
-+count_partial_29850 count_partial 0 29850 NULL
-+extract_icmp6_fields_29870 extract_icmp6_fields 2 29870 NULL
-+ipv6_setsockopt_29871 ipv6_setsockopt 5 29871 NULL
-+scsi_end_request_29876 scsi_end_request 3 29876 NULL
-+crypto_aead_alignmask_29885 crypto_aead_alignmask 0 29885 NULL
-+nfc_targets_found_29886 nfc_targets_found 3 29886 NULL
-+pin_code_reply_29893 pin_code_reply 4 29893 NULL
-+write_file_queue_29922 write_file_queue 3 29922 NULL
-+ext4_xattr_set_acl_29930 ext4_xattr_set_acl 4 29930 NULL
-+__btrfs_getxattr_29947 __btrfs_getxattr 0 29947 NULL nohasharray
-+ipv6_recv_error_29947 ipv6_recv_error 3 29947 &__btrfs_getxattr_29947
-+xfrm_count_auth_supported_29957 xfrm_count_auth_supported 0 29957 NULL
-+irias_add_octseq_attrib_29983 irias_add_octseq_attrib 4 29983 NULL
-+alloc_netdev_mqs_30030 alloc_netdev_mqs 1 30030 NULL
-+scsi_vpd_inquiry_30040 scsi_vpd_inquiry 4 30040 NULL
-+wrmalt_30043 wrmalt 0 30043 NULL
-+cxgbi_ddp_reserve_30091 cxgbi_ddp_reserve 4 30091 NULL
-+snd_midi_channel_init_set_30092 snd_midi_channel_init_set 1 30092 NULL
-+tg3_run_loopback_30093 tg3_run_loopback 2 30093 NULL
-+skb_pagelen_30113 skb_pagelen 0 30113 NULL
-+spi_async_locked_30117 spi_async_locked 0 30117 NULL
-+_osd_req_sizeof_alist_header_30134 _osd_req_sizeof_alist_header 0 30134 NULL
-+recv_stream_30138 recv_stream 4 30138 NULL
-+u_memcpya_30139 u_memcpya 2-3 30139 NULL
-+i915_gem_object_get_pages_gtt_30154 i915_gem_object_get_pages_gtt 0 30154 NULL
-+i915_gem_object_wait_rendering_30173 i915_gem_object_wait_rendering 0 30173 NULL
-+cx25821_video_ioctl_30188 cx25821_video_ioctl 2 30188 NULL
-+mempool_create_page_pool_30189 mempool_create_page_pool 1 30189 NULL
-+usblp_ioctl_30203 usblp_ioctl 2 30203 NULL
-+preallocate_pcm_pages_30209 preallocate_pcm_pages 2 30209 NULL
-+read_4k_modal_eeprom_30212 read_4k_modal_eeprom 3 30212 NULL
-+snd_ac97_pcm_assign_30218 snd_ac97_pcm_assign 2 30218 NULL
-+dccp_manip_pkt_30229 dccp_manip_pkt 2 30229 NULL
-+rawv6_recvmsg_30265 rawv6_recvmsg 4 30265 NULL
-+isr_pci_pm_read_30271 isr_pci_pm_read 3 30271 NULL
-+compat_readv_30273 compat_readv 3 30273 NULL
-+skcipher_sendmsg_30290 skcipher_sendmsg 4 30290 NULL
-+ext4_acl_from_disk_30320 ext4_acl_from_disk 2 30320 NULL
-+resource_from_user_30341 resource_from_user 3 30341 NULL
-+kstrtou32_from_user_30361 kstrtou32_from_user 2 30361 NULL
-+inet_getid_30365 inet_getid 2 30365 NULL
-+sys_get_mempolicy_30379 sys_get_mempolicy 3 30379 NULL
-+blkdev_issue_zeroout_30392 blkdev_issue_zeroout 0 30392 NULL
-+c4iw_init_resource_30393 c4iw_init_resource 3-2 30393 NULL
-+enable_write_30456 enable_write 3 30456 NULL
-+urandom_read_30462 urandom_read 3 30462 NULL
-+zoran_ioctl_30465 zoran_ioctl 2 30465 NULL
-+i2c_ctrl_read_30467 i2c_ctrl_read 0 30467 NULL
-+i915_mutex_lock_interruptible_30474 i915_mutex_lock_interruptible 0 30474 NULL
-+adu_write_30487 adu_write 3 30487 NULL
-+dtim_interval_write_30489 dtim_interval_write 3 30489 NULL
-+nouveau_vm_new_30495 nouveau_vm_new 3-2 30495 NULL
-+set_config_30526 set_config 0 30526 NULL
-+disk_expand_part_tbl_30561 disk_expand_part_tbl 2 30561 NULL
-+blk_init_tags_30592 blk_init_tags 1 30592 NULL
-+sgl_map_user_pages_30610 sgl_map_user_pages 2 30610 NULL
-+macvtap_sendmsg_30629 macvtap_sendmsg 4 30629 NULL
-+compat_raw_setsockopt_30634 compat_raw_setsockopt 5 30634 NULL
-+nfsd_nrpools_30651 nfsd_nrpools 0 30651 NULL
-+jffs2_flash_read_30667 jffs2_flash_read 0 30667 NULL
-+dccp_setsockopt_ccid_30701 dccp_setsockopt_ccid 4 30701 NULL
-+wled_proc_write_30709 wled_proc_write 3 30709 NULL
-+lbs_wrbbp_write_30712 lbs_wrbbp_write 3 30712 NULL
-+l2cap_build_conf_rsp_30719 l2cap_build_conf_rsp 0 30719 NULL
-+lbs_debugfs_read_30721 lbs_debugfs_read 3 30721 NULL
-+snd_nm256_playback_silence_30727 snd_nm256_playback_silence 4-3 30727 NULL
-+ath6kl_wmi_send_action_cmd_30735 ath6kl_wmi_send_action_cmd 6 30735 NULL
-+fuse_conn_limit_write_30777 fuse_conn_limit_write 3 30777 NULL nohasharray
-+tcf_csum_ipv4_udp_30777 tcf_csum_ipv4_udp 4 30777 &fuse_conn_limit_write_30777
-+smk_read_doi_30813 smk_read_doi 3 30813 NULL
-+get_kobj_path_length_30831 get_kobj_path_length 0 30831 NULL
-+sctp_setsockopt_auth_chunk_30843 sctp_setsockopt_auth_chunk 3 30843 NULL
-+ieee80211_if_fmt_dropped_frames_no_route_30884 ieee80211_if_fmt_dropped_frames_no_route 3 30884 NULL
-+pn_recvmsg_30887 pn_recvmsg 4 30887 NULL
-+f1x_match_to_this_node_30888 f1x_match_to_this_node 3 30888 NULL
-+get_params_30899 get_params 0 30899 NULL
-+fc_host_post_vendor_event_30903 fc_host_post_vendor_event 3 30903 NULL
-+sctp_setsockopt_rtoinfo_30941 sctp_setsockopt_rtoinfo 3 30941 NULL
-+tty_insert_flip_string_flags_30969 tty_insert_flip_string_flags 4 30969 NULL
-+huge_page_mask_30981 huge_page_mask 0 30981 NULL
-+nlmsg_put_answer_30988 nlmsg_put_answer 4 30988 NULL
-+i2400mu_rx_size_grow_30989 i2400mu_rx_size_grow 0 30989 NULL
-+lbs_host_sleep_read_31013 lbs_host_sleep_read 3 31013 NULL
-+compat_sys_mq_timedsend_31060 compat_sys_mq_timedsend 3 31060 NULL
-+lbs_failcount_read_31063 lbs_failcount_read 3 31063 NULL
-+find_next_bit_le_31064 find_next_bit_le 0 31064 NULL
-+sys_mincore_31079 sys_mincore 2-1 31079 NULL
-+scb_status_31084 scb_status 0 31084 NULL
-+sctp_setsockopt_context_31091 sctp_setsockopt_context 3 31091 NULL
-+find_mergeable_31093 find_mergeable 2 31093 NULL
-+compat_sys_get_mempolicy_31109 compat_sys_get_mempolicy 3 31109 NULL
-+depth_read_31112 depth_read 3 31112 NULL
-+kvm_mmu_pte_write_31120 kvm_mmu_pte_write 2-4 31120 NULL
-+ssb_read16_31139 ssb_read16 0 31139 NULL
-+kimage_normal_alloc_31140 kimage_normal_alloc 3 31140 NULL
-+size_inside_page_31141 size_inside_page 0 31141 NULL
-+w9966_v4l_read_31148 w9966_v4l_read 3 31148 NULL
-+ch_do_scsi_31171 ch_do_scsi 4 31171 NULL
-+input_mt_init_slots_31183 input_mt_init_slots 2 31183 NULL
-+r592_read_fifo_pio_31198 r592_read_fifo_pio 3 31198 NULL
-+cpumask_weight_31215 cpumask_weight 0 31215 NULL
-+__read_reg_31216 __read_reg 0 31216 NULL
-+atm_get_addr_31221 atm_get_addr 3 31221 NULL
-+tcp_recvmsg_31238 tcp_recvmsg 4 31238 NULL
-+cyy_readb_31240 cyy_readb 0 31240 NULL
-+_create_sg_bios_31244 _create_sg_bios 4 31244 NULL
-+ieee80211_if_read_last_beacon_31257 ieee80211_if_read_last_beacon 3 31257 NULL
-+ceph_copy_page_vector_to_user_31270 ceph_copy_page_vector_to_user 3-4 31270 NULL
-+uvc_simplify_fraction_31303 uvc_simplify_fraction 3 31303 NULL
-+sisusbcon_scroll_31315 sisusbcon_scroll 5-2-3 31315 NULL
-+command_file_write_31318 command_file_write 3 31318 NULL
-+hwerr_crcbits_31334 hwerr_crcbits 4 31334 NULL
-+rbd_do_op_31366 rbd_do_op 8-9 31366 NULL
-+xprt_rdma_allocate_31372 xprt_rdma_allocate 2 31372 NULL nohasharray
-+buffDnld_31372 buffDnld 0 31372 &xprt_rdma_allocate_31372
-+trace_parser_get_init_31379 trace_parser_get_init 2 31379 NULL
-+inb_31388 inb 0 31388 NULL
-+key_ifindex_read_31411 key_ifindex_read 3 31411 NULL
-+mcs7830_set_reg_31413 mcs7830_set_reg 3 31413 NULL nohasharray
-+i915_gem_object_put_fence_31413 i915_gem_object_put_fence 0 31413 &mcs7830_set_reg_31413
-+TSS_checkhmac1_31429 TSS_checkhmac1 5 31429 NULL
-+snd_aw2_saa7146_get_hw_ptr_capture_31431 snd_aw2_saa7146_get_hw_ptr_capture 0 31431 NULL
-+opera1_xilinx_rw_31453 opera1_xilinx_rw 5 31453 NULL
-+xfs_btree_get_numrecs_31477 xfs_btree_get_numrecs 0 31477 NULL
-+__ext4_journal_get_write_access_31482 __ext4_journal_get_write_access 0 31482 NULL
-+alg_setkey_31485 alg_setkey 3 31485 NULL
-+rds_message_map_pages_31487 rds_message_map_pages 2 31487 NULL
-+qsfp_2_read_31491 qsfp_2_read 3 31491 NULL
-+__alloc_bootmem_31498 __alloc_bootmem 1 31498 NULL
-+hidraw_write_31536 hidraw_write 3 31536 NULL
-+normalize_31566 normalize 0-1-2 31566 NULL
-+inet6_ifaddr_msgsize_31568 inet6_ifaddr_msgsize 0 31568 NULL
-+osst_write_31581 osst_write 3 31581 NULL
-+iwl_dbgfs_ucode_tx_stats_read_31611 iwl_dbgfs_ucode_tx_stats_read 3 31611 NULL
-+arvo_sysfs_read_31617 arvo_sysfs_read 6 31617 NULL
-+iwl_legacy_dbgfs_traffic_log_read_31625 iwl_legacy_dbgfs_traffic_log_read 3 31625 NULL
-+videobuf_read_one_31637 videobuf_read_one 3 31637 NULL
-+pod_alloc_sysex_buffer_31651 pod_alloc_sysex_buffer 3 31651 NULL
-+xfer_secondary_pool_31661 xfer_secondary_pool 2 31661 NULL
-+__lgread_31668 __lgread 4 31668 NULL
-+fst_recover_rx_error_31687 fst_recover_rx_error 3 31687 NULL
-+handle_interrupt_31689 handle_interrupt 0 31689 NULL
-+iwl_legacy_dbgfs_chain_noise_read_31692 iwl_legacy_dbgfs_chain_noise_read 3 31692 NULL
-+audit_log_n_string_31705 audit_log_n_string 3 31705 NULL
-+sctp_make_asconf_ack_31726 sctp_make_asconf_ack 3 31726 NULL
-+utf16s_to_utf8s_31735 utf16s_to_utf8s 0 31735 NULL
-+input_abs_get_max_31742 input_abs_get_max 0 31742 NULL nohasharray
-+NCR_700_change_queue_depth_31742 NCR_700_change_queue_depth 2 31742 &input_abs_get_max_31742
-+bcm_char_read_31750 bcm_char_read 3 31750 NULL
-+snd_seq_device_new_31753 snd_seq_device_new 4 31753 NULL
-+usblp_cache_device_id_string_31790 usblp_cache_device_id_string 0 31790 NULL
-+get_count_order_31800 get_count_order 0 31800 NULL
-+ecryptfs_send_message_locked_31801 ecryptfs_send_message_locked 2 31801 NULL
-+isr_rx_procs_read_31804 isr_rx_procs_read 3 31804 NULL
-+strnlen_user_31815 strnlen_user 0-2 31815 NULL
-+sta_last_signal_read_31818 sta_last_signal_read 3 31818 NULL
-+iwl_dbgfs_disable_ht40_write_31876 iwl_dbgfs_disable_ht40_write 3 31876 NULL
-+ddb_output_write_31902 ddb_output_write 3-0 31902 NULL
-+xattr_permission_31907 xattr_permission 0 31907 NULL
-+kmem_alloc_31920 kmem_alloc 1 31920 NULL
-+read_mem_31942 read_mem 3 31942 NULL nohasharray
-+iov_iter_copy_from_user_31942 iov_iter_copy_from_user 4-0 31942 &read_mem_31942
-+vb2_write_31948 vb2_write 3 31948 NULL
-+pvr2_ctrl_get_valname_31951 pvr2_ctrl_get_valname 4 31951 NULL
-+copy_from_user_toio_31966 copy_from_user_toio 3 31966 NULL
-+vx_read_status_31982 vx_read_status 0 31982 NULL
-+find_next_zero_bit_31990 find_next_zero_bit 0 31990 NULL
-+sysfs_create_file_31996 sysfs_create_file 0 31996 NULL
-+calc_hmac_32010 calc_hmac 3 32010 NULL
-+aead_len_32021 aead_len 0 32021 NULL
-+ocfs2_remove_extent_32032 ocfs2_remove_extent 4-3 32032 NULL
-+posix_acl_set_32037 posix_acl_set 4 32037 NULL
-+sys_sched_setaffinity_32046 sys_sched_setaffinity 2 32046 NULL
-+proc_scsi_devinfo_write_32064 proc_scsi_devinfo_write 3 32064 NULL
-+nlmsg_put_32069 nlmsg_put 5 32069 NULL
-+cfg80211_send_unprot_deauth_32080 cfg80211_send_unprot_deauth 3 32080 NULL
-+ath6kl_fwlog_read_32101 ath6kl_fwlog_read 3 32101 NULL
-+set_discoverable_32102 set_discoverable 4 32102 NULL
-+disk_status_32120 disk_status 4 32120 NULL
-+kobject_add_internal_32133 kobject_add_internal 0 32133 NULL
-+alloc_tx_32143 alloc_tx 2 32143 NULL
-+venus_link_32165 venus_link 5 32165 NULL
-+drbd_new_dev_size_32171 drbd_new_dev_size 0 32171 NULL
-+do_writepages_32173 do_writepages 0 32173 NULL
-+ubi_wl_scrub_peb_32196 ubi_wl_scrub_peb 0 32196 NULL
-+wusb_ccm_mac_32199 wusb_ccm_mac 7 32199 NULL
-+riva_get_cmap_len_32218 riva_get_cmap_len 0 32218 NULL
-+caif_seqpkt_recvmsg_32241 caif_seqpkt_recvmsg 4 32241 NULL
-+lbs_lowrssi_read_32242 lbs_lowrssi_read 3 32242 NULL
-+ocfs2_xattr_find_entry_32260 ocfs2_xattr_find_entry 0 32260 NULL
-+l3_alloc_skb_32289 l3_alloc_skb 1 32289 NULL
-+cas_calc_tabort_32316 cas_calc_tabort 0 32316 NULL
-+nl80211_send_mlme_event_32337 nl80211_send_mlme_event 4 32337 NULL
-+t4_alloc_mem_32342 t4_alloc_mem 1 32342 NULL
-+dispatch_ioctl_32357 dispatch_ioctl 2 32357 NULL nohasharray
-+rx_streaming_always_write_32357 rx_streaming_always_write 3 32357 &dispatch_ioctl_32357
-+f1x_translate_sysaddr_to_cs_32359 f1x_translate_sysaddr_to_cs 2 32359 NULL
-+sel_read_initcon_32362 sel_read_initcon 3 32362 NULL
-+send_mpa_reply_32372 send_mpa_reply 3 32372 NULL
-+variax_set_raw2_32374 variax_set_raw2 4 32374 NULL
-+usbtmc_read_32377 usbtmc_read 3 32377 NULL
-+xfs_iext_add_indirect_multi_32400 xfs_iext_add_indirect_multi 3 32400 NULL
-+hid_input_report_32458 hid_input_report 4 32458 NULL
-+fill_readbuf_32464 fill_readbuf 3 32464 NULL
-+ieee80211_fill_mesh_addresses_32465 ieee80211_fill_mesh_addresses 0 32465 NULL
-+ide_driver_proc_write_32493 ide_driver_proc_write 3 32493 NULL
-+ctrl_std_val_to_sym_32516 ctrl_std_val_to_sym 5 32516 NULL
-+qsfp_read_32522 qsfp_read 0-2-4 32522 NULL
-+ilo_read_32531 ilo_read 3 32531 NULL
-+ieee80211_if_read_estab_plinks_32533 ieee80211_if_read_estab_plinks 3 32533 NULL
-+format_devstat_counter_32550 format_devstat_counter 3 32550 NULL
-+aes_encrypt_fail_read_32562 aes_encrypt_fail_read 3 32562 NULL
-+mem_swapout_entry_32586 mem_swapout_entry 3 32586 NULL
-+read_file_beacon_32595 read_file_beacon 3 32595 NULL
-+ieee80211_if_read_dropped_frames_congestion_32603 ieee80211_if_read_dropped_frames_congestion 3 32603 NULL
-+sys_set_mempolicy_32608 sys_set_mempolicy 3 32608 NULL
-+__iter_shared_inline_ref_32610 __iter_shared_inline_ref 0 32610 NULL
-+irda_recvmsg_dgram_32631 irda_recvmsg_dgram 4 32631 NULL
-+cfg80211_roamed_32632 cfg80211_roamed 5-7 32632 NULL
-+ieee80211_hdrlen_32637 ieee80211_hdrlen 0 32637 NULL
-+kvmalloc_32646 kvmalloc 1 32646 NULL
-+ib_sg_dma_len_32649 ib_sg_dma_len 0 32649 NULL
-+generic_readlink_32654 generic_readlink 3 32654 NULL
-+move_addr_to_kernel_32673 move_addr_to_kernel 2 32673 NULL
-+apei_res_add_32674 apei_res_add 0 32674 NULL
-+rt2x00debug_read_queue_dump_32712 rt2x00debug_read_queue_dump 3 32712 NULL
-+slhc_remember_32741 slhc_remember 3-0 32741 NULL
-+megasas_change_queue_depth_32747 megasas_change_queue_depth 2 32747 NULL
-+stats_read_ul_32751 stats_read_ul 3 32751 NULL
-+write_file_disable_ani_32761 write_file_disable_ani 3 32761 NULL
-+sctp_tsnmap_grow_32784 sctp_tsnmap_grow 2 32784 NULL
-+firmwareUpload_32794 firmwareUpload 3 32794 NULL
-+get_register_page_interruptible_32809 get_register_page_interruptible 5 32809 NULL
-+orig_node_add_if_32833 orig_node_add_if 2 32833 NULL
-+nlmsg_validate_32861 nlmsg_validate 2 32861 NULL
-+new_tape_buffer_32866 new_tape_buffer 2 32866 NULL
-+blkio_fill_stat_32874 blkio_fill_stat 2 32874 NULL
-+vp702x_usb_inout_cmd_32884 vp702x_usb_inout_cmd 4-6 32884 NULL
-+zlib_inflate_workspacesize_32927 zlib_inflate_workspacesize 0 32927 NULL
-+compat_filldir_32999 compat_filldir 3 32999 NULL
-+br_multicast_set_hash_max_33012 br_multicast_set_hash_max 2 33012 NULL
-+xfrm_mapping_msgsize_33044 xfrm_mapping_msgsize 0 33044 NULL
-+ebt_compat_match_offset_33053 ebt_compat_match_offset 0-2 33053 NULL
-+stats_dot11RTSSuccessCount_read_33065 stats_dot11RTSSuccessCount_read 3 33065 NULL
-+sel_read_checkreqprot_33068 sel_read_checkreqprot 3 33068 NULL
-+acl_permission_check_33083 acl_permission_check 0 33083 NULL
-+ieee80211_fragment_33112 ieee80211_fragment 4 33112 NULL
-+fb_sys_write_33130 fb_sys_write 3 33130 NULL
-+nfs4_init_slot_table_33152 nfs4_init_slot_table 2 33152 NULL
-+tun_get_user_33178 tun_get_user 3 33178 NULL
-+dataflash_read_fact_otp_33204 dataflash_read_fact_otp 3-2 33204 NULL
-+pp_read_33210 pp_read 3 33210 NULL
-+xfs_file_aio_write_33234 xfs_file_aio_write 4 33234 NULL
-+__vb2_wait_for_done_vb_33246 __vb2_wait_for_done_vb 0 33246 NULL
-+snd_pcm_plug_client_size_33267 snd_pcm_plug_client_size 0-2 33267 NULL
-+cachefiles_cook_key_33274 cachefiles_cook_key 2 33274 NULL
-+i915_gem_object_flush_fence_33304 i915_gem_object_flush_fence 0 33304 NULL
-+mcs7830_get_reg_33308 mcs7830_get_reg 3 33308 NULL
-+ceph_msgpool_init_33312 ceph_msgpool_init 3 33312 NULL
-+vx_send_irq_dsp_33329 vx_send_irq_dsp 0 33329 NULL
-+gsm_mux_rx_netchar_33336 gsm_mux_rx_netchar 3 33336 NULL
-+joydev_ioctl_33343 joydev_ioctl 2 33343 NULL
-+create_xattr_datum_33356 create_xattr_datum 5 33356 NULL
-+read_file_regidx_33370 read_file_regidx 3 33370 NULL
-+ceph_osdc_writepages_33375 ceph_osdc_writepages 5 33375 NULL
-+sctp_ulpevent_new_33377 sctp_ulpevent_new 1 33377 NULL
-+ocfs2_quota_read_33382 ocfs2_quota_read 5 33382 NULL
-+ieee80211_if_read_dropped_frames_no_route_33383 ieee80211_if_read_dropped_frames_no_route 3 33383 NULL
-+scsi_varlen_cdb_length_33385 scsi_varlen_cdb_length 0 33385 NULL
-+ocfs2_allocate_unwritten_extents_33394 ocfs2_allocate_unwritten_extents 2-3 33394 NULL
-+snd_pcm_capture_ioctl1_33408 snd_pcm_capture_ioctl1 0 33408 NULL
-+create_entry_33479 create_entry 2 33479 NULL
-+ip_setsockopt_33487 ip_setsockopt 5 33487 NULL
-+ol_dqblk_chunk_off_33489 ol_dqblk_chunk_off 2 33489 NULL
-+res_counter_read_33499 res_counter_read 4 33499 NULL
-+fb_read_33506 fb_read 3 33506 NULL
-+ahash_setkey_unaligned_33521 ahash_setkey_unaligned 3 33521 NULL
-+nes_alloc_fast_reg_page_list_33523 nes_alloc_fast_reg_page_list 2 33523 NULL
-+tomoyo_read_self_33539 tomoyo_read_self 3 33539 NULL
-+dup_array_33551 dup_array 3 33551 NULL
-+solo_enc_read_33553 solo_enc_read 3 33553 NULL
-+scsi_execute_33596 scsi_execute 5 33596 NULL
-+comedi_buf_write_n_allocated_33604 comedi_buf_write_n_allocated 0 33604 NULL
-+ip6_find_1stfragopt_33608 ip6_find_1stfragopt 0 33608 NULL nohasharray
-+xt_compat_target_offset_33608 xt_compat_target_offset 0 33608 &ip6_find_1stfragopt_33608
-+inw_p_33668 inw_p 0 33668 NULL
-+arp_hdr_len_33671 arp_hdr_len 0 33671 NULL
-+rbd_alloc_coll_33678 rbd_alloc_coll 1 33678 NULL
-+sys_keyctl_33708 sys_keyctl 4 33708 NULL nohasharray
-+netlink_sendmsg_33708 netlink_sendmsg 4 33708 &sys_keyctl_33708
-+get_free_de_33714 get_free_de 2 33714 NULL
-+pvr2_stream_buffer_count_33719 pvr2_stream_buffer_count 2 33719 NULL
-+ocfs2_extent_map_get_blocks_33720 ocfs2_extent_map_get_blocks 2 33720 NULL
-+__mutex_lock_interruptible_slowpath_33735 __mutex_lock_interruptible_slowpath 0 33735 NULL
-+Read_hfc_33755 Read_hfc 0 33755 NULL
-+hashtab_create_33769 hashtab_create 3 33769 NULL
-+midibuf_message_length_33770 midibuf_message_length 0 33770 NULL
-+if_sdio_read_rx_len_33800 if_sdio_read_rx_len 0 33800 NULL
-+sky2_rx_pad_33819 sky2_rx_pad 0 33819 NULL nohasharray
-+filter_write_33819 filter_write 3 33819 &sky2_rx_pad_33819
-+ext4_journal_extend_33835 ext4_journal_extend 0 33835 NULL
-+get_user_pages_33908 get_user_pages 0 33908 NULL
-+queue_logical_block_size_33918 queue_logical_block_size 0 33918 NULL
-+max8649_read_device_33930 max8649_read_device 3 33930 NULL
-+sel_read_avc_cache_threshold_33942 sel_read_avc_cache_threshold 3 33942 NULL
-+lpfc_idiag_ctlacc_read_33943 lpfc_idiag_ctlacc_read 3 33943 NULL
-+read_file_tgt_rx_stats_33944 read_file_tgt_rx_stats 3 33944 NULL
-+vga_switcheroo_debugfs_write_33984 vga_switcheroo_debugfs_write 3 33984 NULL
-+select_size_34004 select_size 0 34004 NULL
-+lbs_lowrssi_write_34025 lbs_lowrssi_write 3 34025 NULL
-+ppp_write_34034 ppp_write 3 34034 NULL
-+tty_insert_flip_string_34042 tty_insert_flip_string 3-0 34042 NULL
-+islpci_mgt_transmit_34133 islpci_mgt_transmit 5 34133 NULL
-+mtu2blksize_34139 mtu2blksize 0 34139 NULL
-+skb_to_sgvec_34171 skb_to_sgvec 0 34171 NULL
-+iwl_legacy_dbgfs_tx_queue_read_34192 iwl_legacy_dbgfs_tx_queue_read 3 34192 NULL
-+mtd_write_34207 mtd_write 3 34207 NULL
-+setup_nodes_for_search_34248 setup_nodes_for_search 0 34248 NULL
-+bl_pipe_downcall_34264 bl_pipe_downcall 3 34264 NULL
-+rw_copy_check_uvector_34271 rw_copy_check_uvector 3-0 34271 NULL
-+device_private_init_34279 device_private_init 0 34279 NULL
-+zone_spanned_pages_in_node_34299 zone_spanned_pages_in_node 0 34299 NULL
-+pcpu_need_to_extend_34326 pcpu_need_to_extend 0 34326 NULL nohasharray
-+iov_iter_single_seg_count_34326 iov_iter_single_seg_count 0 34326 &pcpu_need_to_extend_34326
-+crypto_ablkcipher_ivsize_34363 crypto_ablkcipher_ivsize 0 34363 NULL
-+rngapi_reset_34366 rngapi_reset 3 34366 NULL nohasharray
-+p54_alloc_skb_34366 p54_alloc_skb 3 34366 &rngapi_reset_34366
-+ea_read_34378 ea_read 0 34378 NULL
-+av7110_vbi_write_34384 av7110_vbi_write 3 34384 NULL
-+usbvision_v4l2_read_34386 usbvision_v4l2_read 3 34386 NULL
-+read_rbu_image_type_34387 read_rbu_image_type 6 34387 NULL
-+ivtv_read_pos_34400 ivtv_read_pos 3 34400 NULL
-+sctp_make_heartbeat_ack_34411 sctp_make_heartbeat_ack 4 34411 NULL
-+nl80211_send_disassoc_34424 nl80211_send_disassoc 4 34424 NULL
-+usbtest_alloc_urb_34446 usbtest_alloc_urb 3-5 34446 NULL
-+sctp_make_abort_34459 sctp_make_abort 3 34459 NULL
-+mwifiex_regrdwr_read_34472 mwifiex_regrdwr_read 3 34472 NULL
-+line6_dumpreq_init_34473 line6_dumpreq_init 3 34473 NULL
-+skcipher_sndbuf_34476 skcipher_sndbuf 0 34476 NULL
-+i2o_parm_field_get_34477 i2o_parm_field_get 5 34477 NULL
-+security_inode_permission_34488 security_inode_permission 0 34488 NULL
-+alloc_buf_34532 alloc_buf 1 34532 NULL
-+tracing_stats_read_34537 tracing_stats_read 3 34537 NULL
-+hugetlbfs_read_actor_34547 hugetlbfs_read_actor 2-5-4-0 34547 NULL
-+dbBackSplit_34561 dbBackSplit 0 34561 NULL
-+alloc_ieee80211_rsl_34564 alloc_ieee80211_rsl 1 34564 NULL
-+velocity_rx_copy_34583 velocity_rx_copy 2 34583 NULL
-+init_send_hfcd_34586 init_send_hfcd 1 34586 NULL
-+inet6_ifla6_size_34591 inet6_ifla6_size 0 34591 NULL
-+iwl_legacy_dbgfs_disable_ht40_write_34605 iwl_legacy_dbgfs_disable_ht40_write 3 34605 NULL
-+__jffs2_ref_totlen_34609 __jffs2_ref_totlen 0 34609 NULL
-+__cfg80211_disconnected_34622 __cfg80211_disconnected 3 34622 NULL
-+cnic_alloc_dma_34641 cnic_alloc_dma 3 34641 NULL
-+isr_fiqs_read_34687 isr_fiqs_read 3 34687 NULL
-+ieee80211_if_read_num_sta_ps_34722 ieee80211_if_read_num_sta_ps 3 34722 NULL
-+platform_list_read_file_34734 platform_list_read_file 3 34734 NULL
-+fib_rule_nlmsg_size_34736 fib_rule_nlmsg_size 0 34736 NULL nohasharray
-+reg_w_ixbuf_34736 reg_w_ixbuf 4 34736 &fib_rule_nlmsg_size_34736
-+sctp_make_datafrag_empty_34737 sctp_make_datafrag_empty 3 34737 NULL
-+solos_param_store_34755 solos_param_store 4 34755 NULL
-+device_add_34766 device_add 0 34766 NULL
-+qib_cdev_init_34778 qib_cdev_init 1 34778 NULL
-+tipc_log_resize_34803 tipc_log_resize 1 34803 NULL
-+drbd_get_max_capacity_34804 drbd_get_max_capacity 0 34804 NULL
-+sep_prepare_input_dma_table_34832 sep_prepare_input_dma_table 3-2 34832 NULL
-+b43_debugfs_write_34838 b43_debugfs_write 3 34838 NULL
-+bl_mark_for_commit_34852 bl_mark_for_commit 2-3 34852 NULL
-+acpi_system_write_wakeup_device_34853 acpi_system_write_wakeup_device 3 34853 NULL
-+usb_serial_generic_prepare_write_buffer_34857 usb_serial_generic_prepare_write_buffer 3 34857 NULL
-+ieee80211_if_write_34894 ieee80211_if_write 3 34894 NULL
-+write_msg_34916 write_msg 3 34916 NULL
-+iwl_dbgfs_force_reset_write_34930 iwl_dbgfs_force_reset_write 3 34930 NULL
-+snd_info_entry_read_34938 snd_info_entry_read 3 34938 NULL
-+i2c_transfer_34958 i2c_transfer 0 34958 NULL nohasharray
-+skb_gro_header_slow_34958 skb_gro_header_slow 2 34958 &i2c_transfer_34958
-+Realloc_34961 Realloc 2 34961 NULL
-+iwl_legacy_dbgfs_missed_beacon_write_34966 iwl_legacy_dbgfs_missed_beacon_write 3 34966 NULL
-+l2cap_skbuff_fromiovec_35003 l2cap_skbuff_fromiovec 4-3 35003 NULL
-+sisusb_copy_memory_35016 sisusb_copy_memory 4 35016 NULL
-+generic_file_llseek_size_35024 generic_file_llseek_size 2 35024 NULL
-+paranoid_check_peb_ec_hdr_35027 paranoid_check_peb_ec_hdr 0 35027 NULL
-+coda_psdev_read_35029 coda_psdev_read 3 35029 NULL
-+btmrvl_gpiogap_write_35053 btmrvl_gpiogap_write 3 35053 NULL
-+ext4_split_unwritten_extents_35063 ext4_split_unwritten_extents 0 35063 NULL
-+store_ifalias_35088 store_ifalias 4 35088 NULL
-+__kfifo_uint_must_check_helper_35097 __kfifo_uint_must_check_helper 0-1 35097 NULL
-+capi_write_35104 capi_write 3 35104 NULL
-+ide_settings_proc_write_35110 ide_settings_proc_write 3 35110 NULL
-+ceph_osdc_start_request_35122 ceph_osdc_start_request 0 35122 NULL
-+gntdev_alloc_map_35145 gntdev_alloc_map 2 35145 NULL
-+iscsi_conn_setup_35159 iscsi_conn_setup 2 35159 NULL
-+ieee80211_if_read_bssid_35161 ieee80211_if_read_bssid 3 35161 NULL
-+bat_ogm_aggr_packet_35202 bat_ogm_aggr_packet 3 35202 NULL
-+unix_stream_recvmsg_35210 unix_stream_recvmsg 4 35210 NULL
-+_osd_req_alist_elem_size_35216 _osd_req_alist_elem_size 0-2 35216 NULL
-+security_key_getsecurity_35218 security_key_getsecurity 0 35218 NULL nohasharray
-+striped_read_35218 striped_read 2-8-0-3 35218 &security_key_getsecurity_35218
-+set_fd_set_35249 set_fd_set 1 35249 NULL
-+ioapic_setup_resources_35255 ioapic_setup_resources 1 35255 NULL
-+jbd2_journal_get_write_access_35263 jbd2_journal_get_write_access 0 35263 NULL
-+dma_show_regs_35266 dma_show_regs 3 35266 NULL
-+irda_recvmsg_stream_35280 irda_recvmsg_stream 4 35280 NULL
-+i2o_block_end_request_35282 i2o_block_end_request 3 35282 NULL
-+isr_rx_rdys_read_35283 isr_rx_rdys_read 3 35283 NULL
-+__btrfs_buffered_write_35311 __btrfs_buffered_write 3 35311 NULL
-+tracing_read_pipe_35312 tracing_read_pipe 3 35312 NULL
-+sys_setsockopt_35320 sys_setsockopt 5 35320 NULL
-+new_bind_ctl_35324 new_bind_ctl 2 35324 NULL
-+pskb_network_may_pull_35336 pskb_network_may_pull 2 35336 NULL
-+mlx4_alloc_hwq_res_35339 mlx4_alloc_hwq_res 3 35339 NULL
-+hpi_alloc_control_cache_35351 hpi_alloc_control_cache 1 35351 NULL
-+compat_filldir64_35354 compat_filldir64 3 35354 NULL
-+tt_update_orig_35361 tt_update_orig 4 35361 NULL
-+read_kmem_35372 read_kmem 3 35372 NULL
-+rawv6_send_hdrinc_35425 rawv6_send_hdrinc 3 35425 NULL
-+buffer_to_user_35439 buffer_to_user 3 35439 NULL
-+i915_wedged_read_35474 i915_wedged_read 3 35474 NULL
-+async_setkey_35521 async_setkey 3 35521 NULL
-+__filemap_fdatawrite_range_35528 __filemap_fdatawrite_range 0 35528 NULL
-+iwl_dbgfs_bt_traffic_read_35534 iwl_dbgfs_bt_traffic_read 3 35534 NULL
-+rxpipe_tx_xfr_host_int_trig_rx_data_read_35538 rxpipe_tx_xfr_host_int_trig_rx_data_read 3 35538 NULL
-+ibnl_put_attr_35541 ibnl_put_attr 3 35541 NULL
-+ieee80211_if_write_smps_35550 ieee80211_if_write_smps 3 35550 NULL
-+vb2_dqbuf_35559 vb2_dqbuf 0 35559 NULL
-+sysfs_create_subdir_35567 sysfs_create_subdir 0 35567 NULL
-+ext2_acl_from_disk_35580 ext2_acl_from_disk 2 35580 NULL
-+ReadZReg_35604 ReadZReg 0 35604 NULL
-+rbd_req_sync_read_35615 rbd_req_sync_read 6-5 35615 NULL
-+kernel_readv_35617 kernel_readv 3 35617 NULL
-+scrub_stripe_35637 scrub_stripe 4-3 35637 NULL
-+spi_register_board_info_35651 spi_register_board_info 2 35651 NULL
-+store_debug_level_35652 store_debug_level 3 35652 NULL
-+rdmaltWithLock_35669 rdmaltWithLock 0 35669 NULL
-+compat_sys_kexec_load_35674 compat_sys_kexec_load 2 35674 NULL
-+rds_page_copy_user_35691 rds_page_copy_user 4 35691 NULL
-+fixup_low_keys_35734 fixup_low_keys 0 35734 NULL
-+ext4_truncate_restart_trans_35750 ext4_truncate_restart_trans 0 35750 NULL
-+iwl_dbgfs_disable_ht40_read_35761 iwl_dbgfs_disable_ht40_read 3 35761 NULL
-+udf_alloc_i_data_35786 udf_alloc_i_data 2 35786 NULL
-+store_fan1_input_35793 store_fan1_input 4 35793 NULL
-+read_file_stations_35795 read_file_stations 3 35795 NULL
-+pvr2_hdw_cpufw_get_35824 pvr2_hdw_cpufw_get 0-4-2 35824 NULL
-+vx_query_hbuffer_size_35859 vx_query_hbuffer_size 0 35859 NULL
-+mthca_buf_alloc_35861 mthca_buf_alloc 2 35861 NULL
-+wait_mgsl_event_35872 wait_mgsl_event 0 35872 NULL
-+kvm_dirty_bitmap_bytes_35886 kvm_dirty_bitmap_bytes 0 35886 NULL
-+ieee80211_if_fmt_dot11MeshRetryTimeout_35890 ieee80211_if_fmt_dot11MeshRetryTimeout 3 35890 NULL
-+uwb_rc_cmd_done_35892 uwb_rc_cmd_done 4 35892 NULL
-+tcp_mark_head_lost_35895 tcp_mark_head_lost 2 35895 NULL
-+igmpv3_newpack_35912 igmpv3_newpack 2 35912 NULL
-+kernel_setsockopt_35913 kernel_setsockopt 5 35913 NULL
-+balance_node_right_35920 balance_node_right 0 35920 NULL
-+put_cmsg_compat_35937 put_cmsg_compat 4 35937 NULL
-+ceph_buffer_new_35974 ceph_buffer_new 1 35974 NULL
-+acl_alloc_35979 acl_alloc 1 35979 NULL
-+device_add_class_symlinks_35985 device_add_class_symlinks 0 35985 NULL
-+generic_file_aio_read_35987 generic_file_aio_read 0 35987 NULL
-+koneplus_sysfs_write_35993 koneplus_sysfs_write 6 35993 NULL
-+write_file_antenna_35998 write_file_antenna 3 35998 NULL
-+console_store_36007 console_store 4 36007 NULL
-+i965_write_fence_reg_36017 i965_write_fence_reg 0 36017 NULL
-+sys_init_module_36047 sys_init_module 2 36047 NULL
-+gpio_power_read_36059 gpio_power_read 3 36059 NULL
-+write_emulate_36065 write_emulate 2-4 36065 NULL
-+stack_max_size_write_36068 stack_max_size_write 3 36068 NULL
-+ieee80211_if_fmt_peer_36071 ieee80211_if_fmt_peer 3 36071 NULL
-+ieee80211_if_write_tsf_36077 ieee80211_if_write_tsf 3 36077 NULL
-+snd_pcm_plug_read_transfer_36080 snd_pcm_plug_read_transfer 0-3 36080 NULL
-+genlmsg_new_36094 genlmsg_new 1 36094 NULL
-+vga_arb_write_36112 vga_arb_write 3 36112 NULL
-+rx_enable_36125 rx_enable 0 36125 NULL
-+iwl_trans_txq_alloc_36147 iwl_trans_txq_alloc 3 36147 NULL
-+b1_alloc_card_36155 b1_alloc_card 1 36155 NULL
-+btrfs_file_extent_inline_len_36158 btrfs_file_extent_inline_len 0 36158 NULL
-+snd_korg1212_copy_from_36169 snd_korg1212_copy_from 6 36169 NULL
-+FTL_Get_Block_Table_Flash_Size_Bytes_36187 FTL_Get_Block_Table_Flash_Size_Bytes 0 36187 NULL
-+__ip_append_data_36191 __ip_append_data 7-8 36191 NULL
-+atomic_stats_read_36228 atomic_stats_read 3 36228 NULL
-+viafb_iga1_odev_proc_write_36241 viafb_iga1_odev_proc_write 3 36241 NULL
-+compat_sys_mbind_36256 compat_sys_mbind 5 36256 NULL
-+usb_buffer_alloc_36276 usb_buffer_alloc 2 36276 NULL
-+modem_input_wait_36278 modem_input_wait 0 36278 NULL
-+mangle_sdp_packet_36279 mangle_sdp_packet 9 36279 NULL
-+codec_reg_read_file_36280 codec_reg_read_file 3 36280 NULL
-+lpfc_debugfs_dif_err_read_36303 lpfc_debugfs_dif_err_read 3 36303 NULL
-+ad7879_spi_xfer_36311 ad7879_spi_xfer 3 36311 NULL
-+fat_compat_ioctl_filldir_36328 fat_compat_ioctl_filldir 3 36328 NULL
-+jbd2_journal_init_revoke_table_36336 jbd2_journal_init_revoke_table 1 36336 NULL
-+ath6kl_regwrite_write_36351 ath6kl_regwrite_write 3 36351 NULL
-+v9fs_file_readn_36353 v9fs_file_readn 4 36353 NULL
-+to_sector_36361 to_sector 0-1 36361 NULL
-+mtd_do_writeoob_36373 mtd_do_writeoob 4 36373 NULL
-+vring_new_virtqueue_36374 vring_new_virtqueue 1 36374 NULL
-+tunables_read_36385 tunables_read 3 36385 NULL
-+afs_alloc_flat_call_36399 afs_alloc_flat_call 2-3 36399 NULL
-+sierra_write_36402 sierra_write 4 36402 NULL
-+rtnl_link_get_size_36436 rtnl_link_get_size 0 36436 NULL
-+sctp_tsnmap_init_36446 sctp_tsnmap_init 2 36446 NULL
-+alloc_etherdev_mqs_36450 alloc_etherdev_mqs 1 36450 NULL
-+b43_nphy_load_samples_36481 b43_nphy_load_samples 3 36481 NULL
-+ip6_append_data_36490 ip6_append_data 4-5 36490 NULL
-+cmd_loop_36491 cmd_loop 0 36491 NULL
-+iwl_legacy_dbgfs_power_save_status_read_36492 iwl_legacy_dbgfs_power_save_status_read 3 36492 NULL
-+__hwahc_op_set_ptk_36510 __hwahc_op_set_ptk 5 36510 NULL
-+mcam_v4l_read_36513 mcam_v4l_read 3 36513 NULL
-+ieee80211_if_read_fwded_frames_36520 ieee80211_if_read_fwded_frames 3 36520 NULL
-+crypto_aead_authsize_36537 crypto_aead_authsize 0 36537 NULL
-+cpu_type_read_36540 cpu_type_read 3 36540 NULL
-+__kfifo_to_user_36555 __kfifo_to_user 3-0 36555 NULL nohasharray
-+macvtap_do_read_36555 macvtap_do_read 4 36555 &__kfifo_to_user_36555
-+__erst_read_36579 __erst_read 0 36579 NULL
-+put_cmsg_36589 put_cmsg 4 36589 NULL
-+pcnet32_realloc_rx_ring_36598 pcnet32_realloc_rx_ring 3 36598 NULL
-+fat_ioctl_filldir_36621 fat_ioctl_filldir 3 36621 NULL
-+vxge_config_vpaths_36636 vxge_config_vpaths 0 36636 NULL
-+lpfc_idiag_extacc_alloc_get_36648 lpfc_idiag_extacc_alloc_get 0-3 36648 NULL
-+osd_req_list_collection_objects_36664 osd_req_list_collection_objects 5 36664 NULL
-+iscsi_host_alloc_36671 iscsi_host_alloc 2 36671 NULL
-+get_txidle_36698 get_txidle 0 36698 NULL
-+gsmtty_write_36702 gsmtty_write 3 36702 NULL
-+saa7134_i2c_eeprom_36729 saa7134_i2c_eeprom 3 36729 NULL
-+extract_icmp6_fields_36732 extract_icmp6_fields 2 36732 NULL
-+snd_rawmidi_kernel_read1_36740 snd_rawmidi_kernel_read1 4-0 36740 NULL
-+cxgbi_device_register_36746 cxgbi_device_register 1-2 36746 NULL
-+i915_gem_evict_inactive_36767 i915_gem_evict_inactive 0 36767 NULL
-+ip4ip6_err_36772 ip4ip6_err 5 36772 NULL
-+llc_mac_header_len_36776 llc_mac_header_len 0 36776 NULL
-+proc_fault_inject_read_36802 proc_fault_inject_read 3 36802 NULL
-+do_dmabuf_dirty_sou_36807 do_dmabuf_dirty_sou 7 36807 NULL
-+hiddev_ioctl_36816 hiddev_ioctl 2 36816 NULL
-+int_hardware_entry_36833 int_hardware_entry 3 36833 NULL
-+fc_change_queue_depth_36841 fc_change_queue_depth 2 36841 NULL
-+keyctl_describe_key_36853 keyctl_describe_key 3 36853 NULL
-+cm_write_36858 cm_write 3 36858 NULL
-+svc_setsockopt_36876 svc_setsockopt 5 36876 NULL
-+ib_ucm_alloc_data_36885 ib_ucm_alloc_data 3 36885 NULL
-+selinux_inode_notifysecctx_36896 selinux_inode_notifysecctx 3 36896 NULL
-+OS_kmalloc_36909 OS_kmalloc 1 36909 NULL
-+genlmsg_total_size_36938 genlmsg_total_size 0-1 36938 NULL
-+crypto_blkcipher_ivsize_36944 crypto_blkcipher_ivsize 0 36944 NULL
-+sparse_early_mem_maps_alloc_node_36971 sparse_early_mem_maps_alloc_node 4 36971 NULL
-+setxattr_37006 setxattr 4 37006 NULL
-+command_file_read_37038 command_file_read 3 37038 NULL
-+em28xx_gpio_set_37040 em28xx_gpio_set 0 37040 NULL
-+ieee80211_if_read_drop_unencrypted_37053 ieee80211_if_read_drop_unencrypted 3 37053 NULL
-+parse_command_37079 parse_command 2 37079 NULL
-+snd_hda_get_conn_list_37132 snd_hda_get_conn_list 0 37132 NULL
-+xfrm_expire_msgsize_37133 xfrm_expire_msgsize 0 37133 NULL
-+msg_word_37164 msg_word 0 37164 NULL
-+BeceemNVMRead_37166 BeceemNVMRead 0 37166 NULL
-+can_set_xattr_37182 can_set_xattr 4 37182 NULL
-+store_wimax_37196 store_wimax 4 37196 NULL
-+vcc_recvmsg_37198 vcc_recvmsg 4 37198 NULL
-+sysfs_add_file_37200 sysfs_add_file 0 37200 NULL
-+crypto_shash_descsize_37212 crypto_shash_descsize 0 37212 NULL
-+uapsd_queues_read_37217 uapsd_queues_read 3 37217 NULL
-+regmap_access_read_file_37223 regmap_access_read_file 3 37223 NULL
-+__do_replace_37227 __do_replace 5 37227 NULL
-+produce_free_peb_37232 produce_free_peb 0 37232 NULL
-+ctnetlink_secctx_size_37236 ctnetlink_secctx_size 0 37236 NULL
-+BeceemFlashBulkWrite_37255 BeceemFlashBulkWrite 0 37255 NULL
-+prot_queue_del_37258 prot_queue_del 0 37258 NULL
-+exofs_max_io_pages_37263 exofs_max_io_pages 0-2 37263 NULL
-+srp_target_alloc_37288 srp_target_alloc 3 37288 NULL
-+jffs2_write_dirent_37311 jffs2_write_dirent 5 37311 NULL
-+send_msg_37323 send_msg 4 37323 NULL
-+brcmf_sdbrcm_membytes_37324 brcmf_sdbrcm_membytes 3-5 37324 NULL
-+scsi_mode_select_37330 scsi_mode_select 6 37330 NULL
-+rxrpc_server_sendmsg_37331 rxrpc_server_sendmsg 4 37331 NULL
-+nf_bridge_pad_37351 nf_bridge_pad 0 37351 NULL
-+security_inode_getsecurity_37354 security_inode_getsecurity 0 37354 NULL
-+sys_getxattr_37418 sys_getxattr 4 37418 NULL
-+hci_sock_sendmsg_37420 hci_sock_sendmsg 4 37420 NULL
-+acpi_os_allocate_zeroed_37422 acpi_os_allocate_zeroed 1 37422 NULL nohasharray
-+find_next_bit_37422 find_next_bit 0 37422 &acpi_os_allocate_zeroed_37422
-+tty_insert_flip_string_fixed_flag_37428 tty_insert_flip_string_fixed_flag 4-0 37428 NULL
-+iwl_print_last_event_logs_37433 iwl_print_last_event_logs 7-9-0 37433 NULL
-+tcp_established_options_37450 tcp_established_options 0 37450 NULL
-+cmd_input_size_37457 cmd_input_size 0-1 37457 NULL
-+get_est_timing_37484 get_est_timing 0 37484 NULL
-+kmem_realloc_37489 kmem_realloc 2 37489 NULL
-+xz_dec_test_write_37527 xz_dec_test_write 3 37527 NULL
-+hdr_size_37536 hdr_size 0 37536 NULL
-+xhci_alloc_streams_37586 xhci_alloc_streams 5 37586 NULL
-+qla2x00_debounce_register_37597 qla2x00_debounce_register 0 37597 NULL
-+kvm_read_guest_page_mmu_37611 kvm_read_guest_page_mmu 6 37611 NULL
-+bio_copy_user_iov_37660 bio_copy_user_iov 4 37660 NULL
-+vmw_framebuffer_dmabuf_dirty_37661 vmw_framebuffer_dmabuf_dirty 6 37661 NULL nohasharray
-+rfcomm_sock_sendmsg_37661 rfcomm_sock_sendmsg 4 37661 &vmw_framebuffer_dmabuf_dirty_37661
-+iwl_legacy_dbgfs_rxon_filter_flags_read_37666 iwl_legacy_dbgfs_rxon_filter_flags_read 3 37666 NULL
-+regmap_map_read_file_37685 regmap_map_read_file 3 37685 NULL
-+__le32_to_cpup_37702 __le32_to_cpup 0 37702 NULL
-+read_enabled_file_bool_37744 read_enabled_file_bool 3 37744 NULL
-+ocfs2_duplicate_clusters_by_jbd_37749 ocfs2_duplicate_clusters_by_jbd 5-4-6 37749 NULL
-+ocfs2_control_cfu_37750 ocfs2_control_cfu 2 37750 NULL
-+ipath_cdev_init_37752 ipath_cdev_init 1 37752 NULL
-+dccp_setsockopt_cscov_37766 dccp_setsockopt_cscov 2 37766 NULL
-+smk_read_logging_37804 smk_read_logging 3 37804 NULL
-+jbd2_journal_get_undo_access_37837 jbd2_journal_get_undo_access 0 37837 NULL
-+o2hb_debug_read_37851 o2hb_debug_read 3 37851 NULL
-+xfs_dir2_block_to_sf_37868 xfs_dir2_block_to_sf 3 37868 NULL
-+iwmct_fw_parser_init_37876 iwmct_fw_parser_init 4 37876 NULL
-+sys_setxattr_37880 sys_setxattr 4 37880 NULL
-+dvb_net_sec_37884 dvb_net_sec 3 37884 NULL
-+tipc_link_send_sections_fast_37920 tipc_link_send_sections_fast 4 37920 NULL
-+pkt_alloc_packet_data_37928 pkt_alloc_packet_data 1 37928 NULL
-+read_rbu_packet_size_37939 read_rbu_packet_size 6 37939 NULL
-+write_file_bool_37957 write_file_bool 3 37957 NULL
-+rds_rdma_extra_size_37990 rds_rdma_extra_size 0 37990 NULL
-+vfs_readv_38011 vfs_readv 3 38011 NULL
-+aggr_recv_addba_req_evt_38037 aggr_recv_addba_req_evt 4 38037 NULL
-+store_wlan_38040 store_wlan 4 38040 NULL
-+klsi_105_prepare_write_buffer_38044 klsi_105_prepare_write_buffer 3 38044 NULL
-+sysfs_do_create_link_38051 sysfs_do_create_link 0 38051 NULL
-+nsm_create_handle_38060 nsm_create_handle 4 38060 NULL
-+alloc_ltalkdev_38071 alloc_ltalkdev 1 38071 NULL
-+uwb_mac_addr_print_38085 uwb_mac_addr_print 2 38085 NULL
-+em28xx_set_mode_38088 em28xx_set_mode 0 38088 NULL
-+request_key_auth_new_38092 request_key_auth_new 3 38092 NULL
-+proc_self_readlink_38094 proc_self_readlink 3 38094 NULL
-+ep0_read_38095 ep0_read 3 38095 NULL
-+snd_pcm_oss_write_38108 snd_pcm_oss_write 3 38108 NULL
-+vmw_kms_present_38130 vmw_kms_present 9 38130 NULL
-+__ntfs_copy_from_user_iovec_inatomic_38153 __ntfs_copy_from_user_iovec_inatomic 0-4-3 38153 NULL
-+kvm_clear_guest_38164 kvm_clear_guest 3-2 38164 NULL
-+cdev_add_38176 cdev_add 2-3 38176 NULL
-+rt2x00debug_write_rf_38195 rt2x00debug_write_rf 3 38195 NULL
-+get_ucode_user_38202 get_ucode_user 3 38202 NULL
-+osd_req_list_partition_collections_38223 osd_req_list_partition_collections 5 38223 NULL
-+ceph_decode_16_38239 ceph_decode_16 0 38239 NULL
-+_ipw_read_reg32_38245 _ipw_read_reg32 0 38245 NULL
-+mthca_alloc_icm_table_38268 mthca_alloc_icm_table 4-3 38268 NULL nohasharray
-+ieee80211_if_read_auto_open_plinks_38268 ieee80211_if_read_auto_open_plinks 3 38268 &mthca_alloc_icm_table_38268
-+xfs_bmbt_to_bmdr_38275 xfs_bmbt_to_bmdr 3 38275 NULL nohasharray
-+xfs_bmdr_to_bmbt_38275 xfs_bmdr_to_bmbt 5 38275 &xfs_bmbt_to_bmdr_38275
-+zd_mac_rx_38296 zd_mac_rx 3 38296 NULL
-+isr_rx_headers_read_38325 isr_rx_headers_read 3 38325 NULL
-+__snd_gf1_look8_38333 __snd_gf1_look8 0 38333 NULL
-+btrfs_file_extent_disk_num_bytes_38363 btrfs_file_extent_disk_num_bytes 0 38363 NULL
-+sctp_sf_abort_violation_38380 sctp_sf_abort_violation 6 38380 NULL
-+dn_sendmsg_38390 dn_sendmsg 4 38390 NULL
-+ttm_put_pages_38411 ttm_put_pages 2 38411 NULL
-+ocfs2_which_cluster_group_38413 ocfs2_which_cluster_group 0-2 38413 NULL
-+iwm_wdev_alloc_38415 iwm_wdev_alloc 1 38415 NULL
-+ieee80211_if_read_dtim_count_38419 ieee80211_if_read_dtim_count 3 38419 NULL
-+pcnet32_realloc_tx_ring_38428 pcnet32_realloc_tx_ring 3 38428 NULL
-+pmcraid_copy_sglist_38431 pmcraid_copy_sglist 3 38431 NULL
-+var_name_strnsize_38447 var_name_strnsize 0-2 38447 NULL
-+kvm_write_guest_38454 kvm_write_guest 4-2 38454 NULL
-+blk_end_bidi_request_38482 blk_end_bidi_request 3-4 38482 NULL
-+dev_names_read_38509 dev_names_read 3 38509 NULL
-+iscsi_create_iface_38510 iscsi_create_iface 5 38510 NULL
-+event_rx_mismatch_read_38518 event_rx_mismatch_read 3 38518 NULL
-+_osd_req_alist_elem_decode_38527 _osd_req_alist_elem_decode 0 38527 NULL
-+ubifs_idx_node_sz_38546 ubifs_idx_node_sz 0-2 38546 NULL
-+irda_sendmsg_dgram_38563 irda_sendmsg_dgram 4 38563 NULL
-+_ipw_read32_38565 _ipw_read32 0 38565 NULL
-+snd_nm256_playback_copy_38567 snd_nm256_playback_copy 5-3 38567 NULL
-+sctp_tsnmap_num_dups_38578 sctp_tsnmap_num_dups 0 38578 NULL
-+copy_ctl_value_to_user_38587 copy_ctl_value_to_user 4 38587 NULL
-+cosa_net_setup_rx_38594 cosa_net_setup_rx 2 38594 NULL
-+reportdesc_callback_38603 reportdesc_callback 3 38603 NULL
-+pep_indicate_38611 pep_indicate 5 38611 NULL
-+__css_put_38613 __css_put 2 38613 NULL
-+icn_writecmd_38629 icn_writecmd 2 38629 NULL
-+write_enabled_file_bool_38630 write_enabled_file_bool 3 38630 NULL
-+receive_extralen_38634 receive_extralen 0 38634 NULL
-+audit_init_entry_38644 audit_init_entry 1 38644 NULL
-+mmc_send_cxd_data_38655 mmc_send_cxd_data 5 38655 NULL
-+nfs_dns_resolve_name_38670 nfs_dns_resolve_name 2 38670 NULL
-+snd_es1371_wait_src_ready_38673 snd_es1371_wait_src_ready 0 38673 NULL
-+cfg80211_send_disassoc_38678 cfg80211_send_disassoc 3 38678 NULL
-+iscsit_dump_data_payload_38683 iscsit_dump_data_payload 2 38683 NULL
-+validate_vid_hdr_38699 validate_vid_hdr 0 38699 NULL
-+v4l2_ctrl_new_38725 v4l2_ctrl_new 7 38725 NULL
-+w83977af_sir_interrupt_38738 w83977af_sir_interrupt 0 38738 NULL
-+iwl_dbgfs_thermal_throttling_read_38779 iwl_dbgfs_thermal_throttling_read 3 38779 NULL
-+snd_gus_dram_write_38784 snd_gus_dram_write 4 38784 NULL
-+gre_manip_pkt_38785 gre_manip_pkt 2 38785 NULL
-+do_pci_enable_device_38802 do_pci_enable_device 0 38802 NULL
-+err_decode_38804 err_decode 2 38804 NULL
-+ipv6_renew_option_38813 ipv6_renew_option 3 38813 NULL
-+sys_select_38827 sys_select 1 38827 NULL
-+b43_txhdr_size_38832 b43_txhdr_size 0 38832 NULL
-+direct_entry_38836 direct_entry 3 38836 NULL
-+compat_udp_setsockopt_38840 compat_udp_setsockopt 5 38840 NULL
-+read_nic_io_word_38853 read_nic_io_word 0 38853 NULL
-+interfaces_38859 interfaces 2 38859 NULL
-+pci_msix_table_size_38867 pci_msix_table_size 0 38867 NULL
-+sizeof_gpio_leds_priv_38882 sizeof_gpio_leds_priv 0-1 38882 NULL
-+dbgfs_state_38894 dbgfs_state 3 38894 NULL
-+traverse_38897 traverse 0 38897 NULL
-+__fswab16_38898 __fswab16 0 38898 NULL
-+usb_maxpacket_38977 usb_maxpacket 0 38977 NULL
-+OSDSetBlock_38986 OSDSetBlock 2-4 38986 NULL
-+lpfc_idiag_extacc_write_38998 lpfc_idiag_extacc_write 3 38998 NULL
-+t4vf_pktgl_to_skb_39005 t4vf_pktgl_to_skb 2 39005 NULL
-+get_nodes_39012 get_nodes 3 39012 NULL
-+disp_proc_write_39024 disp_proc_write 3 39024 NULL
-+_zd_iowrite32v_async_locked_39034 _zd_iowrite32v_async_locked 3 39034 NULL
-+do_write_kmem_39051 do_write_kmem 0-1-3 39051 NULL
-+line6_midibuf_read_39067 line6_midibuf_read 0-3 39067 NULL
-+ReadHFC_39104 ReadHFC 0 39104 NULL
-+tomoyo_truncate_39105 tomoyo_truncate 0 39105 NULL
-+__kfifo_to_user_r_39123 __kfifo_to_user_r 5-3 39123 NULL
-+ttm_mem_global_alloc_zone_39125 ttm_mem_global_alloc_zone 0 39125 NULL
-+i915_gem_evict_something_39130 i915_gem_evict_something 0 39130 NULL
-+generic_permission_39150 generic_permission 0 39150 NULL
-+alloc_ring_39151 alloc_ring 2-4 39151 NULL
-+proc_coredump_filter_read_39153 proc_coredump_filter_read 3 39153 NULL
-+ext3_xattr_check_names_39174 ext3_xattr_check_names 0 39174 NULL
-+init_list_set_39188 init_list_set 2-3 39188 NULL
-+ubi_more_update_data_39189 ubi_more_update_data 4 39189 NULL
-+qcam_read_bytes_39205 qcam_read_bytes 0 39205 NULL
-+ivtv_v4l2_write_39226 ivtv_v4l2_write 3 39226 NULL
-+drm_order_39244 drm_order 0 39244 NULL
-+snd_pcm_capture_forward_39248 snd_pcm_capture_forward 2 39248 NULL
-+r128_compat_ioctl_39250 r128_compat_ioctl 2 39250 NULL
-+__skb_cow_39254 __skb_cow 2 39254 NULL
-+pohmelfs_setxattr_39281 pohmelfs_setxattr 4 39281 NULL
-+mei_registration_cdev_39284 mei_registration_cdev 2 39284 NULL
-+__cfg80211_connect_result_39326 __cfg80211_connect_result 4-6 39326 NULL
-+wimax_msg_alloc_39343 wimax_msg_alloc 4 39343 NULL
-+__cfg80211_send_deauth_39344 __cfg80211_send_deauth 3 39344 NULL
-+ide_complete_rq_39354 ide_complete_rq 3 39354 NULL
-+vortex_wtdma_getlinearpos_39371 vortex_wtdma_getlinearpos 0 39371 NULL
-+user_power_read_39414 user_power_read 3 39414 NULL
-+alloc_agpphysmem_i8xx_39427 alloc_agpphysmem_i8xx 1 39427 NULL
-+sys_semop_39457 sys_semop 3 39457 NULL
-+setkey_unaligned_39474 setkey_unaligned 3 39474 NULL
-+btrfs_mksubvol_39479 btrfs_mksubvol 3 39479 NULL
-+ieee80211_if_fmt_dot11MeshHWMPmaxPREQretries_39499 ieee80211_if_fmt_dot11MeshHWMPmaxPREQretries 3 39499 NULL
-+int_proc_write_39542 int_proc_write 3 39542 NULL nohasharray
-+wm8350_i2c_read_device_39542 wm8350_i2c_read_device 3 39542 &int_proc_write_39542
-+rtnl_port_size_39551 rtnl_port_size 0 39551 NULL
-+pp_write_39554 pp_write 3 39554 NULL
-+ol_dqblk_block_39558 ol_dqblk_block 0-2-3 39558 NULL
-+datablob_format_39571 datablob_format 2 39571 NULL nohasharray
-+ieee80211_if_read_fwded_mcast_39571 ieee80211_if_read_fwded_mcast 3 39571 &datablob_format_39571
-+handle_response_icmp_39574 handle_response_icmp 7 39574 NULL
-+ext_depth_39607 ext_depth 0 39607 NULL
-+sdio_readb_39618 sdio_readb 0 39618 NULL
-+fm_send_cmd_39639 fm_send_cmd 5 39639 NULL
-+snd_rme32_capture_copy_39653 snd_rme32_capture_copy 5 39653 NULL
-+prism2_info_hostscanresults_39657 prism2_info_hostscanresults 3 39657 NULL
-+pfkey_sockaddr_size_39661 pfkey_sockaddr_size 0 39661 NULL
-+kvm_read_guest_cached_39666 kvm_read_guest_cached 4 39666 NULL
-+v4l_stk_read_39672 v4l_stk_read 3 39672 NULL
-+sd_completed_bytes_39705 sd_completed_bytes 0 39705 NULL
-+ftrace_pid_write_39710 ftrace_pid_write 3 39710 NULL
-+tcf_csum_ipv4_tcp_39713 tcf_csum_ipv4_tcp 4 39713 NULL
-+tcp_write_xmit_39755 tcp_write_xmit 2 39755 NULL
-+usb_hcd_map_urb_for_dma_39774 usb_hcd_map_urb_for_dma 0 39774 NULL
-+ocfs2_pages_per_cluster_39790 ocfs2_pages_per_cluster 0 39790 NULL
-+security_inode_listsecurity_39812 security_inode_listsecurity 0 39812 NULL
-+snd_pcm_oss_writev3_39818 snd_pcm_oss_writev3 3 39818 NULL
-+sys_migrate_pages_39825 sys_migrate_pages 2 39825 NULL
-+get_priv_size_39828 get_priv_size 0-1 39828 NULL
-+beiscsi_process_async_pdu_39834 beiscsi_process_async_pdu 7 39834 NULL
-+pkt_add_39897 pkt_add 3 39897 NULL
-+read_file_modal_eeprom_39909 read_file_modal_eeprom 3 39909 NULL
-+gen_pool_add_virt_39913 gen_pool_add_virt 4 39913 NULL
-+dw210x_op_rw_39915 dw210x_op_rw 6 39915 NULL
-+aes_encrypt_interrupt_read_39919 aes_encrypt_interrupt_read 3 39919 NULL
-+exofs_read_kern_39921 exofs_read_kern 6 39921 NULL nohasharray
-+oom_score_adj_read_39921 oom_score_adj_read 3 39921 &exofs_read_kern_39921
-+__spi_async_39932 __spi_async 0 39932 NULL
-+iwl_legacy_dbgfs_missed_beacon_read_39939 iwl_legacy_dbgfs_missed_beacon_read 3 39939 NULL
-+fwnet_pd_new_39947 fwnet_pd_new 4 39947 NULL
-+tty_prepare_flip_string_39955 tty_prepare_flip_string 3-0 39955 NULL
-+dma_push_rx_39973 dma_push_rx 2 39973 NULL
-+broadsheetfb_write_39976 broadsheetfb_write 3 39976 NULL
-+mthca_array_init_39987 mthca_array_init 2 39987 NULL
-+fw_device_op_read_39990 fw_device_op_read 3 39990 NULL
-+i2c_readn_40001 i2c_readn 0 40001 NULL
-+xen_hvm_config_40018 xen_hvm_config 2 40018 NULL
-+ivtvfb_write_40023 ivtvfb_write 3 40023 NULL
-+datablob_hmac_append_40038 datablob_hmac_append 3 40038 NULL
-+atomic_xchg_40070 atomic_xchg 0 40070 NULL
-+sctp_setsockopt_delayed_ack_40129 sctp_setsockopt_delayed_ack 3 40129 NULL
-+iwch_alloc_fastreg_pbl_40153 iwch_alloc_fastreg_pbl 2 40153 NULL
-+pt_write_40159 pt_write 3 40159 NULL
-+scsi_sg_count_40182 scsi_sg_count 0 40182 NULL
-+ipr_alloc_ucode_buffer_40199 ipr_alloc_ucode_buffer 1 40199 NULL
-+allocate_probes_40204 allocate_probes 1 40204 NULL
-+au0828_v4l2_read_40220 au0828_v4l2_read 3 40220 NULL
-+compress_file_range_40225 compress_file_range 3-4 40225 NULL
-+osst_read_40237 osst_read 3 40237 NULL
-+brcmf_sdioh_request_buffer_40239 brcmf_sdioh_request_buffer 7 40239 NULL
-+ocfs2_zero_extend_get_range_40248 ocfs2_zero_extend_get_range 4 40248 NULL
-+fuse_update_attributes_40262 fuse_update_attributes 0 40262 NULL nohasharray
-+rs_sta_dbgfs_scale_table_read_40262 rs_sta_dbgfs_scale_table_read 3 40262 &fuse_update_attributes_40262
-+ext2_fiemap_40271 ext2_fiemap 4 40271 NULL
-+rx_xfr_hint_trig_read_40283 rx_xfr_hint_trig_read 3 40283 NULL
-+nfs_file_llseek_40306 nfs_file_llseek 2 40306 NULL
-+ib_get_mad_data_offset_40336 ib_get_mad_data_offset 0 40336 NULL
-+bat_ogm_queue_add_40337 bat_ogm_queue_add 3 40337 NULL
-+mmio_read_40348 mmio_read 4 40348 NULL
-+ocfs2_release_clusters_40355 ocfs2_release_clusters 4 40355 NULL
-+event_rx_mem_empty_read_40363 event_rx_mem_empty_read 3 40363 NULL
-+ocfs2_check_range_for_refcount_40365 ocfs2_check_range_for_refcount 2-3 40365 NULL
-+get_chars_40373 get_chars 3 40373 NULL
-+usb_gadget_config_buf_40374 usb_gadget_config_buf 0 40374 NULL
-+fwnet_incoming_packet_40380 fwnet_incoming_packet 3 40380 NULL
-+brcmf_sdbrcm_get_image_40397 brcmf_sdbrcm_get_image 0-2 40397 NULL
-+fb_prepare_extra_logos_40429 fb_prepare_extra_logos 0-2 40429 NULL
-+atmel_rmem16_40450 atmel_rmem16 0 40450 NULL
-+tomoyo_update_policy_40458 tomoyo_update_policy 2 40458 NULL
-+zd_usb_scnprint_id_40459 zd_usb_scnprint_id 0-3 40459 NULL
-+afs_fs_store_data_40484 afs_fs_store_data 3-4-5-6 40484 NULL
-+devcgroup_inode_permission_40492 devcgroup_inode_permission 0 40492 NULL
-+tty_write_room_40495 tty_write_room 0 40495 NULL
-+__ethtool_get_sset_count_40511 __ethtool_get_sset_count 0 40511 NULL
-+TSS_checkhmac2_40520 TSS_checkhmac2 5-7 40520 NULL
-+i915_gem_execbuffer_relocate_object_slow_40546 i915_gem_execbuffer_relocate_object_slow 0 40546 NULL
-+ima_write_policy_40548 ima_write_policy 3 40548 NULL
-+esp_alloc_tmp_40558 esp_alloc_tmp 3-2 40558 NULL
-+b1_get_byte_40597 b1_get_byte 0 40597 NULL
-+skge_rx_get_40598 skge_rx_get 3 40598 NULL
-+get_priv_descr_and_size_40612 get_priv_descr_and_size 0 40612 NULL
-+sctp_manip_pkt_40620 sctp_manip_pkt 2 40620 NULL
-+fops_read_40672 fops_read 3 40672 NULL
-+ext4_mark_inode_dirty_40673 ext4_mark_inode_dirty 0 40673 NULL
-+videobuf_dma_init_user_locked_40678 videobuf_dma_init_user_locked 4-3 40678 NULL
-+pci_enable_resources_40680 pci_enable_resources 0 40680 NULL
-+__seq_open_private_40715 __seq_open_private 3 40715 NULL
-+find_next_zero_bit_le_40744 find_next_zero_bit_le 0 40744 NULL nohasharray
-+xfs_iext_remove_direct_40744 xfs_iext_remove_direct 3 40744 &find_next_zero_bit_le_40744
-+security_inode_listxattr_40752 security_inode_listxattr 0 40752 NULL
-+card_send_command_40757 card_send_command 3 40757 NULL
-+ad1889_readl_40765 ad1889_readl 0 40765 NULL
-+pg_write_40766 pg_write 3 40766 NULL
-+ecryptfs_readlink_40775 ecryptfs_readlink 3 40775 NULL nohasharray
-+show_list_40775 show_list 3-0 40775 &ecryptfs_readlink_40775
-+kfifo_out_copy_r_40784 kfifo_out_copy_r 3 40784 NULL
-+bitmap_weight_40791 bitmap_weight 2-0 40791 NULL
-+netdev_alloc_skb_ip_align_40811 netdev_alloc_skb_ip_align 2 40811 NULL nohasharray
-+paranoid_check_not_bad_40811 paranoid_check_not_bad 0 40811 &netdev_alloc_skb_ip_align_40811
-+nl80211_send_roamed_40825 nl80211_send_roamed 5-7 40825 NULL
-+nilfs_mdt_init_40849 nilfs_mdt_init 3 40849 NULL
-+__shared_list_add_40850 __shared_list_add 0 40850 NULL
-+ocfs2_zero_partial_clusters_40856 ocfs2_zero_partial_clusters 2-3 40856 NULL
-+v9fs_file_read_40858 v9fs_file_read 3 40858 NULL
-+read_file_queue_40895 read_file_queue 3 40895 NULL
-+waiters_read_40902 waiters_read 3 40902 NULL
-+isdn_add_channels_40905 isdn_add_channels 3 40905 NULL
-+iwl_legacy_dbgfs_disable_ht40_read_40910 iwl_legacy_dbgfs_disable_ht40_read 3 40910 NULL
-+vol_cdev_write_40915 vol_cdev_write 3 40915 NULL
-+iterate_extent_inodes_40923 iterate_extent_inodes 0 40923 NULL
-+btrfs_setsize_40931 btrfs_setsize 2 40931 NULL
-+snd_vx_create_40948 snd_vx_create 4 40948 NULL
-+skb_end_offset_40949 skb_end_offset 0 40949 NULL
-+tcp_skb_mss_40964 tcp_skb_mss 0 40964 NULL
-+rds_sendmsg_40976 rds_sendmsg 4 40976 NULL
-+mac80211_format_buffer_41010 mac80211_format_buffer 2 41010 NULL
-+_req_append_segment_41031 _req_append_segment 2 41031 NULL
-+mISDN_sock_sendmsg_41035 mISDN_sock_sendmsg 4 41035 NULL
-+ocfs2_xattr_index_block_find_41040 ocfs2_xattr_index_block_find 0 41040 NULL
-+BcmFlash2xBulkWrite_41054 BcmFlash2xBulkWrite 0 41054 NULL
-+vfs_listxattr_41062 vfs_listxattr 0 41062 NULL nohasharray
-+beacon_filtering_write_41062 beacon_filtering_write 3 41062 &vfs_listxattr_41062
-+cfg80211_inform_bss_frame_41078 cfg80211_inform_bss_frame 4 41078 NULL
-+roccat_read_41093 roccat_read 3 41093 NULL
-+provide_user_output_41105 provide_user_output 3 41105 NULL
-+f_audio_buffer_alloc_41110 f_audio_buffer_alloc 1 41110 NULL
-+oom_adjust_write_41116 oom_adjust_write 3 41116 NULL
-+dvb_ca_write_41171 dvb_ca_write 3 41171 NULL
-+ol_quota_chunk_block_41177 ol_quota_chunk_block 0-2 41177 NULL
-+compat_sys_process_vm_writev_41194 compat_sys_process_vm_writev 3-5 41194 NULL
-+dfs_file_write_41196 dfs_file_write 3 41196 NULL
-+xfs_readdir_41200 xfs_readdir 3 41200 NULL nohasharray
-+UpdateRegs_41200 UpdateRegs 0 41200 &xfs_readdir_41200
-+ocfs2_read_quota_block_41207 ocfs2_read_quota_block 2 41207 NULL
-+ceph_calc_raw_layout_41212 ceph_calc_raw_layout 4 41212 NULL
-+tun_alloc_skb_41216 tun_alloc_skb 2-4-3 41216 NULL
-+nfs_page_array_len_41219 nfs_page_array_len 0-2-1 41219 NULL
-+hiddev_compat_ioctl_41255 hiddev_compat_ioctl 2 41255 NULL
-+create_dir_41256 create_dir 0 41256 NULL
-+erst_read_41260 erst_read 0 41260 NULL
-+alloc_context_41283 alloc_context 1 41283 NULL
-+create_bounce_buffer_41330 create_bounce_buffer 3 41330 NULL
-+user_update_41332 user_update 3 41332 NULL
-+twl_change_queue_depth_41342 twl_change_queue_depth 2 41342 NULL
-+cnic_init_id_tbl_41354 cnic_init_id_tbl 2 41354 NULL
-+kmp_init_41373 kmp_init 2 41373 NULL
-+isr_commands_read_41398 isr_commands_read 3 41398 NULL
-+sys_flistxattr_41407 sys_flistxattr 3 41407 NULL
-+xfs_iext_add_41422 xfs_iext_add 3 41422 NULL
-+isdn_ppp_fill_rq_41428 isdn_ppp_fill_rq 2 41428 NULL
-+lbs_rdrf_read_41431 lbs_rdrf_read 3 41431 NULL
-+ntfs_file_buffered_write_41442 ntfs_file_buffered_write 6-4 41442 NULL
-+pcpu_build_alloc_info_41443 pcpu_build_alloc_info 1-2-3 41443 NULL
-+layout_leb_in_gaps_41470 layout_leb_in_gaps 0 41470 NULL
-+wep_interrupt_read_41492 wep_interrupt_read 3 41492 NULL
-+hpfs_translate_name_41497 hpfs_translate_name 3 41497 NULL
-+xfrm_hash_new_size_41505 xfrm_hash_new_size 0-1 41505 NULL
-+ldisc_receive_41516 ldisc_receive 4 41516 NULL
-+rng_dev_read_41581 rng_dev_read 3 41581 NULL
-+read_file_rx_chainmask_41605 read_file_rx_chainmask 3 41605 NULL
-+vga_io_r_41609 vga_io_r 0 41609 NULL
-+tcp_hdrlen_41610 tcp_hdrlen 0 41610 NULL
-+usb_endpoint_maxp_41613 usb_endpoint_maxp 0 41613 NULL nohasharray
-+lbs_bcnmiss_write_41613 lbs_bcnmiss_write 3 41613 &usb_endpoint_maxp_41613
-+lis3l02dq_read_accel_from_buffer_41615 lis3l02dq_read_accel_from_buffer 2 41615 NULL
-+mempool_create_kmalloc_pool_41650 mempool_create_kmalloc_pool 1 41650 NULL
-+get_std_timing_41654 get_std_timing 0 41654 NULL
-+squashfs_cache_init_41656 squashfs_cache_init 2 41656 NULL
-+ieee80211_if_fmt_bssid_41677 ieee80211_if_fmt_bssid 3 41677 NULL
-+uapsd_max_sp_len_write_41683 uapsd_max_sp_len_write 3 41683 NULL
-+apei_exec_for_each_entry_41717 apei_exec_for_each_entry 0 41717 NULL
-+sys_pwritev_41722 sys_pwritev 3 41722 NULL
-+hc_gpa_41744 hc_gpa 0-2-3 41744 NULL
-+fillonedir_41746 fillonedir 3 41746 NULL
-+ocfs2_dx_dir_rebalance_41793 ocfs2_dx_dir_rebalance 7 41793 NULL
-+bat_socket_read_41813 bat_socket_read 3 41813 NULL
-+sco_send_frame_41815 sco_send_frame 3 41815 NULL
-+do_ip_setsockopt_41852 do_ip_setsockopt 5 41852 NULL
-+tcp_packets_in_flight_41853 tcp_packets_in_flight 0 41853 NULL
-+keyctl_instantiate_key_41855 keyctl_instantiate_key 3 41855 NULL
-+pci_map_single_41869 pci_map_single 0 41869 NULL
-+usb_gadget_get_string_41871 usb_gadget_get_string 0 41871 NULL
-+get_packet_41914 get_packet 3 41914 NULL
-+get_fdb_entries_41916 get_fdb_entries 3 41916 NULL
-+ceph_get_direct_page_vector_41917 ceph_get_direct_page_vector 2 41917 NULL
-+nfsd_getxattr_41934 nfsd_getxattr 0 41934 NULL
-+iscsi_iser_recv_41948 iscsi_iser_recv 4 41948 NULL
-+ocfs2_xattr_bucket_get_name_value_41949 ocfs2_xattr_bucket_get_name_value 0 41949 NULL
-+efx_tx_queue_insert_41955 efx_tx_queue_insert 2 41955 NULL
-+portnames_read_41958 portnames_read 3 41958 NULL
-+dst_mtu_41969 dst_mtu 0 41969 NULL
-+cx24116_writeregN_41975 cx24116_writeregN 4 41975 NULL
-+ubi_io_is_bad_41983 ubi_io_is_bad 0 41983 NULL
-+_get_slice_41991 _get_slice 0 41991 NULL
-+em28xx_write_regs_41996 em28xx_write_regs 0 41996 NULL
-+flakey_status_42000 flakey_status 4 42000 NULL
-+pool_allocate_42012 pool_allocate 3 42012 NULL
-+spidev_sync_read_42014 spidev_sync_read 0 42014 NULL
-+rs_sta_dbgfs_scale_table_write_42017 rs_sta_dbgfs_scale_table_write 3 42017 NULL
-+ensure_wear_leveling_42029 ensure_wear_leveling 0 42029 NULL
-+acpi_ut_create_buffer_object_42030 acpi_ut_create_buffer_object 1 42030 NULL
-+__hwahc_op_set_gtk_42038 __hwahc_op_set_gtk 4 42038 NULL
-+irda_sendmsg_ultra_42047 irda_sendmsg_ultra 4 42047 NULL
-+jffs2_do_link_42048 jffs2_do_link 6 42048 NULL
-+InterfaceTransmitPacket_42058 InterfaceTransmitPacket 3 42058 NULL
-+brcmf_sdbrcm_downloadvars_42064 brcmf_sdbrcm_downloadvars 3 42064 NULL
-+scsi_execute_req_42088 scsi_execute_req 5 42088 NULL
-+sk_chk_filter_42095 sk_chk_filter 2 42095 NULL
-+submit_inquiry_42108 submit_inquiry 3 42108 NULL
-+sysfs_read_file_42113 sysfs_read_file 3 42113 NULL
-+store_gps_42118 store_gps 4 42118 NULL
-+ext4_do_update_inode_42127 ext4_do_update_inode 0 42127 NULL
-+Read_hfc16_stable_42131 Read_hfc16_stable 0 42131 NULL
-+ttm_agp_populate_42144 ttm_agp_populate 2 42144 NULL
-+v9fs_alloc_rdir_buf_42150 v9fs_alloc_rdir_buf 2 42150 NULL
-+mmc_align_data_size_42161 mmc_align_data_size 0-2 42161 NULL
-+read_file_base_eeprom_42168 read_file_base_eeprom 3 42168 NULL
-+oprofilefs_str_to_user_42182 oprofilefs_str_to_user 3 42182 NULL
-+write_file_beacon_42185 write_file_beacon 3 42185 NULL
-+get_znodes_to_commit_42201 get_znodes_to_commit 0 42201 NULL
-+btmrvl_hsmode_write_42252 btmrvl_hsmode_write 3 42252 NULL
-+ctnetlink_proto_size_42270 ctnetlink_proto_size 0 42270 NULL
-+__pcpu_size_to_slot_42271 __pcpu_size_to_slot 0 42271 NULL
-+snd_pcm_hw_param_value_max_42280 snd_pcm_hw_param_value_max 0 42280 NULL
-+rtnl_link_get_af_size_42296 rtnl_link_get_af_size 0 42296 NULL
-+crypt_status_42302 crypt_status 4 42302 NULL nohasharray
-+sel_read_perm_42302 sel_read_perm 3 42302 &crypt_status_42302
-+sctp_setsockopt_del_key_42304 sctp_setsockopt_del_key 3 42304 NULL nohasharray
-+ulong_read_file_42304 ulong_read_file 3 42304 &sctp_setsockopt_del_key_42304
-+hysdn_conf_read_42324 hysdn_conf_read 3 42324 NULL nohasharray
-+tracing_ctrl_write_42324 tracing_ctrl_write 3 42324 &hysdn_conf_read_42324
-+tcp_sync_mss_42330 tcp_sync_mss 0-2 42330 NULL
-+ide_raw_taskfile_42355 ide_raw_taskfile 4 42355 NULL
-+msnd_fifo_read_42406 msnd_fifo_read 0-3 42406 NULL
-+brn_proc_write_42407 brn_proc_write 3 42407 NULL
-+krng_get_random_42420 krng_get_random 3 42420 NULL
-+gsm_data_alloc_42437 gsm_data_alloc 3 42437 NULL
-+key_conf_keyidx_read_42443 key_conf_keyidx_read 3 42443 NULL
-+snd_pcm_action_group_42452 snd_pcm_action_group 0 42452 NULL
-+tcm_loop_change_queue_depth_42454 tcm_loop_change_queue_depth 2 42454 NULL
-+neigh_nlmsg_size_42464 neigh_nlmsg_size 0 42464 NULL
-+kernel_recvmsg_42482 kernel_recvmsg 0 42482 NULL
-+brcmf_sdbrcm_bus_txctl_42492 brcmf_sdbrcm_bus_txctl 3 42492 NULL
-+kvm_write_wall_clock_42520 kvm_write_wall_clock 2 42520 NULL
-+smk_write_netlbladdr_42525 smk_write_netlbladdr 3 42525 NULL
-+snd_emux_create_port_42533 snd_emux_create_port 3 42533 NULL
-+dbAllocNear_42546 dbAllocNear 0 42546 NULL
-+udp_recvmsg_42558 udp_recvmsg 4 42558 NULL
-+iwl_print_event_log_42566 iwl_print_event_log 7-5-0 42566 NULL
-+xfrm_new_hash_mask_42579 xfrm_new_hash_mask 0-1 42579 NULL
-+oom_score_adj_write_42594 oom_score_adj_write 3 42594 NULL
-+__pskb_pull_42602 __pskb_pull 2 42602 NULL
-+sys_move_pages_42626 sys_move_pages 2 42626 NULL
-+ieee80211_if_fmt_dot11MeshHWMPactivePathTimeout_42635 ieee80211_if_fmt_dot11MeshHWMPactivePathTimeout 3 42635 NULL
-+scsi_activate_tcq_42640 scsi_activate_tcq 2 42640 NULL
-+br_mdb_rehash_42643 br_mdb_rehash 2 42643 NULL
-+parport_pc_compat_write_block_pio_42644 parport_pc_compat_write_block_pio 3 42644 NULL
-+_regmap_raw_write_42652 _regmap_raw_write 4 42652 NULL
-+l2tp_xmit_skb_42672 l2tp_xmit_skb 3 42672 NULL
-+request_key_and_link_42693 request_key_and_link 4 42693 NULL
-+vb2_read_42703 vb2_read 3 42703 NULL
-+__ocfs2_decrease_refcount_42717 __ocfs2_decrease_refcount 5-4 42717 NULL
-+read_status_42722 read_status 0 42722 NULL
-+dvb_demux_ioctl_42733 dvb_demux_ioctl 2 42733 NULL
-+set_aoe_iflist_42737 set_aoe_iflist 2 42737 NULL
-+ax25_setsockopt_42740 ax25_setsockopt 5 42740 NULL
-+dpm_sysfs_add_42756 dpm_sysfs_add 0 42756 NULL
-+qla2x00_get_ctx_bsg_sp_42768 qla2x00_get_ctx_bsg_sp 3 42768 NULL
-+x25_recvmsg_42777 x25_recvmsg 4 42777 NULL
-+snd_midi_event_decode_42780 snd_midi_event_decode 0 42780 NULL
-+cryptd_hash_setkey_42781 cryptd_hash_setkey 3 42781 NULL
-+koneplus_sysfs_read_42792 koneplus_sysfs_read 6 42792 NULL
-+ntfs_attr_extend_allocation_42796 ntfs_attr_extend_allocation 0 42796 NULL
-+fw_device_op_compat_ioctl_42804 fw_device_op_compat_ioctl 2 42804 NULL
-+drm_ioctl_42813 drm_ioctl 2 42813 NULL
-+iwl_dbgfs_ucode_bt_stats_read_42820 iwl_dbgfs_ucode_bt_stats_read 3 42820 NULL
-+set_arg_42824 set_arg 3 42824 NULL
-+ocfs2_desc_bitmap_to_cluster_off_42831 ocfs2_desc_bitmap_to_cluster_off 2 42831 NULL
-+prandom_u32_42853 prandom_u32 0 42853 NULL
-+ocfs2_clusters_for_bytes_42872 ocfs2_clusters_for_bytes 0-2 42872 NULL
-+pskb_expand_head_42881 pskb_expand_head 2-3 42881 NULL
-+tipc_port_recv_sections_42890 tipc_port_recv_sections 4 42890 NULL
-+xpc_kmalloc_cacheline_aligned_42895 xpc_kmalloc_cacheline_aligned 1 42895 NULL
-+SendTxCommandPacket_42901 SendTxCommandPacket 3 42901 NULL
-+hd_end_request_42904 hd_end_request 2 42904 NULL
-+sctp_getsockopt_maxburst_42941 sctp_getsockopt_maxburst 2 42941 NULL
-+vx_reset_chk_42946 vx_reset_chk 0 42946 NULL
-+sys_sethostname_42962 sys_sethostname 2 42962 NULL
-+ixj_enhanced_read_42980 ixj_enhanced_read 3 42980 NULL
-+pfkey_xfrm_policy2sec_ctx_size_42981 pfkey_xfrm_policy2sec_ctx_size 0 42981 NULL nohasharray
-+compat_udpv6_setsockopt_42981 compat_udpv6_setsockopt 5 42981 &pfkey_xfrm_policy2sec_ctx_size_42981
-+nfs_idmap_get_desc_42990 nfs_idmap_get_desc 4-2 42990 NULL
-+isr_rx_mem_overflow_read_43025 isr_rx_mem_overflow_read 3 43025 NULL
-+wep_default_key_count_read_43035 wep_default_key_count_read 3 43035 NULL nohasharray
-+store_lssw_43035 store_lssw 4 43035 &wep_default_key_count_read_43035
-+uapsd_queues_write_43040 uapsd_queues_write 3 43040 NULL
-+sep_prepare_input_output_dma_table_in_dcb_43064 sep_prepare_input_output_dma_table_in_dcb 4-5-3-2 43064 NULL
-+_xfer_secondary_pool_43089 _xfer_secondary_pool 2 43089 NULL
-+ieee80211_if_fmt_drop_unencrypted_43107 ieee80211_if_fmt_drop_unencrypted 3 43107 NULL
-+usb_string_sub_43164 usb_string_sub 0 43164 NULL
-+ext4_xattr_ibody_get_43200 ext4_xattr_ibody_get 0 43200 NULL
-+teiup_create_43201 teiup_create 3 43201 NULL
-+uio_write_43202 uio_write 3 43202 NULL
-+iso_callback_43208 iso_callback 3 43208 NULL
-+atomic_long_add_return_43217 atomic_long_add_return 1-0 43217 NULL
-+vmemmap_alloc_block_43245 vmemmap_alloc_block 1 43245 NULL
-+store_wwan_43264 store_wwan 4 43264 NULL
-+ide_end_rq_43269 ide_end_rq 4 43269 NULL
-+parport_pc_ecp_write_block_pio_43278 parport_pc_ecp_write_block_pio 3 43278 NULL nohasharray
-+evtchn_write_43278 evtchn_write 3 43278 &parport_pc_ecp_write_block_pio_43278
-+filemap_write_and_wait_range_43279 filemap_write_and_wait_range 0 43279 NULL
-+alloc_subdevices_43300 alloc_subdevices 2 43300 NULL
-+store_ledd_43312 store_ledd 4 43312 NULL
-+__ext4_get_inode_loc_43332 __ext4_get_inode_loc 0 43332 NULL
-+svc_pool_map_get_43386 svc_pool_map_get 0 43386 NULL
-+xenfb_write_43412 xenfb_write 3 43412 NULL
-+ext4_xattr_check_names_43422 ext4_xattr_check_names 0 43422 NULL
-+__alloc_bootmem_low_43423 __alloc_bootmem_low 1 43423 NULL
-+usb_alloc_urb_43436 usb_alloc_urb 1 43436 NULL
-+usb_string_43443 usb_string 0 43443 NULL nohasharray
-+usemap_size_43443 usemap_size 0-2-1 43443 &usb_string_43443
-+__data_list_add_eb_43472 __data_list_add_eb 0 43472 NULL
-+nf_nat_ftp_fmt_cmd_43495 nf_nat_ftp_fmt_cmd 0 43495 NULL
-+ieee80211_if_fmt_dot11MeshHWMPnetDiameterTraversalTime_43505 ieee80211_if_fmt_dot11MeshHWMPnetDiameterTraversalTime 3 43505 NULL
-+do_readlink_43518 do_readlink 2 43518 NULL
-+dvb_ca_en50221_io_write_43533 dvb_ca_en50221_io_write 3 43533 NULL
-+cachefiles_daemon_write_43535 cachefiles_daemon_write 3 43535 NULL
-+request_resource_43548 request_resource 0 43548 NULL
-+ath_rx_init_43564 ath_rx_init 2 43564 NULL nohasharray
-+_send_control_msg_43564 _send_control_msg 6 43564 &ath_rx_init_43564
-+_fc_frame_alloc_43568 _fc_frame_alloc 1 43568 NULL
-+rpc_malloc_43573 rpc_malloc 2 43573 NULL
-+handle_frequent_errors_43599 handle_frequent_errors 4 43599 NULL
-+lpfc_idiag_drbacc_read_reg_43606 lpfc_idiag_drbacc_read_reg 0-3 43606 NULL
-+proc_read_43614 proc_read 3 43614 NULL
-+prison_create_43623 prison_create 1 43623 NULL
-+random_write_43656 random_write 3 43656 NULL
-+bio_integrity_tag_43658 bio_integrity_tag 3 43658 NULL
-+ext4_acl_count_43659 ext4_acl_count 0-1 43659 NULL
-+dmam_declare_coherent_memory_43679 dmam_declare_coherent_memory 4 43679 NULL
-+hidp_send_ctrl_message_43702 hidp_send_ctrl_message 4 43702 NULL
-+user_confirm_reply_43708 user_confirm_reply 4 43708 NULL
-+drbd_md_first_sector_43729 drbd_md_first_sector 0 43729 NULL
-+reset_card_proc_43731 reset_card_proc 0 43731 NULL
-+snd_rme32_playback_copy_43732 snd_rme32_playback_copy 5 43732 NULL
-+ocfs2_replace_clusters_43733 ocfs2_replace_clusters 5 43733 NULL
-+fuse_conn_congestion_threshold_write_43736 fuse_conn_congestion_threshold_write 3 43736 NULL
-+osdv1_attr_list_elem_size_43747 osdv1_attr_list_elem_size 0-1 43747 NULL
-+gigaset_initcs_43753 gigaset_initcs 2 43753 NULL
-+sctp_setsockopt_active_key_43755 sctp_setsockopt_active_key 3 43755 NULL
-+ocfs2_xattr_get_value_outside_43787 ocfs2_xattr_get_value_outside 0 43787 NULL nohasharray
-+byte_pos_43787 byte_pos 0-2 43787 &ocfs2_xattr_get_value_outside_43787
-+btrfs_copy_from_user_43806 btrfs_copy_from_user 3-1-0 43806 NULL
-+store_cpufv_disabled_43809 store_cpufv_disabled 4 43809 NULL
-+hci_send_cmd_43810 hci_send_cmd 3 43810 NULL
-+ext4_split_extent_43818 ext4_split_extent 0 43818 NULL
-+i915_gem_execbuffer_relocate_entry_43822 i915_gem_execbuffer_relocate_entry 0 43822 NULL
-+ieee80211_if_fmt_element_ttl_43825 ieee80211_if_fmt_element_ttl 3 43825 NULL
-+ieee80211_alloc_hw_43829 ieee80211_alloc_hw 1 43829 NULL
-+p54_download_eeprom_43842 p54_download_eeprom 4 43842 NULL
-+read_flush_43851 read_flush 3 43851 NULL
-+idmap_update_entry_43885 idmap_update_entry 3 43885 NULL
-+prism2_sta_send_mgmt_43916 prism2_sta_send_mgmt 5 43916 NULL
-+stats_dot11RTSFailureCount_read_43948 stats_dot11RTSFailureCount_read 3 43948 NULL
-+i915_ring_idle_43969 i915_ring_idle 0 43969 NULL
-+__get_required_blob_size_43980 __get_required_blob_size 0-3-2 43980 NULL
-+nla_reserve_43984 nla_reserve 3 43984 NULL
-+scsi_command_size_43992 scsi_command_size 0 43992 NULL nohasharray
-+bcm_recvmsg_43992 bcm_recvmsg 4 43992 &scsi_command_size_43992 nohasharray
-+kvm_read_guest_virt_43992 kvm_read_guest_virt 4-2 43992 &bcm_recvmsg_43992
-+write_flush_procfs_44011 write_flush_procfs 3 44011 NULL
-+btrfs_prev_leaf_44083 btrfs_prev_leaf 0 44083 NULL
-+xlog_recover_add_to_cont_trans_44102 xlog_recover_add_to_cont_trans 4 44102 NULL
-+skb_frag_dma_map_44112 skb_frag_dma_map 0 44112 NULL
-+tracing_set_trace_read_44122 tracing_set_trace_read 3 44122 NULL
-+em28xx_read_reg_req_44130 em28xx_read_reg_req 0 44130 NULL
-+scsi_get_resid_44147 scsi_get_resid 0 44147 NULL
-+ocfs2_xattr_bucket_find_44174 ocfs2_xattr_bucket_find 0 44174 NULL
-+handle_eviocgbit_44193 handle_eviocgbit 3 44193 NULL
-+srp_alloc_iu_44227 srp_alloc_iu 2 44227 NULL
-+scsi_track_queue_full_44239 scsi_track_queue_full 2 44239 NULL
-+enlarge_skb_44248 enlarge_skb 2 44248 NULL
-+apei_resources_sub_44252 apei_resources_sub 0 44252 NULL
-+device_create_file_44285 device_create_file 0 44285 NULL
-+ocfs2_zero_range_for_truncate_44294 ocfs2_zero_range_for_truncate 3 44294 NULL
-+iwl3945_statistics_flag_44310 iwl3945_statistics_flag 3-0 44310 NULL
-+bitmap_scnprintf_44318 bitmap_scnprintf 2-0 44318 NULL
-+dispatch_proc_write_44320 dispatch_proc_write 3 44320 NULL
-+rs_init_44327 rs_init 1 44327 NULL
-+count_ah_combs_44334 count_ah_combs 0 44334 NULL
-+blk_queue_init_tags_44355 blk_queue_init_tags 2 44355 NULL
-+rts_threshold_read_44384 rts_threshold_read 3 44384 NULL
-+aoedev_flush_44398 aoedev_flush 2 44398 NULL
-+strlcpy_44400 strlcpy 3 44400 NULL
-+drm_buffer_alloc_44405 drm_buffer_alloc 2 44405 NULL
-+osst_do_scsi_44410 osst_do_scsi 4 44410 NULL
-+write_file_debug_44476 write_file_debug 3 44476 NULL
-+btrfs_chunk_item_size_44478 btrfs_chunk_item_size 0-1 44478 NULL
-+sdio_align_size_44489 sdio_align_size 0-2 44489 NULL
-+ath6kl_tm_rx_report_44494 ath6kl_tm_rx_report 3 44494 NULL
-+ieee80211_if_read_dropped_frames_ttl_44500 ieee80211_if_read_dropped_frames_ttl 3 44500 NULL
-+xfrm_sa_len_44502 xfrm_sa_len 0 44502 NULL
-+ac_register_board_44504 ac_register_board 3 44504 NULL
-+security_getprocattr_44505 security_getprocattr 0 44505 NULL nohasharray
-+iwl_dbgfs_sram_read_44505 iwl_dbgfs_sram_read 3 44505 &security_getprocattr_44505
-+spidev_write_44510 spidev_write 3 44510 NULL
-+sys_msgsnd_44537 sys_msgsnd 3 44537 NULL nohasharray
-+comm_write_44537 comm_write 3 44537 &sys_msgsnd_44537
-+sysfs_add_one_44629 sysfs_add_one 0 44629 NULL
-+cfpkt_add_body_44630 cfpkt_add_body 3 44630 NULL
-+alloc_ctrl_packet_44667 alloc_ctrl_packet 1 44667 NULL
-+sysfs_create_link_44685 sysfs_create_link 0 44685 NULL
-+ts_read_44687 ts_read 3 44687 NULL
-+i915_wait_request_44703 i915_wait_request 0 44703 NULL
-+__generic_block_fiemap_44713 __generic_block_fiemap 4 44713 NULL
-+mempool_create_node_44715 mempool_create_node 1 44715 NULL
-+_zd_iowrite32v_locked_44725 _zd_iowrite32v_locked 3 44725 NULL
-+clusterip_proc_write_44729 clusterip_proc_write 3 44729 NULL
-+fib_count_nexthops_44730 fib_count_nexthops 0 44730 NULL
-+key_tx_rx_count_read_44742 key_tx_rx_count_read 3 44742 NULL
-+tnode_new_44757 tnode_new 3 44757 NULL nohasharray
-+pty_write_44757 pty_write 3 44757 &tnode_new_44757
-+__videobuf_copy_stream_44769 __videobuf_copy_stream 4-0 44769 NULL
-+sctp_setsockopt_44788 sctp_setsockopt 5 44788 NULL
-+rx_dropped_read_44799 rx_dropped_read 3 44799 NULL
-+x25_pacsize_to_bytes_44812 x25_pacsize_to_bytes 0 44812 NULL
-+sisusb_write_44834 sisusb_write 3 44834 NULL
-+nl80211_send_unprot_disassoc_44846 nl80211_send_unprot_disassoc 4 44846 NULL
-+qib_verbs_send_dma_44850 qib_verbs_send_dma 6 44850 NULL
-+init_rs_44873 init_rs 1 44873 NULL
-+skb_availroom_44883 skb_availroom 0 44883 NULL
-+nf_bridge_encap_header_len_44890 nf_bridge_encap_header_len 0 44890 NULL
-+do_tty_write_44896 do_tty_write 5 44896 NULL
-+tx_queue_status_read_44978 tx_queue_status_read 3 44978 NULL
-+ftdi_process_packet_45005 ftdi_process_packet 5 45005 NULL
-+i915_gem_do_execbuffer_45012 i915_gem_do_execbuffer 0 45012 NULL
-+ptrace_writedata_45021 ptrace_writedata 4 45021 NULL
-+vhci_get_user_45039 vhci_get_user 3 45039 NULL
-+sel_write_user_45060 sel_write_user 3 45060 NULL
-+snd_mixart_BA0_read_45069 snd_mixart_BA0_read 5 45069 NULL
-+orig_hash_del_if_45080 orig_hash_del_if 2 45080 NULL
-+usbdev_read_45114 usbdev_read 3 45114 NULL
-+send_to_tty_45141 send_to_tty 3 45141 NULL
-+crypto_aead_blocksize_45148 crypto_aead_blocksize 0 45148 NULL
-+gen_bitmask_string_45149 gen_bitmask_string 6 45149 NULL
-+device_write_45156 device_write 3 45156 NULL nohasharray
-+ocfs2_remove_inode_range_45156 ocfs2_remove_inode_range 3-4 45156 &device_write_45156
-+tomoyo_write_self_45161 tomoyo_write_self 3 45161 NULL
-+sta_agg_status_write_45164 sta_agg_status_write 3 45164 NULL
-+snd_sb_csp_load_user_45190 snd_sb_csp_load_user 3 45190 NULL nohasharray
-+sctp_pack_cookie_45190 sctp_pack_cookie 6 45190 &snd_sb_csp_load_user_45190
-+add_child_45201 add_child 4 45201 NULL
-+iso_alloc_urb_45206 iso_alloc_urb 4-5 45206 NULL
-+spi_alloc_master_45223 spi_alloc_master 2 45223 NULL
-+ieee80211_if_read_peer_45233 ieee80211_if_read_peer 3 45233 NULL
-+event_enable_write_45238 event_enable_write 3 45238 NULL
-+gfs2_fiemap_45282 gfs2_fiemap 4 45282 NULL
-+snd_pcm_oss_sync1_45298 snd_pcm_oss_sync1 2 45298 NULL
-+e1000_tx_map_45309 e1000_tx_map 5 45309 NULL
-+copy_vm86_regs_from_user_45340 copy_vm86_regs_from_user 3 45340 NULL
-+lane2_associate_req_45398 lane2_associate_req 4 45398 NULL
-+__data_list_add_45403 __data_list_add 0 45403 NULL
-+keymap_store_45406 keymap_store 4 45406 NULL
-+ath6kl_wmi_send_probe_response_cmd_45422 ath6kl_wmi_send_probe_response_cmd 5 45422 NULL
-+tty_buffer_alloc_45437 tty_buffer_alloc 2 45437 NULL
-+__node_remap_45458 __node_remap 4 45458 NULL
-+rds_ib_set_wr_signal_state_45463 rds_ib_set_wr_signal_state 0 45463 NULL
-+tracing_read_dyn_info_45468 tracing_read_dyn_info 3 45468 NULL
-+rds_message_copy_from_user_45510 rds_message_copy_from_user 3 45510 NULL
-+sys_lgetxattr_45531 sys_lgetxattr 4 45531 NULL
-+cgroup_read_u64_45532 cgroup_read_u64 5 45532 NULL
-+copy_macs_45534 copy_macs 4 45534 NULL
-+nla_attr_size_45545 nla_attr_size 0-1 45545 NULL
-+v9fs_direct_read_45546 v9fs_direct_read 3 45546 NULL
-+cx18_copy_mdl_to_user_45549 cx18_copy_mdl_to_user 4 45549 NULL
-+stats_dot11ACKFailureCount_read_45558 stats_dot11ACKFailureCount_read 3 45558 NULL
-+posix_acl_xattr_size_45561 posix_acl_xattr_size 0-1 45561 NULL
-+venus_rmdir_45564 venus_rmdir 4 45564 NULL
-+rdma_set_ib_paths_45592 rdma_set_ib_paths 3 45592 NULL
-+hidraw_get_report_45609 hidraw_get_report 3 45609 NULL
-+audit_log_n_hex_45617 audit_log_n_hex 3 45617 NULL
-+i915_gem_evict_everything_45629 i915_gem_evict_everything 0 45629 NULL
-+ext4_reserve_inode_write_45654 ext4_reserve_inode_write 0 45654 NULL
-+compat_mpctl_ioctl_45671 compat_mpctl_ioctl 2 45671 NULL
-+dgram_sendmsg_45679 dgram_sendmsg 4 45679 NULL
-+smk_write_ambient_45691 smk_write_ambient 3 45691 NULL
-+ip_nat_sip_expect_45693 ip_nat_sip_expect 7 45693 NULL
-+bscnl_emit_45699 bscnl_emit 2-5-0 45699 NULL nohasharray
-+unix_dgram_sendmsg_45699 unix_dgram_sendmsg 4 45699 &bscnl_emit_45699
-+sg_proc_write_adio_45704 sg_proc_write_adio 3 45704 NULL
-+dvb_ca_en50221_init_45718 dvb_ca_en50221_init 4 45718 NULL
-+handle_response_icmp_45733 handle_response_icmp 7 45733 NULL
-+snd_cs46xx_io_read_45734 snd_cs46xx_io_read 5 45734 NULL
-+v4l2_ctrl_new_std_45748 v4l2_ctrl_new_std 5 45748 NULL
-+lkdtm_debugfs_read_45752 lkdtm_debugfs_read 3 45752 NULL
-+i915_gem_object_flush_gpu_write_domain_45755 i915_gem_object_flush_gpu_write_domain 0 45755 NULL
-+alloc_ts_config_45775 alloc_ts_config 1 45775 NULL
-+nfs_idmap_request_key_45791 nfs_idmap_request_key 2 45791 NULL
-+raw_setsockopt_45800 raw_setsockopt 5 45800 NULL
-+rds_tcp_inc_copy_to_user_45804 rds_tcp_inc_copy_to_user 3 45804 NULL
-+lbs_rdbbp_read_45805 lbs_rdbbp_read 3 45805 NULL
-+pcpu_alloc_alloc_info_45813 pcpu_alloc_alloc_info 1-2 45813 NULL
-+ipv6_recv_rxpmtu_45830 ipv6_recv_rxpmtu 3 45830 NULL
-+amthi_read_45831 amthi_read 4 45831 NULL
-+audit_make_reply_45835 audit_make_reply 7 45835 NULL
-+__ip_select_ident_45851 __ip_select_ident 3 45851 NULL
-+smp_build_cmd_45853 smp_build_cmd 3 45853 NULL
-+isdn_write_45863 isdn_write 3 45863 NULL
-+rbd_get_num_segments_45864 rbd_get_num_segments 0-2-3 45864 NULL
-+tpm_config_in_45880 tpm_config_in 0 45880 NULL
-+get_rdac_req_45882 get_rdac_req 3 45882 NULL
-+ocfs2_xattr_block_find_45891 ocfs2_xattr_block_find 0 45891 NULL
-+__svc_create_45903 __svc_create 3 45903 NULL
-+dbgfs_frame_45917 dbgfs_frame 3 45917 NULL
-+alloc_mr_45935 alloc_mr 1 45935 NULL
-+cma_user_data_offset_45954 cma_user_data_offset 0 45954 NULL
-+ndisc_opt_addr_space_45959 ndisc_opt_addr_space 0 45959 NULL
-+rb_simple_read_45972 rb_simple_read 3 45972 NULL
-+ezusb_writememory_45976 ezusb_writememory 4 45976 NULL
-+ioat2_dca_count_dca_slots_45984 ioat2_dca_count_dca_slots 0 45984 NULL
-+sierra_setup_urb_46029 sierra_setup_urb 5 46029 NULL
-+get_free_entries_46030 get_free_entries 1 46030 NULL
-+__access_remote_vm_46031 __access_remote_vm 0 46031 NULL
-+snd_emu10k1x_ptr_read_46049 snd_emu10k1x_ptr_read 0 46049 NULL
-+run_card_proc_46057 run_card_proc 0 46057 NULL
-+line6_midibuf_bytes_used_46059 line6_midibuf_bytes_used 0 46059 NULL
-+__ocfs2_move_extent_46060 __ocfs2_move_extent 3-4-6-5 46060 NULL nohasharray
-+dma_tx_errors_read_46060 dma_tx_errors_read 3 46060 &__ocfs2_move_extent_46060
-+slhc_toss_46066 slhc_toss 0 46066 NULL
-+mgmt_event_46069 mgmt_event 4 46069 NULL
-+xfrm_sadinfo_msgsize_46073 xfrm_sadinfo_msgsize 0 46073 NULL
-+sel_commit_bools_write_46077 sel_commit_bools_write 3 46077 NULL
-+ata_host_alloc_46094 ata_host_alloc 2 46094 NULL
-+mlx4_ib_alloc_fast_reg_page_list_46119 mlx4_ib_alloc_fast_reg_page_list 2 46119 NULL
-+ddp_clear_map_46152 ddp_clear_map 4 46152 NULL
-+__netlink_change_ngroups_46156 __netlink_change_ngroups 2 46156 NULL
-+qlcnic_alloc_msix_entries_46160 qlcnic_alloc_msix_entries 2 46160 NULL
-+vxge_os_dma_malloc_46184 vxge_os_dma_malloc 2 46184 NULL
-+i2400m_op_msg_from_user_46213 i2400m_op_msg_from_user 4 46213 NULL
-+tm6000_i2c_recv_regs_46215 tm6000_i2c_recv_regs 5 46215 NULL
-+dsp_write_46218 dsp_write 2 46218 NULL
-+tx_abort_46232 tx_abort 0 46232 NULL
-+xen_setup_msi_irqs_46245 xen_setup_msi_irqs 2 46245 NULL
-+ReadReg_46277 ReadReg 0 46277 NULL
-+pep_alloc_skb_46303 pep_alloc_skb 3 46303 NULL
-+sg_proc_write_dressz_46316 sg_proc_write_dressz 3 46316 NULL
-+__hwahc_dev_set_key_46328 __hwahc_dev_set_key 5 46328 NULL
-+iwl_dbgfs_chain_noise_read_46355 iwl_dbgfs_chain_noise_read 3 46355 NULL
-+smk_write_direct_46363 smk_write_direct 3 46363 NULL
-+fib_nlmsg_size_46383 fib_nlmsg_size 0 46383 NULL
-+fuse_file_aio_write_46399 fuse_file_aio_write 4 46399 NULL
-+crypto_ablkcipher_reqsize_46411 crypto_ablkcipher_reqsize 0 46411 NULL
-+ttm_page_pool_get_pages_46431 ttm_page_pool_get_pages 0-5 46431 NULL
-+cp210x_set_config_46447 cp210x_set_config 4 46447 NULL
-+parport_pc_fifo_write_block_46455 parport_pc_fifo_write_block 3 46455 NULL
-+filldir64_46469 filldir64 3 46469 NULL
-+mthca_alloc_cq_buf_46512 mthca_alloc_cq_buf 3 46512 NULL
-+nl80211_send_rx_assoc_46538 nl80211_send_rx_assoc 4 46538 NULL
-+mv_get_hc_count_46554 mv_get_hc_count 0 46554 NULL
-+link_send_sections_long_46556 link_send_sections_long 4 46556 NULL
-+dn_current_mss_46574 dn_current_mss 0 46574 NULL
-+serverworks_create_gatt_pages_46582 serverworks_create_gatt_pages 1 46582 NULL
-+vscnprintf_46617 vscnprintf 0-2 46617 NULL
-+__kfifo_out_r_46623 __kfifo_out_r 3 46623 NULL
-+request_key_async_with_auxdata_46624 request_key_async_with_auxdata 4 46624 NULL
-+aircable_process_packet_46639 aircable_process_packet 5 46639 NULL
-+pci_enable_device_46642 pci_enable_device 0 46642 NULL
-+cx18_v4l2_ioctl_46647 cx18_v4l2_ioctl 2 46647 NULL
-+e1000_tx_map_46672 e1000_tx_map 4 46672 NULL
-+iwl4965_ucode_rx_stats_read_46676 iwl4965_ucode_rx_stats_read 3 46676 NULL
-+l2cap_parse_conf_rsp_46683 l2cap_parse_conf_rsp 0 46683 NULL
-+alloc_data_packet_46698 alloc_data_packet 1 46698 NULL
-+__ilog2_u32_46706 __ilog2_u32 0 46706 NULL
-+erst_dbg_write_46715 erst_dbg_write 3 46715 NULL
-+ctnetlink_nlmsg_size_46736 ctnetlink_nlmsg_size 0 46736 NULL
-+hest_ghes_dev_register_46766 hest_ghes_dev_register 1 46766 NULL
-+int_hw_irq_en_46776 int_hw_irq_en 3 46776 NULL
-+_xfs_buf_get_pages_46811 _xfs_buf_get_pages 2 46811 NULL
-+xfs_iroot_realloc_46826 xfs_iroot_realloc 2 46826 NULL
-+ieee80211_rx_radiotap_len_46846 ieee80211_rx_radiotap_len 0 46846 NULL
-+spi_async_46857 spi_async 0 46857 NULL
-+vsnprintf_46863 vsnprintf 0 46863 NULL
-+hpi_read_word_nolock_46881 hpi_read_word_nolock 0 46881 NULL
-+sk_mem_pages_46896 sk_mem_pages 0-1 46896 NULL
-+ol_dqblk_off_46904 ol_dqblk_off 2-3 46904 NULL
-+tracing_ctrl_read_46922 tracing_ctrl_read 3 46922 NULL
-+fb_write_46924 fb_write 3 46924 NULL
-+btmrvl_curpsmode_read_46939 btmrvl_curpsmode_read 3 46939 NULL
-+kvm_register_read_46948 kvm_register_read 0 46948 NULL
-+__sctp_setsockopt_connectx_46949 __sctp_setsockopt_connectx 3 46949 NULL
-+calculate_alignment_46958 calculate_alignment 0-2 46958 NULL
-+crypto_tfm_alg_alignmask_46971 crypto_tfm_alg_alignmask 0 46971 NULL
-+ath6kl_add_bss_if_needed_46978 ath6kl_add_bss_if_needed 5 46978 NULL
-+strlcat_46985 strlcat 3 46985 NULL
-+gfs2_xattr_system_set_46996 gfs2_xattr_system_set 4 46996 NULL nohasharray
-+sel_write_bool_46996 sel_write_bool 3 46996 &gfs2_xattr_system_set_46996
-+ttm_bo_io_47000 ttm_bo_io 5 47000 NULL
-+blk_rq_map_kern_47004 blk_rq_map_kern 4 47004 NULL
-+cx231xx_init_bulk_47024 cx231xx_init_bulk 3-2-4 47024 NULL
-+ext4_xattr_list_entries_47070 ext4_xattr_list_entries 0 47070 NULL
-+xfrm_report_msgsize_47077 xfrm_report_msgsize 0 47077 NULL
-+scsi_deactivate_tcq_47086 scsi_deactivate_tcq 2 47086 NULL
-+set_params_47113 set_params 0 47113 NULL
-+mousedev_read_47123 mousedev_read 3 47123 NULL
-+acpi_ut_initialize_buffer_47143 acpi_ut_initialize_buffer 2 47143 NULL nohasharray
-+ses_recv_diag_47143 ses_recv_diag 4 47143 &acpi_ut_initialize_buffer_47143
-+cxio_init_resource_fifo_random_47151 cxio_init_resource_fifo_random 3 47151 NULL
-+rs_sta_dbgfs_rate_scale_data_read_47165 rs_sta_dbgfs_rate_scale_data_read 3 47165 NULL
-+svc_pool_map_alloc_arrays_47181 svc_pool_map_alloc_arrays 2 47181 NULL
-+can_set_system_xattr_47182 can_set_system_xattr 4 47182 NULL
-+l2headersize_47238 l2headersize 0 47238 NULL
-+options_write_47243 options_write 3 47243 NULL
-+portcntrs_1_read_47253 portcntrs_1_read 3 47253 NULL
-+ablkcipher_next_slow_47274 ablkcipher_next_slow 4-3 47274 NULL
-+tty_audit_log_47280 tty_audit_log 8 47280 NULL
-+vsnprintf_47291 vsnprintf 0 47291 NULL
-+tx_internal_desc_overflow_read_47300 tx_internal_desc_overflow_read 3 47300 NULL
-+channel_type_read_47308 channel_type_read 3 47308 NULL
-+ieee80211_if_read_dot11MeshHoldingTimeout_47356 ieee80211_if_read_dot11MeshHoldingTimeout 3 47356 NULL
-+avc_get_hash_stats_47359 avc_get_hash_stats 0 47359 NULL
-+__bio_map_kern_47379 __bio_map_kern 3 47379 NULL
-+trace_options_core_read_47390 trace_options_core_read 3 47390 NULL
-+pfkey_sendmsg_47394 pfkey_sendmsg 4 47394 NULL
-+lbs_wrmac_write_47400 lbs_wrmac_write 3 47400 NULL
-+crypto_ablkcipher_alignmask_47410 crypto_ablkcipher_alignmask 0 47410 NULL
-+lbs_wrrf_write_47418 lbs_wrrf_write 3 47418 NULL
-+posix_acl_from_disk_47445 posix_acl_from_disk 2 47445 NULL
-+newpart_47485 newpart 6-4 47485 NULL
-+core_sys_select_47494 core_sys_select 1 47494 NULL
-+alloc_arraycache_47505 alloc_arraycache 2 47505 NULL
-+unlink_simple_47506 unlink_simple 3 47506 NULL
-+process_vm_rw_47533 process_vm_rw 3-5 47533 NULL nohasharray
-+vscnprintf_47533 vscnprintf 0-2 47533 &process_vm_rw_47533
-+einj_check_trigger_header_47534 einj_check_trigger_header 0 47534 NULL
-+ieee80211_if_fmt_min_discovery_timeout_47539 ieee80211_if_fmt_min_discovery_timeout 3 47539 NULL
-+set_printer_interface_47551 set_printer_interface 0 47551 NULL
-+read_ldt_47570 read_ldt 2 47570 NULL
-+vendorextnReadSection_47583 vendorextnReadSection 0 47583 NULL
-+ext4_kvzalloc_47605 ext4_kvzalloc 1 47605 NULL
-+sctp_ssnmap_new_47608 sctp_ssnmap_new 2-1 47608 NULL
-+uea_request_47613 uea_request 4 47613 NULL
-+cache_read_pipefs_47615 cache_read_pipefs 3 47615 NULL
-+kvm_pv_mmu_write_47630 kvm_pv_mmu_write 2 47630 NULL
-+ivtv_serialized_ioctl_47632 ivtv_serialized_ioctl 3 47632 NULL
-+__build_packet_message_47643 __build_packet_message 3-9 47643 NULL
-+packet_recvmsg_47700 packet_recvmsg 4 47700 NULL nohasharray
-+ipath_format_hwmsg_47700 ipath_format_hwmsg 2 47700 &packet_recvmsg_47700
-+bits_to_user_47733 bits_to_user 2-3 47733 NULL
-+carl9170_debugfs_read_47738 carl9170_debugfs_read 3 47738 NULL
-+ir_prepare_write_buffer_47747 ir_prepare_write_buffer 3 47747 NULL
-+mvumi_alloc_mem_resource_47750 mvumi_alloc_mem_resource 3 47750 NULL
-+alloc_sched_domains_47756 alloc_sched_domains 1 47756 NULL
-+i915_wedged_write_47771 i915_wedged_write 3 47771 NULL
-+uwb_ie_dump_hex_47774 uwb_ie_dump_hex 4 47774 NULL
-+sst_prepare_output_buffers_47781 sst_prepare_output_buffers 4 47781 NULL
-+tt_len_47789 tt_len 0-1 47789 NULL
-+stmmac_set_bfsize_47834 stmmac_set_bfsize 0 47834 NULL
-+ath6kl_wmi_set_appie_cmd_47855 ath6kl_wmi_set_appie_cmd 4 47855 NULL
-+vhci_read_47878 vhci_read 3 47878 NULL
-+keyctl_instantiate_key_common_47889 keyctl_instantiate_key_common 4 47889 NULL
-+osd_req_read_sg_47905 osd_req_read_sg 5 47905 NULL
-+comedi_write_47926 comedi_write 3 47926 NULL
-+nf_nat_ftp_47948 nf_nat_ftp 5 47948 NULL
-+cfg80211_testmode_alloc_reply_skb_47966 cfg80211_testmode_alloc_reply_skb 2 47966 NULL
-+mempool_resize_47983 mempool_resize 2 47983 NULL nohasharray
-+iwl_dbgfs_ucode_tracing_read_47983 iwl_dbgfs_ucode_tracing_read 3 47983 &mempool_resize_47983
-+mgmt_pending_add_47990 mgmt_pending_add 5 47990 NULL nohasharray
-+dbg_port_buf_47990 dbg_port_buf 2 47990 &mgmt_pending_add_47990
-+ib_umad_write_47993 ib_umad_write 3 47993 NULL
-+ffs_epfile_write_48014 ffs_epfile_write 3 48014 NULL
-+bio_integrity_set_tag_48035 bio_integrity_set_tag 3 48035 NULL
-+pppoe_sendmsg_48039 pppoe_sendmsg 4 48039 NULL
-+wpan_phy_alloc_48056 wpan_phy_alloc 1 48056 NULL
-+posix_acl_alloc_48063 posix_acl_alloc 1 48063 NULL
-+c4iw_init_resource_fifo_48090 c4iw_init_resource_fifo 3 48090 NULL
-+mmc_alloc_host_48097 mmc_alloc_host 1 48097 NULL
-+skb_copy_datagram_const_iovec_48102 skb_copy_datagram_const_iovec 4-2-5 48102 NULL
-+vmw_framebuffer_surface_dirty_48132 vmw_framebuffer_surface_dirty 6 48132 NULL
-+dn_fib_count_nhs_48145 dn_fib_count_nhs 0 48145 NULL
-+__tcp_push_pending_frames_48148 __tcp_push_pending_frames 2 48148 NULL
-+isr_dma1_done_read_48159 isr_dma1_done_read 3 48159 NULL
-+init_ipath_48187 init_ipath 1 48187 NULL
-+snd_seq_dump_var_event_48209 snd_seq_dump_var_event 0 48209 NULL
-+uv_blade_nr_possible_cpus_48226 uv_blade_nr_possible_cpus 0 48226 NULL
-+read_file_recv_48232 read_file_recv 3 48232 NULL
-+blk_rq_pos_48233 blk_rq_pos 0 48233 NULL
-+nfsctl_transaction_read_48250 nfsctl_transaction_read 3 48250 NULL
-+usb_hcd_submit_urb_48256 usb_hcd_submit_urb 0 48256 NULL
-+cache_write_pipefs_48270 cache_write_pipefs 3 48270 NULL
-+trace_options_write_48275 trace_options_write 3 48275 NULL
-+pkt_bio_alloc_48284 pkt_bio_alloc 1 48284 NULL
-+lpfc_idiag_extacc_read_48301 lpfc_idiag_extacc_read 3 48301 NULL
-+timblogiw_read_48305 timblogiw_read 3 48305 NULL
-+hash_setkey_48310 hash_setkey 3 48310 NULL
-+bcm_download_config_file_48313 bcm_download_config_file 0 48313 NULL
-+skb_add_data_48363 skb_add_data 3 48363 NULL
-+eexp_start_irq_48364 eexp_start_irq 2 48364 NULL
-+iscsi_complete_pdu_48372 iscsi_complete_pdu 4 48372 NULL
-+lbs_debugfs_write_48413 lbs_debugfs_write 3 48413 NULL
-+pwr_tx_without_ps_read_48423 pwr_tx_without_ps_read 3 48423 NULL
-+nfs4_alloc_pages_48426 nfs4_alloc_pages 1 48426 NULL
-+wm8994_write_48439 wm8994_write 3 48439 NULL
-+tun_recvmsg_48463 tun_recvmsg 4 48463 NULL
-+ipath_format_hwerrors_48487 ipath_format_hwerrors 5 48487 NULL
-+r8712_usbctrl_vendorreq_48489 r8712_usbctrl_vendorreq 6 48489 NULL
-+send_control_msg_48498 send_control_msg 6 48498 NULL
-+mlx4_en_create_tx_ring_48501 mlx4_en_create_tx_ring 4 48501 NULL
-+diva_os_copy_to_user_48508 diva_os_copy_to_user 4 48508 NULL nohasharray
-+iwl_legacy_dbgfs_status_read_48508 iwl_legacy_dbgfs_status_read 3 48508 &diva_os_copy_to_user_48508
-+phantom_get_free_48514 phantom_get_free 0 48514 NULL
-+ubi_dbg_check_write_48525 ubi_dbg_check_write 0 48525 NULL
-+wiimote_hid_send_48528 wiimote_hid_send 3 48528 NULL
-+drbd_bm_capacity_48530 drbd_bm_capacity 0 48530 NULL
-+ext_sd_execute_read_data_48589 ext_sd_execute_read_data 9-11 48589 NULL
-+do_ip_vs_set_ctl_48641 do_ip_vs_set_ctl 4 48641 NULL
-+lc_create_48662 lc_create 3 48662 NULL
-+aes_encrypt_packets_read_48666 aes_encrypt_packets_read 3 48666 NULL
-+sm501_create_subdev_48668 sm501_create_subdev 3-4 48668 NULL nohasharray
-+sys_setgroups_48668 sys_setgroups 1 48668 &sm501_create_subdev_48668
-+l2cap_build_cmd_48676 l2cap_build_cmd 4 48676 NULL
-+hysdn_log_write_48694 hysdn_log_write 3 48694 NULL
-+altera_drscan_48698 altera_drscan 2 48698 NULL
-+kvm_set_irq_routing_48704 kvm_set_irq_routing 3 48704 NULL
-+recv_msg_48709 recv_msg 4 48709 NULL
-+lpfc_idiag_drbacc_write_48712 lpfc_idiag_drbacc_write 3 48712 NULL
-+RFTrackingFiltersCorrection_48722 RFTrackingFiltersCorrection 0 48722 NULL
-+disconnect_48738 disconnect 4 48738 NULL
-+ath6kl_regwrite_read_48747 ath6kl_regwrite_read 3 48747 NULL
-+icmp_manip_pkt_48801 icmp_manip_pkt 2 48801 NULL
-+twa_change_queue_depth_48808 twa_change_queue_depth 2 48808 NULL
-+tcp_push_one_48816 tcp_push_one 2 48816 NULL
-+atomic_counters_read_48827 atomic_counters_read 3 48827 NULL
-+azx_get_position_48841 azx_get_position 0 48841 NULL
-+vc_do_resize_48842 vc_do_resize 3-4 48842 NULL
-+viafb_dvp1_proc_write_48864 viafb_dvp1_proc_write 3 48864 NULL
-+__ffs_ep0_read_events_48868 __ffs_ep0_read_events 3 48868 NULL
-+sys_setgroups16_48882 sys_setgroups16 1 48882 NULL
-+get_num_ops_48886 get_num_ops 0 48886 NULL
-+crypto_cipher_ctxsize_48890 crypto_cipher_ctxsize 0 48890 NULL
-+joydev_handle_JSIOCSAXMAP_48898 joydev_handle_JSIOCSAXMAP 3 48898 NULL nohasharray
-+mac_drv_rx_init_48898 mac_drv_rx_init 2 48898 &joydev_handle_JSIOCSAXMAP_48898
-+xdi_copy_to_user_48900 xdi_copy_to_user 4 48900 NULL
-+msg_hdr_sz_48908 msg_hdr_sz 0 48908 NULL
-+rts51x_ctrl_transfer_48914 rts51x_ctrl_transfer 8 48914 NULL
-+lpfc_sli4_get_els_iocb_cnt_48926 lpfc_sli4_get_els_iocb_cnt 0 48926 NULL
-+event_heart_beat_read_48961 event_heart_beat_read 3 48961 NULL
-+_alloc_set_attr_list_48991 _alloc_set_attr_list 4 48991 NULL
-+rds_rm_size_48996 rds_rm_size 0-2 48996 NULL
-+sel_write_enforce_48998 sel_write_enforce 3 48998 NULL
-+xd_rw_49020 xd_rw 3-4 49020 NULL
-+transient_status_49027 transient_status 4 49027 NULL
-+l2cap_bredr_sig_cmd_49065 l2cap_bredr_sig_cmd 3 49065 NULL
-+mirror_status_49073 mirror_status 4 49073 NULL
-+vmx_set_msr_49090 vmx_set_msr 3 49090 NULL
-+scsi_register_49094 scsi_register 2 49094 NULL
-+compat_do_readv_writev_49102 compat_do_readv_writev 4 49102 NULL
-+receive_client_update_packet_49104 receive_client_update_packet 3 49104 NULL
-+xfrm_replay_state_esn_len_49119 xfrm_replay_state_esn_len 0 49119 NULL
-+pt_read_49136 pt_read 3 49136 NULL
-+tipc_multicast_49144 tipc_multicast 5 49144 NULL nohasharray
-+iwl_legacy_dbgfs_fh_reg_read_49144 iwl_legacy_dbgfs_fh_reg_read 3 49144 &tipc_multicast_49144
-+ipwireless_tty_received_49154 ipwireless_tty_received 3 49154 NULL
-+ipw_queue_tx_init_49161 ipw_queue_tx_init 3 49161 NULL
-+__jfs_setxattr_49175 __jfs_setxattr 5 49175 NULL
-+dvb_dvr_ioctl_49182 dvb_dvr_ioctl 2 49182 NULL
-+root_nfs_cat_49192 root_nfs_cat 3 49192 NULL
-+iwl_dbgfs_ucode_general_stats_read_49199 iwl_dbgfs_ucode_general_stats_read 3 49199 NULL
-+do_jffs2_getxattr_49210 do_jffs2_getxattr 0 49210 NULL
-+handle_response_49269 handle_response 5 49269 NULL
-+osd_req_add_get_attr_list_49278 osd_req_add_get_attr_list 3 49278 NULL
-+__ext4_ext_dirty_49284 __ext4_ext_dirty 0 49284 NULL
-+viafb_dfph_proc_write_49288 viafb_dfph_proc_write 3 49288 NULL
-+uio_read_49300 uio_read 3 49300 NULL
-+cfpkt_setlen_49343 cfpkt_setlen 2 49343 NULL
-+joydev_ioctl_common_49359 joydev_ioctl_common 2 49359 NULL
-+ocfs2_remove_btree_range_49370 ocfs2_remove_btree_range 4-3-5 49370 NULL
-+px_raw_event_49371 px_raw_event 4 49371 NULL
-+iscsi_alloc_session_49390 iscsi_alloc_session 3 49390 NULL
-+applesmc_create_nodes_49392 applesmc_create_nodes 2 49392 NULL
-+rx_streaming_always_read_49401 rx_streaming_always_read 3 49401 NULL
-+iwl_legacy_dbgfs_nvm_read_49405 iwl_legacy_dbgfs_nvm_read 3 49405 NULL
-+tnode_alloc_49407 tnode_alloc 1 49407 NULL
-+samples_to_bytes_49426 samples_to_bytes 0-2 49426 NULL
-+i915_gem_object_set_to_gtt_domain_49450 i915_gem_object_set_to_gtt_domain 0 49450 NULL
-+agp_3_5_isochronous_node_enable_49465 agp_3_5_isochronous_node_enable 3 49465 NULL
-+xfs_iformat_local_49472 xfs_iformat_local 4 49472 NULL
-+dn_nsp_do_disc_49474 dn_nsp_do_disc 6-2 49474 NULL
-+esp4_get_mtu_49483 esp4_get_mtu 0-2 49483 NULL
-+isr_decrypt_done_read_49490 isr_decrypt_done_read 3 49490 NULL
-+__sock_recvmsg_nosec_49520 __sock_recvmsg_nosec 0 49520 NULL nohasharray
-+emulator_write_phys_49520 emulator_write_phys 2-4 49520 &__sock_recvmsg_nosec_49520
-+smk_write_access_49561 smk_write_access 3 49561 NULL
-+alloc_chunk_49575 alloc_chunk 1 49575 NULL
-+sctp_setsockopt_default_send_param_49578 sctp_setsockopt_default_send_param 3 49578 NULL
-+readfifo_49583 readfifo 1 49583 NULL
-+isr_wakeups_read_49607 isr_wakeups_read 3 49607 NULL
-+heap_init_49617 heap_init 2 49617 NULL
-+smk_write_doi_49621 smk_write_doi 3 49621 NULL
-+port_fops_read_49626 port_fops_read 3 49626 NULL
-+svm_set_msr_49643 svm_set_msr 3 49643 NULL
-+aa_simple_write_to_buffer_49683 aa_simple_write_to_buffer 3-4 49683 NULL
-+sys_gethostname_49698 sys_gethostname 2 49698 NULL
-+cx2341x_ctrl_new_menu_49700 cx2341x_ctrl_new_menu 3 49700 NULL
-+get_key_haup_common_49709 get_key_haup_common 4 49709 NULL
-+write_pool_49718 write_pool 3 49718 NULL
-+sys_fsetxattr_49736 sys_fsetxattr 4 49736 NULL
-+check_frame_49741 check_frame 0 49741 NULL
-+zd_usb_iowrite16v_49744 zd_usb_iowrite16v 3 49744 NULL
-+btrfs_chunk_num_stripes_49751 btrfs_chunk_num_stripes 0 49751 NULL
-+nci_skb_alloc_49757 nci_skb_alloc 2 49757 NULL
-+key_conf_keylen_read_49758 key_conf_keylen_read 3 49758 NULL
-+fuse_conn_waiting_read_49762 fuse_conn_waiting_read 3 49762 NULL
-+w83977af_fir_interrupt_49775 w83977af_fir_interrupt 0 49775 NULL
-+pohmelfs_send_xattr_req_49783 pohmelfs_send_xattr_req 6 49783 NULL
-+ceph_osdc_readpages_49789 ceph_osdc_readpages 10-4-0 49789 NULL
-+nfs4_acl_new_49806 nfs4_acl_new 1 49806 NULL
-+ntfs_copy_from_user_iovec_49829 ntfs_copy_from_user_iovec 3-6-0 49829 NULL
-+b1dma_tolink_49834 b1dma_tolink 0 49834 NULL
-+iraw_loop_49842 iraw_loop 0-1 49842 NULL
-+vmw_execbuf_process_49845 vmw_execbuf_process 5 49845 NULL
-+scsi_dispatch_cmd_entry_49848 scsi_dispatch_cmd_entry 3 49848 NULL
-+timeradd_entry_49850 timeradd_entry 3 49850 NULL
-+ubifs_destroy_tnc_subtree_49853 ubifs_destroy_tnc_subtree 0 49853 NULL
-+sctp_setsockopt_bindx_49870 sctp_setsockopt_bindx 3 49870 NULL
-+ceph_get_caps_49890 ceph_get_caps 0 49890 NULL
-+config_ep_by_speed_49939 config_ep_by_speed 0 49939 NULL
-+b43legacy_pio_read_49978 b43legacy_pio_read 0 49978 NULL
-+ieee80211_if_fmt_dtim_count_49987 ieee80211_if_fmt_dtim_count 3 49987 NULL
-+drm_buffer_copy_from_user_49990 drm_buffer_copy_from_user 3 49990 NULL
-+dn_mss_from_pmtu_50011 dn_mss_from_pmtu 0-2 50011 NULL
-+isdn_read_50021 isdn_read 3 50021 NULL
-+rbd_req_write_50041 rbd_req_write 4-5 50041 NULL
-+alloc_ebda_hpc_50046 alloc_ebda_hpc 1-2 50046 NULL
-+fuse_conn_max_background_write_50061 fuse_conn_max_background_write 3 50061 NULL
-+__kfifo_dma_in_prepare_50081 __kfifo_dma_in_prepare 4 50081 NULL
-+dev_set_alias_50084 dev_set_alias 3 50084 NULL
-+pcpu_get_vm_areas_50085 pcpu_get_vm_areas 3 50085 NULL
-+sock_setsockopt_50088 sock_setsockopt 5 50088 NULL
-+altera_swap_dr_50090 altera_swap_dr 2 50090 NULL
-+read_file_slot_50111 read_file_slot 3 50111 NULL
-+pn544_fw_read_50112 pn544_fw_read 0 50112 NULL
-+rx_streaming_interval_write_50120 rx_streaming_interval_write 3 50120 NULL
-+copy_items_50140 copy_items 6 50140 NULL
-+kmalloc_node_50163 kmalloc_node 1 50163 NULL
-+ahd_probe_stack_size_50168 ahd_probe_stack_size 0 50168 NULL
-+odev_update_50169 odev_update 2 50169 NULL
-+ubi_resize_volume_50172 ubi_resize_volume 2 50172 NULL nohasharray
-+ieee80211_if_fmt_dot11MeshHWMPRannInterval_50172 ieee80211_if_fmt_dot11MeshHWMPRannInterval 3 50172 &ubi_resize_volume_50172
-+ib_send_cm_drep_50186 ib_send_cm_drep 3 50186 NULL
-+l2cap_sock_setsockopt_50207 l2cap_sock_setsockopt 5 50207 NULL
-+ieee80211_skb_resize_50211 ieee80211_skb_resize 3 50211 NULL
-+mon_bin_compat_ioctl_50234 mon_bin_compat_ioctl 3 50234 NULL
-+sg_kmalloc_50240 sg_kmalloc 1 50240 NULL
-+afs_extract_data_50261 afs_extract_data 5 50261 NULL
-+rxrpc_setsockopt_50286 rxrpc_setsockopt 5 50286 NULL
-+soc_codec_reg_show_50302 soc_codec_reg_show 0-3 50302 NULL
-+iterate_irefs_50313 iterate_irefs 0 50313 NULL
-+cifs_readdata_alloc_50318 cifs_readdata_alloc 1 50318 NULL
-+do_launder_page_50329 do_launder_page 0 50329 NULL
-+lpfc_idiag_pcicfg_read_50334 lpfc_idiag_pcicfg_read 3 50334 NULL
-+ocfs2_block_to_cluster_group_50337 ocfs2_block_to_cluster_group 2 50337 NULL nohasharray
-+snd_pcm_lib_writev_50337 snd_pcm_lib_writev 3-0 50337 &ocfs2_block_to_cluster_group_50337
-+tpm_read_50344 tpm_read 3 50344 NULL
-+rts51x_bulk_transfer_buf_50352 rts51x_bulk_transfer_buf 4 50352 NULL
-+isdn_ppp_read_50356 isdn_ppp_read 4 50356 NULL
-+unpack_u16_chunk_50357 unpack_u16_chunk 0 50357 NULL
-+iwl_dbgfs_echo_test_write_50362 iwl_dbgfs_echo_test_write 3 50362 NULL
-+xfrm_send_migrate_50365 xfrm_send_migrate 5 50365 NULL
-+sl_alloc_bufs_50380 sl_alloc_bufs 2 50380 NULL
-+inet_nlmsg_size_50399 inet_nlmsg_size 0 50399 NULL
-+l2tp_ip_sendmsg_50411 l2tp_ip_sendmsg 4 50411 NULL
-+iscsi_create_conn_50425 iscsi_create_conn 2 50425 NULL
-+pgctrl_write_50453 pgctrl_write 3 50453 NULL
-+device_create_sys_dev_entry_50458 device_create_sys_dev_entry 0 50458 NULL
-+cdrom_read_cdda_50478 cdrom_read_cdda 4 50478 NULL
-+pwr_rcvd_awake_beacons_read_50505 pwr_rcvd_awake_beacons_read 3 50505 NULL
-+fwnet_receive_packet_50537 fwnet_receive_packet 9 50537 NULL
-+ath6kl_set_ap_probe_resp_ies_50539 ath6kl_set_ap_probe_resp_ies 3 50539 NULL
-+usbat_flash_write_data_50553 usbat_flash_write_data 4 50553 NULL
-+hme_read_desc32_50574 hme_read_desc32 0 50574 NULL
-+pep_reply_50582 pep_reply 5 50582 NULL
-+iwl_dbgfs_missed_beacon_read_50584 iwl_dbgfs_missed_beacon_read 3 50584 NULL
-+sge_rx_50594 sge_rx 3 50594 NULL
-+GET_WORD_50624 GET_WORD 0 50624 NULL
-+macvtap_alloc_skb_50629 macvtap_alloc_skb 2-4-3 50629 NULL
-+simple_transaction_get_50633 simple_transaction_get 3 50633 NULL
-+ocfs2_readlink_50656 ocfs2_readlink 3 50656 NULL
-+sys_readv_50664 sys_readv 3 50664 NULL
-+btmrvl_psstate_read_50683 btmrvl_psstate_read 3 50683 NULL
-+prism2_read_fid_reg_50689 prism2_read_fid_reg 0 50689 NULL
-+get_wear_leveling_table_len_50712 get_wear_leveling_table_len 0 50712 NULL
-+__ext3_get_inode_loc_50744 __ext3_get_inode_loc 0 50744 NULL
-+skb_padto_50759 skb_padto 2 50759 NULL
-+udp_manip_pkt_50770 udp_manip_pkt 2 50770 NULL
-+ocfs2_xattr_block_get_50773 ocfs2_xattr_block_get 0 50773 NULL
-+pipe_handler_request_50774 pipe_handler_request 5 50774 NULL nohasharray
-+tm6000_read_write_usb_50774 tm6000_read_write_usb 7 50774 &pipe_handler_request_50774
-+bio_alloc_map_data_50782 bio_alloc_map_data 1-2 50782 NULL
-+tpm_write_50798 tpm_write 3 50798 NULL
-+tun_do_read_50800 tun_do_read 4 50800 NULL
-+write_flush_50803 write_flush 3 50803 NULL
-+dvb_play_50814 dvb_play 3 50814 NULL
-+pstore_mkfile_50830 pstore_mkfile 5 50830 NULL
-+videobuf_dma_init_user_50839 videobuf_dma_init_user 4-3 50839 NULL
-+ChannelConfiguration_50853 ChannelConfiguration 0 50853 NULL
-+carl9170_debugfs_write_50857 carl9170_debugfs_write 3 50857 NULL
-+netlbl_secattr_catmap_walk_rng_50894 netlbl_secattr_catmap_walk_rng 0-2 50894 NULL
-+osd_req_write_sg_50908 osd_req_write_sg 5 50908 NULL
-+xfs_iext_remove_50909 xfs_iext_remove 3 50909 NULL
-+blk_rq_cur_sectors_50910 blk_rq_cur_sectors 0 50910 NULL
-+hash_recvmsg_50924 hash_recvmsg 4 50924 NULL
-+chd_dec_fetch_cdata_50926 chd_dec_fetch_cdata 3 50926 NULL
-+sock_bindtodevice_50942 sock_bindtodevice 3 50942 NULL
-+mld_newpack_50950 mld_newpack 2 50950 NULL
-+ocfs2_add_refcount_flag_50952 ocfs2_add_refcount_flag 6 50952 NULL
-+sdio_uart_write_50954 sdio_uart_write 3 50954 NULL
-+iwl_statistics_flag_50981 iwl_statistics_flag 3-0 50981 NULL
-+timeout_write_50991 timeout_write 3 50991 NULL
-+proc_write_51003 proc_write 3 51003 NULL
-+jbd2_journal_extend_51012 jbd2_journal_extend 0 51012 NULL
-+lbs_dev_info_51023 lbs_dev_info 3 51023 NULL
-+fuse_conn_congestion_threshold_read_51028 fuse_conn_congestion_threshold_read 3 51028 NULL
-+BcmGetSectionValEndOffset_51039 BcmGetSectionValEndOffset 0 51039 NULL
-+dump_midi_51040 dump_midi 3 51040 NULL
-+usb_get_descriptor_51041 usb_get_descriptor 0 51041 NULL
-+do_arpt_set_ctl_51053 do_arpt_set_ctl 4 51053 NULL
-+wusb_prf_64_51065 wusb_prf_64 7 51065 NULL
-+jbd2_journal_init_revoke_51088 jbd2_journal_init_revoke 2 51088 NULL
-+__ocfs2_find_path_51096 __ocfs2_find_path 0 51096 NULL
-+read_file_wiphy_51103 read_file_wiphy 3 51103 NULL
-+iscsi_nop_out_rsp_51117 iscsi_nop_out_rsp 4 51117 NULL
-+nfs_map_name_to_uid_51132 nfs_map_name_to_uid 3 51132 NULL
-+alloc_rtllib_51136 alloc_rtllib 1 51136 NULL
-+wl1271_cmd_build_probe_req_51141 wl1271_cmd_build_probe_req 3-5 51141 NULL
-+xfs_trans_get_efd_51148 xfs_trans_get_efd 3 51148 NULL
-+snd_pcm_write_51235 snd_pcm_write 3 51235 NULL
-+tipc_send_51238 tipc_send 4 51238 NULL
-+drm_property_create_51239 drm_property_create 4 51239 NULL
-+st_read_51251 st_read 3 51251 NULL
-+compat_dccp_setsockopt_51263 compat_dccp_setsockopt 5 51263 NULL
-+dvb_audio_write_51275 dvb_audio_write 3 51275 NULL
-+ipwireless_network_packet_received_51277 ipwireless_network_packet_received 4 51277 NULL
-+pvr2_std_id_to_str_51288 pvr2_std_id_to_str 2 51288 NULL
-+xfrm_count_enc_supported_51290 xfrm_count_enc_supported 0 51290 NULL
-+buffDnldVerify_51297 buffDnldVerify 0 51297 NULL
-+alloc_hippi_dev_51320 alloc_hippi_dev 1 51320 NULL
-+ext2_xattr_get_51327 ext2_xattr_get 0 51327 NULL
-+alloc_smp_req_51337 alloc_smp_req 1 51337 NULL
-+ipw_get_event_log_len_51341 ipw_get_event_log_len 0 51341 NULL
-+ieee80211_if_fmt_estab_plinks_51370 ieee80211_if_fmt_estab_plinks 3 51370 NULL
-+radeon_kms_compat_ioctl_51371 radeon_kms_compat_ioctl 2 51371 NULL
-+ieee80211_wx_set_gen_ie_51399 ieee80211_wx_set_gen_ie 3 51399 NULL
-+ceph_sync_read_51410 ceph_sync_read 3-0 51410 NULL
-+blk_register_region_51424 blk_register_region 1-2 51424 NULL
-+mwifiex_rdeeprom_read_51429 mwifiex_rdeeprom_read 3 51429 NULL
-+ieee80211_if_read_dot11MeshHWMPRootMode_51441 ieee80211_if_read_dot11MeshHWMPRootMode 3 51441 NULL
-+print_devstats_dot11ACKFailureCount_51443 print_devstats_dot11ACKFailureCount 3 51443 NULL
-+____alloc_ei_netdev_51475 ____alloc_ei_netdev 1 51475 NULL
-+xfs_buf_get_uncached_51477 xfs_buf_get_uncached 2 51477 NULL
-+kvm_fetch_guest_virt_51493 kvm_fetch_guest_virt 4-2 51493 NULL
-+__alloc_eip_netdev_51549 __alloc_eip_netdev 1 51549 NULL
-+ixgb_get_eeprom_len_51586 ixgb_get_eeprom_len 0 51586 NULL
-+rfcomm_tty_write_51603 rfcomm_tty_write 3 51603 NULL
-+table_size_to_number_of_entries_51613 table_size_to_number_of_entries 0-1 51613 NULL
-+dns_resolve_server_name_to_ip_51632 dns_resolve_server_name_to_ip 0 51632 NULL
-+sctp_auth_create_key_51641 sctp_auth_create_key 1 51641 NULL
-+iscsi_create_session_51647 iscsi_create_session 3 51647 NULL
-+get_new_cssid_51665 get_new_cssid 2 51665 NULL
-+ps_upsd_utilization_read_51669 ps_upsd_utilization_read 3 51669 NULL
-+sctp_setsockopt_associnfo_51684 sctp_setsockopt_associnfo 3 51684 NULL
-+sel_write_access_51704 sel_write_access 3 51704 NULL
-+gem_alloc_skb_51715 gem_alloc_skb 2 51715 NULL
-+drm_compat_ioctl_51717 drm_compat_ioctl 2 51717 NULL
-+sg_read_oxfer_51724 sg_read_oxfer 3 51724 NULL
-+msg_set_51725 msg_set 3 51725 NULL
-+cm4040_read_51732 cm4040_read 3 51732 NULL
-+pwc_video_read_51735 pwc_video_read 3 51735 NULL
-+hid_parse_report_51737 hid_parse_report 3 51737 NULL
-+get_user_pages_fast_51751 get_user_pages_fast 0 51751 NULL
-+ifx_spi_insert_flip_string_51752 ifx_spi_insert_flip_string 3 51752 NULL
-+if_write_51756 if_write 3 51756 NULL
-+iio_buffer_add_channel_sysfs_51766 iio_buffer_add_channel_sysfs 0 51766 NULL
-+swiotlb_init_with_tbl_51770 swiotlb_init_with_tbl 2 51770 NULL
-+l2cap_create_iframe_pdu_51801 l2cap_create_iframe_pdu 3 51801 NULL
-+qib_alloc_devdata_51819 qib_alloc_devdata 2 51819 NULL
-+buffer_from_user_51826 buffer_from_user 3 51826 NULL
-+ioread32_51847 ioread32 0 51847 NULL nohasharray
-+read_file_tgt_tx_stats_51847 read_file_tgt_tx_stats 3 51847 &ioread32_51847
-+do_readv_writev_51849 do_readv_writev 4 51849 NULL
-+pointer_size_read_51863 pointer_size_read 3 51863 NULL
-+get_indirect_ea_51869 get_indirect_ea 4 51869 NULL
-+user_read_51881 user_read 3 51881 NULL
-+dbAdjCtl_51888 dbAdjCtl 0 51888 NULL
-+iio_read_first_n_sw_rb_51911 iio_read_first_n_sw_rb 2 51911 NULL
-+dbg_status_buf_51930 dbg_status_buf 2 51930 NULL
-+xfrm_alg_len_51940 xfrm_alg_len 0 51940 NULL
-+scsi_get_vpd_page_51951 scsi_get_vpd_page 4 51951 NULL
-+ab8500_bank_write_51960 ab8500_bank_write 3 51960 NULL
-+snd_mask_min_51969 snd_mask_min 0 51969 NULL
-+__blkdev_get_51972 __blkdev_get 0 51972 NULL
-+ath6kl_sdio_alloc_prep_scat_req_51986 ath6kl_sdio_alloc_prep_scat_req 2 51986 NULL
-+skb_copy_datagram_from_iovec_52014 skb_copy_datagram_from_iovec 4-2-5 52014 NULL
-+rdmalt_52022 rdmalt 0 52022 NULL
-+vxge_rx_alloc_52024 vxge_rx_alloc 3 52024 NULL
-+override_release_52032 override_release 2 52032 NULL
-+end_port_52042 end_port 0 52042 NULL
-+dma_rx_errors_read_52045 dma_rx_errors_read 3 52045 NULL
-+msnd_fifo_write_52052 msnd_fifo_write 0-3 52052 NULL
-+dvb_ringbuffer_avail_52057 dvb_ringbuffer_avail 0 52057 NULL
-+nsm_get_handle_52089 nsm_get_handle 4 52089 NULL
-+o2net_debug_read_52105 o2net_debug_read 3 52105 NULL
-+bcm_compare_buff_contents_52124 bcm_compare_buff_contents 0 52124 NULL
-+retry_count_read_52129 retry_count_read 3 52129 NULL
-+hysdn_conf_write_52145 hysdn_conf_write 3 52145 NULL
-+wait_gpio_52146 wait_gpio 0 52146 NULL
-+__le16_to_cpup_52155 __le16_to_cpup 0 52155 NULL
-+ieee80211_if_read_dot11MeshRetryTimeout_52168 ieee80211_if_read_dot11MeshRetryTimeout 3 52168 NULL
-+mga_compat_ioctl_52170 mga_compat_ioctl 2 52170 NULL
-+proc_pid_readlink_52186 proc_pid_readlink 3 52186 NULL
-+iscsi_if_send_reply_52219 iscsi_if_send_reply 7 52219 NULL nohasharray
-+iwl_dbgfs_wd_timeout_write_52219 iwl_dbgfs_wd_timeout_write 3 52219 &iscsi_if_send_reply_52219
-+_alloc_mISDN_skb_52232 _alloc_mISDN_skb 3 52232 NULL
-+sisusbcon_do_font_op_52271 sisusbcon_do_font_op 9 52271 NULL
-+smk_write_load_list_52280 smk_write_load_list 3 52280 NULL
-+handle_supp_msgs_52284 handle_supp_msgs 4 52284 NULL
-+ath6kl_wmi_get_new_buf_52304 ath6kl_wmi_get_new_buf 1 52304 NULL
-+kobject_set_name_vargs_52309 kobject_set_name_vargs 0 52309 NULL
-+hwflags_read_52318 hwflags_read 3 52318 NULL
-+test_unaligned_bulk_52333 test_unaligned_bulk 3 52333 NULL
-+iwl3945_ucode_rx_stats_read_52340 iwl3945_ucode_rx_stats_read 3 52340 NULL
-+bytes_to_frames_52362 bytes_to_frames 0-2 52362 NULL
-+copy_entries_to_user_52367 copy_entries_to_user 1 52367 NULL
-+iwl_dump_fh_52371 iwl_dump_fh 0 52371 NULL
-+pfkey_sockaddr_pair_size_52378 pfkey_sockaddr_pair_size 0 52378 NULL
-+isdn_writebuf_stub_52383 isdn_writebuf_stub 4 52383 NULL
-+jfs_setxattr_52389 jfs_setxattr 4 52389 NULL
-+aer_inject_write_52399 aer_inject_write 3 52399 NULL
-+cgroup_file_write_52417 cgroup_file_write 3 52417 NULL
-+line6_midibuf_init_52425 line6_midibuf_init 2 52425 NULL
-+hso_serial_common_create_52428 hso_serial_common_create 4 52428 NULL
-+ieee80211_if_fmt_num_sta_ps_52438 ieee80211_if_fmt_num_sta_ps 3 52438 NULL
-+nl80211_send_mgmt_tx_status_52445 nl80211_send_mgmt_tx_status 5 52445 NULL
-+alauda_read_data_52452 alauda_read_data 3 52452 NULL
-+ip6_skb_dst_mtu_52457 ip6_skb_dst_mtu 0 52457 NULL
-+ieee80211_alloc_txb_52477 ieee80211_alloc_txb 1-2 52477 NULL
-+usb_tranzport_write_52479 usb_tranzport_write 3 52479 NULL
-+ocfs2_extend_no_holes_52483 ocfs2_extend_no_holes 3-4 52483 NULL
-+skb_cow_head_52495 skb_cow_head 2 52495 NULL
-+int_tasklet_entry_52500 int_tasklet_entry 3 52500 NULL
-+netlbl_unlabel_init_52506 netlbl_unlabel_init 1 52506 NULL
-+pm_qos_power_write_52513 pm_qos_power_write 3 52513 NULL
-+bt_sock_stream_recvmsg_52518 bt_sock_stream_recvmsg 4 52518 NULL
-+dup_variable_bug_52525 dup_variable_bug 3 52525 NULL
-+raw_recvmsg_52529 raw_recvmsg 4 52529 NULL
-+dccpprobe_read_52549 dccpprobe_read 3 52549 NULL
-+ocfs2_make_right_split_rec_52562 ocfs2_make_right_split_rec 3 52562 NULL
-+debug_level_proc_write_52572 debug_level_proc_write 3 52572 NULL
-+xfs_file_buffered_aio_write_52609 xfs_file_buffered_aio_write 4 52609 NULL
-+iwl_legacy_dbgfs_channels_read_52619 iwl_legacy_dbgfs_channels_read 3 52619 NULL
-+__iter_shared_inline_ref_inodes_52668 __iter_shared_inline_ref_inodes 0 52668 NULL
-+vendorextnWriteSection_52698 vendorextnWriteSection 0 52698 NULL
-+ms_transfer_data_52705 ms_transfer_data 9 52705 NULL
-+cx25840_ir_rx_read_52724 cx25840_ir_rx_read 3 52724 NULL
-+blkcipher_next_slow_52733 blkcipher_next_slow 3-4 52733 NULL
-+relay_alloc_page_array_52735 relay_alloc_page_array 1 52735 NULL
-+carl9170_debugfs_vif_dump_read_52755 carl9170_debugfs_vif_dump_read 3 52755 NULL
-+debug_lpm_write_52830 debug_lpm_write 3 52830 NULL
-+bl_mark_sectors_init_52831 bl_mark_sectors_init 3-2 52831 NULL
-+pwr_rcvd_beacons_read_52836 pwr_rcvd_beacons_read 3 52836 NULL
-+ext2_xattr_set_acl_52857 ext2_xattr_set_acl 4 52857 NULL
-+mon_bin_get_event_52863 mon_bin_get_event 4-6 52863 NULL
-+iwl_legacy_dbgfs_clear_traffic_statistics_write_52866 iwl_legacy_dbgfs_clear_traffic_statistics_write 3 52866 NULL
-+qib_decode_6120_err_52876 qib_decode_6120_err 3 52876 NULL
-+pvr2_ctrl_value_to_sym_internal_52881 pvr2_ctrl_value_to_sym_internal 5 52881 NULL
-+cache_read_procfs_52882 cache_read_procfs 3 52882 NULL
-+__kfifo_out_peek_r_52919 __kfifo_out_peek_r 3 52919 NULL
-+__iio_device_attr_init_52936 __iio_device_attr_init 0 52936 NULL
-+ip_nat_sdp_port_52938 ip_nat_sdp_port 6 52938 NULL
-+__nodes_remap_52951 __nodes_remap 5 52951 NULL
-+store_disp_52952 store_disp 4 52952 NULL
-+send_packet_52960 send_packet 4 52960 NULL
-+ieee80211_if_fmt_fwded_mcast_52961 ieee80211_if_fmt_fwded_mcast 3 52961 NULL
-+num_node_state_52989 num_node_state 0 52989 NULL
-+bio_cur_bytes_53037 bio_cur_bytes 0 53037 NULL
-+cfi_read_query_53066 cfi_read_query 0 53066 NULL
-+iwl_dbgfs_interrupt_write_53069 iwl_dbgfs_interrupt_write 3 53069 NULL
-+mwifiex_debug_read_53074 mwifiex_debug_read 3 53074 NULL
-+pcbit_readw_53084 pcbit_readw 0 53084 NULL
-+line6_dumpreq_initbuf_53123 line6_dumpreq_initbuf 3 53123 NULL
-+clear_capture_buf_53192 clear_capture_buf 2 53192 NULL
-+__pci_enable_device_flags_53213 __pci_enable_device_flags 0 53213 NULL
-+sctp_make_fwdtsn_53265 sctp_make_fwdtsn 3 53265 NULL
-+btrfs_file_extent_num_bytes_53269 btrfs_file_extent_num_bytes 0 53269 NULL
-+pn544_i2c_read_53270 pn544_i2c_read 0 53270 NULL
-+lirc_buffer_init_53282 lirc_buffer_init 3-2 53282 NULL
-+ftrace_profile_write_53327 ftrace_profile_write 3 53327 NULL
-+gsm_control_reply_53333 gsm_control_reply 4 53333 NULL
-+vendorextnIoctl_53350 vendorextnIoctl 0 53350 NULL
-+bnx2i_send_nl_mesg_53353 bnx2i_send_nl_mesg 4 53353 NULL
-+get_random_bytes_arch_53370 get_random_bytes_arch 2 53370 NULL
-+roccat_common_receive_53407 roccat_common_receive 4 53407 NULL
-+i915_gem_execbuffer_relocate_object_53435 i915_gem_execbuffer_relocate_object 0 53435 NULL
-+isr_cmd_cmplt_read_53439 isr_cmd_cmplt_read 3 53439 NULL
-+mwifiex_info_read_53447 mwifiex_info_read 3 53447 NULL nohasharray
-+snd_dma_alloc_pages_53447 snd_dma_alloc_pages 3 53447 &mwifiex_info_read_53447
-+apei_exec_run_optional_53452 apei_exec_run_optional 0 53452 NULL
-+rds_tcp_data_recv_53476 rds_tcp_data_recv 3-4 53476 NULL
-+iowarrior_read_53483 iowarrior_read 3 53483 NULL
-+osd_req_write_kern_53486 osd_req_write_kern 5 53486 NULL
-+do_verify_xattr_datum_53499 do_verify_xattr_datum 0 53499 NULL
-+snd_pcm_format_physical_width_53505 snd_pcm_format_physical_width 0 53505 NULL
-+dbAllocNext_53506 dbAllocNext 0 53506 NULL
-+ocfs2_xattr_set_acl_53508 ocfs2_xattr_set_acl 4 53508 NULL
-+check_acl_53512 check_acl 0 53512 NULL
-+set_registers_53582 set_registers 3 53582 NULL
-+pfkey_recvmsg_53604 pfkey_recvmsg 4 53604 NULL
-+___alloc_bootmem_nopanic_53626 ___alloc_bootmem_nopanic 1 53626 NULL
-+xd_write_multiple_pages_53633 xd_write_multiple_pages 6-5 53633 NULL
-+ccid_getsockopt_builtin_ccids_53634 ccid_getsockopt_builtin_ccids 2 53634 NULL
-+uapsd_max_sp_len_read_53651 uapsd_max_sp_len_read 3 53651 NULL
-+nr_sendmsg_53656 nr_sendmsg 4 53656 NULL
-+orig_hash_add_if_53676 orig_hash_add_if 2 53676 NULL nohasharray
-+_preload_range_53676 _preload_range 3-2 53676 &orig_hash_add_if_53676
-+fuse_fill_write_pages_53682 fuse_fill_write_pages 4 53682 NULL
-+bdev_logical_block_size_53690 bdev_logical_block_size 0 53690 NULL
-+i830_write_fence_reg_53695 i830_write_fence_reg 0 53695 NULL
-+phy_read_1bit_53708 phy_read_1bit 0 53708 NULL
-+find_overflow_devnum_53711 find_overflow_devnum 0 53711 NULL
-+bio_integrity_split_53714 bio_integrity_split 3 53714 NULL
-+wdm_write_53735 wdm_write 3 53735 NULL
-+amdtp_out_stream_get_max_payload_53755 amdtp_out_stream_get_max_payload 0 53755 NULL nohasharray
-+lpfc_idiag_queacc_read_qe_53755 lpfc_idiag_queacc_read_qe 0-2 53755 &amdtp_out_stream_get_max_payload_53755
-+ext2_acl_count_53773 ext2_acl_count 0-1 53773 NULL
-+__kfifo_dma_in_prepare_r_53792 __kfifo_dma_in_prepare_r 4-5 53792 NULL
-+regmap_raw_write_53803 regmap_raw_write 4 53803 NULL
-+lpfc_idiag_ctlacc_read_reg_53809 lpfc_idiag_ctlacc_read_reg 0-3 53809 NULL
-+nls_nullsize_53815 nls_nullsize 0 53815 NULL
-+setup_data_read_53822 setup_data_read 3 53822 NULL
-+multipath_status_53836 multipath_status 4 53836 NULL
-+i915_gem_flush_ring_53843 i915_gem_flush_ring 0 53843 NULL
-+pms_read_53873 pms_read 3 53873 NULL
-+ieee80211_if_fmt_dropped_frames_congestion_53883 ieee80211_if_fmt_dropped_frames_congestion 3 53883 NULL
-+ocfs2_rm_xattr_cluster_53900 ocfs2_rm_xattr_cluster 4-5-3 53900 NULL
-+proc_file_read_53905 proc_file_read 3 53905 NULL
-+tcp_mss_split_point_53925 tcp_mss_split_point 0-3-4 53925 NULL
-+usb_serial_generic_write_53927 usb_serial_generic_write 4 53927 NULL
-+ocfs2_make_clusters_writable_53938 ocfs2_make_clusters_writable 5-4 53938 NULL
-+mlx4_num_eq_uar_53965 mlx4_num_eq_uar 0 53965 NULL
-+idetape_chrdev_write_53976 idetape_chrdev_write 3 53976 NULL
-+__ocfs2_xattr_set_value_outside_53981 __ocfs2_xattr_set_value_outside 5 53981 NULL
-+snd_pcm_lib_write_transfer_54018 snd_pcm_lib_write_transfer 5-2-4 54018 NULL
-+cmpk_message_handle_tx_54024 cmpk_message_handle_tx 4 54024 NULL
-+ipxrtr_route_packet_54036 ipxrtr_route_packet 4 54036 NULL
-+nl80211_send_disconnected_54056 nl80211_send_disconnected 5 54056 NULL
-+wl12xx_rx_get_buf_size_54070 wl12xx_rx_get_buf_size 0 54070 NULL
-+_malloc_54077 _malloc 1 54077 NULL
-+bitmap_bitremap_54096 bitmap_bitremap 4 54096 NULL
-+altera_set_ir_pre_54103 altera_set_ir_pre 2 54103 NULL
-+create_xattr_54106 create_xattr 5 54106 NULL
-+strn_len_54122 strn_len 0 54122 NULL
-+store_sys_acpi_54129 store_sys_acpi 4 54129 NULL
-+isr_host_acknowledges_read_54136 isr_host_acknowledges_read 3 54136 NULL
-+i2400m_zrealloc_2x_54166 i2400m_zrealloc_2x 3 54166 NULL nohasharray
-+memcpy_toiovec_54166 memcpy_toiovec 3 54166 &i2400m_zrealloc_2x_54166
-+p9_client_prepare_req_54175 p9_client_prepare_req 3 54175 NULL
-+sd_read_data_54207 sd_read_data 9 54207 NULL
-+do_sys_poll_54221 do_sys_poll 2 54221 NULL
-+__register_chrdev_54223 __register_chrdev 2-3 54223 NULL
-+_format_mac_addr_54229 _format_mac_addr 2-0 54229 NULL
-+pi_read_regr_54231 pi_read_regr 0 54231 NULL
-+jbd2__journal_restart_54249 jbd2__journal_restart 0 54249 NULL
-+xfs_dir2_sf_addname_hard_54254 xfs_dir2_sf_addname_hard 3 54254 NULL
-+ceph_msgpool_get_54258 ceph_msgpool_get 2 54258 NULL
-+wusb_prf_54261 wusb_prf 7 54261 NULL nohasharray
-+audio_write_54261 audio_write 4 54261 &wusb_prf_54261
-+mwifiex_getlog_read_54269 mwifiex_getlog_read 3 54269 NULL
-+kstrtou16_from_user_54274 kstrtou16_from_user 2 54274 NULL
-+altera_set_dr_post_54291 altera_set_dr_post 2 54291 NULL
-+dlm_alloc_pagevec_54296 dlm_alloc_pagevec 1 54296 NULL
-+ttm_mem_global_alloc_54299 ttm_mem_global_alloc 0 54299 NULL
-+sprintf_54306 sprintf 0 54306 NULL
-+pn_raw_send_54330 pn_raw_send 2 54330 NULL
-+br_fdb_fillbuf_54339 br_fdb_fillbuf 0 54339 NULL
-+__alloc_dev_table_54343 __alloc_dev_table 2 54343 NULL
-+_osd_realloc_seg_54352 _osd_realloc_seg 3 54352 NULL nohasharray
-+__get_free_pages_54352 __get_free_pages 0 54352 &_osd_realloc_seg_54352
-+tcf_hash_create_54360 tcf_hash_create 4 54360 NULL
-+read_file_credit_dist_stats_54367 read_file_credit_dist_stats 3 54367 NULL
-+vfs_readlink_54368 vfs_readlink 3 54368 NULL
-+do_dccp_setsockopt_54377 do_dccp_setsockopt 5 54377 NULL
-+ah_alloc_tmp_54378 ah_alloc_tmp 3-2 54378 NULL
-+sysfs_dir_llseek_54385 sysfs_dir_llseek 2 54385 NULL
-+snd_pcm_oss_read2_54387 snd_pcm_oss_read2 0-3 54387 NULL
-+iwl_dbgfs_power_save_status_read_54392 iwl_dbgfs_power_save_status_read 3 54392 NULL
-+add_packet_54433 add_packet 3 54433 NULL
-+simple_strtoull_54493 simple_strtoull 0 54493 NULL
-+cifs_idmap_key_instantiate_54503 cifs_idmap_key_instantiate 3 54503 NULL
-+l2cap_create_basic_pdu_54508 l2cap_create_basic_pdu 3 54508 NULL
-+btrfs_ordered_sum_size_54509 btrfs_ordered_sum_size 0-2 54509 NULL
-+cgroup_write_X64_54514 cgroup_write_X64 5 54514 NULL
-+rfc4106_set_key_54519 rfc4106_set_key 3 54519 NULL
-+viacam_read_54526 viacam_read 3 54526 NULL
-+unix_dgram_connect_54535 unix_dgram_connect 3 54535 NULL
-+setsockopt_54539 setsockopt 5 54539 NULL
-+lbs_lowsnr_write_54549 lbs_lowsnr_write 3 54549 NULL
-+rts51x_seq_read_register_54567 rts51x_seq_read_register 3 54567 NULL
-+nfsd_vfs_write_54577 nfsd_vfs_write 6 54577 NULL
-+fw_iso_buffer_init_54582 fw_iso_buffer_init 3 54582 NULL
-+xfrm_polexpire_msgsize_54589 xfrm_polexpire_msgsize 0 54589 NULL
-+fwSendNullPacket_54618 fwSendNullPacket 2 54618 NULL
-+port_fops_write_54627 port_fops_write 3 54627 NULL
-+dns_resolver_read_54658 dns_resolver_read 3 54658 NULL
-+bus_add_device_54665 bus_add_device 0 54665 NULL
-+bio_kmalloc_54672 bio_kmalloc 2 54672 NULL
-+evm_read_key_54674 evm_read_key 3 54674 NULL
-+addtgt_54703 addtgt 3 54703 NULL
-+rfkill_fop_read_54711 rfkill_fop_read 3 54711 NULL
-+_add_sg_continuation_descriptor_54721 _add_sg_continuation_descriptor 3 54721 NULL
-+ocfs2_control_write_54737 ocfs2_control_write 3 54737 NULL
-+kzalloc_54740 kzalloc 1 54740 NULL
-+drm_mode_crtc_set_gamma_size_54742 drm_mode_crtc_set_gamma_size 2 54742 NULL
-+wep_iv_read_54744 wep_iv_read 3 54744 NULL
-+lpfc_idiag_pcicfg_write_54749 lpfc_idiag_pcicfg_write 3 54749 NULL
-+flexcop_device_kmalloc_54793 flexcop_device_kmalloc 1 54793 NULL
-+nfsd_write_54809 nfsd_write 6 54809 NULL
-+crypto_tfm_ctx_alignment_54815 crypto_tfm_ctx_alignment 0 54815 NULL nohasharray
-+aes_decrypt_fail_read_54815 aes_decrypt_fail_read 3 54815 &crypto_tfm_ctx_alignment_54815
-+generic_perform_write_54832 generic_perform_write 3 54832 NULL
-+write_rio_54837 write_rio 3 54837 NULL
-+ext3_acl_from_disk_54839 ext3_acl_from_disk 2 54839 NULL
-+edac_mc_alloc_54846 edac_mc_alloc 1 54846 NULL
-+ufx_ops_write_54848 ufx_ops_write 3 54848 NULL
-+printer_read_54851 printer_read 3 54851 NULL
-+em28xx_isoc_dvb_max_packetsize_54854 em28xx_isoc_dvb_max_packetsize 0 54854 NULL
-+alloc_ep_req_54860 alloc_ep_req 2 54860 NULL
-+broadsheet_spiflash_rewrite_sector_54864 broadsheet_spiflash_rewrite_sector 2 54864 NULL
-+prism_build_supp_rates_54865 prism_build_supp_rates 0 54865 NULL
-+tcf_csum_ipv6_tcp_54877 tcf_csum_ipv6_tcp 4 54877 NULL
-+iscsi_pool_init_54913 iscsi_pool_init 2-4 54913 NULL nohasharray
-+kobject_set_name_vargs_54913 kobject_set_name_vargs 0 54913 &iscsi_pool_init_54913
-+btrfs_stack_chunk_num_stripes_54923 btrfs_stack_chunk_num_stripes 0 54923 NULL
-+add_port_54941 add_port 2 54941 NULL
-+alauda_write_data_54967 alauda_write_data 3 54967 NULL
-+c4_add_card_54968 c4_add_card 3 54968 NULL
-+__proc_file_read_54978 __proc_file_read 3 54978 NULL
-+brcmf_sdcard_send_buf_54980 brcmf_sdcard_send_buf 6 54980 NULL
-+_queue_data_54983 _queue_data 4 54983 NULL
-+ext3_xattr_get_54989 ext3_xattr_get 0 54989 NULL
-+cx231xx_v4l2_read_55014 cx231xx_v4l2_read 3 55014 NULL
-+ext4_ext_handle_uninitialized_extents_55059 ext4_ext_handle_uninitialized_extents 0-6 55059 NULL
-+__netdev_alloc_skb_ip_align_55067 __netdev_alloc_skb_ip_align 2 55067 NULL
-+apei_exec_run_55075 apei_exec_run 0 55075 NULL
-+set_interface_55085 set_interface 0 55085 NULL
-+PropagateCalParamsFromFlashToMemory_55099 PropagateCalParamsFromFlashToMemory 0 55099 NULL
-+rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read_55106 rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read 3 55106 NULL
-+kmalloc_large_55111 kmalloc_large 1 55111 NULL
-+crypto_ahash_setkey_55134 crypto_ahash_setkey 3 55134 NULL
-+filldir_55137 filldir 3 55137 NULL nohasharray
-+ocfs2_prepare_refcount_change_for_del_55137 ocfs2_prepare_refcount_change_for_del 3 55137 &filldir_55137
-+ocfs2_truncate_file_55148 ocfs2_truncate_file 3 55148 NULL
-+sel_write_relabel_55195 sel_write_relabel 3 55195 NULL
-+sched_feat_write_55202 sched_feat_write 3 55202 NULL
-+isdn_net_ciscohdlck_alloc_skb_55209 isdn_net_ciscohdlck_alloc_skb 2 55209 NULL nohasharray
-+ht40allow_map_read_55209 ht40allow_map_read 3 55209 &isdn_net_ciscohdlck_alloc_skb_55209
-+__kfifo_dma_out_prepare_r_55211 __kfifo_dma_out_prepare_r 4-5 55211 NULL
-+do_raw_setsockopt_55215 do_raw_setsockopt 5 55215 NULL
-+sctp_abort_pkt_new_55218 sctp_abort_pkt_new 5 55218 NULL
-+dbAllocDmap_55227 dbAllocDmap 0 55227 NULL
-+tipc_port_reject_sections_55229 tipc_port_reject_sections 5 55229 NULL
-+ext4_ext_convert_to_initialized_55235 ext4_ext_convert_to_initialized 0 55235 NULL
-+memcpy_fromiovec_55247 memcpy_fromiovec 3 55247 NULL
-+lbs_failcount_write_55276 lbs_failcount_write 3 55276 NULL
-+rx_streaming_interval_read_55291 rx_streaming_interval_read 3 55291 NULL nohasharray
-+xd_read_cis_55291 xd_read_cis 4 55291 &rx_streaming_interval_read_55291
-+gsm_control_modem_55303 gsm_control_modem 3 55303 NULL
-+wimax_msg_len_55304 wimax_msg_len 0 55304 NULL
-+vme_user_read_55338 vme_user_read 3 55338 NULL
-+__wa_xfer_setup_sizes_55342 __wa_xfer_setup_sizes 0 55342 NULL nohasharray
-+sctp_datamsg_from_user_55342 sctp_datamsg_from_user 4 55342 &__wa_xfer_setup_sizes_55342
-+rts51x_seq_write_register_55345 rts51x_seq_write_register 3 55345 NULL
-+acpi_system_read_event_55362 acpi_system_read_event 3 55362 NULL
-+iwl_dbgfs_plcp_delta_read_55407 iwl_dbgfs_plcp_delta_read 3 55407 NULL
-+alloc_skb_55439 alloc_skb 1 55439 NULL
-+__vxge_hw_channel_allocate_55462 __vxge_hw_channel_allocate 3 55462 NULL
-+isdnhdlc_decode_55466 isdnhdlc_decode 0 55466 NULL
-+cx23888_ir_rx_read_55473 cx23888_ir_rx_read 3 55473 NULL
-+snd_pcm_lib_write_55483 snd_pcm_lib_write 0-3 55483 NULL
-+i2o_pool_alloc_55485 i2o_pool_alloc 4 55485 NULL
-+ocfs2_rec_clusters_55501 ocfs2_rec_clusters 0 55501 NULL
-+cfpkt_pad_trail_55511 cfpkt_pad_trail 2 55511 NULL
-+ea_get_55522 ea_get 3-0 55522 NULL
-+set_msr_interception_55538 set_msr_interception 2 55538 NULL
-+add_partition_55588 add_partition 2 55588 NULL
-+kstrtou8_from_user_55599 kstrtou8_from_user 2 55599 NULL
-+macvtap_put_user_55609 macvtap_put_user 4 55609 NULL
-+selinux_setprocattr_55611 selinux_setprocattr 4 55611 NULL
-+reiserfs_xattr_get_55628 reiserfs_xattr_get 0 55628 NULL nohasharray
-+pktgen_if_write_55628 pktgen_if_write 3 55628 &reiserfs_xattr_get_55628
-+xfs_bmbt_maxrecs_55649 xfs_bmbt_maxrecs 0-2 55649 NULL
-+read_oldmem_55658 read_oldmem 3 55658 NULL
-+lpfc_idiag_queinfo_read_55662 lpfc_idiag_queinfo_read 3 55662 NULL
-+get_info_55681 get_info 3 55681 NULL
-+iwl_dbgfs_plcp_delta_write_55682 iwl_dbgfs_plcp_delta_write 3 55682 NULL
-+pm8001_store_update_fw_55716 pm8001_store_update_fw 4 55716 NULL
-+prepare_reply_55734 prepare_reply 4 55734 NULL
-+__iio_allocate_kfifo_55738 __iio_allocate_kfifo 2-3 55738 NULL
-+strlen_55778 strlen 0 55778 NULL
-+req_bio_endio_55786 req_bio_endio 3 55786 NULL
-+rtnl_vfinfo_size_55794 rtnl_vfinfo_size 0 55794 NULL
-+uwb_rc_neh_grok_event_55799 uwb_rc_neh_grok_event 3 55799 NULL
-+iwl_legacy_dbgfs_sensitivity_read_55816 iwl_legacy_dbgfs_sensitivity_read 3 55816 NULL
-+sb16_copy_from_user_55836 sb16_copy_from_user 10-6-7 55836 NULL
-+xfs_da_buf_make_55845 xfs_da_buf_make 1 55845 NULL
-+ip_hdrlen_55849 ip_hdrlen 0 55849 NULL
-+hcd_alloc_coherent_55862 hcd_alloc_coherent 5-0 55862 NULL
-+shmem_setxattr_55867 shmem_setxattr 4 55867 NULL
-+__check_block_validity_55869 __check_block_validity 0 55869 NULL
-+pm_qos_power_read_55891 pm_qos_power_read 3 55891 NULL
-+snd_pcm_hw_param_value_min_55917 snd_pcm_hw_param_value_min 0 55917 NULL
-+kvm_write_guest_virt_system_55944 kvm_write_guest_virt_system 4-2 55944 NULL
-+sel_read_policy_55947 sel_read_policy 3 55947 NULL
-+handle_response_55951 handle_response 5 55951 NULL
-+simple_read_from_buffer_55957 simple_read_from_buffer 2-5 55957 NULL
-+dccp_sendmsg_56058 dccp_sendmsg 4 56058 NULL
-+pscsi_get_bio_56103 pscsi_get_bio 1 56103 NULL
-+em28xx_write_reg_bits_56107 em28xx_write_reg_bits 0 56107 NULL
-+sel_read_handle_status_56139 sel_read_handle_status 3 56139 NULL
-+write_file_frameerrors_56145 write_file_frameerrors 3 56145 NULL
-+ath6kl_wmi_bssinfo_event_rx_56146 ath6kl_wmi_bssinfo_event_rx 3 56146 NULL
-+rawv6_setsockopt_56165 rawv6_setsockopt 5 56165 NULL
-+skb_headroom_56200 skb_headroom 0 56200 NULL
-+ocfs2_find_xe_in_bucket_56224 ocfs2_find_xe_in_bucket 0 56224 NULL
-+cp210x_get_config_56229 cp210x_get_config 4 56229 NULL
-+do_ipt_set_ctl_56238 do_ipt_set_ctl 4 56238 NULL
-+fd_copyin_56247 fd_copyin 3 56247 NULL
-+dvb_aplay_56296 dvb_aplay 3 56296 NULL
-+btmrvl_hscfgcmd_read_56303 btmrvl_hscfgcmd_read 3 56303 NULL
-+speakup_file_write_56310 speakup_file_write 3 56310 NULL
-+journal_init_revoke_table_56331 journal_init_revoke_table 1 56331 NULL
-+snd_rawmidi_read_56337 snd_rawmidi_read 3 56337 NULL
-+vxge_os_dma_malloc_async_56348 vxge_os_dma_malloc_async 3 56348 NULL
-+iov_iter_copy_from_user_atomic_56368 iov_iter_copy_from_user_atomic 4-0 56368 NULL
-+dev_read_56369 dev_read 3 56369 NULL
-+ocfs2_control_read_56405 ocfs2_control_read 3 56405 NULL
-+do_get_write_access_56410 do_get_write_access 0 56410 NULL
-+store_msg_56417 store_msg 3 56417 NULL
-+pppol2tp_sendmsg_56420 pppol2tp_sendmsg 4 56420 NULL
-+fl_create_56435 fl_create 5 56435 NULL
-+gnttab_map_56439 gnttab_map 2 56439 NULL
-+cx231xx_init_isoc_56453 cx231xx_init_isoc 3-2-4 56453 NULL
-+osd_req_list_partition_objects_56464 osd_req_list_partition_objects 5 56464 NULL
-+lbs_rdmac_write_56471 lbs_rdmac_write 3 56471 NULL
-+calc_linear_pos_56472 calc_linear_pos 0-3 56472 NULL
-+crypto_shash_alignmask_56486 crypto_shash_alignmask 0 56486 NULL
-+cfg80211_connect_result_56515 cfg80211_connect_result 4-6 56515 NULL
-+iwl_legacy_dbgfs_rx_queue_read_56533 iwl_legacy_dbgfs_rx_queue_read 3 56533 NULL
-+l1oip_socket_recv_56537 l1oip_socket_recv 6 56537 NULL
-+ip_options_get_56538 ip_options_get 4 56538 NULL
-+tcp_cwnd_test_56547 tcp_cwnd_test 0 56547 NULL
-+ocfs2_change_extent_flag_56549 ocfs2_change_extent_flag 5 56549 NULL
-+alloc_apertures_56561 alloc_apertures 1 56561 NULL
-+rs_sta_dbgfs_stats_table_read_56573 rs_sta_dbgfs_stats_table_read 3 56573 NULL
-+portcntrs_2_read_56586 portcntrs_2_read 3 56586 NULL
-+event_filter_write_56609 event_filter_write 3 56609 NULL
-+gather_array_56641 gather_array 3 56641 NULL
-+dlm_dir_lookup_56662 dlm_dir_lookup 4 56662 NULL
-+tg3_nvram_write_block_56666 tg3_nvram_write_block 3 56666 NULL
-+btrfs_cow_block_56678 btrfs_cow_block 0 56678 NULL
-+snd_gus_dram_read_56686 snd_gus_dram_read 4 56686 NULL
-+dvb_ringbuffer_read_user_56702 dvb_ringbuffer_read_user 3-0 56702 NULL
-+sta_flags_read_56710 sta_flags_read 3 56710 NULL
-+ipv6_getsockopt_sticky_56711 ipv6_getsockopt_sticky 5 56711 NULL
-+__wa_xfer_setup_segs_56725 __wa_xfer_setup_segs 2 56725 NULL
-+__copy_from_user_ll_56738 __copy_from_user_ll 0-3 56738 NULL
-+drm_agp_bind_pages_56748 drm_agp_bind_pages 3 56748 NULL
-+mfd_add_devices_56753 mfd_add_devices 4 56753 NULL
-+__carl9170_rx_56784 __carl9170_rx 3 56784 NULL
-+ttm_alloc_new_pages_56792 ttm_alloc_new_pages 5 56792 NULL
-+ext4_ext_rm_idx_56827 ext4_ext_rm_idx 0 56827 NULL
-+snd_rawmidi_kernel_write1_56847 snd_rawmidi_kernel_write1 4-0 56847 NULL
-+ext3_xattr_ibody_get_56880 ext3_xattr_ibody_get 0 56880 NULL
-+pvr2_debugifc_print_status_56890 pvr2_debugifc_print_status 3 56890 NULL
-+__kfifo_out_56927 __kfifo_out 0-3 56927 NULL
-+journal_init_revoke_56933 journal_init_revoke 2 56933 NULL nohasharray
-+CopyBufferToControlPacket_56933 CopyBufferToControlPacket 0 56933 &journal_init_revoke_56933
-+diva_get_driver_info_56967 diva_get_driver_info 0 56967 NULL
-+vlsi_alloc_ring_57003 vlsi_alloc_ring 3-4 57003 NULL
-+btrfs_super_csum_size_57004 btrfs_super_csum_size 0 57004 NULL
-+snd_dma_alloc_pages_fallback_57029 snd_dma_alloc_pages_fallback 3 57029 NULL
-+skb_network_offset_57043 skb_network_offset 0 57043 NULL nohasharray
-+ieee80211_if_fmt_state_57043 ieee80211_if_fmt_state 3 57043 &skb_network_offset_57043
-+bytes_to_samples_57049 bytes_to_samples 0-2 57049 NULL
-+cx2341x_ctrl_new_std_57061 cx2341x_ctrl_new_std 4 57061 NULL
-+sca3000_read_data_57064 sca3000_read_data 4 57064 NULL
-+pcmcia_replace_cis_57066 pcmcia_replace_cis 3 57066 NULL
-+sis190_try_rx_copy_57069 sis190_try_rx_copy 3 57069 NULL
-+thin_status_57084 thin_status 4 57084 NULL
-+tracing_set_trace_write_57096 tracing_set_trace_write 3 57096 NULL
-+altera_get_note_57099 altera_get_note 6 57099 NULL
-+crypto_compress_ctxsize_57109 crypto_compress_ctxsize 0 57109 NULL
-+sysfs_write_file_57116 sysfs_write_file 3 57116 NULL
-+cipso_v4_gentag_loc_57119 cipso_v4_gentag_loc 0 57119 NULL
-+rds_ib_sub_signaled_57136 rds_ib_sub_signaled 2 57136 NULL nohasharray
-+nl80211_send_deauth_57136 nl80211_send_deauth 4 57136 &rds_ib_sub_signaled_57136 nohasharray
-+ima_show_htable_value_57136 ima_show_htable_value 2 57136 &nl80211_send_deauth_57136
-+snd_sonicvibes_getdmac_57140 snd_sonicvibes_getdmac 0 57140 NULL
-+stk_prepare_sio_buffers_57168 stk_prepare_sio_buffers 2 57168 NULL
-+extent_from_logical_57179 extent_from_logical 0 57179 NULL nohasharray
-+rx_hw_stuck_read_57179 rx_hw_stuck_read 3 57179 &extent_from_logical_57179
-+sys_poll_57190 sys_poll 2 57190 NULL
-+ieee80211_if_fmt_tsf_57249 ieee80211_if_fmt_tsf 3 57249 NULL
-+oprofilefs_ulong_from_user_57251 oprofilefs_ulong_from_user 3 57251 NULL
-+lbs_sleepparams_write_57283 lbs_sleepparams_write 3 57283 NULL
-+pstore_file_read_57288 pstore_file_read 3 57288 NULL
-+snd_pcm_read_57289 snd_pcm_read 3 57289 NULL
-+ath6kl_buf_alloc_57304 ath6kl_buf_alloc 1 57304 NULL
-+ftdi_elan_write_57309 ftdi_elan_write 3 57309 NULL
-+write_file_regval_57313 write_file_regval 3 57313 NULL
-+ocfs2_xattr_shrink_size_57328 ocfs2_xattr_shrink_size 3 57328 NULL
-+usblp_read_57342 usblp_read 3 57342 NULL
-+print_devstats_dot11RTSFailureCount_57347 print_devstats_dot11RTSFailureCount 3 57347 NULL
-+read_file_blob_57406 read_file_blob 3 57406 NULL
-+enclosure_register_57412 enclosure_register 3 57412 NULL
-+compat_keyctl_instantiate_key_iov_57431 compat_keyctl_instantiate_key_iov 3 57431 NULL nohasharray
-+alloc_ftrace_hash_57431 alloc_ftrace_hash 1 57431 &compat_keyctl_instantiate_key_iov_57431
-+copy_to_user_fromio_57432 copy_to_user_fromio 3 57432 NULL
-+sys_pselect6_57449 sys_pselect6 1 57449 NULL
-+ReadReg_57453 ReadReg 0 57453 NULL
-+__roundup_pow_of_two_57461 __roundup_pow_of_two 0 57461 NULL
-+crypto_tfm_alg_blocksize_57463 crypto_tfm_alg_blocksize 0 57463 NULL nohasharray
-+send_midi_async_57463 send_midi_async 3 57463 &crypto_tfm_alg_blocksize_57463
-+sisusb_clear_vram_57466 sisusb_clear_vram 2-3 57466 NULL nohasharray
-+iwl4965_statistics_flag_57466 iwl4965_statistics_flag 3-0 57466 &sisusb_clear_vram_57466
-+ieee80211_if_read_flags_57470 ieee80211_if_read_flags 3 57470 NULL
-+ocfs2_write_cluster_57483 ocfs2_write_cluster 9-8-2 57483 NULL
-+nl80211_send_mgmt_57497 nl80211_send_mgmt 6 57497 NULL
-+skb_headlen_57501 skb_headlen 0 57501 NULL
-+copy_in_user_57502 copy_in_user 3 57502 NULL
-+ckhdid_printf_57505 ckhdid_printf 2 57505 NULL nohasharray
-+ks8842_read32_57505 ks8842_read32 0 57505 &ckhdid_printf_57505
-+init_tag_map_57515 init_tag_map 3 57515 NULL
-+cmm_read_57520 cmm_read 3 57520 NULL
-+inode_permission_57531 inode_permission 0 57531 NULL
-+ReadHDLCPnP_57559 ReadHDLCPnP 0 57559 NULL
-+snd_pcm_playback_ioctl1_57569 snd_pcm_playback_ioctl1 0 57569 NULL
-+get_bridge_ifindices_57579 get_bridge_ifindices 0 57579 NULL
-+iwl4965_rs_sta_dbgfs_scale_table_write_57595 iwl4965_rs_sta_dbgfs_scale_table_write 3 57595 NULL
-+sk_stream_alloc_skb_57622 sk_stream_alloc_skb 2 57622 NULL
-+osdmap_set_max_osd_57630 osdmap_set_max_osd 2 57630 NULL nohasharray
-+sisusbcon_putcs_57630 sisusbcon_putcs 3 57630 &osdmap_set_max_osd_57630
-+mem_read_57631 mem_read 3 57631 NULL
-+sys_mq_timedsend_57661 sys_mq_timedsend 3 57661 NULL
-+r3964_write_57662 r3964_write 4 57662 NULL
-+__lgwrite_57669 __lgwrite 4 57669 NULL
-+ieee80211_MFIE_rate_len_57692 ieee80211_MFIE_rate_len 0 57692 NULL
-+i2400m_rx_stats_read_57706 i2400m_rx_stats_read 3 57706 NULL
-+aa_matching_read_57720 aa_matching_read 3 57720 NULL
-+pppol2tp_recvmsg_57742 pppol2tp_recvmsg 4 57742 NULL nohasharray
-+compat_sys_set_mempolicy_57742 compat_sys_set_mempolicy 3 57742 &pppol2tp_recvmsg_57742
-+ieee80211_if_fmt_dot11MeshHWMPpreqMinInterval_57762 ieee80211_if_fmt_dot11MeshHWMPpreqMinInterval 3 57762 NULL
-+read_block_for_search_57781 read_block_for_search 0 57781 NULL
-+apei_exec_collect_resources_57788 apei_exec_collect_resources 0 57788 NULL
-+ld2_57794 ld2 0 57794 NULL
-+ivtv_read_57796 ivtv_read 3 57796 NULL
-+bfad_debugfs_read_regrd_57830 bfad_debugfs_read_regrd 3 57830 NULL
-+copy_to_user_57835 copy_to_user 3-0 57835 NULL
-+flash_read_57843 flash_read 3 57843 NULL
-+tt_response_fill_table_57902 tt_response_fill_table 1 57902 NULL
-+xt_alloc_table_info_57903 xt_alloc_table_info 1 57903 NULL
-+emi26_writememory_57908 emi26_writememory 4 57908 NULL
-+atomic_add_return_unchecked_57910 atomic_add_return_unchecked 0-1 57910 NULL nohasharray
-+iio_read_first_n_kfifo_57910 iio_read_first_n_kfifo 2 57910 &atomic_add_return_unchecked_57910
-+__snd_gf1_look16_57925 __snd_gf1_look16 0 57925 NULL
-+sel_read_handle_unknown_57933 sel_read_handle_unknown 3 57933 NULL
-+xfs_mru_cache_create_57943 xfs_mru_cache_create 3 57943 NULL
-+rx_57944 rx 4 57944 NULL
-+key_algorithm_read_57946 key_algorithm_read 3 57946 NULL
-+ip_set_alloc_57953 ip_set_alloc 1 57953 NULL nohasharray
-+ioat3_dca_count_dca_slots_57953 ioat3_dca_count_dca_slots 0 57953 &ip_set_alloc_57953
-+i915_cache_sharing_write_57961 i915_cache_sharing_write 3 57961 NULL
-+hfc_empty_fifo_57972 hfc_empty_fifo 2 57972 NULL
-+stripe_status_57985 stripe_status 4 57985 NULL
-+rx_reset_counter_read_58001 rx_reset_counter_read 3 58001 NULL
-+regcache_rbtree_insert_to_block_58009 regcache_rbtree_insert_to_block 5 58009 NULL
-+iwl_dbgfs_ucode_rx_stats_read_58023 iwl_dbgfs_ucode_rx_stats_read 3 58023 NULL
-+io_playback_transfer_58030 io_playback_transfer 4 58030 NULL
-+mce_async_out_58056 mce_async_out 3 58056 NULL
-+ocfs2_find_leaf_58065 ocfs2_find_leaf 0 58065 NULL
-+cm4040_write_58079 cm4040_write 3 58079 NULL
-+rfcomm_wmalloc_58090 rfcomm_wmalloc 2 58090 NULL
-+i915_add_request_58096 i915_add_request 0 58096 NULL
-+savemem_58129 savemem 3 58129 NULL
-+ipv6_flowlabel_opt_58135 ipv6_flowlabel_opt 3 58135 NULL nohasharray
-+slhc_init_58135 slhc_init 1-2 58135 &ipv6_flowlabel_opt_58135
-+garmin_write_bulk_58191 garmin_write_bulk 3 58191 NULL
-+asix_write_cmd_58192 asix_write_cmd 5 58192 NULL
-+ieee80211_if_fmt_flags_58205 ieee80211_if_fmt_flags 3 58205 NULL
-+nci_send_cmd_58206 nci_send_cmd 3 58206 NULL
-+sysfs_add_file_mode_58222 sysfs_add_file_mode 0 58222 NULL
-+read_file_debug_58256 read_file_debug 3 58256 NULL
-+cfg80211_mgmt_tx_status_58266 cfg80211_mgmt_tx_status 4 58266 NULL
-+profile_load_58267 profile_load 3 58267 NULL
-+kstrtos8_from_user_58268 kstrtos8_from_user 2 58268 NULL
-+acpi_ds_build_internal_package_obj_58271 acpi_ds_build_internal_package_obj 3 58271 NULL
-+iscsi_decode_text_input_58292 iscsi_decode_text_input 4 58292 NULL
-+my_skb_head_push_58297 my_skb_head_push 2 58297 NULL
-+ieee80211_if_read_dot11MeshTTL_58307 ieee80211_if_read_dot11MeshTTL 3 58307 NULL
-+ext4_ext_truncate_extend_restart_58331 ext4_ext_truncate_extend_restart 0 58331 NULL
-+vmalloc_to_sg_58354 vmalloc_to_sg 2 58354 NULL
-+sctp_make_init_58401 sctp_make_init 4 58401 NULL
-+idetape_pad_zeros_58406 idetape_pad_zeros 2 58406 NULL
-+i2400m_pld_size_58415 i2400m_pld_size 0 58415 NULL
-+iscsi_offload_mesg_58425 iscsi_offload_mesg 5 58425 NULL
-+__iio_add_chan_devattr_58451 __iio_add_chan_devattr 0 58451 NULL
-+capabilities_read_58457 capabilities_read 3 58457 NULL
-+lpfc_idiag_baracc_read_58466 lpfc_idiag_baracc_read 3 58466 NULL nohasharray
-+compat_do_ipt_set_ctl_58466 compat_do_ipt_set_ctl 4 58466 &lpfc_idiag_baracc_read_58466
-+snd_gf1_read_addr_58483 snd_gf1_read_addr 0 58483 NULL
-+snd_rme96_capture_copy_58484 snd_rme96_capture_copy 5 58484 NULL
-+rndis_add_response_58544 rndis_add_response 2 58544 NULL
-+efx_tsoh_heap_alloc_58545 efx_tsoh_heap_alloc 2 58545 NULL
-+wep_decrypt_fail_read_58567 wep_decrypt_fail_read 3 58567 NULL
-+scnprint_mac_oui_58578 scnprint_mac_oui 3-0 58578 NULL
-+get_rhf_errstring_58582 get_rhf_errstring 3 58582 NULL
-+ea_read_inline_58589 ea_read_inline 0 58589 NULL
-+xip_file_read_58592 xip_file_read 3 58592 NULL
-+ecryptfs_write_end_58594 ecryptfs_write_end 5-3 58594 NULL
-+ixj_read_58615 ixj_read 3 58615 NULL
-+skb_copy_to_page_nocache_58624 skb_copy_to_page_nocache 6 58624 NULL
-+vb2_qbuf_58631 vb2_qbuf 0 58631 NULL
-+module_alloc_update_bounds_rx_58634 module_alloc_update_bounds_rx 1 58634 NULL
-+ocfs2_block_to_cluster_start_58653 ocfs2_block_to_cluster_start 2 58653 NULL
-+iwl_dbgfs_rx_handlers_write_58655 iwl_dbgfs_rx_handlers_write 3 58655 NULL
-+uwb_bce_print_IEs_58686 uwb_bce_print_IEs 4 58686 NULL
-+vx_send_msg_58711 vx_send_msg 0 58711 NULL
-+csum_exist_in_range_58730 csum_exist_in_range 2-3 58730 NULL
-+frames_to_bytes_58741 frames_to_bytes 0-2 58741 NULL
-+ieee80211_if_write_tkip_mic_test_58748 ieee80211_if_write_tkip_mic_test 3 58748 NULL
-+agp_allocate_memory_58761 agp_allocate_memory 2 58761 NULL
-+__do_config_autodelink_58763 __do_config_autodelink 3 58763 NULL
-+regmap_calc_reg_len_58795 regmap_calc_reg_len 0 58795 NULL
-+raw_send_hdrinc_58803 raw_send_hdrinc 4 58803 NULL
-+ep_read_58813 ep_read 3 58813 NULL
-+command_write_58841 command_write 3 58841 NULL
-+ocfs2_truncate_log_append_58850 ocfs2_truncate_log_append 3 58850 NULL
-+iwl_dbgfs_traffic_log_read_58870 iwl_dbgfs_traffic_log_read 3 58870 NULL
-+gs_alloc_req_58883 gs_alloc_req 2 58883 NULL
-+print_devstats_dot11FCSErrorCount_58919 print_devstats_dot11FCSErrorCount 3 58919 NULL
-+st5481_isoc_flatten_58952 st5481_isoc_flatten 0 58952 NULL
-+netpoll_send_udp_58955 netpoll_send_udp 3 58955 NULL
-+wait_table_hash_nr_entries_58962 wait_table_hash_nr_entries 0 58962 NULL
-+crypto_aead_ivsize_58970 crypto_aead_ivsize 0 58970 NULL
-+max3107_handlerx_58978 max3107_handlerx 2 58978 NULL
-+handle_rx_packet_58993 handle_rx_packet 3 58993 NULL
-+ep_write_59008 ep_write 3 59008 NULL
-+lpfc_idiag_baracc_write_59014 lpfc_idiag_baracc_write 3 59014 NULL
-+receive_server_sync_packet_59021 receive_server_sync_packet 3 59021 NULL
-+selinux_transaction_write_59038 selinux_transaction_write 3 59038 NULL
-+crypto_aead_reqsize_59039 crypto_aead_reqsize 0 59039 NULL
-+mmc_sd_num_wr_blocks_59112 mmc_sd_num_wr_blocks 0 59112 NULL
-+scsi_io_completion_59122 scsi_io_completion 2 59122 NULL
-+__iio_add_event_config_attrs_59136 __iio_add_event_config_attrs 0 59136 NULL
-+print_devstats_dot11RTSSuccessCount_59145 print_devstats_dot11RTSSuccessCount 3 59145 NULL nohasharray
-+framebuffer_alloc_59145 framebuffer_alloc 1 59145 &print_devstats_dot11RTSSuccessCount_59145
-+radeon_compat_ioctl_59150 radeon_compat_ioctl 2 59150 NULL
-+pvr2_hdw_report_clients_59152 pvr2_hdw_report_clients 3 59152 NULL
-+setup_window_59178 setup_window 4-2-5-7 59178 NULL
-+ocfs2_move_extent_59187 ocfs2_move_extent 3-2-5 59187 NULL
-+InitLedSettings_59192 InitLedSettings 0 59192 NULL
-+validate_exec_list_59204 validate_exec_list 0 59204 NULL
-+xfs_iext_realloc_indirect_59211 xfs_iext_realloc_indirect 2 59211 NULL
-+fast_rx_path_59214 fast_rx_path 3 59214 NULL
-+inftl_partscan_59216 inftl_partscan 0 59216 NULL nohasharray
-+check_mapped_selector_name_59216 check_mapped_selector_name 5 59216 &inftl_partscan_59216
-+dt3155_read_59226 dt3155_read 3 59226 NULL
-+tcp_try_rmem_schedule_59231 tcp_try_rmem_schedule 2 59231 NULL
-+tty_prepare_flip_string_flags_59240 tty_prepare_flip_string_flags 4 59240 NULL
-+solo_v4l2_read_59247 solo_v4l2_read 3 59247 NULL
-+nla_len_59258 nla_len 0 59258 NULL
-+btrfs_insert_dir_item_59304 btrfs_insert_dir_item 4 59304 NULL
-+fd_copyout_59323 fd_copyout 3 59323 NULL
-+read_9287_modal_eeprom_59327 read_9287_modal_eeprom 3 59327 NULL
-+xfs_attrmulti_attr_set_59346 xfs_attrmulti_attr_set 4 59346 NULL
-+__map_request_59350 __map_request 0 59350 NULL
-+xfs_dir2_sf_entsize_59366 xfs_dir2_sf_entsize 0-2 59366 NULL
-+pvr2_debugifc_print_info_59380 pvr2_debugifc_print_info 3 59380 NULL
-+journal_init_dev_59384 journal_init_dev 5 59384 NULL
-+fc_frame_alloc_fill_59394 fc_frame_alloc_fill 2 59394 NULL
-+rts51x_transfer_data_59416 rts51x_transfer_data 4 59416 NULL
-+pci_ctrl_read_59424 pci_ctrl_read 0 59424 NULL
-+vxge_hw_ring_rxds_per_block_get_59425 vxge_hw_ring_rxds_per_block_get 0 59425 NULL
-+squashfs_read_data_59440 squashfs_read_data 6 59440 NULL
-+shrink_tnc_trees_59481 shrink_tnc_trees 0 59481 NULL
-+ib_copy_from_udata_59502 ib_copy_from_udata 3 59502 NULL
-+rds_pin_pages_59507 rds_pin_pages 0 59507 NULL
-+tunables_write_59563 tunables_write 3 59563 NULL
-+__copy_from_user_ll_nozero_59571 __copy_from_user_ll_nozero 0-3 59571 NULL
-+write_pbl_59583 write_pbl 4 59583 NULL
-+memdup_user_59590 memdup_user 2 59590 NULL
-+fcoe_ctlr_vn_send_59607 fcoe_ctlr_vn_send 4 59607 NULL
-+mtrr_write_59622 mtrr_write 3 59622 NULL
-+ip_vs_icmp_xmit_59624 ip_vs_icmp_xmit 4 59624 NULL
-+find_first_zero_bit_59636 find_first_zero_bit 0 59636 NULL
-+dn_fib_nlmsg_size_59643 dn_fib_nlmsg_size 0 59643 NULL
-+ubifs_setxattr_59650 ubifs_setxattr 4 59650 NULL nohasharray
-+hidraw_read_59650 hidraw_read 3 59650 &ubifs_setxattr_59650
-+v9fs_xattr_set_acl_59651 v9fs_xattr_set_acl 4 59651 NULL
-+tcp_skb_pcount_59664 tcp_skb_pcount 0 59664 NULL
-+alloc_dca_provider_59670 alloc_dca_provider 2 59670 NULL
-+ieee80211_mgmt_tx_59699 ieee80211_mgmt_tx 9 59699 NULL
-+mic_calc_failure_read_59700 mic_calc_failure_read 3 59700 NULL
-+ioperm_get_59701 ioperm_get 4-3 59701 NULL
-+prism2_info_scanresults_59729 prism2_info_scanresults 3 59729 NULL
-+sock_rmalloc_59740 sock_rmalloc 2 59740 NULL nohasharray
-+ieee80211_if_read_fwded_unicast_59740 ieee80211_if_read_fwded_unicast 3 59740 &sock_rmalloc_59740
-+qib_decode_7220_sdma_errs_59745 qib_decode_7220_sdma_errs 4 59745 NULL
-+strnlen_59746 strnlen 0 59746 NULL nohasharray
-+fuse_file_llseek_59746 fuse_file_llseek 2 59746 &strnlen_59746
-+ext3_acl_count_59754 ext3_acl_count 0-1 59754 NULL
-+long_retry_limit_read_59766 long_retry_limit_read 3 59766 NULL
-+venus_remove_59781 venus_remove 4 59781 NULL
-+ipw_write_59807 ipw_write 3 59807 NULL
-+rtllib_wx_set_gen_ie_59808 rtllib_wx_set_gen_ie 3 59808 NULL
-+ubi_dbg_check_all_ff_59810 ubi_dbg_check_all_ff 0 59810 NULL
-+scsi_init_shared_tag_map_59812 scsi_init_shared_tag_map 2 59812 NULL
-+ieee80211_if_read_dot11MeshHWMPmaxPREQretries_59829 ieee80211_if_read_dot11MeshHWMPmaxPREQretries 3 59829 NULL
-+gspca_dev_probe2_59833 gspca_dev_probe2 4 59833 NULL
-+tun_put_user_59849 tun_put_user 4 59849 NULL
-+format_array_59854 format_array 0 59854 NULL
-+pvr2_ioread_set_sync_key_59882 pvr2_ioread_set_sync_key 3 59882 NULL
-+l2cap_sock_recvmsg_59886 l2cap_sock_recvmsg 4 59886 NULL
-+ffs_prepare_buffer_59892 ffs_prepare_buffer 2 59892 NULL
-+dapm_widget_power_read_file_59950 dapm_widget_power_read_file 3 59950 NULL
-+__arch_hweight16_59975 __arch_hweight16 0 59975 NULL
-+osd_req_read_kern_59990 osd_req_read_kern 5 59990 NULL
-+ghash_async_setkey_60001 ghash_async_setkey 3 60001 NULL
-+rawsock_sendmsg_60010 rawsock_sendmsg 4 60010 NULL
-+mthca_init_cq_60011 mthca_init_cq 2 60011 NULL
-+osd_req_list_dev_partitions_60027 osd_req_list_dev_partitions 4 60027 NULL
-+xlog_bread_offset_60030 xlog_bread_offset 3 60030 NULL
-+sys_sched_getaffinity_60033 sys_sched_getaffinity 2 60033 NULL
-+bio_integrity_hw_sectors_60039 bio_integrity_hw_sectors 0-2 60039 NULL
-+do_ip6t_set_ctl_60040 do_ip6t_set_ctl 4 60040 NULL
-+vcs_size_60050 vcs_size 0 60050 NULL
-+load_module_60056 load_module 2 60056 NULL nohasharray
-+gru_alloc_gts_60056 gru_alloc_gts 3-2 60056 &load_module_60056
-+compat_writev_60063 compat_writev 3 60063 NULL
-+c4iw_num_stags_60073 c4iw_num_stags 0 60073 NULL
-+rxrpc_kernel_send_data_60083 rxrpc_kernel_send_data 3 60083 NULL
-+ieee80211_if_fmt_fwded_frames_60103 ieee80211_if_fmt_fwded_frames 3 60103 NULL
-+ld_usb_read_60156 ld_usb_read 3 60156 NULL
-+jmb38x_ms_count_slots_60164 jmb38x_ms_count_slots 0 60164 NULL
-+init_state_60165 init_state 2 60165 NULL
-+jffs2_alloc_full_dirent_60179 jffs2_alloc_full_dirent 1 60179 NULL nohasharray
-+sg_build_sgat_60179 sg_build_sgat 3 60179 &jffs2_alloc_full_dirent_60179
-+ib_send_cm_mra_60202 ib_send_cm_mra 4 60202 NULL nohasharray
-+qib_reg_phys_mr_60202 qib_reg_phys_mr 3 60202 &ib_send_cm_mra_60202
-+store_iwmct_log_level_60209 store_iwmct_log_level 4 60209 NULL
-+printer_write_60276 printer_write 3 60276 NULL
-+__pskb_pull_tail_60287 __pskb_pull_tail 2 60287 NULL
-+dn_nsp_return_disc_60296 dn_nsp_return_disc 2 60296 NULL
-+do_xip_mapping_read_60297 do_xip_mapping_read 5 60297 NULL
-+ext3_dir_llseek_60298 ext3_dir_llseek 2 60298 NULL
-+getDataLength_60301 getDataLength 0 60301 NULL
-+usb_alphatrack_write_60341 usb_alphatrack_write 3 60341 NULL
-+__kfifo_from_user_r_60345 __kfifo_from_user_r 5-3 60345 NULL
-+brcmf_alloc_wdev_60347 brcmf_alloc_wdev 1 60347 NULL
-+rh_call_control_60349 rh_call_control 0 60349 NULL
-+dccp_setsockopt_60367 dccp_setsockopt 5 60367 NULL
-+mthca_alloc_resize_buf_60394 mthca_alloc_resize_buf 3 60394 NULL
-+ocfs2_zero_extend_60396 ocfs2_zero_extend 3 60396 NULL
-+tveeprom_read_60397 tveeprom_read 3 60397 NULL
-+driver_names_read_60399 driver_names_read 3 60399 NULL
-+simple_alloc_urb_60420 simple_alloc_urb 3 60420 NULL
-+excessive_retries_read_60425 excessive_retries_read 3 60425 NULL
-+kmalloc_60432 kmalloc 1 60432 NULL nohasharray
-+tstats_write_60432 tstats_write 3 60432 &kmalloc_60432
-+tipc_buf_acquire_60437 tipc_buf_acquire 1 60437 NULL
-+rx_data_60442 rx_data 4 60442 NULL
-+tcf_csum_ipv4_igmp_60446 tcf_csum_ipv4_igmp 3 60446 NULL
-+iwm_ntf_rx_packet_60452 iwm_ntf_rx_packet 3 60452 NULL
-+crypto_shash_setkey_60483 crypto_shash_setkey 3 60483 NULL
-+ath_tx_init_60515 ath_tx_init 2 60515 NULL
-+ubi_wl_get_peb_60525 ubi_wl_get_peb 0 60525 NULL
-+hysdn_sched_rx_60533 hysdn_sched_rx 3 60533 NULL
-+v9fs_fid_readn_60544 v9fs_fid_readn 4 60544 NULL
-+tracing_entries_write_60563 tracing_entries_write 3 60563 NULL
-+skb_transport_offset_60619 skb_transport_offset 0 60619 NULL
-+wl1273_fm_fops_write_60621 wl1273_fm_fops_write 3 60621 NULL
-+acl_alloc_stack_init_60630 acl_alloc_stack_init 1 60630 NULL
-+free_dind_blocks_60635 free_dind_blocks 0 60635 NULL
-+if_sdio_host_to_card_60666 if_sdio_host_to_card 4 60666 NULL
-+ieee80211_if_read_dot11MeshConfirmTimeout_60670 ieee80211_if_read_dot11MeshConfirmTimeout 3 60670 NULL
-+init_data_container_60709 init_data_container 1 60709 NULL
-+vga_rcrt_60731 vga_rcrt 0 60731 NULL
-+snd_ice1712_ds_read_60754 snd_ice1712_ds_read 0 60754 NULL
-+sel_write_checkreqprot_60774 sel_write_checkreqprot 3 60774 NULL
-+opticon_write_60775 opticon_write 4 60775 NULL
-+acl_alloc_num_60778 acl_alloc_num 1-2 60778 NULL
-+snd_pcm_oss_readv3_60792 snd_pcm_oss_readv3 3 60792 NULL
-+pwr_tx_with_ps_read_60851 pwr_tx_with_ps_read 3 60851 NULL
-+pool_status_60861 pool_status 4 60861 NULL
-+ieee80211_send_auth_60865 ieee80211_send_auth 5 60865 NULL
-+generic_writepages_60871 generic_writepages 0 60871 NULL
-+mgt_set_varlen_60916 mgt_set_varlen 4 60916 NULL
-+set_powered_60938 set_powered 4 60938 NULL
-+pti_char_write_60960 pti_char_write 3 60960 NULL
-+mwifiex_alloc_sdio_mpa_buffers_60961 mwifiex_alloc_sdio_mpa_buffers 2-3 60961 NULL
-+blkio_get_key_name_61014 blkio_get_key_name 4 61014 NULL
-+ath6kl_lrssi_roam_read_61022 ath6kl_lrssi_roam_read 3 61022 NULL
-+lpfc_idiag_queacc_write_61043 lpfc_idiag_queacc_write 3 61043 NULL
-+symtab_init_61050 symtab_init 2 61050 NULL
-+fuse_send_write_61053 fuse_send_write 0 61053 NULL
-+bitmap_scnlistprintf_61062 bitmap_scnlistprintf 2-0 61062 NULL
-+ahash_align_buffer_size_61070 ahash_align_buffer_size 0-1-2 61070 NULL
-+get_derived_key_61100 get_derived_key 4 61100 NULL
-+alloc_chrdev_region_61112 alloc_chrdev_region 0 61112 NULL
-+__probe_kernel_read_61119 __probe_kernel_read 3 61119 NULL nohasharray
-+p80211_headerlen_61119 p80211_headerlen 0 61119 &__probe_kernel_read_61119
-+proto_ports_offset_61125 proto_ports_offset 0 61125 NULL
-+vmemmap_alloc_block_buf_61126 vmemmap_alloc_block_buf 1 61126 NULL
-+afs_proc_cells_write_61139 afs_proc_cells_write 3 61139 NULL
-+event_oom_late_read_61175 event_oom_late_read 3 61175 NULL
-+sys_lsetxattr_61177 sys_lsetxattr 4 61177 NULL
-+cfpkt_append_61206 cfpkt_append 3 61206 NULL
-+arch_hibernation_header_save_61212 arch_hibernation_header_save 0 61212 NULL
-+pn544_write_61215 pn544_write 3 61215 NULL
-+smk_read_ambient_61220 smk_read_ambient 3 61220 NULL
-+__verify_planes_array_61249 __verify_planes_array 0 61249 NULL
-+find_get_pages_tag_61270 find_get_pages_tag 0 61270 NULL
-+kick_a_thread_61273 kick_a_thread 0 61273 NULL
-+vortex_adbdma_getlinearpos_61283 vortex_adbdma_getlinearpos 0 61283 NULL
-+sys_add_key_61288 sys_add_key 4 61288 NULL
-+xfrm_user_sec_ctx_size_61320 xfrm_user_sec_ctx_size 0 61320 NULL
-+st5481_setup_isocpipes_61340 st5481_setup_isocpipes 6-4 61340 NULL
-+set_params_61373 set_params 0 61373 NULL
-+change_xattr_61390 change_xattr 5 61390 NULL
-+system_enable_write_61396 system_enable_write 3 61396 NULL
-+pm860x_bulk_read_61415 pm860x_bulk_read 3 61415 NULL
-+i915_emit_box_61436 i915_emit_box 0 61436 NULL
-+unix_stream_sendmsg_61455 unix_stream_sendmsg 4 61455 NULL
-+snd_pcm_lib_writev_transfer_61483 snd_pcm_lib_writev_transfer 5-4-2 61483 NULL
-+btrfs_item_size_61485 btrfs_item_size 0 61485 NULL
-+clone_bio_61526 clone_bio 5 61526 NULL nohasharray
-+erst_errno_61526 erst_errno 0 61526 &clone_bio_61526
-+trace_options_core_write_61551 trace_options_core_write 3 61551 NULL
-+dvb_net_ioctl_61559 dvb_net_ioctl 2 61559 NULL
-+rbd_do_request_61561 rbd_do_request 6-7 61561 NULL
-+parport_pc_fifo_write_block_dma_61568 parport_pc_fifo_write_block_dma 3 61568 NULL
-+fan_proc_write_61569 fan_proc_write 3 61569 NULL
-+ieee80211_if_read_rc_rateidx_mask_2ghz_61570 ieee80211_if_read_rc_rateidx_mask_2ghz 3 61570 NULL
-+seq_open_private_61589 seq_open_private 3 61589 NULL
-+netlink_recvmsg_61600 netlink_recvmsg 4 61600 NULL
-+cx2341x_handler_init_61601 cx2341x_handler_init 2 61601 NULL
-+configfs_write_file_61621 configfs_write_file 3 61621 NULL
-+ieee80211_rx_bss_info_61630 ieee80211_rx_bss_info 3 61630 NULL
-+i2o_parm_table_get_61635 i2o_parm_table_get 6 61635 NULL
-+snd_pcm_oss_read3_61643 snd_pcm_oss_read3 0-3 61643 NULL
-+resize_stripes_61650 resize_stripes 2 61650 NULL
-+ttm_page_pool_free_61661 ttm_page_pool_free 2-0 61661 NULL
-+insert_one_name_61668 insert_one_name 7 61668 NULL
-+qib_format_hwmsg_61679 qib_format_hwmsg 2 61679 NULL
-+lock_loop_61681 lock_loop 1 61681 NULL
-+filter_read_61692 filter_read 3 61692 NULL
-+iov_length_61716 iov_length 0 61716 NULL
-+fragmentation_threshold_read_61718 fragmentation_threshold_read 3 61718 NULL
-+read_file_interrupt_61742 read_file_interrupt 3 61742 NULL nohasharray
-+read_file_regval_61742 read_file_regval 3 61742 &read_file_interrupt_61742
-+mls_compute_context_len_61812 mls_compute_context_len 0 61812 NULL
-+btrfs_file_llseek_61838 btrfs_file_llseek 2 61838 NULL
-+bfad_debugfs_write_regwr_61841 bfad_debugfs_write_regwr 3 61841 NULL
-+evdev_compute_buffer_size_61863 evdev_compute_buffer_size 0 61863 NULL
-+get_fw_name_61874 get_fw_name 3 61874 NULL
-+ieee80211_rtl_auth_challenge_61897 ieee80211_rtl_auth_challenge 3 61897 NULL
-+ax25_addr_size_61899 ax25_addr_size 0 61899 NULL nohasharray
-+cxgb4_pktgl_to_skb_61899 cxgb4_pktgl_to_skb 2 61899 &ax25_addr_size_61899
-+clear_refs_write_61904 clear_refs_write 3 61904 NULL
-+au0828_init_isoc_61917 au0828_init_isoc 3-2-4 61917 NULL
-+sctp_sendmsg_61919 sctp_sendmsg 4 61919 NULL
-+send_bulk_static_data_61932 send_bulk_static_data 3 61932 NULL
-+squashfs_read_id_index_table_61961 squashfs_read_id_index_table 4 61961 NULL
-+ocfs2_quota_write_61972 ocfs2_quota_write 5-4 61972 NULL
-+fd_locked_ioctl_61978 fd_locked_ioctl 3 61978 NULL
-+cow_file_range_61979 cow_file_range 3 61979 NULL
-+module_alloc_exec_61991 module_alloc_exec 1 61991 NULL
-+virtnet_send_command_61993 virtnet_send_command 5-6 61993 NULL
-+dequeue_event_62000 dequeue_event 3 62000 NULL
-+xt_compat_match_offset_62011 xt_compat_match_offset 0 62011 NULL
-+jffs2_do_unlink_62020 jffs2_do_unlink 4 62020 NULL
-+pmcraid_build_passthrough_ioadls_62034 pmcraid_build_passthrough_ioadls 2 62034 NULL
-+proc_fdinfo_read_62043 proc_fdinfo_read 3 62043 NULL
-+ppp_tx_cp_62044 ppp_tx_cp 5 62044 NULL
-+sctp_user_addto_chunk_62047 sctp_user_addto_chunk 2-3 62047 NULL
-+do_pselect_62061 do_pselect 1 62061 NULL
-+pcpu_alloc_bootmem_62074 pcpu_alloc_bootmem 2 62074 NULL
-+jffs2_security_setxattr_62107 jffs2_security_setxattr 4 62107 NULL
-+ip_recv_error_62117 ip_recv_error 3 62117 NULL
-+generic_block_fiemap_62122 generic_block_fiemap 4 62122 NULL
-+llc_ui_header_len_62131 llc_ui_header_len 0 62131 NULL
-+kobject_add_varg_62133 kobject_add_varg 0 62133 NULL nohasharray
-+qib_diag_write_62133 qib_diag_write 3 62133 &kobject_add_varg_62133
-+ql_status_62135 ql_status 5 62135 NULL nohasharray
-+device_add_attrs_62135 device_add_attrs 0 62135 &ql_status_62135
-+video_usercopy_62151 video_usercopy 2 62151 NULL
-+wrmWithLock_62164 wrmWithLock 0 62164 NULL
-+prism54_wpa_bss_ie_get_62173 prism54_wpa_bss_ie_get 0 62173 NULL
-+alloc_upcall_62186 alloc_upcall 2 62186 NULL
-+btrfs_xattr_acl_set_62203 btrfs_xattr_acl_set 4 62203 NULL
-+sock_kmalloc_62205 sock_kmalloc 2 62205 NULL
-+check_unicast_packet_62217 check_unicast_packet 2 62217 NULL
-+hash_new_62224 hash_new 1 62224 NULL
-+nfsd_read_file_62241 nfsd_read_file 6 62241 NULL
-+send_control_msg_62261 send_control_msg 5 62261 NULL
-+subsystem_filter_read_62310 subsystem_filter_read 3 62310 NULL
-+udf_sb_alloc_partition_maps_62313 udf_sb_alloc_partition_maps 2 62313 NULL
-+hfcpci_empty_bfifo_62323 hfcpci_empty_bfifo 4 62323 NULL
-+Wb35Reg_BurstWrite_62327 Wb35Reg_BurstWrite 4 62327 NULL
-+flash_write_62354 flash_write 3 62354 NULL
-+xfpregs_set_62363 xfpregs_set 4 62363 NULL
-+altera_irscan_62396 altera_irscan 2 62396 NULL
-+udplite_manip_pkt_62433 udplite_manip_pkt 2 62433 NULL
-+netdev_alloc_skb_62437 netdev_alloc_skb 2 62437 NULL
-+e1000_check_copybreak_62448 e1000_check_copybreak 3 62448 NULL
-+pep_sendmsg_62524 pep_sendmsg 4 62524 NULL
-+store_pwm1_62529 store_pwm1 4 62529 NULL
-+test_iso_queue_62534 test_iso_queue 5 62534 NULL
-+debugfs_read_62535 debugfs_read 3 62535 NULL
-+sco_sock_sendmsg_62542 sco_sock_sendmsg 4 62542 NULL
-+qib_refresh_qsfp_cache_62547 qib_refresh_qsfp_cache 0 62547 NULL
-+xfrm_user_policy_62573 xfrm_user_policy 4 62573 NULL
-+packet_alloc_skb_62602 packet_alloc_skb 2-5-4 62602 NULL
-+nfsd_vfs_read_62605 nfsd_vfs_read 6 62605 NULL nohasharray
-+prism2_send_mgmt_62605 prism2_send_mgmt 4 62605 &nfsd_vfs_read_62605
-+iwl_dbgfs_force_reset_read_62628 iwl_dbgfs_force_reset_read 3 62628 NULL
-+lpfc_sli4_queue_alloc_62646 lpfc_sli4_queue_alloc 3 62646 NULL
-+tt_changes_fill_buffer_62649 tt_changes_fill_buffer 3 62649 NULL
-+write_62671 write 3 62671 NULL
-+printer_req_alloc_62687 printer_req_alloc 2 62687 NULL nohasharray
-+iwl_dbgfs_rx_statistics_read_62687 iwl_dbgfs_rx_statistics_read 3 62687 &printer_req_alloc_62687
-+ext4_ind_map_blocks_62690 ext4_ind_map_blocks 0 62690 NULL
-+adxl34x_i2c_read_block_62691 adxl34x_i2c_read_block 3 62691 NULL
-+bioset_integrity_create_62708 bioset_integrity_create 2 62708 NULL
-+rdm_62719 rdm 0 62719 NULL
-+key_replays_read_62746 key_replays_read 3 62746 NULL
-+mwifiex_rdeeprom_write_62754 mwifiex_rdeeprom_write 3 62754 NULL
-+ax25_sendmsg_62770 ax25_sendmsg 4 62770 NULL
-+scrub_chunk_62771 scrub_chunk 4 62771 NULL
-+tracing_total_entries_read_62817 tracing_total_entries_read 3 62817 NULL
-+BeceemEEPROMBulkRead_62835 BeceemEEPROMBulkRead 0 62835 NULL
-+__rounddown_pow_of_two_62836 __rounddown_pow_of_two 0 62836 NULL
-+xlog_recover_add_to_trans_62839 xlog_recover_add_to_trans 4 62839 NULL
-+rx_fcs_err_read_62844 rx_fcs_err_read 3 62844 NULL
-+genlmsg_msg_size_62845 genlmsg_msg_size 0-1 62845 NULL
-+read_nic_io_dword_62859 read_nic_io_dword 0 62859 NULL
-+hpi_read_word_62862 hpi_read_word 0 62862 NULL
-+nfs_writedata_alloc_62868 nfs_writedata_alloc 1 62868 NULL
-+aoechr_write_62883 aoechr_write 3 62883 NULL
-+resize_info_buffer_62889 resize_info_buffer 2 62889 NULL
-+if_spi_host_to_card_62890 if_spi_host_to_card 4 62890 NULL
-+ocfs2_validate_gd_parent_62905 ocfs2_validate_gd_parent 0 62905 NULL
-+mempool_create_slab_pool_62907 mempool_create_slab_pool 1 62907 NULL
-+getdqbuf_62908 getdqbuf 1 62908 NULL
-+agp_create_user_memory_62955 agp_create_user_memory 1 62955 NULL
-+get_skb_63008 get_skb 2 63008 NULL
-+kstrtoull_from_user_63026 kstrtoull_from_user 2 63026 NULL
-+__vb2_perform_fileio_63033 __vb2_perform_fileio 3 63033 NULL
-+scsi_host_alloc_63041 scsi_host_alloc 2 63041 NULL
-+unlink1_63059 unlink1 3 63059 NULL
-+ocfs2_decrease_refcount_63078 ocfs2_decrease_refcount 4-3 63078 NULL
-+brcmf_alloc_pkt_and_read_63116 brcmf_alloc_pkt_and_read 2 63116 NULL nohasharray
-+iwl_dbgfs_sensitivity_read_63116 iwl_dbgfs_sensitivity_read 3 63116 &brcmf_alloc_pkt_and_read_63116
-+ib_send_cm_rtu_63138 ib_send_cm_rtu 3 63138 NULL
-+snd_pcm_lib_malloc_pages_63182 snd_pcm_lib_malloc_pages 2 63182 NULL
-+vme_master_read_63221 vme_master_read 0 63221 NULL
-+module_alloc_update_bounds_rw_63233 module_alloc_update_bounds_rw 1 63233 NULL
-+ptp_read_63251 ptp_read 4 63251 NULL
-+readword_63288 readword 0 63288 NULL
-+tcp_collapse_63294 tcp_collapse 6-5 63294 NULL
-+isdn_ppp_ccp_xmit_reset_63297 isdn_ppp_ccp_xmit_reset 6 63297 NULL
-+dns_resolver_instantiate_63314 dns_resolver_instantiate 3 63314 NULL
-+proc_info_read_63344 proc_info_read 3 63344 NULL
-+ps_upsd_max_sptime_read_63362 ps_upsd_max_sptime_read 3 63362 NULL
-+idmouse_read_63374 idmouse_read 3 63374 NULL
-+edac_pci_alloc_ctl_info_63388 edac_pci_alloc_ctl_info 1 63388 NULL
-+rxpipe_missed_beacon_host_int_trig_rx_data_read_63405 rxpipe_missed_beacon_host_int_trig_rx_data_read 3 63405 NULL
-+noack_read_63419 noack_read 3 63419 NULL
-+l2cap_sock_sendmsg_63427 l2cap_sock_sendmsg 4 63427 NULL
-+iwl_dbgfs_debug_level_read_63430 iwl_dbgfs_debug_level_read 3 63430 NULL
-+brcmu_pkttotlen_63431 brcmu_pkttotlen 0 63431 NULL
-+kone_send_63435 kone_send 4 63435 NULL
-+nfsd_symlink_63442 nfsd_symlink 6 63442 NULL
-+snd_info_entry_write_63474 snd_info_entry_write 3 63474 NULL
-+do_work_63483 do_work 0 63483 NULL
-+get_gpio_63488 get_gpio 0 63488 NULL nohasharray
-+read_kcore_63488 read_kcore 3 63488 &get_gpio_63488
-+snd_pcm_plug_write_transfer_63503 snd_pcm_plug_write_transfer 0-3 63503 NULL
-+ubi_more_leb_change_data_63534 ubi_more_leb_change_data 4 63534 NULL
-+snapshot_status_63538 snapshot_status 4 63538 NULL
-+if_sdio_read_scratch_63540 if_sdio_read_scratch 0 63540 NULL
-+append_to_buffer_63550 append_to_buffer 3 63550 NULL
-+kvm_write_guest_page_63555 kvm_write_guest_page 5 63555 NULL
-+ocfs2_calc_trunc_pos_63576 ocfs2_calc_trunc_pos 4 63576 NULL
-+mlx4_ib_alloc_cq_buf_63610 mlx4_ib_alloc_cq_buf 3 63610 NULL
-+module_alloc_63630 module_alloc 1 63630 NULL
-+symbol_build_supp_rates_63634 symbol_build_supp_rates 0 63634 NULL
-+ext4_ext_get_access_63642 ext4_ext_get_access 0 63642 NULL
-+proc_loginuid_write_63648 proc_loginuid_write 3 63648 NULL
-+nand_ecc_test_63654 nand_ecc_test 1 63654 NULL nohasharray
-+ValidateDSDParamsChecksum_63654 ValidateDSDParamsChecksum 3 63654 &nand_ecc_test_63654
-+hidraw_ioctl_63658 hidraw_ioctl 2 63658 NULL
-+iwl4965_rs_sta_dbgfs_scale_table_read_63672 iwl4965_rs_sta_dbgfs_scale_table_read 3 63672 NULL
-+vbi_read_63673 vbi_read 3 63673 NULL
-+bin_search_63697 bin_search 0 63697 NULL
-+btrfs_insert_delayed_dir_index_63720 btrfs_insert_delayed_dir_index 4 63720 NULL
-+nfs4_reset_slot_table_63721 nfs4_reset_slot_table 2 63721 NULL
-+i915_gem_execbuffer_relocate_63728 i915_gem_execbuffer_relocate 0 63728 NULL
-+selinux_secctx_to_secid_63744 selinux_secctx_to_secid 2 63744 NULL
-+i915_gem_execbuffer_flush_63749 i915_gem_execbuffer_flush 0 63749 NULL
-+snd_pcm_oss_read1_63771 snd_pcm_oss_read1 3 63771 NULL
-+snd_opl4_mem_proc_read_63774 snd_opl4_mem_proc_read 5 63774 NULL
-+spidev_compat_ioctl_63778 spidev_compat_ioctl 2 63778 NULL
-+mwifiex_11n_create_rx_reorder_tbl_63806 mwifiex_11n_create_rx_reorder_tbl 4 63806 NULL
-+copy_nodes_to_user_63807 copy_nodes_to_user 2 63807 NULL
-+sel_write_load_63830 sel_write_load 3 63830 NULL
-+kvm_init_63834 kvm_init 3 63834 NULL
-+IsSectionWritable_63842 IsSectionWritable 0 63842 NULL
-+proc_pid_attr_write_63845 proc_pid_attr_write 3 63845 NULL
-+ieee80211_if_fmt_channel_type_63855 ieee80211_if_fmt_channel_type 3 63855 NULL
-+init_map_ipmac_63896 init_map_ipmac 4-3 63896 NULL
-+xhci_alloc_stream_info_63902 xhci_alloc_stream_info 3 63902 NULL nohasharray
-+IsOffsetWritable_63902 IsOffsetWritable 0 63902 &xhci_alloc_stream_info_63902
-+pohmelfs_readpages_trans_complete_63912 pohmelfs_readpages_trans_complete 2 63912 NULL
-+uvc_alloc_urb_buffers_63922 uvc_alloc_urb_buffers 0-2-3 63922 NULL
-+ledd_proc_write_63928 ledd_proc_write 3 63928 NULL
-+tipc_send2port_63935 tipc_send2port 5 63935 NULL
-+afs_send_simple_reply_63940 afs_send_simple_reply 3 63940 NULL
-+macvtap_recvmsg_63949 macvtap_recvmsg 4 63949 NULL
-+ieee80211_authentication_req_63973 ieee80211_authentication_req 3 63973 NULL
-+iwl_legacy_dbgfs_tx_statistics_read_63987 iwl_legacy_dbgfs_tx_statistics_read 3 63987 NULL
-+read_file_frameerrors_64001 read_file_frameerrors 3 64001 NULL
-+kmemdup_64015 kmemdup 2 64015 NULL
-+tcf_csum_skb_nextlayer_64025 tcf_csum_skb_nextlayer 3 64025 NULL
-+dbAllocDmapLev_64030 dbAllocDmapLev 0 64030 NULL
-+frequency_read_64031 frequency_read 3 64031 NULL
-+get_u8_64076 get_u8 0 64076 NULL
-+sl_realloc_bufs_64086 sl_realloc_bufs 2 64086 NULL
-+lbs_highrssi_read_64089 lbs_highrssi_read 3 64089 NULL
-+do_load_xattr_datum_64118 do_load_xattr_datum 0 64118 NULL
-+ol_quota_entries_per_block_64122 ol_quota_entries_per_block 0 64122 NULL
-+i915_gem_execbuffer_reserve_64127 i915_gem_execbuffer_reserve 0 64127 NULL
-+init_bch_64130 init_bch 1-2 64130 NULL
-+uea_idma_write_64139 uea_idma_write 3 64139 NULL
-+ablkcipher_copy_iv_64140 ablkcipher_copy_iv 3 64140 NULL
-+dlfb_ops_write_64150 dlfb_ops_write 3 64150 NULL
-+WriteReg_64163 WriteReg 0 64163 NULL
-+cpumask_scnprintf_64170 cpumask_scnprintf 2 64170 NULL
-+alloc_session_64171 alloc_session 2-1 64171 NULL
-+ea_len_64229 ea_len 0 64229 NULL
-+header_len_64232 header_len 0 64232 NULL
-+xfrm_acquire_msgsize_64239 xfrm_acquire_msgsize 0 64239 NULL
-+redrat3_transmit_ir_64244 redrat3_transmit_ir 3 64244 NULL
-+fuse_do_getattr_64245 fuse_do_getattr 0 64245 NULL
-+io_capture_transfer_64276 io_capture_transfer 4 64276 NULL
-+btrfs_file_extent_offset_64278 btrfs_file_extent_offset 0 64278 NULL
-+xfs_dir_cilookup_result_64288 xfs_dir_cilookup_result 3 64288 NULL nohasharray
-+event_id_read_64288 event_id_read 3 64288 &xfs_dir_cilookup_result_64288
-+ocfs2_block_check_validate_bhs_64302 ocfs2_block_check_validate_bhs 0 64302 NULL
-+snd_hda_get_sub_nodes_64304 snd_hda_get_sub_nodes 0 64304 NULL
-+sisusbcon_clear_64329 sisusbcon_clear 4-3-5 64329 NULL
-+ts_write_64336 ts_write 3 64336 NULL
-+usbtmc_write_64340 usbtmc_write 3 64340 NULL
-+user_regset_copyin_64360 user_regset_copyin 7 64360 NULL
-+llc_alloc_frame_64366 llc_alloc_frame 4 64366 NULL
-+ilo_write_64378 ilo_write 3 64378 NULL
-+ir_lirc_transmit_ir_64403 ir_lirc_transmit_ir 3 64403 NULL
-+pidlist_allocate_64404 pidlist_allocate 1 64404 NULL
-+rx_hdr_overflow_read_64407 rx_hdr_overflow_read 3 64407 NULL
-+snd_card_create_64418 snd_card_create 4 64418 NULL nohasharray
-+keyctl_get_security_64418 keyctl_get_security 3 64418 &snd_card_create_64418
-+ax25_recvmsg_64441 ax25_recvmsg 4 64441 NULL
-+pfkey_sockaddr_len_64453 pfkey_sockaddr_len 0 64453 NULL
-+ip_vs_create_timeout_table_64478 ip_vs_create_timeout_table 2 64478 NULL
-+alloc_large_system_hash_64490 alloc_large_system_hash 2 64490 NULL
-+p54_parse_rssical_64493 p54_parse_rssical 3 64493 NULL
-+emulator_cmpxchg_emulated_64501 emulator_cmpxchg_emulated 5 64501 NULL
-+msg_data_sz_64503 msg_data_sz 0 64503 NULL
-+crypto_blkcipher_alignmask_64520 crypto_blkcipher_alignmask 0 64520 NULL
-+opera1_usb_i2c_msgxfer_64521 opera1_usb_i2c_msgxfer 4 64521 NULL
-+iwl_dbgfs_ucode_tracing_write_64524 iwl_dbgfs_ucode_tracing_write 3 64524 NULL
-+ses_send_diag_64527 ses_send_diag 4 64527 NULL
-+lm8323_read_64547 lm8323_read 4 64547 NULL
-+__spi_sync_64561 __spi_sync 0 64561 NULL
-+__apei_exec_run_64563 __apei_exec_run 0 64563 NULL
-+diva_os_alloc_message_buffer_64568 diva_os_alloc_message_buffer 1 64568 NULL
-+kstrtoul_from_user_64569 kstrtoul_from_user 2 64569 NULL
-+use_pool_64607 use_pool 2 64607 NULL
-+fanotify_write_64623 fanotify_write 3 64623 NULL
-+ocfs2_read_xattr_block_64661 ocfs2_read_xattr_block 0 64661 NULL
-+nr_free_zone_pages_64680 nr_free_zone_pages 0 64680 NULL
-+ip_select_ident_more_64707 ip_select_ident_more 4 64707 NULL
-+__feat_register_sp_64712 __feat_register_sp 6 64712 NULL
-+snd_pcm_oss_capture_position_fixup_64713 snd_pcm_oss_capture_position_fixup 0 64713 NULL
-+dapm_bias_read_file_64715 dapm_bias_read_file 3 64715 NULL
-+atomic_add_return_64720 atomic_add_return 0-1 64720 NULL
-+i2400m_msg_to_dev_64722 i2400m_msg_to_dev 3 64722 NULL
-+AscGetChipVersion_64737 AscGetChipVersion 0 64737 NULL
-+squashfs_read_inode_lookup_table_64739 squashfs_read_inode_lookup_table 4 64739 NULL
-+bio_map_kern_64751 bio_map_kern 3 64751 NULL
-+rt2x00debug_write_csr_64753 rt2x00debug_write_csr 3 64753 NULL
-+isr_low_rssi_read_64789 isr_low_rssi_read 3 64789 NULL
-+nfsctl_transaction_write_64800 nfsctl_transaction_write 3 64800 NULL
-+rfkill_fop_write_64808 rfkill_fop_write 3 64808 NULL
-+megaraid_change_queue_depth_64815 megaraid_change_queue_depth 2 64815 NULL
-+ecryptfs_send_miscdev_64816 ecryptfs_send_miscdev 2 64816 NULL
-+do_kimage_alloc_64827 do_kimage_alloc 3 64827 NULL
-+em28xx_read_reg_64839 em28xx_read_reg 0 64839 NULL
-+altera_set_dr_pre_64862 altera_set_dr_pre 2 64862 NULL
-+ffs_epfile_io_64886 ffs_epfile_io 3 64886 NULL
-+ieee80211_if_read_ave_beacon_64924 ieee80211_if_read_ave_beacon 3 64924 NULL
-+usb_reset_and_verify_device_64933 usb_reset_and_verify_device 0 64933 NULL
-+ip_options_get_from_user_64958 ip_options_get_from_user 4 64958 NULL
-+pskb_pull_65005 pskb_pull 2 65005 NULL
-+crypto_ahash_digestsize_65014 crypto_ahash_digestsize 0 65014 NULL
-+insert_dent_65034 insert_dent 7 65034 NULL
-+brcmf_sdcard_rwdata_65041 brcmf_sdcard_rwdata 5 65041 NULL
-+ath9k_multi_regread_65056 ath9k_multi_regread 4 65056 NULL
-+pcibios_enable_device_65059 pcibios_enable_device 0 65059 NULL
-+bnx2fc_process_l2_frame_compl_65072 bnx2fc_process_l2_frame_compl 3 65072 NULL
-+__alloc_bootmem_node_high_65076 __alloc_bootmem_node_high 2 65076 NULL
-+ocfs2_truncate_cluster_pages_65086 ocfs2_truncate_cluster_pages 2 65086 NULL
-+nf_bridge_mtu_reduction_65192 nf_bridge_mtu_reduction 0 65192 NULL
-+nfulnl_alloc_skb_65207 nfulnl_alloc_skb 2-1 65207 NULL
-+whci_n_caps_65247 whci_n_caps 0 65247 NULL
-+kmalloc_parameter_65279 kmalloc_parameter 1 65279 NULL
-+compat_core_sys_select_65285 compat_core_sys_select 1 65285 NULL
-+redirected_tty_write_65297 redirected_tty_write 3 65297 NULL
-+get_var_len_65304 get_var_len 0 65304 NULL
-+unpack_array_65318 unpack_array 0 65318 NULL
-+rts51x_get_rsp_65334 rts51x_get_rsp 2 65334 NULL
-+dccp_setsockopt_service_65336 dccp_setsockopt_service 4 65336 NULL
-+dma_rx_requested_read_65354 dma_rx_requested_read 3 65354 NULL
-+alloc_cpu_rmap_65363 alloc_cpu_rmap 1 65363 NULL
-+__alloc_bootmem_nopanic_65397 __alloc_bootmem_nopanic 1 65397 NULL
-+trace_seq_to_user_65398 trace_seq_to_user 3 65398 NULL
-+usb_ep_enable_65405 usb_ep_enable 0 65405 NULL
-+iio_device_add_channel_sysfs_65406 iio_device_add_channel_sysfs 0 65406 NULL
-+ocfs2_write_begin_nolock_65410 ocfs2_write_begin_nolock 4-3 65410 NULL
-+drm_calloc_large_65421 drm_calloc_large 1-2 65421 NULL
-+device_add_groups_65423 device_add_groups 0 65423 NULL
-+xpc_kzalloc_cacheline_aligned_65433 xpc_kzalloc_cacheline_aligned 1 65433 NULL
-+usb_alloc_coherent_65444 usb_alloc_coherent 2 65444 NULL
-+clear_user_65470 clear_user 2 65470 NULL
-+ath_rx_edma_init_65483 ath_rx_edma_init 2 65483 NULL
-+alloc_dr_65495 alloc_dr 2 65495 NULL
-+selnl_msglen_65499 selnl_msglen 0 65499 NULL
-diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash_aux.data b/tools/gcc/size_overflow_plugin/size_overflow_hash_aux.data
-new file mode 100644
-index 0000000..4ad4525
---- /dev/null
-+++ b/tools/gcc/size_overflow_plugin/size_overflow_hash_aux.data
-@@ -0,0 +1,91 @@
-+spa_set_aux_vdevs_746 spa_set_aux_vdevs 3 746 NULL
-+zfs_lookup_2144 zfs_lookup 0 2144 NULL
-+mappedread_2627 mappedread 2 2627 NULL
-+vdev_disk_dio_alloc_2957 vdev_disk_dio_alloc 1 2957 NULL
-+nv_alloc_pushpage_spl_4286 nv_alloc_pushpage_spl 2 4286 NULL
-+zpl_xattr_get_4574 zpl_xattr_get 0 4574 NULL
-+sa_replace_all_by_template_5699 sa_replace_all_by_template 3 5699 NULL
-+dmu_write_6048 dmu_write 4-3 6048 NULL
-+dmu_buf_hold_array_6095 dmu_buf_hold_array 4-3 6095 NULL
-+update_pages_6225 update_pages 2-3 6225 NULL
-+bio_nr_pages_7117 bio_nr_pages 0-2 7117 NULL
-+dmu_buf_hold_array_by_bonus_8562 dmu_buf_hold_array_by_bonus 3-2 8562 NULL
-+zpios_dmu_write_8858 zpios_dmu_write 4-5 8858 NULL
-+ddi_copyout_9401 ddi_copyout 3 9401 NULL
-+avl_numnodes_12384 avl_numnodes 0 12384 NULL
-+dmu_write_uio_dnode_12473 dmu_write_uio_dnode 3 12473 NULL
-+dmu_xuio_init_12866 dmu_xuio_init 2 12866 NULL
-+zpl_read_common_14389 zpl_read_common 0 14389 NULL
-+dmu_snapshot_realname_14632 dmu_snapshot_realname 4 14632 NULL
-+kmem_alloc_debug_14852 kmem_alloc_debug 1 14852 NULL
-+kmalloc_node_nofail_15151 kmalloc_node_nofail 1 15151 NULL
-+dmu_write_uio_16351 dmu_write_uio 4 16351 NULL
-+zfs_log_write_16524 zfs_log_write 6-5 16524 NULL
-+sa_build_layouts_16910 sa_build_layouts 3 16910 NULL
-+dsl_dir_namelen_17053 dsl_dir_namelen 0 17053 NULL
-+kcopy_copy_to_user_17336 kcopy_copy_to_user 5 17336 NULL
-+sa_add_layout_entry_17507 sa_add_layout_entry 3 17507 NULL
-+sa_attr_table_setup_18029 sa_attr_table_setup 3 18029 NULL
-+uiocopy_18680 uiocopy 2 18680 NULL
-+dmu_buf_hold_array_by_dnode_19125 dmu_buf_hold_array_by_dnode 2-3 19125 NULL
-+zpl_acl_from_xattr_21141 zpl_acl_from_xattr 2 21141 NULL
-+dsl_pool_tx_assign_init_22518 dsl_pool_tx_assign_init 2 22518 NULL
-+nvlist_lookup_byte_array_22527 nvlist_lookup_byte_array 0 22527 NULL
-+sa_replace_all_by_template_locked_22533 sa_replace_all_by_template_locked 3 22533 NULL
-+tsd_hash_table_init_22559 tsd_hash_table_init 1 22559 NULL
-+spa_vdev_remove_aux_23966 spa_vdev_remove_aux 4 23966 NULL
-+zpl_xattr_acl_set_access_24129 zpl_xattr_acl_set_access 4 24129 NULL
-+dmu_assign_arcbuf_24622 dmu_assign_arcbuf 2 24622 NULL
-+zap_lookup_norm_25166 zap_lookup_norm 9 25166 NULL
-+dmu_prealloc_25456 dmu_prealloc 4-3 25456 NULL
-+kmalloc_nofail_26347 kmalloc_nofail 1 26347 NULL
-+zfsctl_snapshot_zpath_27578 zfsctl_snapshot_zpath 2 27578 NULL
-+zpios_dmu_read_30015 zpios_dmu_read 4-5 30015 NULL
-+splat_write_30943 splat_write 3 30943 NULL
-+zpl_xattr_get_sa_31183 zpl_xattr_get_sa 0 31183 NULL
-+dmu_read_uio_31467 dmu_read_uio 4 31467 NULL
-+zfs_replay_fuids_31479 zfs_replay_fuids 4 31479 NULL
-+spa_history_log_to_phys_31632 spa_history_log_to_phys 0-1 31632 NULL
-+__zpl_xattr_get_32601 __zpl_xattr_get 0 32601 NULL
-+proc_copyout_string_34049 proc_copyout_string 2 34049 NULL
-+nv_alloc_sleep_spl_34544 nv_alloc_sleep_spl 2 34544 NULL
-+nv_alloc_nosleep_spl_34761 nv_alloc_nosleep_spl 2 34761 NULL
-+zap_leaf_array_match_36922 zap_leaf_array_match 4 36922 NULL
-+copyinstr_36980 copyinstr 3 36980 NULL
-+zpl_xattr_acl_set_default_37864 zpl_xattr_acl_set_default 4 37864 NULL
-+splat_read_38116 splat_read 3 38116 NULL
-+sa_setup_38756 sa_setup 4 38756 NULL
-+vdev_disk_physio_39898 vdev_disk_physio 3 39898 NULL
-+arc_buf_size_39982 arc_buf_size 0 39982 NULL
-+kzalloc_nofail_40719 kzalloc_nofail 1 40719 NULL
-+fuidstr_to_sid_40777 fuidstr_to_sid 4 40777 NULL
-+vdev_raidz_matrix_reconstruct_40852 vdev_raidz_matrix_reconstruct 2-3 40852 NULL
-+sa_find_layout_40892 sa_find_layout 4 40892 NULL
-+zpl_xattr_get_dir_41918 zpl_xattr_get_dir 0 41918 NULL
-+zfs_sa_get_xattr_42600 zfs_sa_get_xattr 0 42600 NULL
-+zpl_xattr_acl_set_42808 zpl_xattr_acl_set 4 42808 NULL
-+xdr_dec_array_43091 xdr_dec_array 5 43091 NULL
-+dsl_dataset_namelen_43136 dsl_dataset_namelen 0 43136 NULL
-+kcopy_write_43683 kcopy_write 3 43683 NULL
-+uiomove_44355 uiomove 2 44355 NULL
-+dmu_read_44418 dmu_read 4-3 44418 NULL
-+ddi_copyin_44846 ddi_copyin 3 44846 NULL
-+kcopy_do_get_45061 kcopy_do_get 5 45061 NULL
-+copyin_45945 copyin 3 45945 NULL
-+zil_itx_create_46555 zil_itx_create 2 46555 NULL
-+dmu_write_uio_dbuf_48064 dmu_write_uio_dbuf 3 48064 NULL
-+spa_history_write_49650 spa_history_write 3 49650 NULL
-+kcopy_copy_pages_to_user_49823 kcopy_copy_pages_to_user 3-4 49823 NULL
-+zfs_log_write_50162 zfs_log_write 6-5 50162 NULL
-+i_fm_alloc_51038 i_fm_alloc 2 51038 NULL
-+copyout_51409 copyout 3 51409 NULL
-+zvol_log_write_54898 zvol_log_write 4-3 54898 NULL
-+zfs_acl_node_alloc_55641 zfs_acl_node_alloc 1 55641 NULL
-+get_nvlist_56685 get_nvlist 2 56685 NULL
-+zprop_get_numprops_56820 zprop_get_numprops 0 56820 NULL
-+splat_taskq_test4_common_59829 splat_taskq_test4_common 5 59829 NULL
-+zfs_replay_domain_cnt_61399 zfs_replay_domain_cnt 0 61399 NULL
-+zpios_write_61823 zpios_write 3 61823 NULL
-+proc_copyin_string_62019 proc_copyin_string 4 62019 NULL
-+random_get_pseudo_bytes_64611 random_get_pseudo_bytes 2 64611 NULL
-+zpios_read_64734 zpios_read 3 64734 NULL
-diff --git a/tools/gcc/size_overflow_plugin/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c
-new file mode 100644
-index 0000000..7e07890
---- /dev/null
-+++ b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c
-@@ -0,0 +1,260 @@
-+/*
-+ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
-+ * Licensed under the GPL v2, or (at your option) v3
-+ *
-+ * Homepage:
-+ * https://github.com/ephox-gcc-plugins
-+ * http://www.grsecurity.net/~ephox/overflow_plugin/
-+ *
-+ * Documentation:
-+ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
-+ *
-+ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
-+ * with double integer precision (DImode/TImode for 32/64 bit integer types).
-+ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
-+ *
-+ * Usage:
-+ * $ make
-+ * $ make run
-+ */
-+
-+#include "gcc-common.h"
-+#include "size_overflow.h"
-+
-+int plugin_is_GPL_compatible;
-+
-+tree report_size_overflow_decl;
-+
-+tree size_overflow_type_HI;
-+tree size_overflow_type_SI;
-+tree size_overflow_type_DI;
-+tree size_overflow_type_TI;
-+
-+static struct plugin_info size_overflow_plugin_info = {
-+ .version = "20140725_01",
-+ .help = "no-size-overflow\tturn off size overflow checking\n",
-+};
-+
-+static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
-+{
-+ unsigned int arg_count;
-+ enum tree_code code = TREE_CODE(*node);
-+
-+ switch (code) {
-+ case FUNCTION_DECL:
-+ arg_count = type_num_arguments(TREE_TYPE(*node));
-+ break;
-+ case FUNCTION_TYPE:
-+ case METHOD_TYPE:
-+ arg_count = type_num_arguments(*node);
-+ break;
-+ default:
-+ *no_add_attrs = true;
-+ error("%s: %qE attribute only applies to functions", __func__, name);
-+ return NULL_TREE;
-+ }
-+
-+ for (; args; args = TREE_CHAIN(args)) {
-+ tree position = TREE_VALUE(args);
-+		if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_LOW(position) > arg_count) {
-+			error("%s: parameter %u is out of range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
-+ *no_add_attrs = true;
-+ }
-+ }
-+ return NULL_TREE;
-+}
-+
-+static tree handle_intentional_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
-+{
-+ unsigned int arg_count;
-+ enum tree_code code = TREE_CODE(*node);
-+
-+ switch (code) {
-+ case FUNCTION_DECL:
-+ arg_count = type_num_arguments(TREE_TYPE(*node));
-+ break;
-+ case FUNCTION_TYPE:
-+ case METHOD_TYPE:
-+ arg_count = type_num_arguments(*node);
-+ break;
-+ case FIELD_DECL:
-+ return NULL_TREE;
-+ default:
-+ *no_add_attrs = true;
-+ error("%qE attribute only applies to functions", name);
-+ return NULL_TREE;
-+ }
-+
-+ if (tree_to_shwi(TREE_VALUE(args)) != 0)
-+ return NULL_TREE;
-+
-+ for (; args; args = TREE_CHAIN(args)) {
-+ tree position = TREE_VALUE(args);
-+		if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_LOW(position) > arg_count) {
-+			error("%s: parameter %u is out of range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
-+ *no_add_attrs = true;
-+ }
-+ }
-+ return NULL_TREE;
-+}
-+
-+static struct attribute_spec size_overflow_attr = {
-+ .name = "size_overflow",
-+ .min_length = 1,
-+ .max_length = -1,
-+ .decl_required = true,
-+ .type_required = false,
-+ .function_type_required = false,
-+ .handler = handle_size_overflow_attribute,
-+#if BUILDING_GCC_VERSION >= 4007
-+ .affects_type_identity = false
-+#endif
-+};
-+
-+static struct attribute_spec intentional_overflow_attr = {
-+ .name = "intentional_overflow",
-+ .min_length = 1,
-+ .max_length = -1,
-+ .decl_required = true,
-+ .type_required = false,
-+ .function_type_required = false,
-+ .handler = handle_intentional_overflow_attribute,
-+#if BUILDING_GCC_VERSION >= 4007
-+ .affects_type_identity = false
-+#endif
-+};
-+
-+static void register_attributes(void __unused *event_data, void __unused *data)
-+{
-+ register_attribute(&size_overflow_attr);
-+ register_attribute(&intentional_overflow_attr);
-+}
-+
-+static tree create_typedef(tree type, const char* ident)
-+{
-+ tree new_type, decl;
-+
-+ new_type = build_variant_type_copy(type);
-+ decl = build_decl(BUILTINS_LOCATION, TYPE_DECL, get_identifier(ident), new_type);
-+ DECL_ORIGINAL_TYPE(decl) = type;
-+ TYPE_NAME(new_type) = decl;
-+ return new_type;
-+}
-+
-+// Create the noreturn report_size_overflow() function decl.
-+static void size_overflow_start_unit(void __unused *gcc_data, void __unused *user_data)
-+{
-+ tree const_char_ptr_type_node;
-+ tree fntype;
-+
-+ const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
-+
-+ size_overflow_type_HI = create_typedef(intHI_type_node, "size_overflow_type_HI");
-+ size_overflow_type_SI = create_typedef(intSI_type_node, "size_overflow_type_SI");
-+ size_overflow_type_DI = create_typedef(intDI_type_node, "size_overflow_type_DI");
-+ size_overflow_type_TI = create_typedef(intTI_type_node, "size_overflow_type_TI");
-+
-+ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
-+ fntype = build_function_type_list(void_type_node,
-+ const_char_ptr_type_node,
-+ unsigned_type_node,
-+ const_char_ptr_type_node,
-+ const_char_ptr_type_node,
-+ NULL_TREE);
-+ report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
-+
-+ DECL_ASSEMBLER_NAME(report_size_overflow_decl);
-+ TREE_PUBLIC(report_size_overflow_decl) = 1;
-+ DECL_EXTERNAL(report_size_overflow_decl) = 1;
-+ DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
-+ TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
-+}
-+
-+
-+extern struct gimple_opt_pass pass_dce;
-+
-+static struct opt_pass *make_dce_pass(void)
-+{
-+#if BUILDING_GCC_VERSION >= 4009
-+ return make_pass_dce(g);
-+#else
-+ return &pass_dce.pass;
-+#endif
-+}
-+
-+
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
-+{
-+ int i;
-+ const char * const plugin_name = plugin_info->base_name;
-+ const int argc = plugin_info->argc;
-+ const struct plugin_argument * const argv = plugin_info->argv;
-+ bool enable = true;
-+ struct register_pass_info insert_size_overflow_asm_pass_info;
-+ struct register_pass_info __unused dump_before_pass_info;
-+ struct register_pass_info __unused dump_after_pass_info;
-+ struct register_pass_info insert_size_overflow_check_info;
-+ struct register_pass_info dce_pass_info;
-+ static const struct ggc_root_tab gt_ggc_r_gt_size_overflow[] = {
-+ {
-+ .base = &report_size_overflow_decl,
-+ .nelt = 1,
-+ .stride = sizeof(report_size_overflow_decl),
-+ .cb = &gt_ggc_mx_tree_node,
-+ .pchw = &gt_pch_nx_tree_node
-+ },
-+ LAST_GGC_ROOT_TAB
-+ };
-+
-+ insert_size_overflow_asm_pass_info.pass = make_insert_size_overflow_asm_pass();
-+ insert_size_overflow_asm_pass_info.reference_pass_name = "ssa";
-+ insert_size_overflow_asm_pass_info.ref_pass_instance_number = 1;
-+ insert_size_overflow_asm_pass_info.pos_op = PASS_POS_INSERT_AFTER;
-+
-+ dump_before_pass_info.pass = make_dump_pass();
-+ dump_before_pass_info.reference_pass_name = "increase_alignment";
-+ dump_before_pass_info.ref_pass_instance_number = 1;
-+ dump_before_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
-+
-+ insert_size_overflow_check_info.pass = make_insert_size_overflow_check();
-+ insert_size_overflow_check_info.reference_pass_name = "increase_alignment";
-+ insert_size_overflow_check_info.ref_pass_instance_number = 1;
-+ insert_size_overflow_check_info.pos_op = PASS_POS_INSERT_BEFORE;
-+
-+ dump_after_pass_info.pass = make_dump_pass();
-+ dump_after_pass_info.reference_pass_name = "increase_alignment";
-+ dump_after_pass_info.ref_pass_instance_number = 1;
-+ dump_after_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
-+
-+ dce_pass_info.pass = make_dce_pass();
-+ dce_pass_info.reference_pass_name = "vrp";
-+ dce_pass_info.ref_pass_instance_number = 1;
-+ dce_pass_info.pos_op = PASS_POS_INSERT_AFTER;
-+
-+ if (!plugin_default_version_check(version, &gcc_version)) {
-+ error(G_("incompatible gcc/plugin versions"));
-+ return 1;
-+ }
-+
-+ for (i = 0; i < argc; ++i) {
-+ if (!strcmp(argv[i].key, "no-size-overflow")) {
-+ enable = false;
-+ continue;
-+ }
-+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
-+ }
-+
-+ register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
-+ if (enable) {
-+ register_callback(plugin_name, PLUGIN_START_UNIT, &size_overflow_start_unit, NULL);
-+ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_size_overflow);
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &insert_size_overflow_asm_pass_info);
-+// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_before_pass_info);
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &insert_size_overflow_check_info);
-+// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_after_pass_info);
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dce_pass_info);
-+ }
-+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
-+
-+ return 0;
-+}
-diff --git a/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c b/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c
-new file mode 100644
-index 0000000..2a693fe
---- /dev/null
-+++ b/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c
-@@ -0,0 +1,355 @@
-+/*
-+ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
-+ * Licensed under the GPL v2, or (at your option) v3
-+ *
-+ * Homepage:
-+ * https://github.com/ephox-gcc-plugins
-+ * http://www.grsecurity.net/~ephox/overflow_plugin/
-+ *
-+ * Documentation:
-+ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
-+ *
-+ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
-+ * with double integer precision (DImode/TImode for 32/64 bit integer types).
-+ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
-+ *
-+ * Usage:
-+ * $ make
-+ * $ make run
-+ */
-+
-+#include "gcc-common.h"
-+#include "size_overflow.h"
-+
-+#include "size_overflow_hash.h"
-+#include "size_overflow_hash_aux.h"
-+
-+#define CODES_LIMIT 32
-+
-+static unsigned char get_tree_code(const_tree type)
-+{
-+ switch (TREE_CODE(type)) {
-+ case ARRAY_TYPE:
-+ return 0;
-+ case BOOLEAN_TYPE:
-+ return 1;
-+ case ENUMERAL_TYPE:
-+ return 2;
-+ case FUNCTION_TYPE:
-+ return 3;
-+ case INTEGER_TYPE:
-+ return 4;
-+ case POINTER_TYPE:
-+ return 5;
-+ case RECORD_TYPE:
-+ return 6;
-+ case UNION_TYPE:
-+ return 7;
-+ case VOID_TYPE:
-+ return 8;
-+ case REAL_TYPE:
-+ return 9;
-+ case VECTOR_TYPE:
-+ return 10;
-+ case REFERENCE_TYPE:
-+ return 11;
-+ case OFFSET_TYPE:
-+ return 12;
-+ case COMPLEX_TYPE:
-+ return 13;
-+ default:
-+ debug_tree((tree)type);
-+ gcc_unreachable();
-+ }
-+}
-+
-+struct function_hash {
-+ size_t tree_codes_len;
-+ unsigned char tree_codes[CODES_LIMIT];
-+ const_tree fndecl;
-+ unsigned int hash;
-+};
-+
-+// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
-+static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
-+{
-+#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
-+#define cwmixa( in ) { cwfold( in, m, k, h ); }
-+#define cwmixb( in ) { cwfold( in, n, h, k ); }
-+
-+ unsigned int m = 0x57559429;
-+ unsigned int n = 0x5052acdb;
-+ const unsigned int *key4 = (const unsigned int *)key;
-+ unsigned int h = len;
-+ unsigned int k = len + seed + n;
-+ unsigned long long p;
-+
-+ while (len >= 8) {
-+ cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
-+ len -= 8;
-+ }
-+ if (len >= 4) {
-+ cwmixb(key4[0]) key4 += 1;
-+ len -= 4;
-+ }
-+ if (len)
-+ cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
-+ cwmixb(h ^ (k + n));
-+ return k ^ h;
-+
-+#undef cwfold
-+#undef cwmixa
-+#undef cwmixb
-+}
-+
-+static void set_hash(const char *fn_name, struct function_hash *fn_hash_data)
-+{
-+ unsigned int fn, codes, seed = 0;
-+
-+ fn = CrapWow(fn_name, strlen(fn_name), seed) & 0xffff;
-+ codes = CrapWow((const char*)fn_hash_data->tree_codes, fn_hash_data->tree_codes_len, seed) & 0xffff;
-+
-+ fn_hash_data->hash = fn ^ codes;
-+}
-+
-+static void set_node_codes(const_tree type, struct function_hash *fn_hash_data)
-+{
-+ gcc_assert(type != NULL_TREE);
-+ gcc_assert(TREE_CODE_CLASS(TREE_CODE(type)) == tcc_type);
-+
-+ while (type && fn_hash_data->tree_codes_len < CODES_LIMIT) {
-+ fn_hash_data->tree_codes[fn_hash_data->tree_codes_len] = get_tree_code(type);
-+ fn_hash_data->tree_codes_len++;
-+ type = TREE_TYPE(type);
-+ }
-+}
-+
-+static void set_result_codes(const_tree node, struct function_hash *fn_hash_data)
-+{
-+ const_tree result;
-+
-+ gcc_assert(node != NULL_TREE);
-+
-+ if (DECL_P(node)) {
-+ result = DECL_RESULT(node);
-+ if (result != NULL_TREE)
-+ return set_node_codes(TREE_TYPE(result), fn_hash_data);
-+ return set_result_codes(TREE_TYPE(node), fn_hash_data);
-+ }
-+
-+ gcc_assert(TYPE_P(node));
-+
-+ if (TREE_CODE(node) == FUNCTION_TYPE)
-+ return set_result_codes(TREE_TYPE(node), fn_hash_data);
-+
-+ return set_node_codes(node, fn_hash_data);
-+}
-+
-+static void set_function_codes(struct function_hash *fn_hash_data)
-+{
-+ const_tree arg, type = TREE_TYPE(fn_hash_data->fndecl);
-+ enum tree_code code = TREE_CODE(type);
-+
-+ gcc_assert(code == FUNCTION_TYPE || code == METHOD_TYPE);
-+
-+ set_result_codes(fn_hash_data->fndecl, fn_hash_data);
-+
-+ for (arg = TYPE_ARG_TYPES(type); arg != NULL_TREE && fn_hash_data->tree_codes_len < CODES_LIMIT; arg = TREE_CHAIN(arg))
-+ set_node_codes(TREE_VALUE(arg), fn_hash_data);
-+}
-+
-+static const struct size_overflow_hash *get_proper_hash_chain(const struct size_overflow_hash *entry, const char *func_name)
-+{
-+ while (entry) {
-+ if (!strcmp(entry->name, func_name))
-+ return entry;
-+ entry = entry->next;
-+ }
-+ return NULL;
-+}
-+
-+const struct size_overflow_hash *get_function_hash(const_tree fndecl)
-+{
-+ const struct size_overflow_hash *entry;
-+ struct function_hash fn_hash_data;
-+ const char *func_name;
-+
-+ // skip builtins __builtin_constant_p
-+ if (DECL_BUILT_IN(fndecl))
-+ return NULL;
-+
-+ fn_hash_data.fndecl = fndecl;
-+ fn_hash_data.tree_codes_len = 0;
-+
-+ set_function_codes(&fn_hash_data);
-+ gcc_assert(fn_hash_data.tree_codes_len != 0);
-+
-+ func_name = DECL_NAME_POINTER(fn_hash_data.fndecl);
-+ set_hash(func_name, &fn_hash_data);
-+
-+ entry = size_overflow_hash[fn_hash_data.hash];
-+ entry = get_proper_hash_chain(entry, func_name);
-+ if (entry)
-+ return entry;
-+ entry = size_overflow_hash_aux[fn_hash_data.hash];
-+ return get_proper_hash_chain(entry, func_name);
-+}
-+
-+static void print_missing_msg(const_tree func, unsigned int argnum)
-+{
-+ location_t loc;
-+ const char *curfunc;
-+ struct function_hash fn_hash_data;
-+
-+ fn_hash_data.fndecl = DECL_ORIGIN(func);
-+ fn_hash_data.tree_codes_len = 0;
-+
-+ loc = DECL_SOURCE_LOCATION(fn_hash_data.fndecl);
-+ curfunc = DECL_NAME_POINTER(fn_hash_data.fndecl);
-+
-+ set_function_codes(&fn_hash_data);
-+ set_hash(curfunc, &fn_hash_data);
-+
-+ inform(loc, "Function %s is missing from the size_overflow hash table +%s+%u+%u+", curfunc, curfunc, argnum, fn_hash_data.hash);
-+}
-+
-+unsigned int find_arg_number_tree(const_tree arg, const_tree func)
-+{
-+ tree var;
-+ unsigned int argnum = 1;
-+
-+ if (TREE_CODE(arg) == SSA_NAME)
-+ arg = SSA_NAME_VAR(arg);
-+
-+ for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var), argnum++) {
-+ if (!operand_equal_p(arg, var, 0) && strcmp(DECL_NAME_POINTER(var), DECL_NAME_POINTER(arg)))
-+ continue;
-+ if (!skip_types(var))
-+ return argnum;
-+ }
-+
-+ return CANNOT_FIND_ARG;
-+}
-+
-+bool is_size_overflow_intentional_asm_turn_off(const gasm *stmt)
-+{
-+ const char *str;
-+
-+ if (!stmt)
-+ return false;
-+ str = gimple_asm_string(stmt);
-+ return !strncmp(str, TURN_OFF_ASM_STR, sizeof(TURN_OFF_ASM_STR) - 1);
-+}
-+
-+bool is_size_overflow_intentional_asm_yes(const gasm *stmt)
-+{
-+ const char *str;
-+
-+ if (!stmt)
-+ return false;
-+ str = gimple_asm_string(stmt);
-+ return !strncmp(str, YES_ASM_STR, sizeof(YES_ASM_STR) - 1);
-+}
-+
-+bool is_size_overflow_asm(const gasm *stmt)
-+{
-+ const char *str;
-+
-+ if (!stmt)
-+ return false;
-+ str = gimple_asm_string(stmt);
-+ return !strncmp(str, OK_ASM_STR, sizeof(OK_ASM_STR) - 1);
-+}
-+
-+bool is_a_return_check(const_tree node)
-+{
-+ if (TREE_CODE(node) == FUNCTION_DECL)
-+ return true;
-+
-+ gcc_assert(TREE_CODE(node) == PARM_DECL);
-+ return false;
-+}
-+
-+// Get the argnum of a function decl, if node is a return then the argnum is 0
-+unsigned int get_function_num(const_tree node, const_tree orig_fndecl)
-+{
-+ if (is_a_return_check(node))
-+ return 0;
-+ else
-+ return find_arg_number_tree(node, orig_fndecl);
-+}
-+
-+unsigned int get_correct_arg_count(unsigned int argnum, const_tree fndecl)
-+{
-+ const struct size_overflow_hash *hash;
-+ unsigned int new_argnum;
-+ tree arg;
-+ const_tree origarg;
-+
-+ if (argnum == 0)
-+ return argnum;
-+
-+ hash = get_function_hash(fndecl);
-+ if (hash && hash->param & (1U << argnum))
-+ return argnum;
-+
-+ if (DECL_EXTERNAL(fndecl))
-+ return argnum;
-+
-+ origarg = DECL_ARGUMENTS(DECL_ORIGIN(fndecl));
-+ argnum--;
-+ while (origarg && argnum) {
-+ origarg = TREE_CHAIN(origarg);
-+ argnum--;
-+ }
-+ gcc_assert(argnum == 0);
-+ gcc_assert(origarg != NULL_TREE);
-+
-+ for (arg = DECL_ARGUMENTS(fndecl), new_argnum = 1; arg; arg = TREE_CHAIN(arg), new_argnum++)
-+ if (operand_equal_p(origarg, arg, 0) || !strcmp(DECL_NAME_POINTER(origarg), DECL_NAME_POINTER(arg)))
-+ return new_argnum;
-+
-+ return CANNOT_FIND_ARG;
-+}
-+
-+static bool is_in_hash_table(const_tree fndecl, unsigned int num)
-+{
-+ const struct size_overflow_hash *hash;
-+
-+ hash = get_function_hash(fndecl);
-+ if (hash && (hash->param & (1U << num)))
-+ return true;
-+ return false;
-+}
-+
-+/* Check if the function has a size_overflow attribute or it is in the size_overflow hash table.
-+ * If the function is missing everywhere then print the missing message into stderr.
-+ */
-+bool is_missing_function(const_tree orig_fndecl, unsigned int num)
-+{
-+ switch (DECL_FUNCTION_CODE(orig_fndecl)) {
-+#if BUILDING_GCC_VERSION >= 4008
-+ case BUILT_IN_BSWAP16:
-+#endif
-+ case BUILT_IN_BSWAP32:
-+ case BUILT_IN_BSWAP64:
-+ case BUILT_IN_EXPECT:
-+ case BUILT_IN_MEMCMP:
-+ return false;
-+ default:
-+ break;
-+ }
-+
-+ // skip test.c
-+ if (strcmp(DECL_NAME_POINTER(current_function_decl), "coolmalloc")) {
-+ if (lookup_attribute("size_overflow", DECL_ATTRIBUTES(orig_fndecl)))
-+ warning(0, "unnecessary size_overflow attribute on: %s\n", DECL_NAME_POINTER(orig_fndecl));
-+ }
-+
-+ if (is_in_hash_table(orig_fndecl, num))
-+ return false;
-+
-+ print_missing_msg(orig_fndecl, num);
-+ return true;
-+}
-+
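For reference, a minimal sketch (not part of the patch) of the kind of annotation the lookup above matches: lookup_attribute() checks for an attribute literally named "size_overflow", and hash->param is tested as a per-argument bitmask (1U << argnum), which suggests the attribute takes argument positions. The function example_alloc() and its parameter are invented for illustration only.

/* hedged example: argument 1 (the byte count) would be recomputed in
 * double precision and range-checked by the instrumentation pass */
void *example_alloc(unsigned long nbytes)
	__attribute__((size_overflow(1)));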
-diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
-new file mode 100644
-index 0000000..1d296ce
---- /dev/null
-+++ b/tools/gcc/stackleak_plugin.c
-@@ -0,0 +1,432 @@
-+/*
-+ * Copyright 2011-2015 by the PaX Team <pageexec@freemail.hu>
-+ * Licensed under the GPL v2
-+ *
-+ * Note: the choice of the license means that the compilation process is
-+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
-+ * but for the kernel it doesn't matter since it doesn't link against
-+ * any of the gcc libraries
-+ *
-+ * gcc plugin to help implement various PaX features
-+ *
-+ * - track lowest stack pointer
-+ *
-+ * TODO:
-+ * - initialize all local variables
-+ *
-+ * BUGS:
-+ * - none known
-+ */
-+
-+#include "gcc-common.h"
-+
-+int plugin_is_GPL_compatible;
-+
-+static int track_frame_size = -1;
-+static const char track_function[] = "pax_track_stack";
-+static const char check_function[] = "pax_check_alloca";
-+static GTY(()) tree track_function_decl;
-+static GTY(()) tree check_function_decl;
-+static bool init_locals;
-+
-+static struct plugin_info stackleak_plugin_info = {
-+ .version = "201504282245",
-+ .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
-+// "initialize-locals\t\tforcibly initialize all stack frames\n"
-+};
-+
-+static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
-+{
-+ gcall *check_alloca;
-+ tree alloca_size;
-+ cgraph_node_ptr node;
-+ int frequency;
-+ basic_block bb;
-+
-+ // insert call to void pax_check_alloca(unsigned long size)
-+ alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
-+ check_alloca = gimple_build_call(check_function_decl, 1, alloca_size);
-+ gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
-+
-+ // update the cgraph
-+ bb = gimple_bb(check_alloca);
-+ node = cgraph_get_create_node(check_function_decl);
-+ gcc_assert(node);
-+ frequency = compute_call_stmt_bb_frequency(current_function_decl, bb);
-+ cgraph_create_edge(cgraph_get_node(current_function_decl), node, check_alloca, bb->count, frequency, bb->loop_depth);
-+}
-+
-+static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
-+{
-+ gcall *track_stack;
-+ cgraph_node_ptr node;
-+ int frequency;
-+ basic_block bb;
-+
-+ // insert call to void pax_track_stack(void)
-+ track_stack = gimple_build_call(track_function_decl, 0);
-+ gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
-+
-+ // update the cgraph
-+ bb = gimple_bb(track_stack);
-+ node = cgraph_get_create_node(track_function_decl);
-+ gcc_assert(node);
-+ frequency = compute_call_stmt_bb_frequency(current_function_decl, bb);
-+ cgraph_create_edge(cgraph_get_node(current_function_decl), node, track_stack, bb->count, frequency, bb->loop_depth);
-+}
-+
-+static bool is_alloca(gimple stmt)
-+{
-+ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
-+ return true;
-+
-+#if BUILDING_GCC_VERSION >= 4007
-+ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
-+ return true;
-+#endif
-+
-+ return false;
-+}
-+
-+static unsigned int execute_stackleak_tree_instrument(void)
-+{
-+ basic_block bb, entry_bb;
-+ bool prologue_instrumented = false, is_leaf = true;
-+
-+ entry_bb = ENTRY_BLOCK_PTR_FOR_FN(cfun)->next_bb;
-+
-+ // 1. loop through BBs and GIMPLE statements
-+ FOR_EACH_BB_FN(bb, cfun) {
-+ gimple_stmt_iterator gsi;
-+
-+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
-+ gimple stmt;
-+
-+ stmt = gsi_stmt(gsi);
-+
-+ if (is_gimple_call(stmt))
-+ is_leaf = false;
-+
-+ // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
-+ if (!is_alloca(stmt))
-+ continue;
-+
-+ // 2. insert stack overflow check before each __builtin_alloca call
-+ stackleak_check_alloca(&gsi);
-+
-+ // 3. insert track call after each __builtin_alloca call
-+ stackleak_add_instrumentation(&gsi);
-+ if (bb == entry_bb)
-+ prologue_instrumented = true;
-+ }
-+ }
-+
-+ // special cases for some bad linux code: taking the address of static inline functions will materialize them
-+ // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
-+ // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
-+ // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
-+ if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
-+ return 0;
-+ if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
-+ return 0;
-+
-+ // 4. insert track call at the beginning
-+ if (!prologue_instrumented) {
-+ gimple_stmt_iterator gsi;
-+
-+ bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest;
-+ if (dom_info_available_p(CDI_DOMINATORS))
-+ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR_FOR_FN(cfun));
-+ gsi = gsi_start_bb(bb);
-+ stackleak_add_instrumentation(&gsi);
-+ }
-+
-+ return 0;
-+}
-+
-+static unsigned int execute_stackleak_final(void)
-+{
-+ rtx_insn *insn, *next;
-+
-+ if (cfun->calls_alloca)
-+ return 0;
-+
-+ // keep calls only if function frame is big enough
-+ if (get_frame_size() >= track_frame_size)
-+ return 0;
-+
-+ // 1. find pax_track_stack calls
-+ for (insn = get_insns(); insn; insn = next) {
-+ // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
-+ rtx body;
-+
-+ next = NEXT_INSN(insn);
-+ if (!CALL_P(insn))
-+ continue;
-+ body = PATTERN(insn);
-+ if (GET_CODE(body) != CALL)
-+ continue;
-+ body = XEXP(body, 0);
-+ if (GET_CODE(body) != MEM)
-+ continue;
-+ body = XEXP(body, 0);
-+ if (GET_CODE(body) != SYMBOL_REF)
-+ continue;
-+// if (strcmp(XSTR(body, 0), track_function))
-+ if (SYMBOL_REF_DECL(body) != track_function_decl)
-+ continue;
-+// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
-+ // 2. delete call
-+ delete_insn_and_edges(insn);
-+#if BUILDING_GCC_VERSION >= 4007
-+ if (GET_CODE(next) == NOTE && NOTE_KIND(next) == NOTE_INSN_CALL_ARG_LOCATION) {
-+ insn = next;
-+ next = NEXT_INSN(insn);
-+ delete_insn_and_edges(insn);
-+ }
-+#endif
-+ }
-+
-+// print_simple_rtl(stderr, get_insns());
-+// print_rtl(stderr, get_insns());
-+// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
-+
-+ return 0;
-+}
-+
-+static bool gate_stackleak_track_stack(void)
-+{
-+ tree section;
-+
-+ if (ix86_cmodel != CM_KERNEL)
-+ return false;
-+
-+ section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
-+ if (section && TREE_VALUE(section)) {
-+ section = TREE_VALUE(TREE_VALUE(section));
-+
-+ if (!strncmp(TREE_STRING_POINTER(section), ".init.text", 10))
-+ return false;
-+ if (!strncmp(TREE_STRING_POINTER(section), ".devinit.text", 13))
-+ return false;
-+ if (!strncmp(TREE_STRING_POINTER(section), ".cpuinit.text", 13))
-+ return false;
-+ if (!strncmp(TREE_STRING_POINTER(section), ".meminit.text", 13))
-+ return false;
-+ }
-+
-+ return track_frame_size >= 0;
-+}
-+
-+static void stackleak_start_unit(void *gcc_data, void *user_data)
-+{
-+ tree fntype;
-+
-+ // void pax_track_stack(void)
-+ fntype = build_function_type_list(void_type_node, NULL_TREE);
-+ track_function_decl = build_fn_decl(track_function, fntype);
-+ DECL_ASSEMBLER_NAME(track_function_decl); // for LTO
-+ TREE_PUBLIC(track_function_decl) = 1;
-+ TREE_USED(track_function_decl) = 1;
-+ DECL_EXTERNAL(track_function_decl) = 1;
-+ DECL_ARTIFICIAL(track_function_decl) = 1;
-+ DECL_PRESERVE_P(track_function_decl) = 1;
-+
-+ // void pax_check_alloca(unsigned long)
-+ fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
-+ check_function_decl = build_fn_decl(check_function, fntype);
-+ DECL_ASSEMBLER_NAME(check_function_decl); // for LTO
-+ TREE_PUBLIC(check_function_decl) = 1;
-+ TREE_USED(check_function_decl) = 1;
-+ DECL_EXTERNAL(check_function_decl) = 1;
-+ DECL_ARTIFICIAL(check_function_decl) = 1;
-+ DECL_PRESERVE_P(check_function_decl) = 1;
-+}
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
-+static const struct pass_data stackleak_tree_instrument_pass_data = {
-+#else
-+static struct gimple_opt_pass stackleak_tree_instrument_pass = {
-+ .pass = {
-+#endif
-+ .type = GIMPLE_PASS,
-+ .name = "stackleak_tree_instrument",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 5000
-+#elif BUILDING_GCC_VERSION == 4009
-+ .has_gate = true,
-+ .has_execute = true,
-+#else
-+ .gate = gate_stackleak_track_stack,
-+ .execute = execute_stackleak_tree_instrument,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = PROP_gimple_leh | PROP_cfg,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
-+ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa | TODO_rebuild_cgraph_edges
-+#if BUILDING_GCC_VERSION < 4009
-+ }
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+static const struct pass_data stackleak_final_rtl_opt_pass_data = {
-+#else
-+static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
-+ .pass = {
-+#endif
-+ .type = RTL_PASS,
-+ .name = "stackleak_final",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 5000
-+#elif BUILDING_GCC_VERSION == 4009
-+ .has_gate = true,
-+ .has_execute = true,
-+#else
-+ .gate = gate_stackleak_track_stack,
-+ .execute = execute_stackleak_final,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = 0,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0,
-+ .todo_flags_finish = TODO_dump_func
-+#if BUILDING_GCC_VERSION < 4009
-+ }
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+class stackleak_tree_instrument_pass : public gimple_opt_pass {
-+public:
-+ stackleak_tree_instrument_pass() : gimple_opt_pass(stackleak_tree_instrument_pass_data, g) {}
-+#if BUILDING_GCC_VERSION >= 5000
-+ virtual bool gate(function *) { return gate_stackleak_track_stack(); }
-+ virtual unsigned int execute(function *) { return execute_stackleak_tree_instrument(); }
-+#else
-+ bool gate() { return gate_stackleak_track_stack(); }
-+ unsigned int execute() { return execute_stackleak_tree_instrument(); }
-+#endif
-+};
-+
-+class stackleak_final_rtl_opt_pass : public rtl_opt_pass {
-+public:
-+ stackleak_final_rtl_opt_pass() : rtl_opt_pass(stackleak_final_rtl_opt_pass_data, g) {}
-+#if BUILDING_GCC_VERSION >= 5000
-+ virtual bool gate(function *) { return gate_stackleak_track_stack(); }
-+ virtual unsigned int execute(function *) { return execute_stackleak_final(); }
-+#else
-+ bool gate() { return gate_stackleak_track_stack(); }
-+ unsigned int execute() { return execute_stackleak_final(); }
-+#endif
-+};
-+}
-+
-+static opt_pass *make_stackleak_tree_instrument_pass(void)
-+{
-+ return new stackleak_tree_instrument_pass();
-+}
-+
-+static opt_pass *make_stackleak_final_rtl_opt_pass(void)
-+{
-+ return new stackleak_final_rtl_opt_pass();
-+}
-+#else
-+static struct opt_pass *make_stackleak_tree_instrument_pass(void)
-+{
-+ return &stackleak_tree_instrument_pass.pass;
-+}
-+
-+static struct opt_pass *make_stackleak_final_rtl_opt_pass(void)
-+{
-+ return &stackleak_final_rtl_opt_pass.pass;
-+}
-+#endif
-+
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
-+{
-+ const char * const plugin_name = plugin_info->base_name;
-+ const int argc = plugin_info->argc;
-+ const struct plugin_argument * const argv = plugin_info->argv;
-+ int i;
-+ struct register_pass_info stackleak_tree_instrument_pass_info;
-+ struct register_pass_info stackleak_final_pass_info;
-+ static const struct ggc_root_tab gt_ggc_r_gt_stackleak[] = {
-+ {
-+ .base = &track_function_decl,
-+ .nelt = 1,
-+ .stride = sizeof(track_function_decl),
-+ .cb = &gt_ggc_mx_tree_node,
-+ .pchw = &gt_pch_nx_tree_node
-+ },
-+ {
-+ .base = &check_function_decl,
-+ .nelt = 1,
-+ .stride = sizeof(check_function_decl),
-+ .cb = &gt_ggc_mx_tree_node,
-+ .pchw = &gt_pch_nx_tree_node
-+ },
-+ LAST_GGC_ROOT_TAB
-+ };
-+
-+ stackleak_tree_instrument_pass_info.pass = make_stackleak_tree_instrument_pass();
-+// stackleak_tree_instrument_pass_info.reference_pass_name = "tree_profile";
-+ stackleak_tree_instrument_pass_info.reference_pass_name = "optimized";
-+ stackleak_tree_instrument_pass_info.ref_pass_instance_number = 1;
-+ stackleak_tree_instrument_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
-+
-+ stackleak_final_pass_info.pass = make_stackleak_final_rtl_opt_pass();
-+ stackleak_final_pass_info.reference_pass_name = "final";
-+ stackleak_final_pass_info.ref_pass_instance_number = 1;
-+ stackleak_final_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
-+
-+ if (!plugin_default_version_check(version, &gcc_version)) {
-+ error(G_("incompatible gcc/plugin versions"));
-+ return 1;
-+ }
-+
-+ register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
-+
-+ for (i = 0; i < argc; ++i) {
-+ if (!strcmp(argv[i].key, "track-lowest-sp")) {
-+ if (!argv[i].value) {
-+ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
-+ continue;
-+ }
-+ track_frame_size = atoi(argv[i].value);
-+ if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
-+ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
-+ continue;
-+ }
-+ if (!strcmp(argv[i].key, "initialize-locals")) {
-+ if (argv[i].value) {
-+ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
-+ continue;
-+ }
-+ init_locals = true;
-+ continue;
-+ }
-+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
-+ }
-+
-+ register_callback(plugin_name, PLUGIN_START_UNIT, &stackleak_start_unit, NULL);
-+ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_stackleak);
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
-+
-+ return 0;
-+}
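The instrumentation above emits calls to two external helpers whose prototypes are spelled out in the comments of stackleak_start_unit(): void pax_track_stack(void) and void pax_check_alloca(unsigned long). Below is a minimal stand-in sketch, useful only for exercising the pass outside a kernel tree; the real hooks are provided elsewhere in the grsecurity patch, and the 4096-byte limit is an arbitrary placeholder.

#include <stdio.h>
#include <stdlib.h>

/* stand-in for the kernel hook that tracks the lowest stack pointer */
void pax_track_stack(void)
{
	fprintf(stderr, "pax_track_stack() frame at %p\n", __builtin_frame_address(0));
}

/* stand-in for the kernel hook that sanity-checks alloca sizes */
void pax_check_alloca(unsigned long size)
{
	if (size > 4096) {	/* arbitrary limit for this sketch */
		fprintf(stderr, "alloca of %lu bytes refused\n", size);
		abort();
	}
}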
-diff --git a/tools/gcc/structleak_plugin.c b/tools/gcc/structleak_plugin.c
-new file mode 100644
-index 0000000..e9dbd4b
---- /dev/null
-+++ b/tools/gcc/structleak_plugin.c
-@@ -0,0 +1,287 @@
-+/*
-+ * Copyright 2013-2015 by PaX Team <pageexec@freemail.hu>
-+ * Licensed under the GPL v2
-+ *
-+ * Note: the choice of the license means that the compilation process is
-+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
-+ * but for the kernel it doesn't matter since it doesn't link against
-+ * any of the gcc libraries
-+ *
-+ * gcc plugin to forcibly initialize certain local variables that could
-+ * otherwise leak kernel stack to userland if they aren't properly initialized
-+ * by later code
-+ *
-+ * Homepage: http://pax.grsecurity.net/
-+ *
-+ * Usage:
-+ * $ # for 4.5/4.6/C based 4.7
-+ * $ gcc -I`gcc -print-file-name=plugin`/include -I`gcc -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o structleak_plugin.so structleak_plugin.c
-+ * $ # for C++ based 4.7/4.8+
-+ * $ g++ -I`g++ -print-file-name=plugin`/include -I`g++ -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o structleak_plugin.so structleak_plugin.c
-+ * $ gcc -fplugin=./structleak_plugin.so test.c -O2
-+ *
-+ * TODO: eliminate redundant initializers
-+ * increase type coverage
-+ */
-+
-+#include "gcc-common.h"
-+
-+// unused C type flag in all versions 4.5-5.0
-+#define TYPE_USERSPACE(TYPE) TYPE_LANG_FLAG_5(TYPE)
-+
-+int plugin_is_GPL_compatible;
-+
-+static struct plugin_info structleak_plugin_info = {
-+ .version = "201401260140",
-+ .help = "disable\tdo not activate plugin\n",
-+};
-+
-+static tree handle_user_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
-+{
-+ *no_add_attrs = true;
-+
-+ // check for types? for now accept everything linux has to offer
-+ if (TREE_CODE(*node) != FIELD_DECL)
-+ return NULL_TREE;
-+
-+ *no_add_attrs = false;
-+ return NULL_TREE;
-+}
-+
-+static struct attribute_spec user_attr = {
-+ .name = "user",
-+ .min_length = 0,
-+ .max_length = 0,
-+ .decl_required = false,
-+ .type_required = false,
-+ .function_type_required = false,
-+ .handler = handle_user_attribute,
-+#if BUILDING_GCC_VERSION >= 4007
-+ .affects_type_identity = true
-+#endif
-+};
-+
-+static void register_attributes(void *event_data, void *data)
-+{
-+ register_attribute(&user_attr);
-+// register_attribute(&force_attr);
-+}
-+
-+static tree get_field_type(tree field)
-+{
-+ return strip_array_types(TREE_TYPE(field));
-+}
-+
-+static bool is_userspace_type(tree type)
-+{
-+ tree field;
-+
-+ for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
-+ tree fieldtype = get_field_type(field);
-+ enum tree_code code = TREE_CODE(fieldtype);
-+
-+ if (code == RECORD_TYPE || code == UNION_TYPE)
-+ if (is_userspace_type(fieldtype))
-+ return true;
-+
-+ if (lookup_attribute("user", DECL_ATTRIBUTES(field)))
-+ return true;
-+ }
-+ return false;
-+}
-+
-+static void finish_type(void *event_data, void *data)
-+{
-+ tree type = (tree)event_data;
-+
-+ if (type == NULL_TREE || type == error_mark_node)
-+ return;
-+
-+#if BUILDING_GCC_VERSION >= 5000
-+ if (TREE_CODE(type) == ENUMERAL_TYPE)
-+ return;
-+#endif
-+
-+ if (TYPE_USERSPACE(type))
-+ return;
-+
-+ if (is_userspace_type(type))
-+ TYPE_USERSPACE(type) = 1;
-+}
-+
-+static void initialize(tree var)
-+{
-+ basic_block bb;
-+ gimple_stmt_iterator gsi;
-+ tree initializer;
-+ gimple init_stmt;
-+
-+ // this is the original entry bb before the forced split
-+	// TODO: check further BBs in case more splits occurred before us
-+ bb = ENTRY_BLOCK_PTR_FOR_FN(cfun)->next_bb->next_bb;
-+
-+ // first check if the variable is already initialized, warn otherwise
-+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
-+ gimple stmt = gsi_stmt(gsi);
-+ tree rhs1;
-+
-+ // we're looking for an assignment of a single rhs...
-+ if (!gimple_assign_single_p(stmt))
-+ continue;
-+ rhs1 = gimple_assign_rhs1(stmt);
-+#if BUILDING_GCC_VERSION >= 4007
-+ // ... of a non-clobbering expression...
-+ if (TREE_CLOBBER_P(rhs1))
-+ continue;
-+#endif
-+ // ... to our variable...
-+ if (gimple_get_lhs(stmt) != var)
-+ continue;
-+ // if it's an initializer then we're good
-+ if (TREE_CODE(rhs1) == CONSTRUCTOR)
-+ return;
-+ }
-+
-+ // these aren't the 0days you're looking for
-+// inform(DECL_SOURCE_LOCATION(var), "userspace variable will be forcibly initialized");
-+
-+ // build the initializer expression
-+ initializer = build_constructor(TREE_TYPE(var), NULL);
-+
-+ // build the initializer stmt
-+ init_stmt = gimple_build_assign(var, initializer);
-+ gsi = gsi_start_bb(ENTRY_BLOCK_PTR_FOR_FN(cfun)->next_bb);
-+ gsi_insert_before(&gsi, init_stmt, GSI_NEW_STMT);
-+ update_stmt(init_stmt);
-+}
-+
-+static unsigned int handle_function(void)
-+{
-+ basic_block bb;
-+ unsigned int ret = 0;
-+ tree var;
-+ unsigned int i;
-+
-+ // split the first bb where we can put the forced initializers
-+ bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest;
-+ if (dom_info_available_p(CDI_DOMINATORS))
-+ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR_FOR_FN(cfun));
-+
-+	// enumerate all local variables and forcibly initialize our targets
-+ FOR_EACH_LOCAL_DECL(cfun, i, var) {
-+ tree type = TREE_TYPE(var);
-+
-+ gcc_assert(DECL_P(var));
-+ if (!auto_var_in_fn_p(var, current_function_decl))
-+ continue;
-+
-+ // only care about structure types
-+ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
-+ continue;
-+
-+ // if the type is of interest, examine the variable
-+ if (TYPE_USERSPACE(type))
-+ initialize(var);
-+ }
-+
-+ return ret;
-+}
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
-+static const struct pass_data structleak_pass_data = {
-+#else
-+static struct gimple_opt_pass structleak_pass = {
-+ .pass = {
-+#endif
-+ .type = GIMPLE_PASS,
-+ .name = "structleak",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 5000
-+#elif BUILDING_GCC_VERSION == 4009
-+ .has_gate = false,
-+ .has_execute = true,
-+#else
-+ .gate = NULL,
-+ .execute = handle_function,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = PROP_cfg,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0,
-+ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa | TODO_ggc_collect | TODO_verify_flow
-+#if BUILDING_GCC_VERSION < 4009
-+ }
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+class structleak_pass : public gimple_opt_pass {
-+public:
-+ structleak_pass() : gimple_opt_pass(structleak_pass_data, g) {}
-+#if BUILDING_GCC_VERSION >= 5000
-+ virtual unsigned int execute(function *) { return handle_function(); }
-+#else
-+ unsigned int execute() { return handle_function(); }
-+#endif
-+};
-+}
-+
-+static opt_pass *make_structleak_pass(void)
-+{
-+ return new structleak_pass();
-+}
-+#else
-+static struct opt_pass *make_structleak_pass(void)
-+{
-+ return &structleak_pass.pass;
-+}
-+#endif
-+
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
-+{
-+ int i;
-+ const char * const plugin_name = plugin_info->base_name;
-+ const int argc = plugin_info->argc;
-+ const struct plugin_argument * const argv = plugin_info->argv;
-+ bool enable = true;
-+ struct register_pass_info structleak_pass_info;
-+
-+ structleak_pass_info.pass = make_structleak_pass();
-+ structleak_pass_info.reference_pass_name = "ssa";
-+ structleak_pass_info.ref_pass_instance_number = 1;
-+ structleak_pass_info.pos_op = PASS_POS_INSERT_AFTER;
-+
-+ if (!plugin_default_version_check(version, &gcc_version)) {
-+ error(G_("incompatible gcc/plugin versions"));
-+ return 1;
-+ }
-+
-+ if (strncmp(lang_hooks.name, "GNU C", 5) && !strncmp(lang_hooks.name, "GNU C+", 6)) {
-+ inform(UNKNOWN_LOCATION, G_("%s supports C only"), plugin_name);
-+ enable = false;
-+ }
-+
-+ for (i = 0; i < argc; ++i) {
-+ if (!strcmp(argv[i].key, "disable")) {
-+ enable = false;
-+ continue;
-+ }
-+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
-+ }
-+
-+ register_callback(plugin_name, PLUGIN_INFO, NULL, &structleak_plugin_info);
-+ if (enable) {
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &structleak_pass_info);
-+ register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
-+ }
-+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
-+
-+ return 0;
-+}
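The pass above only touches locals whose type carries, directly or through a nested struct/union, a field marked with the "user" attribute registered in register_attributes(). A hedged illustration follows; struct ioctl_req, its fields and example_ioctl() are made up for this sketch, and __user is defined locally rather than taken from kernel headers.

#define __user __attribute__((user))

struct ioctl_req {
	void __user *buf;	/* field carrying the attribute the plugin looks for */
	unsigned long len;
};

long example_ioctl(unsigned long arg)
{
	/* with the plugin loaded, req is assigned an empty constructor at
	 * function entry (see initialize() above), so the early return below
	 * cannot leave uninitialized stack bytes behind in it */
	struct ioctl_req req;

	if (arg == 0)
		return -1;
	req.len = arg;
	return (long)req.len;
}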
-diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
-index 6789d788..4afd019e 100644
---- a/tools/perf/util/include/asm/alternative-asm.h
-+++ b/tools/perf/util/include/asm/alternative-asm.h
-@@ -5,4 +5,7 @@
-
- #define altinstruction_entry #
-
-+ .macro pax_force_retaddr rip=0, reload=0
-+ .endm
-+
- #endif
-diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h
-index 547628e..74de9f2 100644
---- a/tools/perf/util/include/linux/compiler.h
-+++ b/tools/perf/util/include/linux/compiler.h
-@@ -11,4 +11,12 @@
-
- #define __used __attribute__((__unused__))
-
-+#ifndef __size_overflow
-+# define __size_overflow(...)
-+#endif
-+
-+#ifndef __intentional_overflow
-+# define __intentional_overflow(...)
-+#endif
-+
- #endif
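The empty fallbacks added above keep annotated prototypes compiling in tools/ builds where no gcc plugin supplies the real attributes. A small sketch, with double_it() invented purely for illustration:

#ifndef __size_overflow
# define __size_overflow(...)
#endif

/* expands to nothing here; only a plugin-enabled kernel build would
 * attach a real size_overflow attribute to argument 1 */
static inline unsigned long double_it(unsigned long n) __size_overflow(1);
static inline unsigned long double_it(unsigned long n)
{
	return n * 2;
}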
-diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 8b0617a..05843b4 100644
---- a/virt/kvm/kvm_main.c
-+++ b/virt/kvm/kvm_main.c
-@@ -76,12 +76,17 @@ LIST_HEAD(vm_list);
-
- static cpumask_var_t cpus_hardware_enabled;
- static int kvm_usage_count = 0;
--static atomic_t hardware_enable_failed;
-+static atomic_unchecked_t hardware_enable_failed;
-
- struct kmem_cache *kvm_vcpu_cache;
- EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
-
--static __read_mostly struct preempt_ops kvm_preempt_ops;
-+static void kvm_sched_in(struct preempt_notifier *pn, int cpu);
-+static void kvm_sched_out(struct preempt_notifier *pn, struct task_struct *next);
-+static struct preempt_ops kvm_preempt_ops = {
-+ .sched_in = kvm_sched_in,
-+ .sched_out = kvm_sched_out,
-+};
-
- struct dentry *kvm_debugfs_dir;
-
-@@ -660,7 +665,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
- /* We can read the guest memory with __xxx_user() later on. */
- if (user_alloc &&
- ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
-- !access_ok(VERIFY_WRITE,
-+ !access_ok_noprefault(VERIFY_WRITE,
- (void __user *)(unsigned long)mem->userspace_addr,
- mem->memory_size)))
- goto out;
-@@ -1494,8 +1499,17 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
-
- int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
- {
-- return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
-- offset, len);
-+ int r;
-+ unsigned long addr;
-+
-+ addr = gfn_to_hva(kvm, gfn);
-+ if (kvm_is_error_hva(addr))
-+ return -EFAULT;
-+ r = __clear_user((void __user *)addr + offset, len);
-+ if (r)
-+ return -EFAULT;
-+ mark_page_dirty(kvm, gfn);
-+ return 0;
- }
- EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
-
-@@ -1661,7 +1675,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
- return 0;
- }
-
--static struct file_operations kvm_vcpu_fops = {
-+static file_operations_no_const kvm_vcpu_fops __read_only = {
- .release = kvm_vcpu_release,
- .unlocked_ioctl = kvm_vcpu_ioctl,
- #ifdef CONFIG_COMPAT
-@@ -2187,7 +2201,7 @@ static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
- return 0;
- }
-
--static struct file_operations kvm_vm_fops = {
-+static file_operations_no_const kvm_vm_fops __read_only = {
- .release = kvm_vm_release,
- .unlocked_ioctl = kvm_vm_ioctl,
- #ifdef CONFIG_COMPAT
-@@ -2285,7 +2299,7 @@ out:
- return r;
- }
-
--static struct file_operations kvm_chardev_ops = {
-+static file_operations_no_const kvm_chardev_ops __read_only = {
- .unlocked_ioctl = kvm_dev_ioctl,
- .compat_ioctl = kvm_dev_ioctl,
- .llseek = noop_llseek,
-@@ -2311,7 +2325,7 @@ static void hardware_enable_nolock(void *junk)
-
- if (r) {
- cpumask_clear_cpu(cpu, cpus_hardware_enabled);
-- atomic_inc(&hardware_enable_failed);
-+ atomic_inc_unchecked(&hardware_enable_failed);
- printk(KERN_INFO "kvm: enabling virtualization on "
- "CPU%d failed\n", cpu);
- }
-@@ -2365,10 +2379,10 @@ static int hardware_enable_all(void)
-
- kvm_usage_count++;
- if (kvm_usage_count == 1) {
-- atomic_set(&hardware_enable_failed, 0);
-+ atomic_set_unchecked(&hardware_enable_failed, 0);
- on_each_cpu(hardware_enable_nolock, NULL, 1);
-
-- if (atomic_read(&hardware_enable_failed)) {
-+ if (atomic_read_unchecked(&hardware_enable_failed)) {
- hardware_disable_all_nolock();
- r = -EBUSY;
- }
-@@ -2719,7 +2733,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
- kvm_arch_vcpu_put(vcpu);
- }
-
--int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
-+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
- struct module *module)
- {
- int r;
-@@ -2782,7 +2796,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
- if (!vcpu_align)
- vcpu_align = __alignof__(struct kvm_vcpu);
- kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
-- 0, NULL);
-+ SLAB_USERCOPY, NULL);
- if (!kvm_vcpu_cache) {
- r = -ENOMEM;
- goto out_free_3;
-@@ -2792,9 +2806,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
- if (r)
- goto out_free;
-
-+ pax_open_kernel();
- kvm_chardev_ops.owner = module;
- kvm_vm_fops.owner = module;
- kvm_vcpu_fops.owner = module;
-+ pax_close_kernel();
-
- r = misc_register(&kvm_dev);
- if (r) {
-@@ -2804,9 +2820,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
-
- register_syscore_ops(&kvm_syscore_ops);
-
-- kvm_preempt_ops.sched_in = kvm_sched_in;
-- kvm_preempt_ops.sched_out = kvm_sched_out;
--
- kvm_init_debug();
-
- return 0;