author     Anthony G. Basile <blueness@gentoo.org>  2015-05-16 08:29:36 -0400
committer  Anthony G. Basile <blueness@gentoo.org>  2015-05-16 08:29:36 -0400
commit     a81a6a98c0ca585a721c9e44f924b454194077c6 (patch)
tree       0c1a747b95607ce493bc251ace3e86b6a059964e
parent     Grsec/PaX: 3.1-{3.2.69,3.14.42,4.0.3}-201505141746 (diff)
download   hardened-patchset-a81a6a98c0ca585a721c9e44f924b454194077c6.tar.gz
           hardened-patchset-a81a6a98c0ca585a721c9e44f924b454194077c6.tar.bz2
           hardened-patchset-a81a6a98c0ca585a721c9e44f924b454194077c6.zip
Remove 3.19.6.
-rw-r--r--  3.19.6/0000_README                                         48
-rw-r--r--  3.19.6/1005_linux-3.19.6.patch                           1674
-rw-r--r--  3.19.6/4420_grsecurity-3.1-3.19.6-201505021013.patch   148994
-rw-r--r--  3.19.6/4425_grsec_remove_EI_PAX.patch                      19
-rw-r--r--  3.19.6/4427_force_XATTR_PAX_tmpfs.patch                    35
-rw-r--r--  3.19.6/4430_grsec-remove-localversion-grsec.patch           9
-rw-r--r--  3.19.6/4435_grsec-mute-warnings.patch                      43
-rw-r--r--  3.19.6/4440_grsec-remove-protected-paths.patch             20
-rw-r--r--  3.19.6/4450_grsec-kconfig-default-gids.patch              111
-rw-r--r--  3.19.6/4465_selinux-avc_audit-log-curr_ip.patch            73
-rw-r--r--  3.19.6/4470_disable-compat_vdso.patch                      58
-rw-r--r--  3.19.6/4475_emutramp_default_on.patch                      34
12 files changed, 0 insertions, 151118 deletions
diff --git a/3.19.6/0000_README b/3.19.6/0000_README
deleted file mode 100644
index a40b535..0000000
--- a/3.19.6/0000_README
+++ /dev/null
@@ -1,48 +0,0 @@
-README
------------------------------------------------------------------------------
-Individual Patch Descriptions:
------------------------------------------------------------------------------
-Patch: 1005_linux-3.19.6.patch
-From: http://www.kernel.org
-Desc: Linux 3.19.6
-
-Patch: 4420_grsecurity-3.1-3.19.6-201505021013.patch
-From: http://www.grsecurity.net
-Desc: hardened-sources base patch from upstream grsecurity
-
-Patch: 4425_grsec_remove_EI_PAX.patch
-From: Anthony G. Basile <blueness@gentoo.org>
-Desc: Remove EI_PAX option and force off
-
-Patch: 4430_grsec-remove-localversion-grsec.patch
-From: Kerin Millar <kerframil@gmail.com>
-Desc: Removes grsecurity's localversion-grsec file
-
-Patch: 4435_grsec-mute-warnings.patch
-From: Alexander Gabert <gaberta@fh-trier.de>
- Gordon Malm <gengor@gentoo.org>
-Desc: Removes verbose compile warning settings from grsecurity, restores
- mainline Linux kernel behavior
-
-Patch: 4440_grsec-remove-protected-paths.patch
-From: Anthony G. Basile <blueness@gentoo.org>
-Desc: Removes chmod statements from grsecurity/Makefile
-
-Patch: 4450_grsec-kconfig-default-gids.patch
-From: Kerin Millar <kerframil@gmail.com>
-Desc: Sets sane(r) default GIDs on various grsecurity group-dependent
- features
-
-Patch: 4465_selinux-avc_audit-log-curr_ip.patch
-From: Gordon Malm <gengor@gentoo.org>
- Anthony G. Basile <blueness@gentoo.org>
-Desc: Configurable option to add src IP address to SELinux log messages
-
-Patch: 4470_disable-compat_vdso.patch
-From: Gordon Malm <gengor@gentoo.org>
- Kerin Millar <kerframil@gmail.com>
-Desc: Disables VDSO_COMPAT operation completely
-
-Patch: 4475_emutramp_default_on.patch
-From: Anthony G. Basile <blueness@gentoo.org>
-Desc: Set PAX_EMUTRAMP default on for libffi, bugs #329499 and #457194
diff --git a/3.19.6/1005_linux-3.19.6.patch b/3.19.6/1005_linux-3.19.6.patch
deleted file mode 100644
index f55bc7e..0000000
--- a/3.19.6/1005_linux-3.19.6.patch
+++ /dev/null
@@ -1,1674 +0,0 @@
-diff --git a/Makefile b/Makefile
-index 633b5f0..65c7c87 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,6 +1,6 @@
- VERSION = 3
- PATCHLEVEL = 19
--SUBLEVEL = 5
-+SUBLEVEL = 6
- EXTRAVERSION =
- NAME = Diseased Newt
-
-diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
-index 66781bf..c724124 100644
---- a/arch/arm/mm/hugetlbpage.c
-+++ b/arch/arm/mm/hugetlbpage.c
-@@ -36,12 +36,6 @@
- * of type casting from pmd_t * to pte_t *.
- */
-
--struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
-- int write)
--{
-- return ERR_PTR(-EINVAL);
--}
--
- int pud_huge(pud_t pud)
- {
- return 0;
-diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
-index 023747b..2de9d2e 100644
---- a/arch/arm64/mm/hugetlbpage.c
-+++ b/arch/arm64/mm/hugetlbpage.c
-@@ -38,12 +38,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
- }
- #endif
-
--struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
-- int write)
--{
-- return ERR_PTR(-EINVAL);
--}
--
- int pmd_huge(pmd_t pmd)
- {
- return !(pmd_val(pmd) & PMD_TABLE_BIT);
-diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
-index 76069c1..52b7604 100644
---- a/arch/ia64/mm/hugetlbpage.c
-+++ b/arch/ia64/mm/hugetlbpage.c
-@@ -114,12 +114,6 @@ int pud_huge(pud_t pud)
- return 0;
- }
-
--struct page *
--follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
--{
-- return NULL;
--}
--
- void hugetlb_free_pgd_range(struct mmu_gather *tlb,
- unsigned long addr, unsigned long end,
- unsigned long floor, unsigned long ceiling)
-diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
-index 3c32075..7ca80ac 100644
---- a/arch/metag/mm/hugetlbpage.c
-+++ b/arch/metag/mm/hugetlbpage.c
-@@ -94,12 +94,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
- return 0;
- }
-
--struct page *follow_huge_addr(struct mm_struct *mm,
-- unsigned long address, int write)
--{
-- return ERR_PTR(-EINVAL);
--}
--
- int pmd_huge(pmd_t pmd)
- {
- return pmd_page_shift(pmd) > PAGE_SHIFT;
-diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
-index 4ec8ee1..06e0f42 100644
---- a/arch/mips/mm/hugetlbpage.c
-+++ b/arch/mips/mm/hugetlbpage.c
-@@ -68,12 +68,6 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
- return 0;
- }
-
--struct page *
--follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
--{
-- return ERR_PTR(-EINVAL);
--}
--
- int pmd_huge(pmd_t pmd)
- {
- return (pmd_val(pmd) & _PAGE_HUGE) != 0;
-@@ -83,15 +77,3 @@ int pud_huge(pud_t pud)
- {
- return (pud_val(pud) & _PAGE_HUGE) != 0;
- }
--
--struct page *
--follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-- pmd_t *pmd, int write)
--{
-- struct page *page;
--
-- page = pte_page(*(pte_t *)pmd);
-- if (page)
-- page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
-- return page;
--}
-diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
-index 620d0ec..7e408bf 100644
---- a/arch/powerpc/mm/hugetlbpage.c
-+++ b/arch/powerpc/mm/hugetlbpage.c
-@@ -714,6 +714,14 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- return NULL;
- }
-
-+struct page *
-+follow_huge_pud(struct mm_struct *mm, unsigned long address,
-+ pud_t *pud, int write)
-+{
-+ BUG();
-+ return NULL;
-+}
-+
- static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
- unsigned long sz)
- {
-diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
-index 3c80d2e..210ffed 100644
---- a/arch/s390/mm/hugetlbpage.c
-+++ b/arch/s390/mm/hugetlbpage.c
-@@ -192,12 +192,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
- return 0;
- }
-
--struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
-- int write)
--{
-- return ERR_PTR(-EINVAL);
--}
--
- int pmd_huge(pmd_t pmd)
- {
- if (!MACHINE_HAS_HPAGE)
-@@ -210,17 +204,3 @@ int pud_huge(pud_t pud)
- {
- return 0;
- }
--
--struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-- pmd_t *pmdp, int write)
--{
-- struct page *page;
--
-- if (!MACHINE_HAS_HPAGE)
-- return NULL;
--
-- page = pmd_page(*pmdp);
-- if (page)
-- page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
-- return page;
--}
-diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
-index d776234..534bc97 100644
---- a/arch/sh/mm/hugetlbpage.c
-+++ b/arch/sh/mm/hugetlbpage.c
-@@ -67,12 +67,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
- return 0;
- }
-
--struct page *follow_huge_addr(struct mm_struct *mm,
-- unsigned long address, int write)
--{
-- return ERR_PTR(-EINVAL);
--}
--
- int pmd_huge(pmd_t pmd)
- {
- return 0;
-@@ -82,9 +76,3 @@ int pud_huge(pud_t pud)
- {
- return 0;
- }
--
--struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-- pmd_t *pmd, int write)
--{
-- return NULL;
--}
-diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
-index d329537..4242eab 100644
---- a/arch/sparc/mm/hugetlbpage.c
-+++ b/arch/sparc/mm/hugetlbpage.c
-@@ -215,12 +215,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
- return entry;
- }
-
--struct page *follow_huge_addr(struct mm_struct *mm,
-- unsigned long address, int write)
--{
-- return ERR_PTR(-EINVAL);
--}
--
- int pmd_huge(pmd_t pmd)
- {
- return 0;
-@@ -230,9 +224,3 @@ int pud_huge(pud_t pud)
- {
- return 0;
- }
--
--struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-- pmd_t *pmd, int write)
--{
-- return NULL;
--}
-diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
-index 3270e00..8416240 100644
---- a/arch/tile/mm/hugetlbpage.c
-+++ b/arch/tile/mm/hugetlbpage.c
-@@ -150,12 +150,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
- return NULL;
- }
-
--struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
-- int write)
--{
-- return ERR_PTR(-EINVAL);
--}
--
- int pmd_huge(pmd_t pmd)
- {
- return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
-@@ -166,28 +160,6 @@ int pud_huge(pud_t pud)
- return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
- }
-
--struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-- pmd_t *pmd, int write)
--{
-- struct page *page;
--
-- page = pte_page(*(pte_t *)pmd);
-- if (page)
-- page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
-- return page;
--}
--
--struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
-- pud_t *pud, int write)
--{
-- struct page *page;
--
-- page = pte_page(*(pte_t *)pud);
-- if (page)
-- page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
-- return page;
--}
--
- int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
- {
- return 0;
-diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index d4c58d8..3124464 100644
---- a/arch/x86/kvm/vmx.c
-+++ b/arch/x86/kvm/vmx.c
-@@ -2404,8 +2404,7 @@ static __init void nested_vmx_setup_ctls_msrs(void)
-
- if (enable_ept) {
- /* nested EPT: emulate EPT also to L1 */
-- nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT |
-- SECONDARY_EXEC_UNRESTRICTED_GUEST;
-+ nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT;
- nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
- VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
- VMX_EPT_INVEPT_BIT;
-@@ -2419,6 +2418,10 @@ static __init void nested_vmx_setup_ctls_msrs(void)
- } else
- nested_vmx_ept_caps = 0;
-
-+ if (enable_unrestricted_guest)
-+ nested_vmx_secondary_ctls_high |=
-+ SECONDARY_EXEC_UNRESTRICTED_GUEST;
-+
- /* miscellaneous data */
- rdmsr(MSR_IA32_VMX_MISC, nested_vmx_misc_low, nested_vmx_misc_high);
- nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA;
-diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
-index 006cc91..9161f76 100644
---- a/arch/x86/mm/hugetlbpage.c
-+++ b/arch/x86/mm/hugetlbpage.c
-@@ -52,20 +52,8 @@ int pud_huge(pud_t pud)
- return 0;
- }
-
--struct page *
--follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-- pmd_t *pmd, int write)
--{
-- return NULL;
--}
- #else
-
--struct page *
--follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
--{
-- return ERR_PTR(-EINVAL);
--}
--
- /*
- * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal
- * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry.
-diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
-index 0dceba1..68ad39a 100644
---- a/drivers/net/bonding/bond_main.c
-+++ b/drivers/net/bonding/bond_main.c
-@@ -3797,7 +3797,8 @@ static inline int bond_slave_override(struct bonding *bond,
- /* Find out if any slaves have the same mapping as this skb. */
- bond_for_each_slave_rcu(bond, slave, iter) {
- if (slave->queue_id == skb->queue_mapping) {
-- if (bond_slave_can_tx(slave)) {
-+ if (bond_slave_is_up(slave) &&
-+ slave->link == BOND_LINK_UP) {
- bond_dev_queue_xmit(bond, skb, slave->dev);
- return 0;
- }
-diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
-index c3a6072..2559206 100644
---- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
-+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
-@@ -531,20 +531,8 @@ struct bnx2x_fastpath {
- struct napi_struct napi;
-
- #ifdef CONFIG_NET_RX_BUSY_POLL
-- unsigned int state;
--#define BNX2X_FP_STATE_IDLE 0
--#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
--#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
--#define BNX2X_FP_STATE_DISABLED (1 << 2)
--#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */
--#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */
--#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
--#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
--#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
--#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
-- /* protect state */
-- spinlock_t lock;
--#endif /* CONFIG_NET_RX_BUSY_POLL */
-+ unsigned long busy_poll_state;
-+#endif
-
- union host_hc_status_block status_blk;
- /* chip independent shortcuts into sb structure */
-@@ -619,104 +607,83 @@ struct bnx2x_fastpath {
- #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
-
- #ifdef CONFIG_NET_RX_BUSY_POLL
--static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
-+
-+enum bnx2x_fp_state {
-+ BNX2X_STATE_FP_NAPI = BIT(0), /* NAPI handler owns the queue */
-+
-+ BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */
-+ BNX2X_STATE_FP_NAPI_REQ = BIT(1),
-+
-+ BNX2X_STATE_FP_POLL_BIT = 2,
-+ BNX2X_STATE_FP_POLL = BIT(2), /* busy_poll owns the queue */
-+
-+ BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */
-+};
-+
-+static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
- {
-- spin_lock_init(&fp->lock);
-- fp->state = BNX2X_FP_STATE_IDLE;
-+ WRITE_ONCE(fp->busy_poll_state, 0);
- }
-
- /* called from the device poll routine to get ownership of a FP */
- static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
- {
-- bool rc = true;
--
-- spin_lock_bh(&fp->lock);
-- if (fp->state & BNX2X_FP_LOCKED) {
-- WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
-- fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
-- rc = false;
-- } else {
-- /* we don't care if someone yielded */
-- fp->state = BNX2X_FP_STATE_NAPI;
-+ unsigned long prev, old = READ_ONCE(fp->busy_poll_state);
-+
-+ while (1) {
-+ switch (old) {
-+ case BNX2X_STATE_FP_POLL:
-+ /* make sure bnx2x_fp_lock_poll() wont starve us */
-+ set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT,
-+ &fp->busy_poll_state);
-+ /* fallthrough */
-+ case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ:
-+ return false;
-+ default:
-+ break;
-+ }
-+ prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI);
-+ if (unlikely(prev != old)) {
-+ old = prev;
-+ continue;
-+ }
-+ return true;
- }
-- spin_unlock_bh(&fp->lock);
-- return rc;
- }
-
--/* returns true is someone tried to get the FP while napi had it */
--static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
-+static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
- {
-- bool rc = false;
--
-- spin_lock_bh(&fp->lock);
-- WARN_ON(fp->state &
-- (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
--
-- if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
-- rc = true;
--
-- /* state ==> idle, unless currently disabled */
-- fp->state &= BNX2X_FP_STATE_DISABLED;
-- spin_unlock_bh(&fp->lock);
-- return rc;
-+ smp_wmb();
-+ fp->busy_poll_state = 0;
- }
-
- /* called from bnx2x_low_latency_poll() */
- static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
- {
-- bool rc = true;
--
-- spin_lock_bh(&fp->lock);
-- if ((fp->state & BNX2X_FP_LOCKED)) {
-- fp->state |= BNX2X_FP_STATE_POLL_YIELD;
-- rc = false;
-- } else {
-- /* preserve yield marks */
-- fp->state |= BNX2X_FP_STATE_POLL;
-- }
-- spin_unlock_bh(&fp->lock);
-- return rc;
-+ return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0;
- }
-
--/* returns true if someone tried to get the FP while it was locked */
--static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
-+static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
- {
-- bool rc = false;
--
-- spin_lock_bh(&fp->lock);
-- WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
--
-- if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
-- rc = true;
--
-- /* state ==> idle, unless currently disabled */
-- fp->state &= BNX2X_FP_STATE_DISABLED;
-- spin_unlock_bh(&fp->lock);
-- return rc;
-+ smp_mb__before_atomic();
-+ clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state);
- }
-
--/* true if a socket is polling, even if it did not get the lock */
-+/* true if a socket is polling */
- static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
- {
-- WARN_ON(!(fp->state & BNX2X_FP_OWNED));
-- return fp->state & BNX2X_FP_USER_PEND;
-+ return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL;
- }
-
- /* false if fp is currently owned */
- static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
- {
-- int rc = true;
--
-- spin_lock_bh(&fp->lock);
-- if (fp->state & BNX2X_FP_OWNED)
-- rc = false;
-- fp->state |= BNX2X_FP_STATE_DISABLED;
-- spin_unlock_bh(&fp->lock);
-+ set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state);
-+ return !bnx2x_fp_ll_polling(fp);
-
-- return rc;
- }
- #else
--static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
-+static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
- {
- }
-
-@@ -725,9 +692,8 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
- return true;
- }
-
--static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
-+static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
- {
-- return false;
- }
-
- static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
-@@ -735,9 +701,8 @@ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
- return false;
- }
-
--static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
-+static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
- {
-- return false;
- }
-
- static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
-diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
-index e468ed3..2b8e8b2 100644
---- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
-+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
-@@ -1849,7 +1849,7 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
- int i;
-
- for_each_rx_queue_cnic(bp, i) {
-- bnx2x_fp_init_lock(&bp->fp[i]);
-+ bnx2x_fp_busy_poll_init(&bp->fp[i]);
- napi_enable(&bnx2x_fp(bp, i, napi));
- }
- }
-@@ -1859,7 +1859,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
- int i;
-
- for_each_eth_queue(bp, i) {
-- bnx2x_fp_init_lock(&bp->fp[i]);
-+ bnx2x_fp_busy_poll_init(&bp->fp[i]);
- napi_enable(&bnx2x_fp(bp, i, napi));
- }
- }
-@@ -3191,9 +3191,10 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
- }
- }
-
-+ bnx2x_fp_unlock_napi(fp);
-+
- /* Fall out from the NAPI loop if needed */
-- if (!bnx2x_fp_unlock_napi(fp) &&
-- !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
-+ if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
-
- /* No need to update SB for FCoE L2 ring as long as
- * it's connected to the default SB and the SB
-diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
-index 96bf01b..05ae126 100644
---- a/drivers/net/ethernet/broadcom/tg3.c
-+++ b/drivers/net/ethernet/broadcom/tg3.c
-@@ -17868,8 +17868,10 @@ static int tg3_init_one(struct pci_dev *pdev,
- */
- if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
- (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
-+ tg3_full_lock(tp, 0);
- tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
- tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
-+ tg3_full_unlock(tp);
- }
-
- err = tg3_test_dma(tp);
-diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
-index 5c93d14..9842bf9 100644
---- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
-@@ -585,7 +585,8 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
- * on the host, we deprecate the error message for this
- * specific command/input_mod/opcode_mod/fw-status to be debug.
- */
-- if (op == MLX4_CMD_SET_PORT && in_modifier == 1 &&
-+ if (op == MLX4_CMD_SET_PORT &&
-+ (in_modifier == 1 || in_modifier == 2) &&
- op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE)
- mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
- op, context->fw_status);
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
-index ac6a8f1..2617c9d 100644
---- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
-@@ -2627,13 +2627,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
- netif_carrier_off(dev);
- mlx4_en_set_default_moderation(priv);
-
-- err = register_netdev(dev);
-- if (err) {
-- en_err(priv, "Netdev registration failed for port %d\n", port);
-- goto out;
-- }
-- priv->registered = 1;
--
- en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
- en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
-
-@@ -2673,6 +2666,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
- queue_delayed_work(mdev->workqueue, &priv->service_task,
- SERVICE_TASK_DELAY);
-
-+ err = register_netdev(dev);
-+ if (err) {
-+ en_err(priv, "Netdev registration failed for port %d\n", port);
-+ goto out;
-+ }
-+
-+ priv->registered = 1;
-+
- return 0;
-
- out:
-diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
-index 2f398fa..24c0284 100644
---- a/drivers/net/ethernet/rocker/rocker.c
-+++ b/drivers/net/ethernet/rocker/rocker.c
-@@ -4305,10 +4305,16 @@ static int rocker_port_master_changed(struct net_device *dev)
- struct net_device *master = netdev_master_upper_dev_get(dev);
- int err = 0;
-
-+ /* There are currently three cases handled here:
-+ * 1. Joining a bridge
-+ * 2. Leaving a previously joined bridge
-+ * 3. Other, e.g. being added to or removed from a bond or openvswitch,
-+ * in which case nothing is done
-+ */
- if (master && master->rtnl_link_ops &&
- !strcmp(master->rtnl_link_ops->kind, "bridge"))
- err = rocker_port_bridge_join(rocker_port, master);
-- else
-+ else if (rocker_port_is_bridged(rocker_port))
- err = rocker_port_bridge_leave(rocker_port);
-
- return err;
-diff --git a/drivers/net/tun.c b/drivers/net/tun.c
-index 10f9e40..9a409a8 100644
---- a/drivers/net/tun.c
-+++ b/drivers/net/tun.c
-@@ -1368,7 +1368,7 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
- skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
- &peeked, &off, &err);
- if (!skb)
-- return 0;
-+ return err;
-
- ret = tun_put_user(tun, tfile, skb, to);
- if (unlikely(ret < 0))
-diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
-index 5c55f11..75d6f26 100644
---- a/drivers/net/usb/asix_common.c
-+++ b/drivers/net/usb/asix_common.c
-@@ -188,6 +188,8 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
- memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
- skb_put(skb, sizeof(padbytes));
- }
-+
-+ usbnet_set_skb_tx_stats(skb, 1, 0);
- return skb;
- }
-
-diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
-index 80a844e..c3e4da9 100644
---- a/drivers/net/usb/cdc_ncm.c
-+++ b/drivers/net/usb/cdc_ncm.c
-@@ -1172,17 +1172,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
-
- /* return skb */
- ctx->tx_curr_skb = NULL;
-- dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
-
- /* keep private stats: framing overhead and number of NTBs */
- ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload;
- ctx->tx_ntbs++;
-
-- /* usbnet has already counted all the framing overhead.
-+ /* usbnet will count all the framing overhead by default.
- * Adjust the stats so that the tx_bytes counter show real
- * payload data instead.
- */
-- dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload;
-+ usbnet_set_skb_tx_stats(skb_out, n,
-+ ctx->tx_curr_frame_payload - skb_out->len);
-
- return skb_out;
-
-diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
-index b94a0fb..953de13 100644
---- a/drivers/net/usb/sr9800.c
-+++ b/drivers/net/usb/sr9800.c
-@@ -144,6 +144,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
- skb_put(skb, sizeof(padbytes));
- }
-
-+ usbnet_set_skb_tx_stats(skb, 1, 0);
- return skb;
- }
-
-diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
-index 3a6770a..e7ed251 100644
---- a/drivers/net/usb/usbnet.c
-+++ b/drivers/net/usb/usbnet.c
-@@ -1189,8 +1189,7 @@ static void tx_complete (struct urb *urb)
- struct usbnet *dev = entry->dev;
-
- if (urb->status == 0) {
-- if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
-- dev->net->stats.tx_packets++;
-+ dev->net->stats.tx_packets += entry->packets;
- dev->net->stats.tx_bytes += entry->length;
- } else {
- dev->net->stats.tx_errors++;
-@@ -1348,7 +1347,19 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
- } else
- urb->transfer_flags |= URB_ZERO_PACKET;
- }
-- entry->length = urb->transfer_buffer_length = length;
-+ urb->transfer_buffer_length = length;
-+
-+ if (info->flags & FLAG_MULTI_PACKET) {
-+ /* Driver has set number of packets and a length delta.
-+ * Calculate the complete length and ensure that it's
-+ * positive.
-+ */
-+ entry->length += length;
-+ if (WARN_ON_ONCE(entry->length <= 0))
-+ entry->length = length;
-+ } else {
-+ usbnet_set_skb_tx_stats(skb, 1, length);
-+ }
-
- spin_lock_irqsave(&dev->txq.lock, flags);
- retval = usb_autopm_get_interface_async(dev->intf);
-diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
-index a8c755d..6c83846 100644
---- a/drivers/net/vxlan.c
-+++ b/drivers/net/vxlan.c
-@@ -1578,12 +1578,6 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
- int err;
- bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk);
-
-- skb = udp_tunnel_handle_offloads(skb, udp_sum);
-- if (IS_ERR(skb)) {
-- err = -EINVAL;
-- goto err;
-- }
--
- skb_scrub_packet(skb, xnet);
-
- min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
-@@ -1603,6 +1597,12 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
- goto err;
- }
-
-+ skb = udp_tunnel_handle_offloads(skb, udp_sum);
-+ if (IS_ERR(skb)) {
-+ err = -EINVAL;
-+ goto err;
-+ }
-+
- vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
- vxh->vx_flags = htonl(VXLAN_FLAGS);
- vxh->vx_vni = vni;
-@@ -1628,10 +1628,6 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
- int err;
- bool udp_sum = !vs->sock->sk->sk_no_check_tx;
-
-- skb = udp_tunnel_handle_offloads(skb, udp_sum);
-- if (IS_ERR(skb))
-- return PTR_ERR(skb);
--
- min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
- + VXLAN_HLEN + sizeof(struct iphdr)
- + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
-@@ -1647,6 +1643,10 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
- if (WARN_ON(!skb))
- return -ENOMEM;
-
-+ skb = udp_tunnel_handle_offloads(skb, udp_sum);
-+ if (IS_ERR(skb))
-+ return PTR_ERR(skb);
-+
- vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
- vxh->vx_flags = htonl(VXLAN_FLAGS);
- vxh->vx_vni = vni;
-diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
-index a5186bb..8c45cf4 100644
---- a/drivers/net/wireless/rtlwifi/pci.c
-+++ b/drivers/net/wireless/rtlwifi/pci.c
-@@ -578,6 +578,13 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
- else
- entry = (u8 *)(&ring->desc[ring->idx]);
-
-+ if (rtlpriv->cfg->ops->get_available_desc &&
-+ rtlpriv->cfg->ops->get_available_desc(hw, prio) <= 1) {
-+ RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_DMESG,
-+ "no available desc!\n");
-+ return;
-+ }
-+
- if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
- return;
- ring->idx = (ring->idx + 1) % ring->entries;
-@@ -641,10 +648,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
-
- ieee80211_tx_status_irqsafe(hw, skb);
-
-- if ((ring->entries - skb_queue_len(&ring->queue))
-- == 2) {
-+ if ((ring->entries - skb_queue_len(&ring->queue)) <= 4) {
-
-- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
-+ RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%x\n",
- prio, ring->idx,
- skb_queue_len(&ring->queue));
-@@ -793,7 +799,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
- rx_remained_cnt =
- rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
- hw_queue);
-- if (rx_remained_cnt < 1)
-+ if (rx_remained_cnt == 0)
- return;
-
- } else { /* rx descriptor */
-@@ -845,18 +851,18 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
- else
- skb_reserve(skb, stats.rx_drvinfo_size +
- stats.rx_bufshift);
--
- } else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "skb->end - skb->tail = %d, len is %d\n",
- skb->end - skb->tail, len);
-- break;
-+ dev_kfree_skb_any(skb);
-+ goto new_trx_end;
- }
- /* handle command packet here */
- if (rtlpriv->cfg->ops->rx_command_packet &&
- rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) {
- dev_kfree_skb_any(skb);
-- goto end;
-+ goto new_trx_end;
- }
-
- /*
-@@ -906,6 +912,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
- } else {
- dev_kfree_skb_any(skb);
- }
-+new_trx_end:
- if (rtlpriv->use_new_trx_flow) {
- rtlpci->rx_ring[hw_queue].next_rx_rp += 1;
- rtlpci->rx_ring[hw_queue].next_rx_rp %=
-@@ -921,7 +928,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
- rtlpriv->enter_ps = false;
- schedule_work(&rtlpriv->works.lps_change_work);
- }
--end:
- skb = new_skb;
- no_new:
- if (rtlpriv->use_new_trx_flow) {
-@@ -1695,6 +1701,15 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
- }
- }
-
-+ if (rtlpriv->cfg->ops->get_available_desc &&
-+ rtlpriv->cfg->ops->get_available_desc(hw, hw_queue) == 0) {
-+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
-+ "get_available_desc fail\n");
-+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
-+ flags);
-+ return skb->len;
-+ }
-+
- if (ieee80211_is_data_qos(fc)) {
- tid = rtl_get_tid(skb);
- if (sta) {
-diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c
-index 9b5a7d5..c31c6bf 100644
---- a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c
-+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c
-@@ -113,8 +113,6 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
- RCR_HTC_LOC_CTRL |
- RCR_AMF |
- RCR_ACF |
-- RCR_ADF |
-- RCR_AICV |
- RCR_ACRC32 |
- RCR_AB |
- RCR_AM |
-@@ -241,6 +239,7 @@ static struct rtl_hal_ops rtl8192ee_hal_ops = {
- .set_desc = rtl92ee_set_desc,
- .get_desc = rtl92ee_get_desc,
- .is_tx_desc_closed = rtl92ee_is_tx_desc_closed,
-+ .get_available_desc = rtl92ee_get_available_desc,
- .tx_polling = rtl92ee_tx_polling,
- .enable_hw_sec = rtl92ee_enable_hw_security_config,
- .set_key = rtl92ee_set_key,
-diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
-index 0069004..1f6d160 100644
---- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
-+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
-@@ -707,7 +707,7 @@ static u16 get_desc_addr_fr_q_idx(u16 queue_index)
- return desc_address;
- }
-
--void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
-+u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
- {
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-@@ -721,11 +721,12 @@ void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
- current_tx_write_point = (u16)((tmp_4byte) & 0x0fff);
-
- point_diff = ((current_tx_read_point > current_tx_write_point) ?
-- (current_tx_read_point - current_tx_write_point) :
-- (TX_DESC_NUM_92E - current_tx_write_point +
-+ (current_tx_read_point - current_tx_write_point - 1) :
-+ (TX_DESC_NUM_92E - 1 - current_tx_write_point +
- current_tx_read_point));
-
- rtlpci->tx_ring[q_idx].avl_desc = point_diff;
-+ return point_diff;
- }
-
- void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
-diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
-index 8effef9..b489dd9 100644
---- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
-+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
-@@ -831,7 +831,7 @@ void rtl92ee_rx_check_dma_ok(struct ieee80211_hw *hw, u8 *header_desc,
- u8 queue_index);
- u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw,
- u8 queue_index);
--void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 queue_index);
-+u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 queue_index);
- void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
- u8 *tx_bd_desc, u8 *desc, u8 queue_index,
- struct sk_buff *skb, dma_addr_t addr);
-diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
-index 6866dcf..27822fe 100644
---- a/drivers/net/wireless/rtlwifi/wifi.h
-+++ b/drivers/net/wireless/rtlwifi/wifi.h
-@@ -2161,6 +2161,7 @@ struct rtl_hal_ops {
- void (*add_wowlan_pattern)(struct ieee80211_hw *hw,
- struct rtl_wow_pattern *rtl_pattern,
- u8 index);
-+ u16 (*get_available_desc)(struct ieee80211_hw *hw, u8 q_idx);
- };
-
- struct rtl_intf_ops {
-diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
-index d8c1076..76ce69c 100644
---- a/drivers/net/xen-netfront.c
-+++ b/drivers/net/xen-netfront.c
-@@ -1062,8 +1062,7 @@ err:
-
- static int xennet_change_mtu(struct net_device *dev, int mtu)
- {
-- int max = xennet_can_sg(dev) ?
-- XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
-+ int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
-
- if (mtu > max)
- return -EINVAL;
-@@ -1333,8 +1332,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
- netdev->ethtool_ops = &xennet_ethtool_ops;
- SET_NETDEV_DEV(netdev, &dev->dev);
-
-- netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
--
- np->netdev = netdev;
-
- netif_carrier_off(netdev);
-diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
-index d02df7d..57b7bc2 100644
---- a/drivers/staging/comedi/drivers/adv_pci1710.c
-+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
-@@ -455,7 +455,6 @@ static int pci171x_insn_read_ai(struct comedi_device *dev,
- struct comedi_insn *insn, unsigned int *data)
- {
- struct pci1710_private *devpriv = dev->private;
-- unsigned int chan = CR_CHAN(insn->chanspec);
- int ret = 0;
- int i;
-
-@@ -477,7 +476,7 @@ static int pci171x_insn_read_ai(struct comedi_device *dev,
- break;
-
- val = inw(dev->iobase + PCI171x_AD_DATA);
-- ret = pci171x_ai_dropout(dev, s, chan, val);
-+ ret = pci171x_ai_dropout(dev, s, 0, val);
- if (ret)
- break;
-
-diff --git a/fs/exec.c b/fs/exec.c
-index ad8798e..4617a4e 100644
---- a/fs/exec.c
-+++ b/fs/exec.c
-@@ -1259,6 +1259,53 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
- spin_unlock(&p->fs->lock);
- }
-
-+static void bprm_fill_uid(struct linux_binprm *bprm)
-+{
-+ struct inode *inode;
-+ unsigned int mode;
-+ kuid_t uid;
-+ kgid_t gid;
-+
-+ /* clear any previous set[ug]id data from a previous binary */
-+ bprm->cred->euid = current_euid();
-+ bprm->cred->egid = current_egid();
-+
-+ if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
-+ return;
-+
-+ if (task_no_new_privs(current))
-+ return;
-+
-+ inode = file_inode(bprm->file);
-+ mode = READ_ONCE(inode->i_mode);
-+ if (!(mode & (S_ISUID|S_ISGID)))
-+ return;
-+
-+ /* Be careful if suid/sgid is set */
-+ mutex_lock(&inode->i_mutex);
-+
-+ /* reload atomically mode/uid/gid now that lock held */
-+ mode = inode->i_mode;
-+ uid = inode->i_uid;
-+ gid = inode->i_gid;
-+ mutex_unlock(&inode->i_mutex);
-+
-+ /* We ignore suid/sgid if there are no mappings for them in the ns */
-+ if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
-+ !kgid_has_mapping(bprm->cred->user_ns, gid))
-+ return;
-+
-+ if (mode & S_ISUID) {
-+ bprm->per_clear |= PER_CLEAR_ON_SETID;
-+ bprm->cred->euid = uid;
-+ }
-+
-+ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
-+ bprm->per_clear |= PER_CLEAR_ON_SETID;
-+ bprm->cred->egid = gid;
-+ }
-+}
-+
- /*
- * Fill the binprm structure from the inode.
- * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
-@@ -1267,36 +1314,9 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
- */
- int prepare_binprm(struct linux_binprm *bprm)
- {
-- struct inode *inode = file_inode(bprm->file);
-- umode_t mode = inode->i_mode;
- int retval;
-
--
-- /* clear any previous set[ug]id data from a previous binary */
-- bprm->cred->euid = current_euid();
-- bprm->cred->egid = current_egid();
--
-- if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
-- !task_no_new_privs(current) &&
-- kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
-- kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
-- /* Set-uid? */
-- if (mode & S_ISUID) {
-- bprm->per_clear |= PER_CLEAR_ON_SETID;
-- bprm->cred->euid = inode->i_uid;
-- }
--
-- /* Set-gid? */
-- /*
-- * If setgid is set but no group execute bit then this
-- * is a candidate for mandatory locking, not a setgid
-- * executable.
-- */
-- if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
-- bprm->per_clear |= PER_CLEAR_ON_SETID;
-- bprm->cred->egid = inode->i_gid;
-- }
-- }
-+ bprm_fill_uid(bprm);
-
- /* fill in binprm security blob */
- retval = security_bprm_set_creds(bprm);
-diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
-index 431b7fc..e235ec5 100644
---- a/include/linux/hugetlb.h
-+++ b/include/linux/hugetlb.h
-@@ -99,9 +99,9 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
- struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
- int write);
- struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-- pmd_t *pmd, int write);
-+ pmd_t *pmd, int flags);
- struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
-- pud_t *pud, int write);
-+ pud_t *pud, int flags);
- int pmd_huge(pmd_t pmd);
- int pud_huge(pud_t pmd);
- unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
-@@ -133,8 +133,8 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
- static inline void hugetlb_show_meminfo(void)
- {
- }
--#define follow_huge_pmd(mm, addr, pmd, write) NULL
--#define follow_huge_pud(mm, addr, pud, write) NULL
-+#define follow_huge_pmd(mm, addr, pmd, flags) NULL
-+#define follow_huge_pud(mm, addr, pud, flags) NULL
- #define prepare_hugepage_range(file, addr, len) (-EINVAL)
- #define pmd_huge(x) 0
- #define pud_huge(x) 0
-diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index 52fd8e8..840fb7f 100644
---- a/include/linux/netdevice.h
-+++ b/include/linux/netdevice.h
-@@ -2159,6 +2159,12 @@ void netdev_freemem(struct net_device *dev);
- void synchronize_net(void);
- int init_dummy_netdev(struct net_device *dev);
-
-+DECLARE_PER_CPU(int, xmit_recursion);
-+static inline int dev_recursion_level(void)
-+{
-+ return this_cpu_read(xmit_recursion);
-+}
-+
- struct net_device *dev_get_by_index(struct net *net, int ifindex);
- struct net_device *__dev_get_by_index(struct net *net, int ifindex);
- struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
-diff --git a/include/linux/swapops.h b/include/linux/swapops.h
-index 6adfb7b..e288d5c 100644
---- a/include/linux/swapops.h
-+++ b/include/linux/swapops.h
-@@ -137,6 +137,8 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
- *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
- }
-
-+extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
-+ spinlock_t *ptl);
- extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
- unsigned long address);
- extern void migration_entry_wait_huge(struct vm_area_struct *vma,
-@@ -150,6 +152,8 @@ static inline int is_migration_entry(swp_entry_t swp)
- }
- #define migration_entry_to_page(swp) NULL
- static inline void make_migration_entry_read(swp_entry_t *entryp) { }
-+static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
-+ spinlock_t *ptl) { }
- static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
- unsigned long address) { }
- static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
-diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
-index d9a4905..6e0ce8c 100644
---- a/include/linux/usb/usbnet.h
-+++ b/include/linux/usb/usbnet.h
-@@ -227,9 +227,23 @@ struct skb_data { /* skb->cb is one of these */
- struct urb *urb;
- struct usbnet *dev;
- enum skb_state state;
-- size_t length;
-+ long length;
-+ unsigned long packets;
- };
-
-+/* Drivers that set FLAG_MULTI_PACKET must call this in their
-+ * tx_fixup method before returning an skb.
-+ */
-+static inline void
-+usbnet_set_skb_tx_stats(struct sk_buff *skb,
-+ unsigned long packets, long bytes_delta)
-+{
-+ struct skb_data *entry = (struct skb_data *) skb->cb;
-+
-+ entry->packets = packets;
-+ entry->length = bytes_delta;
-+}
-+
- extern int usbnet_open(struct net_device *net);
- extern int usbnet_stop(struct net_device *net);
- extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb,
-diff --git a/include/net/ip.h b/include/net/ip.h
-index 09cf5ae..c0c26c3 100644
---- a/include/net/ip.h
-+++ b/include/net/ip.h
-@@ -453,22 +453,6 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
-
- #endif
-
--static inline int sk_mc_loop(struct sock *sk)
--{
-- if (!sk)
-- return 1;
-- switch (sk->sk_family) {
-- case AF_INET:
-- return inet_sk(sk)->mc_loop;
--#if IS_ENABLED(CONFIG_IPV6)
-- case AF_INET6:
-- return inet6_sk(sk)->mc_loop;
--#endif
-- }
-- WARN_ON(1);
-- return 1;
--}
--
- bool ip_call_ra_chain(struct sk_buff *skb);
-
- /*
-diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
-index 1d09b46..eda131d 100644
---- a/include/net/ip6_route.h
-+++ b/include/net/ip6_route.h
-@@ -174,7 +174,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
-
- static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
- {
-- struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
-+ struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
-+ inet6_sk(skb->sk) : NULL;
-
- return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
- skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
-diff --git a/include/net/sock.h b/include/net/sock.h
-index 2210fec..45b54d3 100644
---- a/include/net/sock.h
-+++ b/include/net/sock.h
-@@ -1812,6 +1812,8 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
-
- struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
-
-+bool sk_mc_loop(struct sock *sk);
-+
- static inline bool sk_can_gso(const struct sock *sk)
- {
- return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
-diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
-index a28e09c..36508e6 100644
---- a/kernel/bpf/verifier.c
-+++ b/kernel/bpf/verifier.c
-@@ -1380,7 +1380,8 @@ peek_stack:
- /* tell verifier to check for equivalent states
- * after every call and jump
- */
-- env->explored_states[t + 1] = STATE_LIST_MARK;
-+ if (t + 1 < insn_cnt)
-+ env->explored_states[t + 1] = STATE_LIST_MARK;
- } else {
- /* conditional jump with two edges */
- ret = push_insn(t, t + 1, FALLTHROUGH, env);
-diff --git a/mm/gup.c b/mm/gup.c
-index 9b2afbf..e29c374 100644
---- a/mm/gup.c
-+++ b/mm/gup.c
-@@ -167,10 +167,10 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
- if (pud_none(*pud))
- return no_page_table(vma, flags);
- if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
-- if (flags & FOLL_GET)
-- return NULL;
-- page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
-- return page;
-+ page = follow_huge_pud(mm, address, pud, flags);
-+ if (page)
-+ return page;
-+ return no_page_table(vma, flags);
- }
- if (unlikely(pud_bad(*pud)))
- return no_page_table(vma, flags);
-@@ -179,19 +179,10 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
- if (pmd_none(*pmd))
- return no_page_table(vma, flags);
- if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
-- page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
-- if (flags & FOLL_GET) {
-- /*
-- * Refcount on tail pages are not well-defined and
-- * shouldn't be taken. The caller should handle a NULL
-- * return when trying to follow tail pages.
-- */
-- if (PageHead(page))
-- get_page(page);
-- else
-- page = NULL;
-- }
-- return page;
-+ page = follow_huge_pmd(mm, address, pmd, flags);
-+ if (page)
-+ return page;
-+ return no_page_table(vma, flags);
- }
- if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
- return no_page_table(vma, flags);
-diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index 267e419..a2bfd02 100644
---- a/mm/hugetlb.c
-+++ b/mm/hugetlb.c
-@@ -3700,44 +3700,64 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
- return (pte_t *) pmd;
- }
-
--struct page *
--follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-- pmd_t *pmd, int write)
--{
-- struct page *page;
-+#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
-
-- if (!pmd_present(*pmd))
-- return NULL;
-- page = pte_page(*(pte_t *)pmd);
-- if (page)
-- page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
-- return page;
-+/*
-+ * These functions are overwritable if your architecture needs its own
-+ * behavior.
-+ */
-+struct page * __weak
-+follow_huge_addr(struct mm_struct *mm, unsigned long address,
-+ int write)
-+{
-+ return ERR_PTR(-EINVAL);
- }
-
--struct page *
--follow_huge_pud(struct mm_struct *mm, unsigned long address,
-- pud_t *pud, int write)
-+struct page * __weak
-+follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-+ pmd_t *pmd, int flags)
- {
-- struct page *page;
--
-- page = pte_page(*(pte_t *)pud);
-- if (page)
-- page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
-+ struct page *page = NULL;
-+ spinlock_t *ptl;
-+retry:
-+ ptl = pmd_lockptr(mm, pmd);
-+ spin_lock(ptl);
-+ /*
-+ * make sure that the address range covered by this pmd is not
-+ * unmapped from other threads.
-+ */
-+ if (!pmd_huge(*pmd))
-+ goto out;
-+ if (pmd_present(*pmd)) {
-+ page = pte_page(*(pte_t *)pmd) +
-+ ((address & ~PMD_MASK) >> PAGE_SHIFT);
-+ if (flags & FOLL_GET)
-+ get_page(page);
-+ } else {
-+ if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
-+ spin_unlock(ptl);
-+ __migration_entry_wait(mm, (pte_t *)pmd, ptl);
-+ goto retry;
-+ }
-+ /*
-+ * hwpoisoned entry is treated as no_page_table in
-+ * follow_page_mask().
-+ */
-+ }
-+out:
-+ spin_unlock(ptl);
- return page;
- }
-
--#else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
--
--/* Can be overriden by architectures */
- struct page * __weak
- follow_huge_pud(struct mm_struct *mm, unsigned long address,
-- pud_t *pud, int write)
-+ pud_t *pud, int flags)
- {
-- BUG();
-- return NULL;
--}
-+ if (flags & FOLL_GET)
-+ return NULL;
-
--#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
-+ return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
-+}
-
- #ifdef CONFIG_MEMORY_FAILURE
-
-diff --git a/mm/migrate.c b/mm/migrate.c
-index 344cdf6..be6d1ed 100644
---- a/mm/migrate.c
-+++ b/mm/migrate.c
-@@ -229,7 +229,7 @@ static void remove_migration_ptes(struct page *old, struct page *new)
- * get to the page and wait until migration is finished.
- * When we return from this function the fault will be retried.
- */
--static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
-+void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
- spinlock_t *ptl)
- {
- pte_t pte;
-@@ -1268,7 +1268,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
- goto put_and_set;
-
- if (PageHuge(page)) {
-- isolate_huge_page(page, &pagelist);
-+ if (PageHead(page))
-+ isolate_huge_page(page, &pagelist);
- goto put_and_set;
- }
-
-diff --git a/net/core/dev.c b/net/core/dev.c
-index 4ff46f8..5dd905c 100644
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -2821,7 +2821,9 @@ static void skb_update_prio(struct sk_buff *skb)
- #define skb_update_prio(skb)
- #endif
-
--static DEFINE_PER_CPU(int, xmit_recursion);
-+DEFINE_PER_CPU(int, xmit_recursion);
-+EXPORT_SYMBOL(xmit_recursion);
-+
- #define RECURSION_LIMIT 10
-
- /**
-diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index 62c67be..39c444c 100644
---- a/net/core/skbuff.c
-+++ b/net/core/skbuff.c
-@@ -4141,18 +4141,20 @@ EXPORT_SYMBOL(skb_try_coalesce);
- */
- void skb_scrub_packet(struct sk_buff *skb, bool xnet)
- {
-- if (xnet)
-- skb_orphan(skb);
- skb->tstamp.tv64 = 0;
- skb->pkt_type = PACKET_HOST;
- skb->skb_iif = 0;
- skb->ignore_df = 0;
- skb_dst_drop(skb);
-- skb->mark = 0;
-- skb_init_secmark(skb);
- secpath_reset(skb);
- nf_reset(skb);
- nf_reset_trace(skb);
-+
-+ if (!xnet)
-+ return;
-+
-+ skb_orphan(skb);
-+ skb->mark = 0;
- }
- EXPORT_SYMBOL_GPL(skb_scrub_packet);
-
-diff --git a/net/core/sock.c b/net/core/sock.c
-index 1c7a33d..a91f99f 100644
---- a/net/core/sock.c
-+++ b/net/core/sock.c
-@@ -651,6 +651,25 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
- sock_reset_flag(sk, bit);
- }
-
-+bool sk_mc_loop(struct sock *sk)
-+{
-+ if (dev_recursion_level())
-+ return false;
-+ if (!sk)
-+ return true;
-+ switch (sk->sk_family) {
-+ case AF_INET:
-+ return inet_sk(sk)->mc_loop;
-+#if IS_ENABLED(CONFIG_IPV6)
-+ case AF_INET6:
-+ return inet6_sk(sk)->mc_loop;
-+#endif
-+ }
-+ WARN_ON(1);
-+ return true;
-+}
-+EXPORT_SYMBOL(sk_mc_loop);
-+
- /*
- * This is meant for all protocols to use and covers goings on
- * at the socket level. Everything here is generic.
-diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
-index 394a200..69711d8 100644
---- a/net/ipv4/geneve.c
-+++ b/net/ipv4/geneve.c
-@@ -121,10 +121,6 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
- int min_headroom;
- int err;
-
-- skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
-- if (IS_ERR(skb))
-- return PTR_ERR(skb);
--
- min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
- + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
- + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
-@@ -139,6 +135,10 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
- if (unlikely(!skb))
- return -ENOMEM;
-
-+ skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
-+ if (IS_ERR(skb))
-+ return PTR_ERR(skb);
-+
- gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
- geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
-
-diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index 075ab4d..08ccca6 100644
---- a/net/ipv4/tcp_input.c
-+++ b/net/ipv4/tcp_input.c
-@@ -3104,10 +3104,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
- if (!first_ackt.v64)
- first_ackt = last_ackt;
-
-- if (!(sacked & TCPCB_SACKED_ACKED))
-+ if (!(sacked & TCPCB_SACKED_ACKED)) {
- reord = min(pkts_acked, reord);
-- if (!after(scb->end_seq, tp->high_seq))
-- flag |= FLAG_ORIG_SACK_ACKED;
-+ if (!after(scb->end_seq, tp->high_seq))
-+ flag |= FLAG_ORIG_SACK_ACKED;
-+ }
- }
-
- if (sacked & TCPCB_SACKED_ACKED)
-diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
-index d22f544..982347e 100644
---- a/net/ipv4/tcp_ipv4.c
-+++ b/net/ipv4/tcp_ipv4.c
-@@ -1516,7 +1516,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
- skb->sk = sk;
- skb->destructor = sock_edemux;
- if (sk->sk_state != TCP_TIME_WAIT) {
-- struct dst_entry *dst = sk->sk_rx_dst;
-+ struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
-
- if (dst)
- dst = dst_check(dst, 0);
-diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
-index 9790f39..9f29453 100644
---- a/net/ipv4/tcp_output.c
-+++ b/net/ipv4/tcp_output.c
-@@ -2931,6 +2931,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
- }
- #endif
-
-+ /* Do not fool tcpdump (if any), clean our debris */
-+ skb->tstamp.tv64 = 0;
- return skb;
- }
- EXPORT_SYMBOL(tcp_make_synack);
-diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
-index 3f5aa99..0bf56e5 100644
---- a/net/ipv6/ip6_output.c
-+++ b/net/ipv6/ip6_output.c
-@@ -541,7 +541,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
- {
- struct sk_buff *frag;
- struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
-- struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
-+ struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
-+ inet6_sk(skb->sk) : NULL;
- struct ipv6hdr *tmp_hdr;
- struct frag_hdr *fh;
- unsigned int mtu, hlen, left, len;
-diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
-index 6828667..d375ce6 100644
---- a/net/ipv6/ndisc.c
-+++ b/net/ipv6/ndisc.c
-@@ -1216,7 +1216,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
- if (rt)
- rt6_set_expires(rt, jiffies + (HZ * lifetime));
- if (ra_msg->icmph.icmp6_hop_limit) {
-- in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
-+ /* Only set hop_limit on the interface if it is higher than
-+ * the current hop_limit.
-+ */
-+ if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
-+ in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
-+ } else {
-+ ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
-+ }
- if (rt)
- dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
- ra_msg->icmph.icmp6_hop_limit);
-diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
-index 9c0b54e..b899793 100644
---- a/net/ipv6/tcp_ipv6.c
-+++ b/net/ipv6/tcp_ipv6.c
-@@ -1409,6 +1409,15 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
- TCP_SKB_CB(skb)->sacked = 0;
- }
-
-+static void tcp_v6_restore_cb(struct sk_buff *skb)
-+{
-+ /* We need to move header back to the beginning if xfrm6_policy_check()
-+ * and tcp_v6_fill_cb() are going to be called again.
-+ */
-+ memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
-+ sizeof(struct inet6_skb_parm));
-+}
-+
- static int tcp_v6_rcv(struct sk_buff *skb)
- {
- const struct tcphdr *th;
-@@ -1541,6 +1550,7 @@ do_time_wait:
- inet_twsk_deschedule(tw, &tcp_death_row);
- inet_twsk_put(tw);
- sk = sk2;
-+ tcp_v6_restore_cb(skb);
- goto process;
- }
- /* Fall through to ACK */
-@@ -1549,6 +1559,7 @@ do_time_wait:
- tcp_v6_timewait_ack(sk, skb);
- break;
- case TCP_TW_RST:
-+ tcp_v6_restore_cb(skb);
- goto no_tcp_socket;
- case TCP_TW_SUCCESS:
- ;
-@@ -1583,7 +1594,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
- skb->sk = sk;
- skb->destructor = sock_edemux;
- if (sk->sk_state != TCP_TIME_WAIT) {
-- struct dst_entry *dst = sk->sk_rx_dst;
-+ struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
-
- if (dst)
- dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
-diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
-index 2034c6d..296cc24 100644
---- a/net/openvswitch/vport.c
-+++ b/net/openvswitch/vport.c
-@@ -274,10 +274,8 @@ void ovs_vport_del(struct vport *vport)
- ASSERT_OVSL();
-
- hlist_del_rcu(&vport->hash_node);
--
-- vport->ops->destroy(vport);
--
- module_put(vport->ops->owner);
-+ vport->ops->destroy(vport);
- }
-
- /**
diff --git a/3.19.6/4420_grsecurity-3.1-3.19.6-201505021013.patch b/3.19.6/4420_grsecurity-3.1-3.19.6-201505021013.patch
deleted file mode 100644
index 7e681c9..0000000
--- a/3.19.6/4420_grsecurity-3.1-3.19.6-201505021013.patch
+++ /dev/null
@@ -1,148994 +0,0 @@
-diff --git a/Documentation/dontdiff b/Documentation/dontdiff
-index 9de9813..1462492 100644
---- a/Documentation/dontdiff
-+++ b/Documentation/dontdiff
-@@ -3,9 +3,11 @@
- *.bc
- *.bin
- *.bz2
-+*.c.[012]*.*
- *.cis
- *.cpio
- *.csp
-+*.dbg
- *.dsp
- *.dvi
- *.elf
-@@ -15,6 +17,7 @@
- *.gcov
- *.gen.S
- *.gif
-+*.gmo
- *.grep
- *.grp
- *.gz
-@@ -51,14 +54,17 @@
- *.tab.h
- *.tex
- *.ver
-+*.vim
- *.xml
- *.xz
- *_MODULES
-+*_reg_safe.h
- *_vga16.c
- *~
- \#*#
- *.9
--.*
-+.[^g]*
-+.gen*
- .*.d
- .mm
- 53c700_d.h
-@@ -72,9 +78,11 @@ Image
- Module.markers
- Module.symvers
- PENDING
-+PERF*
- SCCS
- System.map*
- TAGS
-+TRACEEVENT-CFLAGS
- aconf
- af_names.h
- aic7*reg.h*
-@@ -83,6 +91,7 @@ aic7*seq.h*
- aicasm
- aicdb.h*
- altivec*.c
-+ashldi3.S
- asm-offsets.h
- asm_offsets.h
- autoconf.h*
-@@ -95,32 +104,40 @@ bounds.h
- bsetup
- btfixupprep
- build
-+builtin-policy.h
- bvmlinux
- bzImage*
- capability_names.h
- capflags.c
- classlist.h*
-+clut_vga16.c
-+common-cmds.h
- comp*.log
- compile.h*
- conf
- config
- config-*
- config_data.h*
-+config.c
- config.mak
- config.mak.autogen
-+config.tmp
- conmakehash
- consolemap_deftbl.c*
- cpustr.h
- crc32table.h*
- cscope.*
- defkeymap.c
-+devicetable-offsets.h
- devlist.h*
- dnotify_test
- docproc
- dslm
-+dtc-lexer.lex.c
- elf2ecoff
- elfconfig.h*
- evergreen_reg_safe.h
-+exception_policy.conf
- fixdep
- flask.h
- fore200e_mkfirm
-@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
- gconf
- gconf.glade.h
- gen-devlist
-+gen-kdb_cmds.c
- gen_crc32table
- gen_init_cpio
- generated
- genheaders
- genksyms
- *_gray256.c
-+hash
-+hid-example
- hpet_example
- hugepage-mmap
- hugepage-shm
-@@ -148,14 +168,14 @@ int32.c
- int4.c
- int8.c
- kallsyms
--kconfig
-+kern_constants.h
- keywords.c
- ksym.c*
- ksym.h*
- kxgettext
- lex.c
- lex.*.c
--linux
-+lib1funcs.S
- logo_*.c
- logo_*_clut224.c
- logo_*_mono.c
-@@ -165,14 +185,15 @@ mach-types.h
- machtypes.h
- map
- map_hugetlb
--media
- mconf
-+mdp
- miboot*
- mk_elfconfig
- mkboot
- mkbugboot
- mkcpustr
- mkdep
-+mkpiggy
- mkprep
- mkregtable
- mktables
-@@ -188,6 +209,8 @@ oui.c*
- page-types
- parse.c
- parse.h
-+parse-events*
-+pasyms.h
- patches*
- pca200e.bin
- pca200e_ecd.bin2
-@@ -197,6 +220,7 @@ perf-archive
- piggyback
- piggy.gzip
- piggy.S
-+pmu-*
- pnmtologo
- ppc_defs.h*
- pss_boot.h
-@@ -206,7 +230,12 @@ r200_reg_safe.h
- r300_reg_safe.h
- r420_reg_safe.h
- r600_reg_safe.h
-+randomize_layout_hash.h
-+randomize_layout_seed.h
-+realmode.lds
-+realmode.relocs
- recordmcount
-+regdb.c
- relocs
- rlim_names.h
- rn50_reg_safe.h
-@@ -216,8 +245,12 @@ series
- setup
- setup.bin
- setup.elf
-+signing_key*
-+size_overflow_hash.h
- sImage
-+slabinfo
- sm_tbl*
-+sortextable
- split-include
- syscalltab.h
- tables.c
-@@ -227,6 +260,7 @@ tftpboot.img
- timeconst.h
- times.h*
- trix_boot.h
-+user_constants.h
- utsrelease.h*
- vdso-syms.lds
- vdso.lds
-@@ -238,13 +272,17 @@ vdso32.lds
- vdso32.so.dbg
- vdso64.lds
- vdso64.so.dbg
-+vdsox32.lds
-+vdsox32-syms.lds
- version.h*
- vmImage
- vmlinux
- vmlinux-*
- vmlinux.aout
- vmlinux.bin.all
-+vmlinux.bin.bz2
- vmlinux.lds
-+vmlinux.relocs
- vmlinuz
- voffset.h
- vsyscall.lds
-@@ -252,9 +290,12 @@ vsyscall_32.lds
- wanxlfw.inc
- uImage
- unifdef
-+utsrelease.h
- wakeup.bin
- wakeup.elf
- wakeup.lds
-+x509*
- zImage*
- zconf.hash.c
-+zconf.lex.c
- zoffset.h
-diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
-index a311db8..415b28c 100644
---- a/Documentation/kbuild/makefiles.txt
-+++ b/Documentation/kbuild/makefiles.txt
-@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
- === 4 Host Program support
- --- 4.1 Simple Host Program
- --- 4.2 Composite Host Programs
-- --- 4.3 Using C++ for host programs
-- --- 4.4 Controlling compiler options for host programs
-- --- 4.5 When host programs are actually built
-- --- 4.6 Using hostprogs-$(CONFIG_FOO)
-+ --- 4.3 Defining shared libraries
-+ --- 4.4 Using C++ for host programs
-+ --- 4.5 Controlling compiler options for host programs
-+ --- 4.6 When host programs are actually built
-+ --- 4.7 Using hostprogs-$(CONFIG_FOO)
-
- === 5 Kbuild clean infrastructure
-
-@@ -642,7 +643,29 @@ Both possibilities are described in the following.
- Finally, the two .o files are linked to the executable, lxdialog.
- Note: The syntax <executable>-y is not permitted for host-programs.
-
----- 4.3 Using C++ for host programs
-+--- 4.3 Defining shared libraries
-+
-+ Objects with extension .so are considered shared libraries, and
-+ will be compiled as position independent objects.
-+ Kbuild provides support for shared libraries, but the usage
-+ shall be restricted.
-+ In the following example the libkconfig.so shared library is used
-+ to link the executable conf.
-+
-+ Example:
-+ #scripts/kconfig/Makefile
-+ hostprogs-y := conf
-+ conf-objs := conf.o libkconfig.so
-+ libkconfig-objs := expr.o type.o
-+
-+ Shared libraries always require a corresponding -objs line, and
-+ in the example above the shared library libkconfig is composed by
-+ the two objects expr.o and type.o.
-+ expr.o and type.o will be built as position independent code and
-+ linked as a shared library libkconfig.so. C++ is not supported for
-+ shared libraries.
-+
-+--- 4.4 Using C++ for host programs
-
- kbuild offers support for host programs written in C++. This was
- introduced solely to support kconfig, and is not recommended
-@@ -665,7 +688,7 @@ Both possibilities are described in the following.
- qconf-cxxobjs := qconf.o
- qconf-objs := check.o
-
----- 4.4 Controlling compiler options for host programs
-+--- 4.5 Controlling compiler options for host programs
-
- When compiling host programs, it is possible to set specific flags.
- The programs will always be compiled utilising $(HOSTCC) passed
-@@ -693,7 +716,7 @@ Both possibilities are described in the following.
- When linking qconf, it will be passed the extra option
- "-L$(QTDIR)/lib".
-
----- 4.5 When host programs are actually built
-+--- 4.6 When host programs are actually built
-
- Kbuild will only build host-programs when they are referenced
- as a prerequisite.
-@@ -724,7 +747,7 @@ Both possibilities are described in the following.
- This will tell kbuild to build lxdialog even if not referenced in
- any rule.
-
----- 4.6 Using hostprogs-$(CONFIG_FOO)
-+--- 4.7 Using hostprogs-$(CONFIG_FOO)
-
- A typical pattern in a Kbuild file looks like this:
-
-diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index 176d4fe..6eabd3c 100644
---- a/Documentation/kernel-parameters.txt
-+++ b/Documentation/kernel-parameters.txt
-@@ -1191,6 +1191,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
- Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
- Default: 1024
-
-+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
-+ ignore grsecurity's /proc restrictions
-+
-+ grsec_sysfs_restrict= Format: 0 | 1
-+ Default: 1
-+ Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
-+
- hashdist= [KNL,NUMA] Large hashes allocated during boot
- are distributed across NUMA nodes. Defaults on
- for 64-bit NUMA, off otherwise.
-@@ -2283,6 +2290,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
- noexec=on: enable non-executable mappings (default)
- noexec=off: disable non-executable mappings
-
-+ nopcid [X86-64]
-+ Disable PCID (Process-Context IDentifier) even if it
-+ is supported by the processor.
-+
- nosmap [X86]
- Disable SMAP (Supervisor Mode Access Prevention)
- even if it is supported by processor.
-@@ -2584,6 +2595,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
- the specified number of seconds. This is to be used if
- your oopses keep scrolling off the screen.
-
-+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
-+ virtualization environments that don't cope well with the
-+ expand down segment used by UDEREF on X86-32 or the frequent
-+ page table updates on X86-64.
-+
-+ pax_sanitize_slab=
-+ Format: { 0 | 1 | off | fast | full }
-+ Options '0' and '1' are only provided for backward
-+ compatibility, 'off' or 'fast' should be used instead.
-+ 0|off : disable slab object sanitization
-+ 1|fast: enable slab object sanitization excluding
-+ whitelisted slabs (default)
-+ full : sanitize all slabs, even the whitelisted ones
-+
-+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
-+
-+ pax_extra_latent_entropy
-+ Enable a very simple form of latent entropy extraction
-+ from the first 4GB of memory as the bootmem allocator
-+ passes the memory pages to the buddy allocator.
-+
-+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
-+ when the processor supports PCID.
-+
- pcbit= [HW,ISDN]
-
- pcd. [PARIDE]
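All of the options documented above are plain boot-time parameters, so they compose on the kernel command line like any other. A hypothetical example, using only values the documentation above allows (the GID and the specific choices are placeholders, not recommendations):

    linux /boot/vmlinuz root=/dev/sda2 pax_sanitize_slab=full pax_softmode=0 grsec_proc_gid=1001 grsec_sysfs_restrict=0
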
-diff --git a/Makefile b/Makefile
-index 65c7c87..abe7f93 100644
---- a/Makefile
-+++ b/Makefile
-@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
- HOSTCC = gcc
- HOSTCXX = g++
- HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
--HOSTCXXFLAGS = -O2
-+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -std=gnu89 -fno-delete-null-pointer-checks
-+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
-+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
-
- ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
- HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
-@@ -446,8 +448,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
- # Rules shared between *config targets and build targets
-
- # Basic helpers built in scripts/
--PHONY += scripts_basic
--scripts_basic:
-+PHONY += scripts_basic gcc-plugins
-+scripts_basic: gcc-plugins
- $(Q)$(MAKE) $(build)=scripts/basic
- $(Q)rm -f .tmp_quiet_recordmcount
-
-@@ -622,6 +624,72 @@ endif
- # Tell gcc to never replace conditional load with a non-conditional one
- KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
-
-+ifndef DISABLE_PAX_PLUGINS
-+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
-+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
-+else
-+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
-+endif
-+ifneq ($(PLUGINCC),)
-+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
-+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
-+endif
-+ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
-+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
-+endif
-+ifdef CONFIG_KALLOCSTAT_PLUGIN
-+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
-+endif
-+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
-+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
-+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
-+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
-+endif
-+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
-+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
-+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
-+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
-+endif
-+endif
-+ifdef CONFIG_CHECKER_PLUGIN
-+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
-+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
-+endif
-+endif
-+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
-+ifdef CONFIG_PAX_SIZE_OVERFLOW
-+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
-+endif
-+ifdef CONFIG_PAX_LATENT_ENTROPY
-+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
-+endif
-+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
-+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
-+endif
-+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
-+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
-+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
-+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
-+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
-+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
-+ifeq ($(KBUILD_EXTMOD),)
-+gcc-plugins:
-+ $(Q)$(MAKE) $(build)=tools/gcc
-+else
-+gcc-plugins: ;
-+endif
-+else
-+gcc-plugins:
-+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
-+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
-+else
-+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
-+endif
-+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
-+endif
-+endif
-+
- ifdef CONFIG_READABLE_ASM
- # Disable optimizations that make assembler listings hard to read.
- # reorder blocks reorders the control in the function
-@@ -714,7 +782,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
- else
- KBUILD_CFLAGS += -g
- endif
--KBUILD_AFLAGS += -Wa,-gdwarf-2
-+KBUILD_AFLAGS += -Wa,--gdwarf-2
- endif
- ifdef CONFIG_DEBUG_INFO_DWARF4
- KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
-@@ -879,7 +947,7 @@ export mod_sign_cmd
-
-
- ifeq ($(KBUILD_EXTMOD),)
--core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
-+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
-
- vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
- $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
-@@ -926,6 +994,8 @@ endif
-
- # The actual objects are generated when descending,
- # make sure no implicit rule kicks in
-+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
-+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
- $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
-
- # Handle descending into subdirectories listed in $(vmlinux-dirs)
-@@ -935,7 +1005,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
- # Error messages still appears in the original language
-
- PHONY += $(vmlinux-dirs)
--$(vmlinux-dirs): prepare scripts
-+$(vmlinux-dirs): gcc-plugins prepare scripts
- $(Q)$(MAKE) $(build)=$@
-
- define filechk_kernel.release
-@@ -978,10 +1048,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
-
- archprepare: archheaders archscripts prepare1 scripts_basic
-
-+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
-+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
- prepare0: archprepare FORCE
- $(Q)$(MAKE) $(build)=.
-
- # All the preparing..
-+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
- prepare: prepare0
-
- # Generate some files
-@@ -1095,6 +1168,8 @@ all: modules
- # using awk while concatenating to the final file.
-
- PHONY += modules
-+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
-+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
- modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
- $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
- @$(kecho) ' Building modules, stage 2.';
-@@ -1110,7 +1185,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
-
- # Target to prepare building external modules
- PHONY += modules_prepare
--modules_prepare: prepare scripts
-+modules_prepare: gcc-plugins prepare scripts
-
- # Target to install modules
- PHONY += modules_install
-@@ -1176,7 +1251,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
- Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
- signing_key.priv signing_key.x509 x509.genkey \
- extra_certificates signing_key.x509.keyid \
-- signing_key.x509.signer
-+ signing_key.x509.signer \
-+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
-+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
-+ tools/gcc/randomize_layout_seed.h
-
- # clean - Delete most, but leave enough to build external modules
- #
-@@ -1215,7 +1293,7 @@ distclean: mrproper
- @find $(srctree) $(RCS_FIND_IGNORE) \
- \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
- -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
-- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
-+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
- -type f -print | xargs rm -f
-
-
-@@ -1381,6 +1459,8 @@ PHONY += $(module-dirs) modules
- $(module-dirs): crmodverdir $(objtree)/Module.symvers
- $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
-
-+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
-+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
- modules: $(module-dirs)
- @$(kecho) ' Building modules, stage 2.';
- $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
-@@ -1521,17 +1601,21 @@ else
- target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
- endif
-
--%.s: %.c prepare scripts FORCE
-+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
-+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
-+%.s: %.c gcc-plugins prepare scripts FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
- %.i: %.c prepare scripts FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
--%.o: %.c prepare scripts FORCE
-+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
-+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
-+%.o: %.c gcc-plugins prepare scripts FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
- %.lst: %.c prepare scripts FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
--%.s: %.S prepare scripts FORCE
-+%.s: %.S gcc-plugins prepare scripts FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
--%.o: %.S prepare scripts FORCE
-+%.o: %.S gcc-plugins prepare scripts FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
- %.symtypes: %.c prepare scripts FORCE
- $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-@@ -1543,11 +1627,15 @@ endif
- $(build)=$(build-dir)
- # Make sure the latest headers are built for Documentation
- Documentation/: headers_install
--%/: prepare scripts FORCE
-+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
-+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
-+%/: gcc-plugins prepare scripts FORCE
- $(cmd_crmodverdir)
- $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
- $(build)=$(build-dir)
--%.ko: prepare scripts FORCE
-+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
-+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
-+%.ko: gcc-plugins prepare scripts FORCE
- $(cmd_crmodverdir)
- $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
- $(build)=$(build-dir) $(@:.ko=.o)
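All of the -fplugin= machinery in the Makefile hunks above assumes each plugin is a host-compiled shared object exporting two well-known symbols; gcc-plugin.sh merely probes whether $(HOSTCC)/$(HOSTCXX) can build such objects against the target compiler's plugin headers. The minimal shape of any GCC plugin looks like this; it is a do-nothing sketch, not one of the PaX plugins:

    /* Build (sketch):
     *   gcc -shared -fPIC -I"$(gcc -print-file-name=plugin)/include" \
     *       -o null_plugin.so null_plugin.c
     * Load: gcc -fplugin=./null_plugin.so file.c
     */
    #include "gcc-plugin.h"
    #include "plugin-version.h"

    /* GCC refuses to load a plugin that does not declare this symbol. */
    int plugin_is_GPL_compatible;

    int plugin_init(struct plugin_name_args *plugin_info,
                    struct plugin_gcc_version *version)
    {
        /* Reject loading into a GCC other than the one we were built for. */
        if (!plugin_default_version_check(version, &gcc_version))
            return 1;
        return 0;   /* no passes registered: a no-op plugin */
    }
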
-diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
-index 8f8eafb..3405f46 100644
---- a/arch/alpha/include/asm/atomic.h
-+++ b/arch/alpha/include/asm/atomic.h
-@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
- #define atomic_dec(v) atomic_sub(1,(v))
- #define atomic64_dec(v) atomic64_sub(1,(v))
-
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
-+
- #endif /* _ALPHA_ATOMIC_H */
-diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
-index ad368a9..fbe0f25 100644
---- a/arch/alpha/include/asm/cache.h
-+++ b/arch/alpha/include/asm/cache.h
-@@ -4,19 +4,19 @@
- #ifndef __ARCH_ALPHA_CACHE_H
- #define __ARCH_ALPHA_CACHE_H
-
-+#include <linux/const.h>
-
- /* Bytes per L1 (data) cache line. */
- #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
--# define L1_CACHE_BYTES 64
- # define L1_CACHE_SHIFT 6
- #else
- /* Both EV4 and EV5 are write-through, read-allocate,
- direct-mapped, physical.
- */
--# define L1_CACHE_BYTES 32
- # define L1_CACHE_SHIFT 5
- #endif
-
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
- #define SMP_CACHE_BYTES L1_CACHE_BYTES
-
- #endif
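The same L1_CACHE_BYTES rewrite recurs for every architecture touched by this patch. Deriving the byte count from the shift keeps the two definitions from drifting apart, and _AC() from <linux/const.h> exists because cache.h is also included from assembly, where a 1UL literal will not assemble. Roughly:

    /* Roughly what <linux/const.h> provides (sketch): */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)   X           /* assembler sees a plain 1 */
    #else
    #define __AC(X, Y)  (X##Y)
    #define _AC(X, Y)   __AC(X, Y)  /* C sees 1UL */
    #endif

    #define L1_CACHE_SHIFT  6
    #define L1_CACHE_BYTES  (_AC(1,UL) << L1_CACHE_SHIFT)   /* 64 */
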
-diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
-index 968d999..d36b2df 100644
---- a/arch/alpha/include/asm/elf.h
-+++ b/arch/alpha/include/asm/elf.h
-@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
-
- #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
-+
-+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
-+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
-+#endif
-+
- /* $0 is set by ld.so to a pointer to a function which might be
- registered using atexit. This provides a mean for the dynamic
- linker to call DT_FINI functions for shared libraries that have
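PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN above are bit counts: how many bits of page-granular randomisation PaX mixes into the mmap and stack bases (28 and 19 on 64-bit alpha, 14 each under the 32-bit personality). My reading of how a bit count becomes an offset, as a sketch; the PAGE_SHIFT value and the RNG input are stand-ins:

    #define PAGE_SHIFT 13   /* alpha: 8 KiB pages */

    /* Mask the random value down to 'delta_len' bits, then shift up to
     * page granularity and add to the unrandomised base. */
    static unsigned long randomize_base(unsigned long base,
                                        unsigned int delta_len,
                                        unsigned long random)
    {
        return base + ((random & ((1UL << delta_len) - 1)) << PAGE_SHIFT);
    }
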
-diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
-index aab14a0..b4fa3e7 100644
---- a/arch/alpha/include/asm/pgalloc.h
-+++ b/arch/alpha/include/asm/pgalloc.h
-@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
- pgd_set(pgd, pmd);
- }
-
-+static inline void
-+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
-+{
-+ pgd_populate(mm, pgd, pmd);
-+}
-+
- extern pgd_t *pgd_alloc(struct mm_struct *mm);
-
- static inline void
-diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
-index d8f9b7e..f6222fa 100644
---- a/arch/alpha/include/asm/pgtable.h
-+++ b/arch/alpha/include/asm/pgtable.h
-@@ -102,6 +102,17 @@ struct vm_area_struct;
- #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
- #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
- #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
-+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
-+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
-+#else
-+# define PAGE_SHARED_NOEXEC PAGE_SHARED
-+# define PAGE_COPY_NOEXEC PAGE_COPY
-+# define PAGE_READONLY_NOEXEC PAGE_READONLY
-+#endif
-+
- #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
-
- #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
-diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
-index 2fd00b7..cfd5069 100644
---- a/arch/alpha/kernel/module.c
-+++ b/arch/alpha/kernel/module.c
-@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
-
- /* The small sections were sorted to the end of the segment.
- The following should definitely cover them. */
-- gp = (u64)me->module_core + me->core_size - 0x8000;
-+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
- got = sechdrs[me->arch.gotsecindex].sh_addr;
-
- for (i = 0; i < n; i++) {
-diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
-index e51f578..16c64a3 100644
---- a/arch/alpha/kernel/osf_sys.c
-+++ b/arch/alpha/kernel/osf_sys.c
-@@ -1296,10 +1296,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
- generic version except that we know how to honor ADDR_LIMIT_32BIT. */
-
- static unsigned long
--arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
-- unsigned long limit)
-+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
-+ unsigned long limit, unsigned long flags)
- {
- struct vm_unmapped_area_info info;
-+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
-
- info.flags = 0;
- info.length = len;
-@@ -1307,6 +1308,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
- info.high_limit = limit;
- info.align_mask = 0;
- info.align_offset = 0;
-+ info.threadstack_offset = offset;
- return vm_unmapped_area(&info);
- }
-
-@@ -1339,20 +1341,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
- merely specific addresses, but regions of memory -- perhaps
- this feature should be incorporated into all ports? */
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (addr) {
-- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
-+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
- if (addr != (unsigned long) -ENOMEM)
- return addr;
- }
-
- /* Next, try allocating at TASK_UNMAPPED_BASE. */
-- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
-- len, limit);
-+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
-+
- if (addr != (unsigned long) -ENOMEM)
- return addr;
-
- /* Finally, try allocating in low memory. */
-- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
-+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
-
- return addr;
- }
-diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
-index 9d0ac09..479a962 100644
---- a/arch/alpha/mm/fault.c
-+++ b/arch/alpha/mm/fault.c
-@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
- __reload_thread(pcb);
- }
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+/*
-+ * PaX: decide what to do with offenders (regs->pc = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ * 2 when patched PLT trampoline was detected
-+ * 3 when unpatched PLT trampoline was detected
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+ int err;
-+
-+ do { /* PaX: patched PLT emulation #1 */
-+ unsigned int ldah, ldq, jmp;
-+
-+ err = get_user(ldah, (unsigned int *)regs->pc);
-+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
-+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
-+
-+ if (err)
-+ break;
-+
-+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
-+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
-+ jmp == 0x6BFB0000U)
-+ {
-+ unsigned long r27, addr;
-+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
-+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
-+
-+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
-+ err = get_user(r27, (unsigned long *)addr);
-+ if (err)
-+ break;
-+
-+ regs->r27 = r27;
-+ regs->pc = r27;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: patched PLT emulation #2 */
-+ unsigned int ldah, lda, br;
-+
-+ err = get_user(ldah, (unsigned int *)regs->pc);
-+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
-+ err |= get_user(br, (unsigned int *)(regs->pc+8));
-+
-+ if (err)
-+ break;
-+
-+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
-+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
-+ (br & 0xFFE00000U) == 0xC3E00000U)
-+ {
-+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
-+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
-+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
-+
-+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
-+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: unpatched PLT emulation */
-+ unsigned int br;
-+
-+ err = get_user(br, (unsigned int *)regs->pc);
-+
-+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
-+ unsigned int br2, ldq, nop, jmp;
-+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
-+
-+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
-+ err = get_user(br2, (unsigned int *)addr);
-+ err |= get_user(ldq, (unsigned int *)(addr+4));
-+ err |= get_user(nop, (unsigned int *)(addr+8));
-+ err |= get_user(jmp, (unsigned int *)(addr+12));
-+ err |= get_user(resolver, (unsigned long *)(addr+16));
-+
-+ if (err)
-+ break;
-+
-+ if (br2 == 0xC3600000U &&
-+ ldq == 0xA77B000CU &&
-+ nop == 0x47FF041FU &&
-+ jmp == 0x6B7B0000U)
-+ {
-+ regs->r28 = regs->pc+4;
-+ regs->r27 = addr+16;
-+ regs->pc = resolver;
-+ return 3;
-+ }
-+ }
-+ } while (0);
-+#endif
-+
-+ return 1;
-+}
-+
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ unsigned long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 5; i++) {
-+ unsigned int c;
-+ if (get_user(c, (unsigned int *)pc+i))
-+ printk(KERN_CONT "???????? ");
-+ else
-+ printk(KERN_CONT "%08x ", c);
-+ }
-+ printk("\n");
-+}
-+#endif
-
- /*
- * This routine handles page faults. It determines the address,
-@@ -133,8 +251,29 @@ retry:
- good_area:
- si_code = SEGV_ACCERR;
- if (cause < 0) {
-- if (!(vma->vm_flags & VM_EXEC))
-+ if (!(vma->vm_flags & VM_EXEC)) {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
-+ goto bad_area;
-+
-+ up_read(&mm->mmap_sem);
-+ switch (pax_handle_fetch_fault(regs)) {
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+ case 2:
-+ case 3:
-+ return;
-+#endif
-+
-+ }
-+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
-+ do_group_exit(SIGKILL);
-+#else
- goto bad_area;
-+#endif
-+
-+ }
- } else if (!cause) {
- /* Allow reads even for write-only mappings */
- if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
-diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
-index 97d07ed..2931f2b 100644
---- a/arch/arm/Kconfig
-+++ b/arch/arm/Kconfig
-@@ -1727,7 +1727,7 @@ config ALIGNMENT_TRAP
-
- config UACCESS_WITH_MEMCPY
- bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
-- depends on MMU
-+ depends on MMU && !PAX_MEMORY_UDEREF
- default y if CPU_FEROCEON
- help
- Implement faster copy_to_user and clear_user methods for CPU
-@@ -1991,6 +1991,7 @@ config XIP_PHYS_ADDR
- config KEXEC
- bool "Kexec system call (EXPERIMENTAL)"
- depends on (!SMP || PM_SLEEP_SMP)
-+ depends on !GRKERNSEC_KMEM
- help
- kexec is a system call that implements the ability to shutdown your
- current kernel, and to start another kernel. It is like a reboot
-diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
-index e22c119..abe7041 100644
---- a/arch/arm/include/asm/atomic.h
-+++ b/arch/arm/include/asm/atomic.h
-@@ -18,17 +18,41 @@
- #include <asm/barrier.h>
- #include <asm/cmpxchg.h>
-
-+#ifdef CONFIG_GENERIC_ATOMIC64
-+#include <asm-generic/atomic64.h>
-+#endif
-+
- #define ATOMIC_INIT(i) { (i) }
-
- #ifdef __KERNEL__
-
-+#ifdef CONFIG_THUMB2_KERNEL
-+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
-+#else
-+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
-+#endif
-+
-+#define _ASM_EXTABLE(from, to) \
-+" .pushsection __ex_table,\"a\"\n"\
-+" .align 3\n" \
-+" .long " #from ", " #to"\n" \
-+" .popsection"
-+
- /*
- * On ARM, ordinary assignment (str instruction) doesn't clear the local
- * strex/ldrex monitor on some implementations. The reason we can use it for
- * atomic_set() is the clrex or dummy strex done on every exception return.
- */
- #define atomic_read(v) ACCESS_ONCE((v)->counter)
-+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
-+{
-+ return ACCESS_ONCE(v->counter);
-+}
- #define atomic_set(v,i) (((v)->counter) = (i))
-+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
-+{
-+ v->counter = i;
-+}
-
- #if __LINUX_ARM_ARCH__ >= 6
-
-@@ -38,26 +62,50 @@
- * to ensure that the update happens.
- */
-
--#define ATOMIC_OP(op, c_op, asm_op) \
--static inline void atomic_##op(int i, atomic_t *v) \
-+#ifdef CONFIG_PAX_REFCOUNT
-+#define __OVERFLOW_POST \
-+ " bvc 3f\n" \
-+ "2: " REFCOUNT_TRAP_INSN "\n"\
-+ "3:\n"
-+#define __OVERFLOW_POST_RETURN \
-+ " bvc 3f\n" \
-+" mov %0, %1\n" \
-+ "2: " REFCOUNT_TRAP_INSN "\n"\
-+ "3:\n"
-+#define __OVERFLOW_EXTABLE \
-+ "4:\n" \
-+ _ASM_EXTABLE(2b, 4b)
-+#else
-+#define __OVERFLOW_POST
-+#define __OVERFLOW_POST_RETURN
-+#define __OVERFLOW_EXTABLE
-+#endif
-+
-+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
-+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
- { \
- unsigned long tmp; \
- int result; \
- \
- prefetchw(&v->counter); \
-- __asm__ __volatile__("@ atomic_" #op "\n" \
-+ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
- "1: ldrex %0, [%3]\n" \
- " " #asm_op " %0, %0, %4\n" \
-+ post_op \
- " strex %1, %0, [%3]\n" \
- " teq %1, #0\n" \
--" bne 1b" \
-+" bne 1b\n" \
-+ extable \
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
- : "r" (&v->counter), "Ir" (i) \
- : "cc"); \
- } \
-
--#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
--static inline int atomic_##op##_return(int i, atomic_t *v) \
-+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op, , )\
-+ __ATOMIC_OP(op, , c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
-+
-+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
-+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
- { \
- unsigned long tmp; \
- int result; \
-@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
- smp_mb(); \
- prefetchw(&v->counter); \
- \
-- __asm__ __volatile__("@ atomic_" #op "_return\n" \
-+ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
- "1: ldrex %0, [%3]\n" \
- " " #asm_op " %0, %0, %4\n" \
-+ post_op \
- " strex %1, %0, [%3]\n" \
- " teq %1, #0\n" \
--" bne 1b" \
-+" bne 1b\n" \
-+ extable \
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
- : "r" (&v->counter), "Ir" (i) \
- : "cc"); \
-@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
- return result; \
- }
-
-+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op, , )\
-+ __ATOMIC_OP_RETURN(op, , c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
-+
- static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
- {
- int oldval;
-@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
- __asm__ __volatile__ ("@ atomic_add_unless\n"
- "1: ldrex %0, [%4]\n"
- " teq %0, %5\n"
--" beq 2f\n"
--" add %1, %0, %6\n"
-+" beq 4f\n"
-+" adds %1, %0, %6\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" bvc 3f\n"
-+"2: " REFCOUNT_TRAP_INSN "\n"
-+"3:\n"
-+#endif
-+
- " strex %2, %1, [%4]\n"
- " teq %2, #0\n"
- " bne 1b\n"
--"2:"
-+"4:"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ _ASM_EXTABLE(2b, 4b)
-+#endif
-+
- : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "r" (u), "r" (a)
- : "cc");
-@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
- return oldval;
- }
-
-+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
-+{
-+ unsigned long oldval, res;
-+
-+ smp_mb();
-+
-+ do {
-+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
-+ "ldrex %1, [%3]\n"
-+ "mov %0, #0\n"
-+ "teq %1, %4\n"
-+ "strexeq %0, %5, [%3]\n"
-+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
-+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
-+ : "cc");
-+ } while (res);
-+
-+ smp_mb();
-+
-+ return oldval;
-+}
-+
- #else /* ARM_ARCH_6 */
-
- #ifdef CONFIG_SMP
- #error SMP not supported on pre-ARMv6 CPUs
- #endif
-
--#define ATOMIC_OP(op, c_op, asm_op) \
--static inline void atomic_##op(int i, atomic_t *v) \
-+#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
-+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
- { \
- unsigned long flags; \
- \
-@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
- raw_local_irq_restore(flags); \
- } \
-
--#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
--static inline int atomic_##op##_return(int i, atomic_t *v) \
-+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
-+ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
-+
-+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
-+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
- { \
- unsigned long flags; \
- int val; \
-@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
- return val; \
- }
-
-+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
-+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
-+
- static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
- {
- int ret;
-@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
- return ret;
- }
-
-+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
-+{
-+ return atomic_cmpxchg((atomic_t *)v, old, new);
-+}
-+
- static inline int __atomic_add_unless(atomic_t *v, int a, int u)
- {
- int c, old;
-@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)
-
- #undef ATOMIC_OPS
- #undef ATOMIC_OP_RETURN
-+#undef __ATOMIC_OP_RETURN
- #undef ATOMIC_OP
-+#undef __ATOMIC_OP
-
- #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
-+{
-+ return xchg(&v->counter, new);
-+}
-
- #define atomic_inc(v) atomic_add(1, v)
-+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
-+{
-+ atomic_add_unchecked(1, v);
-+}
- #define atomic_dec(v) atomic_sub(1, v)
-+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
-+{
-+ atomic_sub_unchecked(1, v);
-+}
-
- #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
-+{
-+ return atomic_add_return_unchecked(1, v) == 0;
-+}
- #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
- #define atomic_inc_return(v) (atomic_add_return(1, v))
-+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
-+{
-+ return atomic_add_return_unchecked(1, v);
-+}
- #define atomic_dec_return(v) (atomic_sub_return(1, v))
- #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-@@ -216,6 +336,14 @@ typedef struct {
- long long counter;
- } atomic64_t;
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+typedef struct {
-+ long long counter;
-+} atomic64_unchecked_t;
-+#else
-+typedef atomic64_t atomic64_unchecked_t;
-+#endif
-+
- #define ATOMIC64_INIT(i) { (i) }
-
- #ifdef CONFIG_ARM_LPAE
-@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
- return result;
- }
-
-+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
-+{
-+ long long result;
-+
-+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
-+" ldrd %0, %H0, [%1]"
-+ : "=&r" (result)
-+ : "r" (&v->counter), "Qo" (v->counter)
-+ );
-+
-+ return result;
-+}
-+
- static inline void atomic64_set(atomic64_t *v, long long i)
- {
- __asm__ __volatile__("@ atomic64_set\n"
-@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
- : "r" (&v->counter), "r" (i)
- );
- }
-+
-+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
-+{
-+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
-+" strd %2, %H2, [%1]"
-+ : "=Qo" (v->counter)
-+ : "r" (&v->counter), "r" (i)
-+ );
-+}
- #else
- static inline long long atomic64_read(const atomic64_t *v)
- {
-@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
- return result;
- }
-
-+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
-+{
-+ long long result;
-+
-+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
-+" ldrexd %0, %H0, [%1]"
-+ : "=&r" (result)
-+ : "r" (&v->counter), "Qo" (v->counter)
-+ );
-+
-+ return result;
-+}
-+
- static inline void atomic64_set(atomic64_t *v, long long i)
- {
- long long tmp;
-@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
- : "r" (&v->counter), "r" (i)
- : "cc");
- }
-+
-+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
-+{
-+ long long tmp;
-+
-+ prefetchw(&v->counter);
-+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
-+"1: ldrexd %0, %H0, [%2]\n"
-+" strexd %0, %3, %H3, [%2]\n"
-+" teq %0, #0\n"
-+" bne 1b"
-+ : "=&r" (tmp), "=Qo" (v->counter)
-+ : "r" (&v->counter), "r" (i)
-+ : "cc");
-+}
- #endif
-
--#define ATOMIC64_OP(op, op1, op2) \
--static inline void atomic64_##op(long long i, atomic64_t *v) \
-+#undef __OVERFLOW_POST_RETURN
-+#define __OVERFLOW_POST_RETURN \
-+ " bvc 3f\n" \
-+" mov %0, %1\n" \
-+" mov %H0, %H1\n" \
-+ "2: " REFCOUNT_TRAP_INSN "\n"\
-+ "3:\n"
-+
-+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
-+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
- { \
- long long result; \
- unsigned long tmp; \
- \
- prefetchw(&v->counter); \
-- __asm__ __volatile__("@ atomic64_" #op "\n" \
-+ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
- "1: ldrexd %0, %H0, [%3]\n" \
- " " #op1 " %Q0, %Q0, %Q4\n" \
- " " #op2 " %R0, %R0, %R4\n" \
-+ post_op \
- " strexd %1, %0, %H0, [%3]\n" \
- " teq %1, #0\n" \
--" bne 1b" \
-+" bne 1b\n" \
-+ extable \
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
- : "r" (&v->counter), "r" (i) \
- : "cc"); \
- } \
-
--#define ATOMIC64_OP_RETURN(op, op1, op2) \
--static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
-+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2, , ) \
-+ __ATOMIC64_OP(op, , op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
-+
-+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
-+static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
- { \
- long long result; \
- unsigned long tmp; \
-@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
- smp_mb(); \
- prefetchw(&v->counter); \
- \
-- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
-+ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
- "1: ldrexd %0, %H0, [%3]\n" \
- " " #op1 " %Q0, %Q0, %Q4\n" \
- " " #op2 " %R0, %R0, %R4\n" \
-+ post_op \
- " strexd %1, %0, %H0, [%3]\n" \
- " teq %1, #0\n" \
--" bne 1b" \
-+" bne 1b\n" \
-+ extable \
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
- : "r" (&v->counter), "r" (i) \
- : "cc"); \
-@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
- return result; \
- }
-
-+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2, , ) \
-+ __ATOMIC64_OP_RETURN(op, , op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
-+
- #define ATOMIC64_OPS(op, op1, op2) \
- ATOMIC64_OP(op, op1, op2) \
- ATOMIC64_OP_RETURN(op, op1, op2)
-@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)
-
- #undef ATOMIC64_OPS
- #undef ATOMIC64_OP_RETURN
-+#undef __ATOMIC64_OP_RETURN
- #undef ATOMIC64_OP
-+#undef __ATOMIC64_OP
-+#undef __OVERFLOW_EXTABLE
-+#undef __OVERFLOW_POST_RETURN
-+#undef __OVERFLOW_POST
-
- static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
- long long new)
-@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
- return oldval;
- }
-
-+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
-+ long long new)
-+{
-+ long long oldval;
-+ unsigned long res;
-+
-+ smp_mb();
-+
-+ do {
-+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
-+ "ldrexd %1, %H1, [%3]\n"
-+ "mov %0, #0\n"
-+ "teq %1, %4\n"
-+ "teqeq %H1, %H4\n"
-+ "strexdeq %0, %5, %H5, [%3]"
-+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
-+ : "r" (&ptr->counter), "r" (old), "r" (new)
-+ : "cc");
-+ } while (res);
-+
-+ smp_mb();
-+
-+ return oldval;
-+}
-+
- static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
- {
- long long result;
-@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
- static inline long long atomic64_dec_if_positive(atomic64_t *v)
- {
- long long result;
-- unsigned long tmp;
-+ u64 tmp;
-
- smp_mb();
- prefetchw(&v->counter);
-
- __asm__ __volatile__("@ atomic64_dec_if_positive\n"
--"1: ldrexd %0, %H0, [%3]\n"
--" subs %Q0, %Q0, #1\n"
--" sbc %R0, %R0, #0\n"
-+"1: ldrexd %1, %H1, [%3]\n"
-+" subs %Q0, %Q1, #1\n"
-+" sbcs %R0, %R1, #0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" bvc 3f\n"
-+" mov %Q0, %Q1\n"
-+" mov %R0, %R1\n"
-+"2: " REFCOUNT_TRAP_INSN "\n"
-+"3:\n"
-+#endif
-+
- " teq %R0, #0\n"
--" bmi 2f\n"
-+" bmi 4f\n"
- " strexd %1, %0, %H0, [%3]\n"
- " teq %1, #0\n"
- " bne 1b\n"
--"2:"
-+"4:\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ _ASM_EXTABLE(2b, 4b)
-+#endif
-+
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter)
- : "cc");
-@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
- " teq %0, %5\n"
- " teqeq %H0, %H5\n"
- " moveq %1, #0\n"
--" beq 2f\n"
-+" beq 4f\n"
- " adds %Q0, %Q0, %Q6\n"
--" adc %R0, %R0, %R6\n"
-+" adcs %R0, %R0, %R6\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" bvc 3f\n"
-+"2: " REFCOUNT_TRAP_INSN "\n"
-+"3:\n"
-+#endif
-+
- " strexd %2, %0, %H0, [%4]\n"
- " teq %2, #0\n"
- " bne 1b\n"
--"2:"
-+"4:\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ _ASM_EXTABLE(2b, 4b)
-+#endif
-+
- : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "r" (u), "r" (a)
- : "cc");
-@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
-
- #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
- #define atomic64_inc(v) atomic64_add(1LL, (v))
-+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
- #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
-+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
- #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
- #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
- #define atomic64_dec(v) atomic64_sub(1LL, (v))
-+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
- #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
- #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
- #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
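This whole-file rewrite is the ARM side of PAX_REFCOUNT: every signed atomic op gains an overflow check (the adds/bvc/REFCOUNT_TRAP_INSN sequences above), because a wrapped reference count is what turns a refcount leak into a use-after-free, and every checked type gets an *_unchecked twin for counters such as statistics that are allowed to wrap. A portable sketch of the split, ignoring atomicity, which the real code preserves inside the ldrex/strex loop:

    typedef struct { int counter; } atomic_t;            /* overflow traps */
    typedef struct { int counter; } atomic_unchecked_t;  /* may wrap freely */

    static void atomic_add_sketch(int i, atomic_t *v)
    {
        int result;

        if (__builtin_add_overflow(v->counter, i, &result))
            __builtin_trap();   /* analogue of the "bkpt" trap above */
        v->counter = result;
    }

    static void atomic_add_unchecked_sketch(int i, atomic_unchecked_t *v)
    {
        v->counter += i;        /* wrap-around is intentional here */
    }
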
-diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
-index d2f81e6..3c4dba5 100644
---- a/arch/arm/include/asm/barrier.h
-+++ b/arch/arm/include/asm/barrier.h
-@@ -67,7 +67,7 @@
- do { \
- compiletime_assert_atomic_type(*p); \
- smp_mb(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
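The barrier.h change is a knock-on effect of the constification work: in this tree ACCESS_ONCE() is, as far as I can tell, the read-only (const-qualified) flavour, so the store inside smp_store_release() has to go through ACCESS_ONCE_RW(). A sketch of the pair; the exact PaX definitions may differ:

    /* Read flavour: the added const makes an accidental write through it
     * a compile error. Write flavour: same volatile access, minus const. */
    #define ACCESS_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x)  (*(volatile __typeof__(x) *)&(x))
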
-diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
-index 75fe66b..ba3dee4 100644
---- a/arch/arm/include/asm/cache.h
-+++ b/arch/arm/include/asm/cache.h
-@@ -4,8 +4,10 @@
- #ifndef __ASMARM_CACHE_H
- #define __ASMARM_CACHE_H
-
-+#include <linux/const.h>
-+
- #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- /*
- * Memory returned by kmalloc() may be used for DMA, so we must make
-@@ -24,5 +26,6 @@
- #endif
-
- #define __read_mostly __attribute__((__section__(".data..read_mostly")))
-+#define __read_only __attribute__ ((__section__(".data..read_only")))
-
- #endif
-diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
-index 2d46862..a35415b 100644
---- a/arch/arm/include/asm/cacheflush.h
-+++ b/arch/arm/include/asm/cacheflush.h
-@@ -116,7 +116,7 @@ struct cpu_cache_fns {
- void (*dma_unmap_area)(const void *, size_t, int);
-
- void (*dma_flush_range)(const void *, const void *);
--};
-+} __no_const;
-
- /*
- * Select the calling method
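The __no_const annotation above (and __do_const, used a few hunks below) belongs to the constify plugin wired up in the Makefile changes earlier: structures consisting only of function pointers are forced into read-only memory, because a writable ops table is a classic kernel-exploitation target, and cpu_cache_fns is opted out since it is filled in at boot. The idea in miniature:

    struct ops {
        void (*fn)(void);
    };

    static void real_fn(void) { }

    /* 'const' places the table in .rodata, so an arbitrary-write
     * primitive cannot redirect fn through it. The plugin applies this
     * automatically to all-function-pointer structs; __no_const marks
     * the exceptions that legitimately need runtime writes. */
    static const struct ops table = { .fn = real_fn };
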
-diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
-index 5233151..87a71fa 100644
---- a/arch/arm/include/asm/checksum.h
-+++ b/arch/arm/include/asm/checksum.h
-@@ -37,7 +37,19 @@ __wsum
- csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
-
- __wsum
--csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
-+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
-+
-+static inline __wsum
-+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
-+{
-+ __wsum ret;
-+ pax_open_userland();
-+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
-+ pax_close_userland();
-+ return ret;
-+}
-+
-+
-
- /*
- * Fold a partial checksum without adding pseudo headers
-diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
-index abb2c37..96db950 100644
---- a/arch/arm/include/asm/cmpxchg.h
-+++ b/arch/arm/include/asm/cmpxchg.h
-@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
-
- #define xchg(ptr,x) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-+#define xchg_unchecked(ptr,x) \
-+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
- #include <asm-generic/cmpxchg-local.h>
-
-diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
-index 6ddbe44..b5e38b1a 100644
---- a/arch/arm/include/asm/domain.h
-+++ b/arch/arm/include/asm/domain.h
-@@ -48,18 +48,37 @@
- * Domain types
- */
- #define DOMAIN_NOACCESS 0
--#define DOMAIN_CLIENT 1
- #ifdef CONFIG_CPU_USE_DOMAINS
-+#define DOMAIN_USERCLIENT 1
-+#define DOMAIN_KERNELCLIENT 1
- #define DOMAIN_MANAGER 3
-+#define DOMAIN_VECTORS DOMAIN_USER
- #else
-+
-+#ifdef CONFIG_PAX_KERNEXEC
- #define DOMAIN_MANAGER 1
-+#define DOMAIN_KERNEXEC 3
-+#else
-+#define DOMAIN_MANAGER 1
-+#endif
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+#define DOMAIN_USERCLIENT 0
-+#define DOMAIN_UDEREF 1
-+#define DOMAIN_VECTORS DOMAIN_KERNEL
-+#else
-+#define DOMAIN_USERCLIENT 1
-+#define DOMAIN_VECTORS DOMAIN_USER
-+#endif
-+#define DOMAIN_KERNELCLIENT 1
-+
- #endif
-
- #define domain_val(dom,type) ((type) << (2*(dom)))
-
- #ifndef __ASSEMBLY__
-
--#ifdef CONFIG_CPU_USE_DOMAINS
-+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
- static inline void set_domain(unsigned val)
- {
- asm volatile(
-@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
- isb();
- }
-
--#define modify_domain(dom,type) \
-- do { \
-- struct thread_info *thread = current_thread_info(); \
-- unsigned int domain = thread->cpu_domain; \
-- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
-- thread->cpu_domain = domain | domain_val(dom, type); \
-- set_domain(thread->cpu_domain); \
-- } while (0)
--
-+extern void modify_domain(unsigned int dom, unsigned int type);
- #else
- static inline void set_domain(unsigned val) { }
- static inline void modify_domain(unsigned dom, unsigned type) { }
-diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
-index afb9caf..9a0bac0 100644
---- a/arch/arm/include/asm/elf.h
-+++ b/arch/arm/include/asm/elf.h
-@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
--#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
-+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
-+
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
-+
-+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
-+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
-+#endif
-
- /* When the program starts, a1 contains a pointer to a function to be
- registered with atexit, as per the SVR4 ABI. A value of 0 means we
-@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
- extern void elf_set_personality(const struct elf32_hdr *);
- #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
-
--struct mm_struct;
--extern unsigned long arch_randomize_brk(struct mm_struct *mm);
--#define arch_randomize_brk arch_randomize_brk
--
- #ifdef CONFIG_MMU
- #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
- struct linux_binprm;
-diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
-index de53547..52b9a28 100644
---- a/arch/arm/include/asm/fncpy.h
-+++ b/arch/arm/include/asm/fncpy.h
-@@ -81,7 +81,9 @@
- BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
- (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
- \
-+ pax_open_kernel(); \
- memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
-+ pax_close_kernel(); \
- flush_icache_range((unsigned long)(dest_buf), \
- (unsigned long)(dest_buf) + (size)); \
- \
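fncpy() copies a function body into a buffer that, under KERNEXEC, lives in read-only memory, hence the pax_open_kernel()/pax_close_kernel() bracket added around the memcpy above. A rough userspace analogy using mprotect(2), with simplified page-size handling:

    #include <stdint.h>
    #include <string.h>
    #include <sys/mman.h>

    /* Make the destination writable only for the duration of the copy,
     * then seal it again as read+execute. */
    static int patch_bytes(void *dst, const void *src, size_t n, size_t pagesz)
    {
        uint8_t *page = (uint8_t *)((uintptr_t)dst & ~(uintptr_t)(pagesz - 1));
        size_t span = (size_t)((uint8_t *)dst - page) + n;

        if (mprotect(page, span, PROT_READ | PROT_WRITE) != 0)
            return -1;
        memcpy(dst, src, n);
        return mprotect(page, span, PROT_READ | PROT_EXEC);
    }
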
-diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
-index 53e69da..3fdc896 100644
---- a/arch/arm/include/asm/futex.h
-+++ b/arch/arm/include/asm/futex.h
-@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
-+ pax_open_userland();
-+
- smp_mb();
- /* Prefetching cannot fault */
- prefetchw(uaddr);
-@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- : "cc", "memory");
- smp_mb();
-
-+ pax_close_userland();
-+
- *uval = val;
- return ret;
- }
-@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
-+ pax_open_userland();
-+
- __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
- "1: " TUSER(ldr) " %1, [%4]\n"
- " teq %1, %2\n"
-@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
- : "cc", "memory");
-
-+ pax_close_userland();
-+
- *uval = val;
- return ret;
- }
-@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
- return -EFAULT;
-
- pagefault_disable(); /* implies preempt_disable() */
-+ pax_open_userland();
-
- switch (op) {
- case FUTEX_OP_SET:
-@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
- ret = -ENOSYS;
- }
-
-+ pax_close_userland();
- pagefault_enable(); /* subsumes preempt_enable() */
-
- if (!ret) {
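Every user access in this header (and in the checksum and uaccess helpers elsewhere in the patch) gets the same pax_open_userland()/pax_close_userland() bracket: under UDEREF, userland addresses fault when touched from kernel context except inside that window, so a stray or attacker-supplied user pointer cannot be dereferenced by accident. The shape of the bracket, with stubbed internals; the helper name is hypothetical:

    #include <string.h>

    static inline void pax_open_userland(void)  { /* grant user-VA access, e.g. via the ARM DACR */ }
    static inline void pax_close_userland(void) { /* revoke it again */ }

    /* Stand-in for the real exception-table-protected copy. */
    static int copy_from_user_stub(void *dst, const void *src, unsigned long n)
    {
        memcpy(dst, src, n);
        return 0;
    }

    static int fetch_user_word(unsigned int *dst, const unsigned int *uaddr)
    {
        int err;

        pax_open_userland();
        err = copy_from_user_stub(dst, uaddr, sizeof(*dst));
        pax_close_userland();
        return err;
    }
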
-diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
-index 83eb2f7..ed77159 100644
---- a/arch/arm/include/asm/kmap_types.h
-+++ b/arch/arm/include/asm/kmap_types.h
-@@ -4,6 +4,6 @@
- /*
- * This is the "bare minimum". AIO seems to require this.
- */
--#define KM_TYPE_NR 16
-+#define KM_TYPE_NR 17
-
- #endif
-diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
-index 9e614a1..3302cca 100644
---- a/arch/arm/include/asm/mach/dma.h
-+++ b/arch/arm/include/asm/mach/dma.h
-@@ -22,7 +22,7 @@ struct dma_ops {
- int (*residue)(unsigned int, dma_t *); /* optional */
- int (*setspeed)(unsigned int, dma_t *, int); /* optional */
- const char *type;
--};
-+} __do_const;
-
- struct dma_struct {
- void *addr; /* single DMA address */
-diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
-index f98c7f3..e5c626d 100644
---- a/arch/arm/include/asm/mach/map.h
-+++ b/arch/arm/include/asm/mach/map.h
-@@ -23,17 +23,19 @@ struct map_desc {
-
- /* types 0-3 are defined in asm/io.h */
- enum {
-- MT_UNCACHED = 4,
-- MT_CACHECLEAN,
-- MT_MINICLEAN,
-+ MT_UNCACHED_RW = 4,
-+ MT_CACHECLEAN_RO,
-+ MT_MINICLEAN_RO,
- MT_LOW_VECTORS,
- MT_HIGH_VECTORS,
-- MT_MEMORY_RWX,
-+ __MT_MEMORY_RWX,
- MT_MEMORY_RW,
-- MT_ROM,
-- MT_MEMORY_RWX_NONCACHED,
-+ MT_MEMORY_RX,
-+ MT_ROM_RX,
-+ MT_MEMORY_RW_NONCACHED,
-+ MT_MEMORY_RX_NONCACHED,
- MT_MEMORY_RW_DTCM,
-- MT_MEMORY_RWX_ITCM,
-+ MT_MEMORY_RX_ITCM,
- MT_MEMORY_RW_SO,
- MT_MEMORY_DMA_READY,
- };
-diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
-index 891a56b..48f337e 100644
---- a/arch/arm/include/asm/outercache.h
-+++ b/arch/arm/include/asm/outercache.h
-@@ -36,7 +36,7 @@ struct outer_cache_fns {
-
- /* This is an ARM L2C thing */
- void (*write_sec)(unsigned long, unsigned);
--};
-+} __no_const;
-
- extern struct outer_cache_fns outer_cache;
-
-diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
-index 4355f0e..cd9168e 100644
---- a/arch/arm/include/asm/page.h
-+++ b/arch/arm/include/asm/page.h
-@@ -23,6 +23,7 @@
-
- #else
-
-+#include <linux/compiler.h>
- #include <asm/glue.h>
-
- /*
-@@ -114,7 +115,7 @@ struct cpu_user_fns {
- void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
- void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
- unsigned long vaddr, struct vm_area_struct *vma);
--};
-+} __no_const;
-
- #ifdef MULTI_USER
- extern struct cpu_user_fns cpu_user;
-diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
-index 19cfab5..3f5c7e9 100644
---- a/arch/arm/include/asm/pgalloc.h
-+++ b/arch/arm/include/asm/pgalloc.h
-@@ -17,6 +17,7 @@
- #include <asm/processor.h>
- #include <asm/cacheflush.h>
- #include <asm/tlbflush.h>
-+#include <asm/system_info.h>
-
- #define check_pgt_cache() do { } while (0)
-
-@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
- set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
- }
-
-+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
-+{
-+ pud_populate(mm, pud, pmd);
-+}
-+
- #else /* !CONFIG_ARM_LPAE */
-
- /*
-@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
- #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
- #define pmd_free(mm, pmd) do { } while (0)
- #define pud_populate(mm,pmd,pte) BUG()
-+#define pud_populate_kernel(mm,pmd,pte) BUG()
-
- #endif /* CONFIG_ARM_LPAE */
-
-@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
- __free_page(pte);
- }
-
-+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
-+{
-+#ifdef CONFIG_ARM_LPAE
-+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
-+#else
-+ if (addr & SECTION_SIZE)
-+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
-+ else
-+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
-+#endif
-+ flush_pmd_entry(pmdp);
-+}
-+
- static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
- pmdval_t prot)
- {
-diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
-index 5e68278..1869bae 100644
---- a/arch/arm/include/asm/pgtable-2level-hwdef.h
-+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
-@@ -27,7 +27,7 @@
- /*
- * - section
- */
--#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
-+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
- #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
- #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
- #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
-@@ -39,6 +39,7 @@
- #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
- #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
- #define PMD_SECT_AF (_AT(pmdval_t, 0))
-+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
-
- #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
- #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
-@@ -68,6 +69,7 @@
- * - extended small page/tiny page
- */
- #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
-+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
- #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
- #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
- #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
-diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
-index f027941..f36ce30 100644
---- a/arch/arm/include/asm/pgtable-2level.h
-+++ b/arch/arm/include/asm/pgtable-2level.h
-@@ -126,6 +126,9 @@
- #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
- #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
-
-+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
-+#define L_PTE_PXN (_AT(pteval_t, 0))
-+
- /*
- * These are the memory types, defined to be compatible with
- * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
-diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
-index a31ecdad..95e98d4 100644
---- a/arch/arm/include/asm/pgtable-3level.h
-+++ b/arch/arm/include/asm/pgtable-3level.h
-@@ -81,6 +81,7 @@
- #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
- #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
- #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
-+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
- #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
- #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
- #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
-@@ -92,10 +93,12 @@
- #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
- #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
- #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
-+#define PMD_SECT_RDONLY PMD_SECT_AP2
-
- /*
- * To be used in assembly code with the upper page attributes.
- */
-+#define L_PTE_PXN_HIGH (1 << (53 - 32))
- #define L_PTE_XN_HIGH (1 << (54 - 32))
- #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
-
-diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
-index d5cac54..906ea3e 100644
---- a/arch/arm/include/asm/pgtable.h
-+++ b/arch/arm/include/asm/pgtable.h
-@@ -33,6 +33,9 @@
- #include <asm/pgtable-2level.h>
- #endif
-
-+#define ktla_ktva(addr) (addr)
-+#define ktva_ktla(addr) (addr)
-+
- /*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
-@@ -48,6 +51,9 @@
- #define LIBRARY_TEXT_START 0x0c000000
-
- #ifndef __ASSEMBLY__
-+extern pteval_t __supported_pte_mask;
-+extern pmdval_t __supported_pmd_mask;
-+
- extern void __pte_error(const char *file, int line, pte_t);
- extern void __pmd_error(const char *file, int line, pmd_t);
- extern void __pgd_error(const char *file, int line, pgd_t);
-@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
- #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
- #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
-
-+#define __HAVE_ARCH_PAX_OPEN_KERNEL
-+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
-+
-+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+#include <asm/domain.h>
-+#include <linux/thread_info.h>
-+#include <linux/preempt.h>
-+
-+static inline int test_domain(int domain, int domaintype)
-+{
-+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
-+}
-+#endif
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+static inline unsigned long pax_open_kernel(void) {
-+#ifdef CONFIG_ARM_LPAE
-+ /* TODO */
-+#else
-+ preempt_disable();
-+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
-+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
-+#endif
-+ return 0;
-+}
-+
-+static inline unsigned long pax_close_kernel(void) {
-+#ifdef CONFIG_ARM_LPAE
-+ /* TODO */
-+#else
-+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
-+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
-+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
-+ preempt_enable_no_resched();
-+#endif
-+ return 0;
-+}
-+#else
-+static inline unsigned long pax_open_kernel(void) { return 0; }
-+static inline unsigned long pax_close_kernel(void) { return 0; }
-+#endif
-+
- /*
- * This is the lowest virtual address we can permit any user space
- * mapping to be mapped at. This is particularly important for
-@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
- /*
- * The pgprot_* and protection_map entries will be fixed up in runtime
- * to include the cacheable and bufferable bits based on memory policy,
-- * as well as any architecture dependent bits like global/ASID and SMP
-- * shared mapping bits.
-+ * as well as any architecture dependent bits like global/ASID, PXN,
-+ * and SMP shared mapping bits.
- */
- #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
-
-@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
- static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
- {
- const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
-- L_PTE_NONE | L_PTE_VALID;
-+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
- pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
- return pte;
- }
-diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
-index c25ef3e..735f14b 100644
---- a/arch/arm/include/asm/psci.h
-+++ b/arch/arm/include/asm/psci.h
-@@ -32,7 +32,7 @@ struct psci_operations {
- int (*affinity_info)(unsigned long target_affinity,
- unsigned long lowest_affinity_level);
- int (*migrate_info_type)(void);
--};
-+} __no_const;
-
- extern struct psci_operations psci_ops;
- extern struct smp_operations psci_smp_ops;
-diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
-index 18f5a55..5072a40 100644
---- a/arch/arm/include/asm/smp.h
-+++ b/arch/arm/include/asm/smp.h
-@@ -107,7 +107,7 @@ struct smp_operations {
- int (*cpu_disable)(unsigned int cpu);
- #endif
- #endif
--};
-+} __no_const;
-
- struct of_cpu_method {
- const char *method;
-diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
-index d890e41..3921292 100644
---- a/arch/arm/include/asm/thread_info.h
-+++ b/arch/arm/include/asm/thread_info.h
-@@ -78,9 +78,9 @@ struct thread_info {
- .flags = 0, \
- .preempt_count = INIT_PREEMPT_COUNT, \
- .addr_limit = KERNEL_DS, \
-- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
-- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
-- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
-+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
-+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
-+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
-@@ -159,7 +159,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
- #define TIF_SYSCALL_AUDIT 9
- #define TIF_SYSCALL_TRACEPOINT 10
- #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
--#define TIF_NOHZ 12 /* in adaptive nohz mode */
-+/* within 8 bits of TIF_SYSCALL_TRACE
-+ * to meet flexible second operand requirements
-+ */
-+#define TIF_GRSEC_SETXID 12
-+#define TIF_NOHZ 13 /* in adaptive nohz mode */
- #define TIF_USING_IWMMXT 17
- #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
- #define TIF_RESTORE_SIGMASK 20
-@@ -173,10 +177,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
- #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
- #define _TIF_SECCOMP (1 << TIF_SECCOMP)
- #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
-+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
-
- /* Checks for any syscall work in entry-common.S */
- #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
-+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
-
- /*
- * Change these and you break ASM code in entry-common.S
-diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
-index 5f833f7..76e6644 100644
---- a/arch/arm/include/asm/tls.h
-+++ b/arch/arm/include/asm/tls.h
-@@ -3,6 +3,7 @@
-
- #include <linux/compiler.h>
- #include <asm/thread_info.h>
-+#include <asm/pgtable.h>
-
- #ifdef __ASSEMBLY__
- #include <asm/asm-offsets.h>
-@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
- * at 0xffff0fe0 must be used instead. (see
- * entry-armv.S for details)
- */
-+ pax_open_kernel();
- *((unsigned int *)0xffff0ff0) = val;
-+ pax_close_kernel();
- #endif
- }
-
-diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
-index 4767eb9..bf00668 100644
---- a/arch/arm/include/asm/uaccess.h
-+++ b/arch/arm/include/asm/uaccess.h
-@@ -18,6 +18,7 @@
- #include <asm/domain.h>
- #include <asm/unified.h>
- #include <asm/compiler.h>
-+#include <asm/pgtable.h>
-
- #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- #include <asm-generic/uaccess-unaligned.h>
-@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
- static inline void set_fs(mm_segment_t fs)
- {
- current_thread_info()->addr_limit = fs;
-- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
-+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
- }
-
- #define segment_eq(a,b) ((a) == (b))
-
-+#define __HAVE_ARCH_PAX_OPEN_USERLAND
-+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
-+
-+static inline void pax_open_userland(void)
-+{
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ if (segment_eq(get_fs(), USER_DS)) {
-+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
-+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
-+ }
-+#endif
-+
-+}
-+
-+static inline void pax_close_userland(void)
-+{
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ if (segment_eq(get_fs(), USER_DS)) {
-+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
-+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
-+ }
-+#endif
-+
-+}
-+
- #define __addr_ok(addr) ({ \
- unsigned long flag; \
- __asm__("cmp %2, %0; movlo %0, #0" \
-@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
-
- #define get_user(x,p) \
- ({ \
-+ int __e; \
- might_fault(); \
-- __get_user_check(x,p); \
-+ pax_open_userland(); \
-+ __e = __get_user_check(x,p); \
-+ pax_close_userland(); \
-+ __e; \
- })
-
- extern int __put_user_1(void *, unsigned int);
-@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
-
- #define put_user(x,p) \
- ({ \
-+ int __e; \
- might_fault(); \
-- __put_user_check(x,p); \
-+ pax_open_userland(); \
-+ __e = __put_user_check(x,p); \
-+ pax_close_userland(); \
-+ __e; \
- })
-
- #else /* CONFIG_MMU */
-@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
-
- #endif /* CONFIG_MMU */
-
-+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
- #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
-
- #define user_addr_max() \
-@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
- #define __get_user(x,ptr) \
- ({ \
- long __gu_err = 0; \
-+ pax_open_userland(); \
- __get_user_err((x),(ptr),__gu_err); \
-+ pax_close_userland(); \
- __gu_err; \
- })
-
- #define __get_user_error(x,ptr,err) \
- ({ \
-+ pax_open_userland(); \
- __get_user_err((x),(ptr),err); \
-+ pax_close_userland(); \
- (void) 0; \
- })
-
-@@ -368,13 +409,17 @@ do { \
- #define __put_user(x,ptr) \
- ({ \
- long __pu_err = 0; \
-+ pax_open_userland(); \
- __put_user_err((x),(ptr),__pu_err); \
-+ pax_close_userland(); \
- __pu_err; \
- })
-
- #define __put_user_error(x,ptr,err) \
- ({ \
-+ pax_open_userland(); \
- __put_user_err((x),(ptr),err); \
-+ pax_close_userland(); \
- (void) 0; \
- })
-
-@@ -474,11 +519,44 @@ do { \
-
-
- #ifdef CONFIG_MMU
--extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
--extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
-+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
-+
-+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
-+{
-+ unsigned long ret;
-+
-+ check_object_size(to, n, false);
-+ pax_open_userland();
-+ ret = ___copy_from_user(to, from, n);
-+ pax_close_userland();
-+ return ret;
-+}
-+
-+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
-+{
-+ unsigned long ret;
-+
-+ check_object_size(from, n, true);
-+ pax_open_userland();
-+ ret = ___copy_to_user(to, from, n);
-+ pax_close_userland();
-+ return ret;
-+}
-+
- extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
--extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
- extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
-+
-+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
-+{
-+ unsigned long ret;
-+ pax_open_userland();
-+ ret = ___clear_user(addr, n);
-+ pax_close_userland();
-+ return ret;
-+}
-+
- #else
- #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
- #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
-@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
-
- static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
- {
-+ if ((long)n < 0)
-+ return n;
-+
- if (access_ok(VERIFY_READ, from, n))
- n = __copy_from_user(to, from, n);
- else /* security hole - plug it */
-@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
-
- static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
- {
-+ if ((long)n < 0)
-+ return n;
-+
- if (access_ok(VERIFY_WRITE, to, n))
- n = __copy_to_user(to, from, n);
- return n;
-diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
-index 5af0ed1..cea83883 100644
---- a/arch/arm/include/uapi/asm/ptrace.h
-+++ b/arch/arm/include/uapi/asm/ptrace.h
-@@ -92,7 +92,7 @@
- * ARMv7 groups of PSR bits
- */
- #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
--#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
-+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
- #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
- #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
-
-diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
-index a88671c..1cc895e 100644
---- a/arch/arm/kernel/armksyms.c
-+++ b/arch/arm/kernel/armksyms.c
-@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
-
- /* networking */
- EXPORT_SYMBOL(csum_partial);
--EXPORT_SYMBOL(csum_partial_copy_from_user);
-+EXPORT_SYMBOL(__csum_partial_copy_from_user);
- EXPORT_SYMBOL(csum_partial_copy_nocheck);
- EXPORT_SYMBOL(__csum_ipv6_magic);
-
-@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
- #ifdef CONFIG_MMU
- EXPORT_SYMBOL(copy_page);
-
--EXPORT_SYMBOL(__copy_from_user);
--EXPORT_SYMBOL(__copy_to_user);
--EXPORT_SYMBOL(__clear_user);
-+EXPORT_SYMBOL(___copy_from_user);
-+EXPORT_SYMBOL(___copy_to_user);
-+EXPORT_SYMBOL(___clear_user);
-
- EXPORT_SYMBOL(__get_user_1);
- EXPORT_SYMBOL(__get_user_2);
-diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
-index 2f5555d..d493c91 100644
---- a/arch/arm/kernel/entry-armv.S
-+++ b/arch/arm/kernel/entry-armv.S
-@@ -47,6 +47,87 @@
- 9997:
- .endm
-
-+ .macro pax_enter_kernel
-+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+ @ make aligned space for saved DACR
-+ sub sp, sp, #8
-+ @ save regs
-+ stmdb sp!, {r1, r2}
-+ @ read DACR from cpu_domain into r1
-+ mov r2, sp
-+ @ assume 8K pages, since we have to split the immediate in two
-+ bic r2, r2, #(0x1fc0)
-+ bic r2, r2, #(0x3f)
-+ ldr r1, [r2, #TI_CPU_DOMAIN]
-+ @ store old DACR on stack
-+ str r1, [sp, #8]
-+#ifdef CONFIG_PAX_KERNEXEC
-+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
-+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
-+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
-+#endif
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
-+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
-+#endif
-+ @ write r1 to current_thread_info()->cpu_domain
-+ str r1, [r2, #TI_CPU_DOMAIN]
-+ @ write r1 to DACR
-+ mcr p15, 0, r1, c3, c0, 0
-+ @ instruction sync
-+ instr_sync
-+ @ restore regs
-+ ldmia sp!, {r1, r2}
-+#endif
-+ .endm
-+
-+ .macro pax_open_userland
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ @ save regs
-+ stmdb sp!, {r0, r1}
-+ @ read DACR from cpu_domain into r1
-+ mov r0, sp
-+ @ assume 8K pages, since we have to split the immediate in two
-+ bic r0, r0, #(0x1fc0)
-+ bic r0, r0, #(0x3f)
-+ ldr r1, [r0, #TI_CPU_DOMAIN]
-+ @ set current DOMAIN_USER to DOMAIN_CLIENT
-+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
-+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
-+ @ write r1 to current_thread_info()->cpu_domain
-+ str r1, [r0, #TI_CPU_DOMAIN]
-+ @ write r1 to DACR
-+ mcr p15, 0, r1, c3, c0, 0
-+ @ instruction sync
-+ instr_sync
-+ @ restore regs
-+ ldmia sp!, {r0, r1}
-+#endif
-+ .endm
-+
-+ .macro pax_close_userland
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ @ save regs
-+ stmdb sp!, {r0, r1}
-+ @ read DACR from cpu_domain into r1
-+ mov r0, sp
-+ @ assume 8K pages, since we have to split the immediate in two
-+ bic r0, r0, #(0x1fc0)
-+ bic r0, r0, #(0x3f)
-+ ldr r1, [r0, #TI_CPU_DOMAIN]
-+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
-+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
-+ @ write r1 to current_thread_info()->cpu_domain
-+ str r1, [r0, #TI_CPU_DOMAIN]
-+ @ write r1 to DACR
-+ mcr p15, 0, r1, c3, c0, 0
-+ @ instruction sync
-+ instr_sync
-+ @ restore regs
-+ ldmia sp!, {r0, r1}
-+#endif
-+ .endm
-+
- .macro pabt_helper
- @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
- #ifdef MULTI_PABORT
-@@ -89,11 +170,15 @@
- * Invalid mode handlers
- */
- .macro inv_entry, reason
-+
-+ pax_enter_kernel
-+
- sub sp, sp, #S_FRAME_SIZE
- ARM( stmib sp, {r1 - lr} )
- THUMB( stmia sp, {r0 - r12} )
- THUMB( str sp, [sp, #S_SP] )
- THUMB( str lr, [sp, #S_LR] )
-+
- mov r1, #\reason
- .endm
-
-@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
- .macro svc_entry, stack_hole=0, trace=1
- UNWIND(.fnstart )
- UNWIND(.save {r0 - pc} )
-+
-+ pax_enter_kernel
-+
- sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
-+
- #ifdef CONFIG_THUMB2_KERNEL
- SPFIX( str r0, [sp] ) @ temporarily saved
- SPFIX( mov r0, sp )
-@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
- ldmia r0, {r3 - r5}
- add r7, sp, #S_SP - 4 @ here for interlock avoidance
- mov r6, #-1 @ "" "" "" ""
-+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+ @ offset sp by 8 as done in pax_enter_kernel
-+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
-+#else
- add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
-+#endif
- SPFIX( addeq r2, r2, #4 )
- str r3, [sp, #-4]! @ save the "real" r0 copied
- @ from the exception stack
-@@ -368,6 +462,9 @@ ENDPROC(__fiq_abt)
- .macro usr_entry, trace=1
- UNWIND(.fnstart )
- UNWIND(.cantunwind ) @ don't unwind the user space
-+
-+ pax_enter_kernel_user
-+
- sub sp, sp, #S_FRAME_SIZE
- ARM( stmib sp, {r1 - r12} )
- THUMB( stmia sp, {r0 - r12} )
-@@ -478,7 +575,9 @@ __und_usr:
- tst r3, #PSR_T_BIT @ Thumb mode?
- bne __und_usr_thumb
- sub r4, r2, #4 @ ARM instr at LR - 4
-+ pax_open_userland
- 1: ldrt r0, [r4]
-+ pax_close_userland
- ARM_BE8(rev r0, r0) @ little endian instruction
-
- @ r0 = 32-bit ARM instruction which caused the exception
-@@ -512,11 +611,15 @@ __und_usr_thumb:
- */
- .arch armv6t2
- #endif
-+ pax_open_userland
- 2: ldrht r5, [r4]
-+ pax_close_userland
- ARM_BE8(rev16 r5, r5) @ little endian instruction
- cmp r5, #0xe800 @ 32bit instruction if xx != 0
- blo __und_usr_fault_16 @ 16bit undefined instruction
-+ pax_open_userland
- 3: ldrht r0, [r2]
-+ pax_close_userland
- ARM_BE8(rev16 r0, r0) @ little endian instruction
- add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
- str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
-@@ -546,7 +649,8 @@ ENDPROC(__und_usr)
- */
- .pushsection .fixup, "ax"
- .align 2
--4: str r4, [sp, #S_PC] @ retry current instruction
-+4: pax_close_userland
-+ str r4, [sp, #S_PC] @ retry current instruction
- ret r9
- .popsection
- .pushsection __ex_table,"a"
-@@ -766,7 +870,7 @@ ENTRY(__switch_to)
- THUMB( str lr, [ip], #4 )
- ldr r4, [r2, #TI_TP_VALUE]
- ldr r5, [r2, #TI_TP_VALUE + 4]
--#ifdef CONFIG_CPU_USE_DOMAINS
-+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
- ldr r6, [r2, #TI_CPU_DOMAIN]
- #endif
- switch_tls r1, r4, r5, r3, r7
-@@ -775,7 +879,7 @@ ENTRY(__switch_to)
- ldr r8, =__stack_chk_guard
- ldr r7, [r7, #TSK_STACK_CANARY]
- #endif
--#ifdef CONFIG_CPU_USE_DOMAINS
-+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
- mcr p15, 0, r6, c3, c0, 0 @ Set domain register
- #endif
- mov r5, r0
-diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
-index f8ccc21..83d192f 100644
---- a/arch/arm/kernel/entry-common.S
-+++ b/arch/arm/kernel/entry-common.S
-@@ -11,18 +11,46 @@
- #include <asm/assembler.h>
- #include <asm/unistd.h>
- #include <asm/ftrace.h>
-+#include <asm/domain.h>
- #include <asm/unwind.h>
-
-+#include "entry-header.S"
-+
- #ifdef CONFIG_NEED_RET_TO_USER
- #include <mach/entry-macro.S>
- #else
- .macro arch_ret_to_user, tmp1, tmp2
-+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+ @ save regs
-+ stmdb sp!, {r1, r2}
-+ @ read DACR from cpu_domain into r1
-+ mov r2, sp
-+ @ assume 8K pages, since we have to split the immediate in two
-+ bic r2, r2, #(0x1fc0)
-+ bic r2, r2, #(0x3f)
-+ ldr r1, [r2, #TI_CPU_DOMAIN]
-+#ifdef CONFIG_PAX_KERNEXEC
-+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
-+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
-+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
-+#endif
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ @ set current DOMAIN_USER to DOMAIN_UDEREF
-+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
-+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
-+#endif
-+ @ write r1 to current_thread_info()->cpu_domain
-+ str r1, [r2, #TI_CPU_DOMAIN]
-+ @ write r1 to DACR
-+ mcr p15, 0, r1, c3, c0, 0
-+ @ instruction sync
-+ instr_sync
-+ @ restore regs
-+ ldmia sp!, {r1, r2}
-+#endif
- .endm
- #endif
-
--#include "entry-header.S"
--
--
- .align 5
- /*
- * This is the fast syscall return path. We do as little as
-@@ -171,6 +199,12 @@ ENTRY(vector_swi)
- USER( ldr scno, [lr, #-4] ) @ get SWI instruction
- #endif
-
-+ /*
-+ * do this here to avoid a performance hit of wrapping the code above
-+ * that directly dereferences userland to parse the SWI instruction
-+ */
-+ pax_enter_kernel_user
-+
- adr tbl, sys_call_table @ load syscall table pointer
-
- #if defined(CONFIG_OABI_COMPAT)
-diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
-index 1a0045a..9b4f34d 100644
---- a/arch/arm/kernel/entry-header.S
-+++ b/arch/arm/kernel/entry-header.S
-@@ -196,6 +196,60 @@
- msr cpsr_c, \rtemp @ switch back to the SVC mode
- .endm
-
-+ .macro pax_enter_kernel_user
-+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+ @ save regs
-+ stmdb sp!, {r0, r1}
-+ @ read DACR from cpu_domain into r1
-+ mov r0, sp
-+ @ assume 8K pages, since we have to split the immediate in two
-+ bic r0, r0, #(0x1fc0)
-+ bic r0, r0, #(0x3f)
-+ ldr r1, [r0, #TI_CPU_DOMAIN]
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
-+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
-+#endif
-+#ifdef CONFIG_PAX_KERNEXEC
-+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
-+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
-+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
-+#endif
-+ @ write r1 to current_thread_info()->cpu_domain
-+ str r1, [r0, #TI_CPU_DOMAIN]
-+ @ write r1 to DACR
-+ mcr p15, 0, r1, c3, c0, 0
-+ @ instruction sync
-+ instr_sync
-+ @ restore regs
-+ ldmia sp!, {r0, r1}
-+#endif
-+ .endm
-+
-+ .macro pax_exit_kernel
-+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+ @ save regs
-+ stmdb sp!, {r0, r1}
-+ @ read old DACR from stack into r1
-+ ldr r1, [sp, #(8 + S_SP)]
-+ sub r1, r1, #8
-+ ldr r1, [r1]
-+
-+ @ write r1 to current_thread_info()->cpu_domain
-+ mov r0, sp
-+ @ assume 8K pages, since we have to split the immediate in two
-+ bic r0, r0, #(0x1fc0)
-+ bic r0, r0, #(0x3f)
-+ str r1, [r0, #TI_CPU_DOMAIN]
-+ @ write r1 to DACR
-+ mcr p15, 0, r1, c3, c0, 0
-+ @ instruction sync
-+ instr_sync
-+ @ restore regs
-+ ldmia sp!, {r0, r1}
-+#endif
-+ .endm
-+
- #ifndef CONFIG_THUMB2_KERNEL
- .macro svc_exit, rpsr, irq = 0
- .if \irq != 0
-@@ -215,6 +269,9 @@
- blne trace_hardirqs_off
- #endif
- .endif
-+
-+ pax_exit_kernel
-+
- msr spsr_cxsf, \rpsr
- #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
- @ We must avoid clrex due to Cortex-A15 erratum #830321
-@@ -291,6 +348,9 @@
- blne trace_hardirqs_off
- #endif
- .endif
-+
-+ pax_exit_kernel
-+
- ldr lr, [sp, #S_SP] @ top of the stack
- ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
-
-diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
-index 059c3da..8e45cfc 100644
---- a/arch/arm/kernel/fiq.c
-+++ b/arch/arm/kernel/fiq.c
-@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
- void *base = vectors_page;
- unsigned offset = FIQ_OFFSET;
-
-+ pax_open_kernel();
- memcpy(base + offset, start, length);
-+ pax_close_kernel();
-+
- if (!cache_is_vipt_nonaliasing())
- flush_icache_range((unsigned long)base + offset, offset +
- length);
-diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
-index 664eee8..f470938 100644
---- a/arch/arm/kernel/head.S
-+++ b/arch/arm/kernel/head.S
-@@ -437,7 +437,7 @@ __enable_mmu:
- mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
-- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
-+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
- mcr p15, 0, r5, c3, c0, 0 @ load domain access register
- mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
- #endif
-diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
-index bea7db9..a210d10 100644
---- a/arch/arm/kernel/module.c
-+++ b/arch/arm/kernel/module.c
-@@ -38,12 +38,39 @@
- #endif
-
- #ifdef CONFIG_MMU
--void *module_alloc(unsigned long size)
-+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
- {
-+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
-+ return NULL;
- return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
-+ GFP_KERNEL, prot, NUMA_NO_NODE,
- __builtin_return_address(0));
- }
-+
-+void *module_alloc(unsigned long size)
-+{
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ return __module_alloc(size, PAGE_KERNEL);
-+#else
-+ return __module_alloc(size, PAGE_KERNEL_EXEC);
-+#endif
-+
-+}
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+void module_memfree_exec(void *module_region)
-+{
-+ module_memfree(module_region);
-+}
-+EXPORT_SYMBOL(module_memfree_exec);
-+
-+void *module_alloc_exec(unsigned long size)
-+{
-+ return __module_alloc(size, PAGE_KERNEL_EXEC);
-+}
-+EXPORT_SYMBOL(module_alloc_exec);
-+#endif
- #endif
-
- int
-diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
-index 5038960..4aa71d8 100644
---- a/arch/arm/kernel/patch.c
-+++ b/arch/arm/kernel/patch.c
-@@ -67,6 +67,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
- else
- __acquire(&patch_lock);
-
-+ pax_open_kernel();
- if (thumb2 && __opcode_is_thumb16(insn)) {
- *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
- size = sizeof(u16);
-@@ -98,6 +99,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
- *(u32 *)waddr = insn;
- size = sizeof(u32);
- }
-+ pax_close_kernel();
-
- if (waddr != addr) {
- flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
-diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index fdfa3a7..5d208b8 100644
---- a/arch/arm/kernel/process.c
-+++ b/arch/arm/kernel/process.c
-@@ -207,6 +207,7 @@ void machine_power_off(void)
-
- if (pm_power_off)
- pm_power_off();
-+ BUG();
- }
-
- /*
-@@ -220,7 +221,7 @@ void machine_power_off(void)
- * executing pre-reset code, and using RAM that the primary CPU's code wishes
- * to use. Implementing such co-ordination would be essentially impossible.
- */
--void machine_restart(char *cmd)
-+__noreturn void machine_restart(char *cmd)
- {
- local_irq_disable();
- smp_send_stop();
-@@ -246,8 +247,8 @@ void __show_regs(struct pt_regs *regs)
-
- show_regs_print_info(KERN_DEFAULT);
-
-- print_symbol("PC is at %s\n", instruction_pointer(regs));
-- print_symbol("LR is at %s\n", regs->ARM_lr);
-+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
-+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
- printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
- "sp : %08lx ip : %08lx fp : %08lx\n",
- regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
-@@ -424,12 +425,6 @@ unsigned long get_wchan(struct task_struct *p)
- return 0;
- }
-
--unsigned long arch_randomize_brk(struct mm_struct *mm)
--{
-- unsigned long range_end = mm->brk + 0x02000000;
-- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
--}
--
- #ifdef CONFIG_MMU
- #ifdef CONFIG_KUSER_HELPERS
- /*
-@@ -445,7 +440,7 @@ static struct vm_area_struct gate_vma = {
-
- static int __init gate_vma_init(void)
- {
-- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
-+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
- return 0;
- }
- arch_initcall(gate_vma_init);
-@@ -474,81 +469,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
- return is_gate_vma(vma) ? "[vectors]" : NULL;
- }
-
--/* If possible, provide a placement hint at a random offset from the
-- * stack for the signal page.
-- */
--static unsigned long sigpage_addr(const struct mm_struct *mm,
-- unsigned int npages)
--{
-- unsigned long offset;
-- unsigned long first;
-- unsigned long last;
-- unsigned long addr;
-- unsigned int slots;
--
-- first = PAGE_ALIGN(mm->start_stack);
--
-- last = TASK_SIZE - (npages << PAGE_SHIFT);
--
-- /* No room after stack? */
-- if (first > last)
-- return 0;
--
-- /* Just enough room? */
-- if (first == last)
-- return first;
--
-- slots = ((last - first) >> PAGE_SHIFT) + 1;
--
-- offset = get_random_int() % slots;
--
-- addr = first + (offset << PAGE_SHIFT);
--
-- return addr;
--}
--
--static struct page *signal_page;
--extern struct page *get_signal_page(void);
--
--static const struct vm_special_mapping sigpage_mapping = {
-- .name = "[sigpage]",
-- .pages = &signal_page,
--};
--
- int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
- {
- struct mm_struct *mm = current->mm;
-- struct vm_area_struct *vma;
-- unsigned long addr;
-- unsigned long hint;
-- int ret = 0;
--
-- if (!signal_page)
-- signal_page = get_signal_page();
-- if (!signal_page)
-- return -ENOMEM;
-
- down_write(&mm->mmap_sem);
-- hint = sigpage_addr(mm, 1);
-- addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
-- if (IS_ERR_VALUE(addr)) {
-- ret = addr;
-- goto up_fail;
-- }
--
-- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
-- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
-- &sigpage_mapping);
--
-- if (IS_ERR(vma)) {
-- ret = PTR_ERR(vma);
-- goto up_fail;
-- }
--
-- mm->context.sigpage = addr;
--
-- up_fail:
-+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
- up_write(&mm->mmap_sem);
-- return ret;
-+ return 0;
- }
- #endif
-diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
-index f73891b..cf3004e 100644
---- a/arch/arm/kernel/psci.c
-+++ b/arch/arm/kernel/psci.c
-@@ -28,7 +28,7 @@
- #include <asm/psci.h>
- #include <asm/system_misc.h>
-
--struct psci_operations psci_ops;
-+struct psci_operations psci_ops __read_only;
-
- static int (*invoke_psci_fn)(u32, u32, u32, u32);
- typedef int (*psci_initcall_t)(const struct device_node *);
-diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
-index ef9119f..31995a3 100644
---- a/arch/arm/kernel/ptrace.c
-+++ b/arch/arm/kernel/ptrace.c
-@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
- regs->ARM_ip = ip;
- }
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+extern void gr_delayed_cred_worker(void);
-+#endif
-+
- asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
- {
- current_thread_info()->syscall = scno;
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
-+ gr_delayed_cred_worker();
-+#endif
-+
- /* Do the secure computing check first; failures should be fast. */
- #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
- if (secure_computing() == -1)
-diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
-index e55408e..14d9998 100644
---- a/arch/arm/kernel/setup.c
-+++ b/arch/arm/kernel/setup.c
-@@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
- unsigned int elf_hwcap2 __read_mostly;
- EXPORT_SYMBOL(elf_hwcap2);
-
-+pteval_t __supported_pte_mask __read_only;
-+pmdval_t __supported_pmd_mask __read_only;
-
- #ifdef MULTI_CPU
--struct processor processor __read_mostly;
-+struct processor processor __read_only;
- #endif
- #ifdef MULTI_TLB
--struct cpu_tlb_fns cpu_tlb __read_mostly;
-+struct cpu_tlb_fns cpu_tlb __read_only;
- #endif
- #ifdef MULTI_USER
--struct cpu_user_fns cpu_user __read_mostly;
-+struct cpu_user_fns cpu_user __read_only;
- #endif
- #ifdef MULTI_CACHE
--struct cpu_cache_fns cpu_cache __read_mostly;
-+struct cpu_cache_fns cpu_cache __read_only;
- #endif
- #ifdef CONFIG_OUTER_CACHE
--struct outer_cache_fns outer_cache __read_mostly;
-+struct outer_cache_fns outer_cache __read_only;
- EXPORT_SYMBOL(outer_cache);
- #endif
-
-@@ -253,9 +255,13 @@ static int __get_cpu_architecture(void)
- asm("mrc p15, 0, %0, c0, c1, 4"
- : "=r" (mmfr0));
- if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
-- (mmfr0 & 0x000000f0) >= 0x00000030)
-+ (mmfr0 & 0x000000f0) >= 0x00000030) {
- cpu_arch = CPU_ARCH_ARMv7;
-- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
-+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
-+ __supported_pte_mask |= L_PTE_PXN;
-+ __supported_pmd_mask |= PMD_PXNTABLE;
-+ }
-+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
- (mmfr0 & 0x000000f0) == 0x00000020)
- cpu_arch = CPU_ARCH_ARMv6;
- else
-diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
-index 8aa6f1b..0899e08 100644
---- a/arch/arm/kernel/signal.c
-+++ b/arch/arm/kernel/signal.c
-@@ -24,8 +24,6 @@
-
- extern const unsigned long sigreturn_codes[7];
-
--static unsigned long signal_return_offset;
--
- #ifdef CONFIG_CRUNCH
- static int preserve_crunch_context(struct crunch_sigframe __user *frame)
- {
-@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
- * except when the MPU has protected the vectors
- * page from PL0
- */
-- retcode = mm->context.sigpage + signal_return_offset +
-- (idx << 2) + thumb;
-+ retcode = mm->context.sigpage + (idx << 2) + thumb;
- } else
- #endif
- {
-@@ -603,33 +600,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
- } while (thread_flags & _TIF_WORK_MASK);
- return 0;
- }
--
--struct page *get_signal_page(void)
--{
-- unsigned long ptr;
-- unsigned offset;
-- struct page *page;
-- void *addr;
--
-- page = alloc_pages(GFP_KERNEL, 0);
--
-- if (!page)
-- return NULL;
--
-- addr = page_address(page);
--
-- /* Give the signal return code some randomness */
-- offset = 0x200 + (get_random_int() & 0x7fc);
-- signal_return_offset = offset;
--
-- /*
-- * Copy signal return handlers into the vector page, and
-- * set sigreturn to be a pointer to these.
-- */
-- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
--
-- ptr = (unsigned long)addr + offset;
-- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
--
-- return page;
--}
-diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
-index 86ef244..c518451 100644
---- a/arch/arm/kernel/smp.c
-+++ b/arch/arm/kernel/smp.c
-@@ -76,7 +76,7 @@ enum ipi_msg_type {
-
- static DECLARE_COMPLETION(cpu_running);
-
--static struct smp_operations smp_ops;
-+static struct smp_operations smp_ops __read_only;
-
- void __init smp_set_ops(struct smp_operations *ops)
- {
-diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
-index 7a3be1d..b00c7de 100644
---- a/arch/arm/kernel/tcm.c
-+++ b/arch/arm/kernel/tcm.c
-@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
- .virtual = ITCM_OFFSET,
- .pfn = __phys_to_pfn(ITCM_OFFSET),
- .length = 0,
-- .type = MT_MEMORY_RWX_ITCM,
-+ .type = MT_MEMORY_RX_ITCM,
- }
- };
-
-@@ -267,7 +267,9 @@ no_dtcm:
- start = &__sitcm_text;
- end = &__eitcm_text;
- ram = &__itcm_start;
-+ pax_open_kernel();
- memcpy(start, ram, itcm_code_sz);
-+ pax_close_kernel();
- pr_debug("CPU ITCM: copied code from %p - %p\n",
- start, end);
- itcm_present = true;
-diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
-index 788e23f..6fa06a1 100644
---- a/arch/arm/kernel/traps.c
-+++ b/arch/arm/kernel/traps.c
-@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
- void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
- {
- #ifdef CONFIG_KALLSYMS
-- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
-+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
- #else
- printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
- #endif
-@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
- static int die_owner = -1;
- static unsigned int die_nest_count;
-
-+extern void gr_handle_kernel_exploit(void);
-+
- static unsigned long oops_begin(void)
- {
- int cpu;
-@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
- panic("Fatal exception in interrupt");
- if (panic_on_oops)
- panic("Fatal exception");
-+
-+ gr_handle_kernel_exploit();
-+
- if (signr)
- do_exit(signr);
- }
-@@ -880,7 +885,11 @@ void __init early_trap_init(void *vectors_base)
- kuser_init(vectors_base);
-
- flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
-- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
-+
-+#ifndef CONFIG_PAX_MEMORY_UDEREF
-+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
-+#endif
-+
- #else /* ifndef CONFIG_CPU_V7M */
- /*
- * on V7-M there is no need to copy the vector table to a dedicated
-diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
-index b31aa73..cc4b7a1 100644
---- a/arch/arm/kernel/vmlinux.lds.S
-+++ b/arch/arm/kernel/vmlinux.lds.S
-@@ -37,7 +37,7 @@
- #endif
-
- #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
-- defined(CONFIG_GENERIC_BUG)
-+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
- #define ARM_EXIT_KEEP(x) x
- #define ARM_EXIT_DISCARD(x)
- #else
-@@ -123,6 +123,8 @@ SECTIONS
- #ifdef CONFIG_DEBUG_RODATA
- . = ALIGN(1<<SECTION_SHIFT);
- #endif
-+ _etext = .; /* End of text section */
-+
- RO_DATA(PAGE_SIZE)
-
- . = ALIGN(4);
-@@ -153,8 +155,6 @@ SECTIONS
-
- NOTES
-
-- _etext = .; /* End of text and rodata section */
--
- #ifndef CONFIG_XIP_KERNEL
- # ifdef CONFIG_ARM_KERNMEM_PERMS
- . = ALIGN(1<<SECTION_SHIFT);
-diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
-index 0b0d58a..988cb45 100644
---- a/arch/arm/kvm/arm.c
-+++ b/arch/arm/kvm/arm.c
-@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
- static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
-
- /* The VMID used in the VTTBR */
--static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
-+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
- static u8 kvm_next_vmid;
- static DEFINE_SPINLOCK(kvm_vmid_lock);
-
-@@ -351,7 +351,7 @@ void force_vm_exit(const cpumask_t *mask)
- */
- static bool need_new_vmid_gen(struct kvm *kvm)
- {
-- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
-+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
- }
-
- /**
-@@ -384,7 +384,7 @@ static void update_vttbr(struct kvm *kvm)
-
- /* First user of a new VMID generation? */
- if (unlikely(kvm_next_vmid == 0)) {
-- atomic64_inc(&kvm_vmid_gen);
-+ atomic64_inc_unchecked(&kvm_vmid_gen);
- kvm_next_vmid = 1;
-
- /*
-@@ -401,7 +401,7 @@ static void update_vttbr(struct kvm *kvm)
- kvm_call_hyp(__kvm_flush_vm_context);
- }
-
-- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
-+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
- kvm->arch.vmid = kvm_next_vmid;
- kvm_next_vmid++;
-
-@@ -1038,7 +1038,7 @@ static void check_kvm_target_cpu(void *ret)
- /**
- * Initialize Hyp-mode and memory mappings on all CPUs.
- */
--int kvm_arch_init(void *opaque)
-+int kvm_arch_init(const void *opaque)
- {
- int err;
- int ret, cpu;
-diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
-index 14a0d98..7771a7d 100644
---- a/arch/arm/lib/clear_user.S
-+++ b/arch/arm/lib/clear_user.S
-@@ -12,14 +12,14 @@
-
- .text
-
--/* Prototype: int __clear_user(void *addr, size_t sz)
-+/* Prototype: int ___clear_user(void *addr, size_t sz)
- * Purpose : clear some user memory
- * Params : addr - user memory address to clear
- * : sz - number of bytes to clear
- * Returns : number of bytes NOT cleared
- */
- ENTRY(__clear_user_std)
--WEAK(__clear_user)
-+WEAK(___clear_user)
- stmfd sp!, {r1, lr}
- mov r2, #0
- cmp r1, #4
-@@ -44,7 +44,7 @@ WEAK(__clear_user)
- USER( strnebt r2, [r0])
- mov r0, #0
- ldmfd sp!, {r1, pc}
--ENDPROC(__clear_user)
-+ENDPROC(___clear_user)
- ENDPROC(__clear_user_std)
-
- .pushsection .fixup,"ax"
-diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
-index 7a235b9..73a0556 100644
---- a/arch/arm/lib/copy_from_user.S
-+++ b/arch/arm/lib/copy_from_user.S
-@@ -17,7 +17,7 @@
- /*
- * Prototype:
- *
-- * size_t __copy_from_user(void *to, const void *from, size_t n)
-+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
- *
- * Purpose:
- *
-@@ -89,11 +89,11 @@
-
- .text
-
--ENTRY(__copy_from_user)
-+ENTRY(___copy_from_user)
-
- #include "copy_template.S"
-
--ENDPROC(__copy_from_user)
-+ENDPROC(___copy_from_user)
-
- .pushsection .fixup,"ax"
- .align 0
-diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
-index 6ee2f67..d1cce76 100644
---- a/arch/arm/lib/copy_page.S
-+++ b/arch/arm/lib/copy_page.S
-@@ -10,6 +10,7 @@
- * ASM optimised string functions
- */
- #include <linux/linkage.h>
-+#include <linux/const.h>
- #include <asm/assembler.h>
- #include <asm/asm-offsets.h>
- #include <asm/cache.h>
-diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
-index a9d3db1..164b089 100644
---- a/arch/arm/lib/copy_to_user.S
-+++ b/arch/arm/lib/copy_to_user.S
-@@ -17,7 +17,7 @@
- /*
- * Prototype:
- *
-- * size_t __copy_to_user(void *to, const void *from, size_t n)
-+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
- *
- * Purpose:
- *
-@@ -93,11 +93,11 @@
- .text
-
- ENTRY(__copy_to_user_std)
--WEAK(__copy_to_user)
-+WEAK(___copy_to_user)
-
- #include "copy_template.S"
-
--ENDPROC(__copy_to_user)
-+ENDPROC(___copy_to_user)
- ENDPROC(__copy_to_user_std)
-
- .pushsection .fixup,"ax"
-diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
-index 7d08b43..f7ca7ea 100644
---- a/arch/arm/lib/csumpartialcopyuser.S
-+++ b/arch/arm/lib/csumpartialcopyuser.S
-@@ -57,8 +57,8 @@
- * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
- */
-
--#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
--#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
-+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
-+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
-
- #include "csumpartialcopygeneric.S"
-
-diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
-index 312d43e..21d2322 100644
---- a/arch/arm/lib/delay.c
-+++ b/arch/arm/lib/delay.c
-@@ -29,7 +29,7 @@
- /*
- * Default to the loop-based delay implementation.
- */
--struct arm_delay_ops arm_delay_ops = {
-+struct arm_delay_ops arm_delay_ops __read_only = {
- .delay = __loop_delay,
- .const_udelay = __loop_const_udelay,
- .udelay = __loop_udelay,
-diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
-index 3e58d71..029817c 100644
---- a/arch/arm/lib/uaccess_with_memcpy.c
-+++ b/arch/arm/lib/uaccess_with_memcpy.c
-@@ -136,7 +136,7 @@ out:
- }
-
- unsigned long
--__copy_to_user(void __user *to, const void *from, unsigned long n)
-+___copy_to_user(void __user *to, const void *from, unsigned long n)
- {
- /*
- * This test is stubbed out of the main function above to keep
-@@ -190,7 +190,7 @@ out:
- return n;
- }
-
--unsigned long __clear_user(void __user *addr, unsigned long n)
-+unsigned long ___clear_user(void __user *addr, unsigned long n)
- {
- /* See rationale for this in __copy_to_user() above. */
- if (n < 64)
-diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
-index ce25e85..3dd7850 100644
---- a/arch/arm/mach-at91/setup.c
-+++ b/arch/arm/mach-at91/setup.c
-@@ -57,7 +57,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
-
- desc->pfn = __phys_to_pfn(base);
- desc->length = length;
-- desc->type = MT_MEMORY_RWX_NONCACHED;
-+ desc->type = MT_MEMORY_RW_NONCACHED;
-
- pr_info("sram at 0x%lx of 0x%x mapped at 0x%lx\n",
- base, length, desc->virtual);
-diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
-index f8e7dcd..17ee921 100644
---- a/arch/arm/mach-exynos/suspend.c
-+++ b/arch/arm/mach-exynos/suspend.c
-@@ -18,6 +18,7 @@
- #include <linux/syscore_ops.h>
- #include <linux/cpu_pm.h>
- #include <linux/io.h>
-+#include <linux/irq.h>
- #include <linux/irqchip/arm-gic.h>
- #include <linux/err.h>
- #include <linux/regulator/machine.h>
-@@ -558,8 +559,10 @@ void __init exynos_pm_init(void)
- tmp |= pm_data->wake_disable_mask;
- pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
-
-- exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
-- exynos_pm_syscore_ops.resume = pm_data->pm_resume;
-+ pax_open_kernel();
-+ *(void **)&exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
-+ *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
-+ pax_close_kernel();
-
- register_syscore_ops(&exynos_pm_syscore_ops);
- suspend_set_ops(&exynos_suspend_ops);
-diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
-index 7f352de..6dc0929 100644
---- a/arch/arm/mach-keystone/keystone.c
-+++ b/arch/arm/mach-keystone/keystone.c
-@@ -27,7 +27,7 @@
-
- #include "keystone.h"
-
--static struct notifier_block platform_nb;
-+static notifier_block_no_const platform_nb;
- static unsigned long keystone_dma_pfn_offset __read_mostly;
-
- static int keystone_platform_notifier(struct notifier_block *nb,
-diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
-index ccef880..5dfad80 100644
---- a/arch/arm/mach-mvebu/coherency.c
-+++ b/arch/arm/mach-mvebu/coherency.c
-@@ -164,7 +164,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
-
- /*
- * This ioremap hook is used on Armada 375/38x to ensure that PCIe
-- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
-+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
- * is needed as a workaround for a deadlock issue between the PCIe
- * interface and the cache controller.
- */
-@@ -177,7 +177,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
- mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
-
- if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
-- mtype = MT_UNCACHED;
-+ mtype = MT_UNCACHED_RW;
-
- return __arm_ioremap_caller(phys_addr, size, mtype, caller);
- }
-diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
-index b6443a4..20a0b74 100644
---- a/arch/arm/mach-omap2/board-n8x0.c
-+++ b/arch/arm/mach-omap2/board-n8x0.c
-@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
- }
- #endif
-
--struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
-+struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
- .late_init = n8x0_menelaus_late_init,
- };
-
-diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
-index 79f49d9..70bf184 100644
---- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
-+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
-@@ -86,7 +86,7 @@ struct cpu_pm_ops {
- void (*resume)(void);
- void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
- void (*hotplug_restart)(void);
--};
-+} __no_const;
-
- static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
- static struct powerdomain *mpuss_pd;
-@@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
- static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
- {}
-
--struct cpu_pm_ops omap_pm_ops = {
-+static struct cpu_pm_ops omap_pm_ops __read_only = {
- .finish_suspend = default_finish_suspend,
- .resume = dummy_cpu_resume,
- .scu_prepare = dummy_scu_prepare,
-diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
-index 5305ec7..6d74045 100644
---- a/arch/arm/mach-omap2/omap-smp.c
-+++ b/arch/arm/mach-omap2/omap-smp.c
-@@ -19,6 +19,7 @@
- #include <linux/device.h>
- #include <linux/smp.h>
- #include <linux/io.h>
-+#include <linux/irq.h>
- #include <linux/irqchip/arm-gic.h>
-
- #include <asm/smp_scu.h>
-diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
-index f961c46..4a453dc 100644
---- a/arch/arm/mach-omap2/omap-wakeupgen.c
-+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
-@@ -344,7 +344,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __refdata irq_hotplug_notifier = {
-+static struct notifier_block irq_hotplug_notifier = {
- .notifier_call = irq_cpu_hotplug_notify,
- };
-
-diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
-index be9541e..821805f 100644
---- a/arch/arm/mach-omap2/omap_device.c
-+++ b/arch/arm/mach-omap2/omap_device.c
-@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
- struct platform_device __init *omap_device_build(const char *pdev_name,
- int pdev_id,
- struct omap_hwmod *oh,
-- void *pdata, int pdata_len)
-+ const void *pdata, int pdata_len)
- {
- struct omap_hwmod *ohs[] = { oh };
-
-@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
- struct platform_device __init *omap_device_build_ss(const char *pdev_name,
- int pdev_id,
- struct omap_hwmod **ohs,
-- int oh_cnt, void *pdata,
-+ int oh_cnt, const void *pdata,
- int pdata_len)
- {
- int ret = -ENOMEM;
-diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
-index 78c02b3..c94109a 100644
---- a/arch/arm/mach-omap2/omap_device.h
-+++ b/arch/arm/mach-omap2/omap_device.h
-@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
- /* Core code interface */
-
- struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
-- struct omap_hwmod *oh, void *pdata,
-+ struct omap_hwmod *oh, const void *pdata,
- int pdata_len);
-
- struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
- struct omap_hwmod **oh, int oh_cnt,
-- void *pdata, int pdata_len);
-+ const void *pdata, int pdata_len);
-
- struct omap_device *omap_device_alloc(struct platform_device *pdev,
- struct omap_hwmod **ohs, int oh_cnt);
-diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
-index 9025fff..3555702 100644
---- a/arch/arm/mach-omap2/omap_hwmod.c
-+++ b/arch/arm/mach-omap2/omap_hwmod.c
-@@ -193,10 +193,10 @@ struct omap_hwmod_soc_ops {
- int (*init_clkdm)(struct omap_hwmod *oh);
- void (*update_context_lost)(struct omap_hwmod *oh);
- int (*get_context_lost)(struct omap_hwmod *oh);
--};
-+} __no_const;
-
- /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
--static struct omap_hwmod_soc_ops soc_ops;
-+static struct omap_hwmod_soc_ops soc_ops __read_only;
-
- /* omap_hwmod_list contains all registered struct omap_hwmods */
- static LIST_HEAD(omap_hwmod_list);
-diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
-index 95fee54..cfa9cf1 100644
---- a/arch/arm/mach-omap2/powerdomains43xx_data.c
-+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
-@@ -10,6 +10,7 @@
-
- #include <linux/kernel.h>
- #include <linux/init.h>
-+#include <asm/pgtable.h>
-
- #include "powerdomain.h"
-
-@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
-
- void __init am43xx_powerdomains_init(void)
- {
-- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
-+ pax_open_kernel();
-+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
-+ pax_close_kernel();
- pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
- pwrdm_register_pwrdms(powerdomains_am43xx);
- pwrdm_complete_init();
-diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
-index ff0a68c..b312aa0 100644
---- a/arch/arm/mach-omap2/wd_timer.c
-+++ b/arch/arm/mach-omap2/wd_timer.c
-@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
- struct omap_hwmod *oh;
- char *oh_name = "wd_timer2";
- char *dev_name = "omap_wdt";
-- struct omap_wd_timer_platform_data pdata;
-+ static struct omap_wd_timer_platform_data pdata = {
-+ .read_reset_sources = prm_read_reset_sources
-+ };
-
- if (!cpu_class_is_omap2() || of_have_populated_dt())
- return 0;
-@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
- return -EINVAL;
- }
-
-- pdata.read_reset_sources = prm_read_reset_sources;
--
- pdev = omap_device_build(dev_name, id, oh, &pdata,
- sizeof(struct omap_wd_timer_platform_data));
- WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
-diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
-index 4f25a7c..a81be85 100644
---- a/arch/arm/mach-tegra/cpuidle-tegra20.c
-+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
-@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
- bool entered_lp2 = false;
-
- if (tegra_pending_sgi())
-- ACCESS_ONCE(abort_flag) = true;
-+ ACCESS_ONCE_RW(abort_flag) = true;
-
- cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
-
-diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
-index ab95f53..4b977a7 100644
---- a/arch/arm/mach-tegra/irq.c
-+++ b/arch/arm/mach-tegra/irq.c
-@@ -20,6 +20,7 @@
- #include <linux/cpu_pm.h>
- #include <linux/interrupt.h>
- #include <linux/io.h>
-+#include <linux/irq.h>
- #include <linux/irqchip/arm-gic.h>
- #include <linux/irq.h>
- #include <linux/kernel.h>
-diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
-index 2cb587b..6ddfebf 100644
---- a/arch/arm/mach-ux500/pm.c
-+++ b/arch/arm/mach-ux500/pm.c
-@@ -10,6 +10,7 @@
- */
-
- #include <linux/kernel.h>
-+#include <linux/irq.h>
- #include <linux/irqchip/arm-gic.h>
- #include <linux/delay.h>
- #include <linux/io.h>
-diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
-index 2dea8b5..6499da2 100644
---- a/arch/arm/mach-ux500/setup.h
-+++ b/arch/arm/mach-ux500/setup.h
-@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
- .type = MT_DEVICE, \
- }
-
--#define __MEM_DEV_DESC(x, sz) { \
-- .virtual = IO_ADDRESS(x), \
-- .pfn = __phys_to_pfn(x), \
-- .length = sz, \
-- .type = MT_MEMORY_RWX, \
--}
--
- extern struct smp_operations ux500_smp_ops;
- extern void ux500_cpu_die(unsigned int cpu);
-
-diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
-index 52d768f..5f93180 100644
---- a/arch/arm/mach-zynq/platsmp.c
-+++ b/arch/arm/mach-zynq/platsmp.c
-@@ -24,6 +24,7 @@
- #include <linux/io.h>
- #include <asm/cacheflush.h>
- #include <asm/smp_scu.h>
-+#include <linux/irq.h>
- #include <linux/irqchip/arm-gic.h>
- #include "common.h"
-
-diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
-index c43c714..4f8f7b9 100644
---- a/arch/arm/mm/Kconfig
-+++ b/arch/arm/mm/Kconfig
-@@ -446,6 +446,7 @@ config CPU_32v5
-
- config CPU_32v6
- bool
-+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
- select TLS_REG_EMUL if !CPU_32v6K && !MMU
-
- config CPU_32v6K
-@@ -600,6 +601,7 @@ config CPU_CP15_MPU
-
- config CPU_USE_DOMAINS
- bool
-+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
- help
- This option enables or disables the use of domain switching
- via the set_fs() function.
-@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
-
- config KUSER_HELPERS
- bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
-- depends on MMU
-+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
- default y
- help
- Warning: disabling this option may break user programs.
-@@ -812,7 +814,7 @@ config KUSER_HELPERS
- See Documentation/arm/kernel_user_helpers.txt for details.
-
- However, the fixed address nature of these helpers can be used
-- by ROP (return orientated programming) authors when creating
-+ by ROP (Return Oriented Programming) authors when creating
- exploits.
-
- If all of the binaries and libraries which run on your platform
-diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
-index 2c0c541..4585df9 100644
---- a/arch/arm/mm/alignment.c
-+++ b/arch/arm/mm/alignment.c
-@@ -216,10 +216,12 @@ union offset_union {
- #define __get16_unaligned_check(ins,val,addr) \
- do { \
- unsigned int err = 0, v, a = addr; \
-+ pax_open_userland(); \
- __get8_unaligned_check(ins,v,a,err); \
- val = v << ((BE) ? 8 : 0); \
- __get8_unaligned_check(ins,v,a,err); \
- val |= v << ((BE) ? 0 : 8); \
-+ pax_close_userland(); \
- if (err) \
- goto fault; \
- } while (0)
-@@ -233,6 +235,7 @@ union offset_union {
- #define __get32_unaligned_check(ins,val,addr) \
- do { \
- unsigned int err = 0, v, a = addr; \
-+ pax_open_userland(); \
- __get8_unaligned_check(ins,v,a,err); \
- val = v << ((BE) ? 24 : 0); \
- __get8_unaligned_check(ins,v,a,err); \
-@@ -241,6 +244,7 @@ union offset_union {
- val |= v << ((BE) ? 8 : 16); \
- __get8_unaligned_check(ins,v,a,err); \
- val |= v << ((BE) ? 0 : 24); \
-+ pax_close_userland(); \
- if (err) \
- goto fault; \
- } while (0)
-@@ -254,6 +258,7 @@ union offset_union {
- #define __put16_unaligned_check(ins,val,addr) \
- do { \
- unsigned int err = 0, v = val, a = addr; \
-+ pax_open_userland(); \
- __asm__( FIRST_BYTE_16 \
- ARM( "1: "ins" %1, [%2], #1\n" ) \
- THUMB( "1: "ins" %1, [%2]\n" ) \
-@@ -273,6 +278,7 @@ union offset_union {
- " .popsection\n" \
- : "=r" (err), "=&r" (v), "=&r" (a) \
- : "0" (err), "1" (v), "2" (a)); \
-+ pax_close_userland(); \
- if (err) \
- goto fault; \
- } while (0)
-@@ -286,6 +292,7 @@ union offset_union {
- #define __put32_unaligned_check(ins,val,addr) \
- do { \
- unsigned int err = 0, v = val, a = addr; \
-+ pax_open_userland(); \
- __asm__( FIRST_BYTE_32 \
- ARM( "1: "ins" %1, [%2], #1\n" ) \
- THUMB( "1: "ins" %1, [%2]\n" ) \
-@@ -315,6 +322,7 @@ union offset_union {
- " .popsection\n" \
- : "=r" (err), "=&r" (v), "=&r" (a) \
- : "0" (err), "1" (v), "2" (a)); \
-+ pax_close_userland(); \
- if (err) \
- goto fault; \
- } while (0)
-diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
-index 5e65ca8..879e7b3 100644
---- a/arch/arm/mm/cache-l2x0.c
-+++ b/arch/arm/mm/cache-l2x0.c
-@@ -42,7 +42,7 @@ struct l2c_init_data {
- void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
- void (*save)(void __iomem *);
- struct outer_cache_fns outer_cache;
--};
-+} __do_const;
-
- #define CACHE_LINE_SIZE 32
-
-diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
-index 845769e..4278fd7 100644
---- a/arch/arm/mm/context.c
-+++ b/arch/arm/mm/context.c
-@@ -43,7 +43,7 @@
- #define NUM_USER_ASIDS ASID_FIRST_VERSION
-
- static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
--static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
-+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
- static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
-
- static DEFINE_PER_CPU(atomic64_t, active_asids);
-@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
- {
- static u32 cur_idx = 1;
- u64 asid = atomic64_read(&mm->context.id);
-- u64 generation = atomic64_read(&asid_generation);
-+ u64 generation = atomic64_read_unchecked(&asid_generation);
-
- if (asid != 0) {
- /*
-@@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
- */
- asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
- if (asid == NUM_USER_ASIDS) {
-- generation = atomic64_add_return(ASID_FIRST_VERSION,
-+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
- &asid_generation);
- flush_context(cpu);
- asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
-@@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
- cpu_set_reserved_ttbr0();
-
- asid = atomic64_read(&mm->context.id);
-- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
-+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
- && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
- goto switch_mm_fastpath;
-
- raw_spin_lock_irqsave(&cpu_asid_lock, flags);
- /* Check that our ASID belongs to the current generation. */
- asid = atomic64_read(&mm->context.id);
-- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
-+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
- asid = new_context(mm, cpu);
- atomic64_set(&mm->context.id, asid);
- }
-diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
-index a982dc3..2d9f5f7 100644
---- a/arch/arm/mm/fault.c
-+++ b/arch/arm/mm/fault.c
-@@ -25,6 +25,7 @@
- #include <asm/system_misc.h>
- #include <asm/system_info.h>
- #include <asm/tlbflush.h>
-+#include <asm/sections.h>
-
- #include "fault.h"
-
-@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
- if (fixup_exception(regs))
- return;
-
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ if (addr < TASK_SIZE) {
-+ if (current->signal->curr_ip)
-+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
-+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
-+ else
-+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
-+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
-+ }
-+#endif
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ if ((fsr & FSR_WRITE) &&
-+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
-+ (MODULES_VADDR <= addr && addr < MODULES_END)))
-+ {
-+ if (current->signal->curr_ip)
-+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
-+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
-+ else
-+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
-+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
-+ }
-+#endif
-+
- /*
- * No handler, we'll have to terminate things with extreme prejudice.
- */
-@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
- }
- #endif
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (fsr & FSR_LNX_PF) {
-+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
-+ do_group_exit(SIGKILL);
-+ }
-+#endif
-+
- tsk->thread.address = addr;
- tsk->thread.error_code = fsr;
- tsk->thread.trap_no = 14;
-@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
- }
- #endif /* CONFIG_MMU */
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 20; i++) {
-+ unsigned char c;
-+ if (get_user(c, (__force unsigned char __user *)pc+i))
-+ printk(KERN_CONT "?? ");
-+ else
-+ printk(KERN_CONT "%02x ", c);
-+ }
-+ printk("\n");
-+
-+ printk(KERN_ERR "PAX: bytes at SP-4: ");
-+ for (i = -1; i < 20; i++) {
-+ unsigned long c;
-+ if (get_user(c, (__force unsigned long __user *)sp+i))
-+ printk(KERN_CONT "???????? ");
-+ else
-+ printk(KERN_CONT "%08lx ", c);
-+ }
-+ printk("\n");
-+}
-+#endif
-+
- /*
- * First Level Translation Fault Handler
- *
-@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
- const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
- struct siginfo info;
-
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
-+ if (current->signal->curr_ip)
-+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
-+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
-+ else
-+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
-+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
-+ goto die;
-+ }
-+#endif
-+
- if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
- return;
-
-+die:
- pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
- inf->name, fsr, addr);
-
-@@ -573,15 +646,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
- ifsr_info[nr].name = name;
- }
-
-+asmlinkage int sys_sigreturn(struct pt_regs *regs);
-+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
-+
- asmlinkage void __exception
- do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
- {
- const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
- struct siginfo info;
-+ unsigned long pc = instruction_pointer(regs);
-+
-+ if (user_mode(regs)) {
-+ unsigned long sigpage = current->mm->context.sigpage;
-+
-+ if (sigpage <= pc && pc < sigpage + 7*4) {
-+ if (pc < sigpage + 3*4)
-+ sys_sigreturn(regs);
-+ else
-+ sys_rt_sigreturn(regs);
-+ return;
-+ }
-+ if (pc == 0xffff0f60UL) {
-+ /*
-+ * PaX: __kuser_cmpxchg64 emulation
-+ */
-+ // TODO
-+ //regs->ARM_pc = regs->ARM_lr;
-+ //return;
-+ }
-+ if (pc == 0xffff0fa0UL) {
-+ /*
-+ * PaX: __kuser_memory_barrier emulation
-+ */
-+ // dmb(); implied by the exception
-+ regs->ARM_pc = regs->ARM_lr;
-+ return;
-+ }
-+ if (pc == 0xffff0fc0UL) {
-+ /*
-+ * PaX: __kuser_cmpxchg emulation
-+ */
-+ // TODO
-+ //long new;
-+ //int op;
-+
-+ //op = FUTEX_OP_SET << 28;
-+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
-+ //regs->ARM_r0 = old != new;
-+ //regs->ARM_pc = regs->ARM_lr;
-+ //return;
-+ }
-+ if (pc == 0xffff0fe0UL) {
-+ /*
-+ * PaX: __kuser_get_tls emulation
-+ */
-+ regs->ARM_r0 = current_thread_info()->tp_value[0];
-+ regs->ARM_pc = regs->ARM_lr;
-+ return;
-+ }
-+ }
-+
-+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
-+ if (current->signal->curr_ip)
-+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
-+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
-+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
-+ else
-+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
-+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
-+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
-+ goto die;
-+ }
-+#endif
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
-+#ifdef CONFIG_THUMB2_KERNEL
-+ unsigned short bkpt;
-+
-+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
-+#else
-+ unsigned int bkpt;
-+
-+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
-+#endif
-+ current->thread.error_code = ifsr;
-+ current->thread.trap_no = 0;
-+ pax_report_refcount_overflow(regs);
-+ fixup_exception(regs);
-+ return;
-+ }
-+ }
-+#endif
-
- if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
- return;
-
-+die:
- pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
- inf->name, ifsr, addr);
-
-diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
-index cf08bdf..772656c 100644
---- a/arch/arm/mm/fault.h
-+++ b/arch/arm/mm/fault.h
-@@ -3,6 +3,7 @@
-
- /*
- * Fault status register encodings. We steal bit 31 for our own purposes.
-+ * Set when the FSR value is from an instruction fault.
- */
- #define FSR_LNX_PF (1 << 31)
- #define FSR_WRITE (1 << 11)
-@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
- }
- #endif
-
-+/* valid for LPAE and !LPAE */
-+static inline int is_xn_fault(unsigned int fsr)
-+{
-+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
-+}
-+
-+static inline int is_domain_fault(unsigned int fsr)
-+{
-+ return ((fsr_fs(fsr) & 0xD) == 0x9);
-+}
-+
- void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
- unsigned long search_exception_table(unsigned long addr);
-
-diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
-index 2495c8c..415b7fc 100644
---- a/arch/arm/mm/init.c
-+++ b/arch/arm/mm/init.c
-@@ -758,7 +758,46 @@ void free_tcmmem(void)
- {
- #ifdef CONFIG_HAVE_TCM
- extern char __tcm_start, __tcm_end;
-+#endif
-
-+#ifdef CONFIG_PAX_KERNEXEC
-+ unsigned long addr;
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ int cpu_arch = cpu_architecture();
-+ unsigned int cr = get_cr();
-+
-+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
-+ /* make page tables, etc. before .text NX */
-+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ __section_update(pmd, addr, PMD_SECT_XN);
-+ }
-+ /* make init NX */
-+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ __section_update(pmd, addr, PMD_SECT_XN);
-+ }
-+ /* make kernel code/rodata RX */
-+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+#ifdef CONFIG_ARM_LPAE
-+ __section_update(pmd, addr, PMD_SECT_RDONLY);
-+#else
-+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
-+#endif
-+ }
-+ }
-+#endif
-+
-+#ifdef CONFIG_HAVE_TCM
- poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
- free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
- #endif
-diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
-index d1e5ad7..84dcbf2 100644
---- a/arch/arm/mm/ioremap.c
-+++ b/arch/arm/mm/ioremap.c
-@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
- unsigned int mtype;
-
- if (cached)
-- mtype = MT_MEMORY_RWX;
-+ mtype = MT_MEMORY_RX;
- else
-- mtype = MT_MEMORY_RWX_NONCACHED;
-+ mtype = MT_MEMORY_RX_NONCACHED;
-
- return __arm_ioremap_caller(phys_addr, size, mtype,
- __builtin_return_address(0));
-diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
-index 5e85ed3..b10a7ed 100644
---- a/arch/arm/mm/mmap.c
-+++ b/arch/arm/mm/mmap.c
-@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
- struct vm_area_struct *vma;
- int do_align = 0;
- int aliasing = cache_is_vipt_aliasing();
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
- struct vm_unmapped_area_info info;
-
- /*
-@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
- if (len > TASK_SIZE)
- return -ENOMEM;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (addr) {
- if (do_align)
- addr = COLOUR_ALIGN(addr, pgoff);
-@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma(mm, addr);
-- if (TASK_SIZE - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
- return addr;
- }
-
-@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
- info.high_limit = TASK_SIZE;
- info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
- info.align_offset = pgoff << PAGE_SHIFT;
-+ info.threadstack_offset = offset;
- return vm_unmapped_area(&info);
- }
-
-@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- unsigned long addr = addr0;
- int do_align = 0;
- int aliasing = cache_is_vipt_aliasing();
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
- struct vm_unmapped_area_info info;
-
- /*
-@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- return addr;
- }
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- /* requesting a specific address */
- if (addr) {
- if (do_align)
-@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- else
- addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
-- if (TASK_SIZE - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
- return addr;
- }
-
-@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- info.high_limit = mm->mmap_base;
- info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
- info.align_offset = pgoff << PAGE_SHIFT;
-+ info.threadstack_offset = offset;
- addr = vm_unmapped_area(&info);
-
- /*
-@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- {
- unsigned long random_factor = 0UL;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- /* 8 bits of randomness in 20 address space bits */
- if ((current->flags & PF_RANDOMIZE) &&
- !(current->personality & ADDR_NO_RANDOMIZE))
-@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
-
- if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
- mm->mmap_base = mmap_base(random_factor);
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- }
- }
-diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
-index 4e6ef89..21c27f2 100644
---- a/arch/arm/mm/mmu.c
-+++ b/arch/arm/mm/mmu.c
-@@ -41,6 +41,22 @@
- #include "mm.h"
- #include "tcm.h"
-
-+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+void modify_domain(unsigned int dom, unsigned int type)
-+{
-+ struct thread_info *thread = current_thread_info();
-+ unsigned int domain = thread->cpu_domain;
-+ /*
-+ * DOMAIN_MANAGER might be defined to some other value;
-+ * use the arch-defined constant.
-+ */
-+ domain &= ~domain_val(dom, 3);
-+ thread->cpu_domain = domain | domain_val(dom, type);
-+ set_domain(thread->cpu_domain);
-+}
-+EXPORT_SYMBOL(modify_domain);
-+#endif
-+
- /*
- * empty_zero_page is a special page that is used for
- * zero-initialized data and COW.
-@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
- #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
- #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
-
--static struct mem_type mem_types[] = {
-+#ifdef CONFIG_PAX_KERNEXEC
-+#define L_PTE_KERNEXEC L_PTE_RDONLY
-+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
-+#else
-+#define L_PTE_KERNEXEC L_PTE_DIRTY
-+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
-+#endif
-+
-+static struct mem_type mem_types[] __read_only = {
- [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
- .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
- L_PTE_SHARED,
-@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
- .prot_sect = PROT_SECT_DEVICE,
- .domain = DOMAIN_IO,
- },
-- [MT_UNCACHED] = {
-+ [MT_UNCACHED_RW] = {
- .prot_pte = PROT_PTE_DEVICE,
- .prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
- .domain = DOMAIN_IO,
- },
-- [MT_CACHECLEAN] = {
-- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
-+ [MT_CACHECLEAN_RO] = {
-+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
- .domain = DOMAIN_KERNEL,
- },
- #ifndef CONFIG_ARM_LPAE
-- [MT_MINICLEAN] = {
-- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
-+ [MT_MINICLEAN_RO] = {
-+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
- .domain = DOMAIN_KERNEL,
- },
- #endif
-@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_RDONLY,
- .prot_l1 = PMD_TYPE_TABLE,
-- .domain = DOMAIN_USER,
-+ .domain = DOMAIN_VECTORS,
- },
- [MT_HIGH_VECTORS] = {
- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_USER | L_PTE_RDONLY,
- .prot_l1 = PMD_TYPE_TABLE,
-- .domain = DOMAIN_USER,
-+ .domain = DOMAIN_VECTORS,
- },
-- [MT_MEMORY_RWX] = {
-+ [__MT_MEMORY_RWX] = {
- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
- .prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
-@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
- .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
- .domain = DOMAIN_KERNEL,
- },
-- [MT_ROM] = {
-- .prot_sect = PMD_TYPE_SECT,
-+ [MT_MEMORY_RX] = {
-+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
-+ .prot_l1 = PMD_TYPE_TABLE,
-+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
-+ .domain = DOMAIN_KERNEL,
-+ },
-+ [MT_ROM_RX] = {
-+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
- .domain = DOMAIN_KERNEL,
- },
-- [MT_MEMORY_RWX_NONCACHED] = {
-+ [MT_MEMORY_RW_NONCACHED] = {
- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_MT_BUFFERABLE,
- .prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
- .domain = DOMAIN_KERNEL,
- },
-+ [MT_MEMORY_RX_NONCACHED] = {
-+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
-+ L_PTE_MT_BUFFERABLE,
-+ .prot_l1 = PMD_TYPE_TABLE,
-+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
-+ .domain = DOMAIN_KERNEL,
-+ },
- [MT_MEMORY_RW_DTCM] = {
- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_XN,
-@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
- .domain = DOMAIN_KERNEL,
- },
-- [MT_MEMORY_RWX_ITCM] = {
-- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
-+ [MT_MEMORY_RX_ITCM] = {
-+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
- .prot_l1 = PMD_TYPE_TABLE,
-+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
- .domain = DOMAIN_KERNEL,
- },
- [MT_MEMORY_RW_SO] = {
-@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
- * Mark cache clean areas and XIP ROM read only
- * from SVC mode and no access from userspace.
- */
-- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
-- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
-- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
-+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
-+#ifdef CONFIG_PAX_KERNEXEC
-+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
-+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
-+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
-+#endif
-+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
-+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
- #endif
-
- /*
-@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
- mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
- mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
- mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
-- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
-- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
-+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
-+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
- mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
- mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
-+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
-+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
- mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
-- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
-- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
-+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
-+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
-+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
-+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
- }
- }
-
-@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
- if (cpu_arch >= CPU_ARCH_ARMv6) {
- if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
- /* Non-cacheable Normal is XCB = 001 */
-- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
-+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
-+ PMD_SECT_BUFFERED;
-+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
- PMD_SECT_BUFFERED;
- } else {
- /* For both ARMv6 and non-TEX-remapping ARMv7 */
-- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
-+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
-+ PMD_SECT_TEX(1);
-+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
- PMD_SECT_TEX(1);
- }
- } else {
-- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
-+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
-+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
- }
-
- #ifdef CONFIG_ARM_LPAE
-@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
- user_pgprot |= PTE_EXT_PXN;
- #endif
-
-+ user_pgprot |= __supported_pte_mask;
-+
- for (i = 0; i < 16; i++) {
- pteval_t v = pgprot_val(protection_map[i]);
- protection_map[i] = __pgprot(v | user_pgprot);
-@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
-
- mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
- mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
-- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
-- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
-+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
-+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
- mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
- mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
-+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
-+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
- mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
-- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
-- mem_types[MT_ROM].prot_sect |= cp->pmd;
-+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
-+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
-+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
-
- switch (cp->pmd) {
- case PMD_SECT_WT:
-- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
-+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
- break;
- case PMD_SECT_WB:
- case PMD_SECT_WBWA:
-- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
-+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
- break;
- }
- pr_info("Memory policy: %sData cache %s\n",
-@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
- return;
- }
-
-- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
-+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
- md->virtual >= PAGE_OFFSET &&
- (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
- pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
-@@ -1218,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
- * called function. This means you can't use any function or debugging
- * method which may touch any device, otherwise the kernel _will_ crash.
- */
-+
-+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
-+
- static void __init devicemaps_init(const struct machine_desc *mdesc)
- {
- struct map_desc map;
- unsigned long addr;
-- void *vectors;
-
-- /*
-- * Allocate the vector page early.
-- */
-- vectors = early_alloc(PAGE_SIZE * 2);
--
-- early_trap_init(vectors);
-+ early_trap_init(&vectors);
-
- for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
- pmd_clear(pmd_off_k(addr));
-@@ -1242,7 +1296,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
- map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
- map.virtual = MODULES_VADDR;
- map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
-- map.type = MT_ROM;
-+ map.type = MT_ROM_RX;
- create_mapping(&map);
- #endif
-
-@@ -1253,14 +1307,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
- map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
- map.virtual = FLUSH_BASE;
- map.length = SZ_1M;
-- map.type = MT_CACHECLEAN;
-+ map.type = MT_CACHECLEAN_RO;
- create_mapping(&map);
- #endif
- #ifdef FLUSH_BASE_MINICACHE
- map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
- map.virtual = FLUSH_BASE_MINICACHE;
- map.length = SZ_1M;
-- map.type = MT_MINICLEAN;
-+ map.type = MT_MINICLEAN_RO;
- create_mapping(&map);
- #endif
-
-@@ -1269,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
- * location (0xffff0000). If we aren't using high-vectors, also
- * create a mapping at the low-vectors virtual address.
- */
-- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
-+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
- map.virtual = 0xffff0000;
- map.length = PAGE_SIZE;
- #ifdef CONFIG_KUSER_HELPERS
-@@ -1329,8 +1383,10 @@ static void __init kmap_init(void)
- static void __init map_lowmem(void)
- {
- struct memblock_region *reg;
-+#ifndef CONFIG_PAX_KERNEXEC
- phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
- phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
-+#endif
-
- /* Map all the lowmem memory banks. */
- for_each_memblock(memory, reg) {
-@@ -1343,11 +1399,48 @@ static void __init map_lowmem(void)
- if (start >= end)
- break;
-
-+#ifdef CONFIG_PAX_KERNEXEC
-+ map.pfn = __phys_to_pfn(start);
-+ map.virtual = __phys_to_virt(start);
-+ map.length = end - start;
-+
-+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
-+ struct map_desc kernel;
-+ struct map_desc initmap;
-+
-+ /* when freeing initmem we will make this RW */
-+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
-+ initmap.virtual = (unsigned long)__init_begin;
-+ initmap.length = _sdata - __init_begin;
-+ initmap.type = __MT_MEMORY_RWX;
-+ create_mapping(&initmap);
-+
-+ /* when freeing initmem we will make this RX */
-+ kernel.pfn = __phys_to_pfn(__pa(_stext));
-+ kernel.virtual = (unsigned long)_stext;
-+ kernel.length = __init_begin - _stext;
-+ kernel.type = __MT_MEMORY_RWX;
-+ create_mapping(&kernel);
-+
-+ if (map.virtual < (unsigned long)_stext) {
-+ map.length = (unsigned long)_stext - map.virtual;
-+ map.type = __MT_MEMORY_RWX;
-+ create_mapping(&map);
-+ }
-+
-+ map.pfn = __phys_to_pfn(__pa(_sdata));
-+ map.virtual = (unsigned long)_sdata;
-+ map.length = end - __pa(_sdata);
-+ }
-+
-+ map.type = MT_MEMORY_RW;
-+ create_mapping(&map);
-+#else
- if (end < kernel_x_start) {
- map.pfn = __phys_to_pfn(start);
- map.virtual = __phys_to_virt(start);
- map.length = end - start;
-- map.type = MT_MEMORY_RWX;
-+ map.type = __MT_MEMORY_RWX;
-
- create_mapping(&map);
- } else if (start >= kernel_x_end) {
-@@ -1371,7 +1464,7 @@ static void __init map_lowmem(void)
- map.pfn = __phys_to_pfn(kernel_x_start);
- map.virtual = __phys_to_virt(kernel_x_start);
- map.length = kernel_x_end - kernel_x_start;
-- map.type = MT_MEMORY_RWX;
-+ map.type = __MT_MEMORY_RWX;
-
- create_mapping(&map);
-
-@@ -1384,6 +1477,7 @@ static void __init map_lowmem(void)
- create_mapping(&map);
- }
- }
-+#endif
- }
- }
-
-diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
-index e1268f9..a9755a7 100644
---- a/arch/arm/net/bpf_jit_32.c
-+++ b/arch/arm/net/bpf_jit_32.c
-@@ -20,6 +20,7 @@
- #include <asm/cacheflush.h>
- #include <asm/hwcap.h>
- #include <asm/opcodes.h>
-+#include <asm/pgtable.h>
-
- #include "bpf_jit_32.h"
-
-@@ -71,7 +72,11 @@ struct jit_ctx {
- #endif
- };
-
-+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
-+int bpf_jit_enable __read_only;
-+#else
- int bpf_jit_enable __read_mostly;
-+#endif
-
- static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
- {
-@@ -178,8 +183,10 @@ static void jit_fill_hole(void *area, unsigned int size)
- {
- u32 *ptr;
- /* We are guaranteed to have aligned memory. */
-+ pax_open_kernel();
- for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
- *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
-+ pax_close_kernel();
- }
-
- static void build_prologue(struct jit_ctx *ctx)
-diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
-index 5b217f4..c23f40e 100644
---- a/arch/arm/plat-iop/setup.c
-+++ b/arch/arm/plat-iop/setup.c
-@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
- .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
- .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
- .length = IOP3XX_PERIPHERAL_SIZE,
-- .type = MT_UNCACHED,
-+ .type = MT_UNCACHED_RW,
- },
- };
-
-diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
-index a5bc92d..0bb4730 100644
---- a/arch/arm/plat-omap/sram.c
-+++ b/arch/arm/plat-omap/sram.c
-@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
- * Looks like we need to preserve some bootloader code at the
- * beginning of SRAM for jumping to flash for reboot to work...
- */
-+ pax_open_kernel();
- memset_io(omap_sram_base + omap_sram_skip, 0,
- omap_sram_size - omap_sram_skip);
-+ pax_close_kernel();
- }
-diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
-index ce6d763..cfea917 100644
---- a/arch/arm/plat-samsung/include/plat/dma-ops.h
-+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
-@@ -47,7 +47,7 @@ struct samsung_dma_ops {
- int (*started)(unsigned ch);
- int (*flush)(unsigned ch);
- int (*stop)(unsigned ch);
--};
-+} __no_const;
-
- extern void *samsung_dmadev_get_ops(void);
- extern void *s3c_dma_get_ops(void);
-diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
-index 7047051..44e8675 100644
---- a/arch/arm64/include/asm/atomic.h
-+++ b/arch/arm64/include/asm/atomic.h
-@@ -252,5 +252,15 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
- #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
- #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
-
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
-+
- #endif
- #endif
-diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
-index a5abb00..9cbca9a 100644
---- a/arch/arm64/include/asm/barrier.h
-+++ b/arch/arm64/include/asm/barrier.h
-@@ -44,7 +44,7 @@
- do { \
- compiletime_assert_atomic_type(*p); \
- barrier(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
-diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
-index 4fde8c1..441f84f 100644
---- a/arch/arm64/include/asm/percpu.h
-+++ b/arch/arm64/include/asm/percpu.h
-@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
- {
- switch (size) {
- case 1:
-- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
-+ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
- break;
- case 2:
-- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
-+ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
- break;
- case 4:
-- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
-+ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
- break;
- case 8:
-- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
-+ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
- break;
- default:
- BUILD_BUG();
-diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
-index e20df38..027ede3 100644
---- a/arch/arm64/include/asm/pgalloc.h
-+++ b/arch/arm64/include/asm/pgalloc.h
-@@ -46,6 +46,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
- set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
- }
-
-+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
-+{
-+ pud_populate(mm, pud, pmd);
-+}
-+
- #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
-
- #if CONFIG_ARM64_PGTABLE_LEVELS > 3
-diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
-index 3bf8f4e..5dd5491 100644
---- a/arch/arm64/include/asm/uaccess.h
-+++ b/arch/arm64/include/asm/uaccess.h
-@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
- flag; \
- })
-
-+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
- #define access_ok(type, addr, size) __range_ok(addr, size)
- #define user_addr_max get_fs
-
-diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
-index df34a70..5727a75 100644
---- a/arch/arm64/mm/dma-mapping.c
-+++ b/arch/arm64/mm/dma-mapping.c
-@@ -137,7 +137,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
- phys_to_page(paddr),
- size >> PAGE_SHIFT);
- if (!freed)
-- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
- }
-
- static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
-diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
-index c3a58a1..78fbf54 100644
---- a/arch/avr32/include/asm/cache.h
-+++ b/arch/avr32/include/asm/cache.h
-@@ -1,8 +1,10 @@
- #ifndef __ASM_AVR32_CACHE_H
- #define __ASM_AVR32_CACHE_H
-
-+#include <linux/const.h>
-+
- #define L1_CACHE_SHIFT 5
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- /*
- * Memory returned by kmalloc() may be used for DMA, so we must make
-diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
-index d232888..87c8df1 100644
---- a/arch/avr32/include/asm/elf.h
-+++ b/arch/avr32/include/asm/elf.h
-@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
--#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
-+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
-+
-+#define PAX_DELTA_MMAP_LEN 15
-+#define PAX_DELTA_STACK_LEN 15
-+#endif
-
- /* This yields a mask that user programs can use to figure out what
- instruction set this CPU supports. This could be done in user space,
-diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
-index 479330b..53717a8 100644
---- a/arch/avr32/include/asm/kmap_types.h
-+++ b/arch/avr32/include/asm/kmap_types.h
-@@ -2,9 +2,9 @@
- #define __ASM_AVR32_KMAP_TYPES_H
-
- #ifdef CONFIG_DEBUG_HIGHMEM
--# define KM_TYPE_NR 29
-+# define KM_TYPE_NR 30
- #else
--# define KM_TYPE_NR 14
-+# define KM_TYPE_NR 15
- #endif
-
- #endif /* __ASM_AVR32_KMAP_TYPES_H */
-diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
-index d223a8b..69c5210 100644
---- a/arch/avr32/mm/fault.c
-+++ b/arch/avr32/mm/fault.c
-@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
-
- int exception_trace = 1;
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ unsigned long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 20; i++) {
-+ unsigned char c;
-+ if (get_user(c, (unsigned char *)pc+i))
-+ printk(KERN_CONT "???????? ");
-+ else
-+ printk(KERN_CONT "%02x ", c);
-+ }
-+ printk("\n");
-+}
-+#endif
-+
- /*
- * This routine handles page faults. It determines the address and the
- * problem, and then passes it off to one of the appropriate routines.
-@@ -178,6 +195,16 @@ bad_area:
- up_read(&mm->mmap_sem);
-
- if (user_mode(regs)) {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
-+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
-+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
-+ do_group_exit(SIGKILL);
-+ }
-+ }
-+#endif
-+
- if (exception_trace && printk_ratelimit())
- printk("%s%s[%d]: segfault at %08lx pc %08lx "
- "sp %08lx ecr %lu\n",
-diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
-index 568885a..f8008df 100644
---- a/arch/blackfin/include/asm/cache.h
-+++ b/arch/blackfin/include/asm/cache.h
-@@ -7,6 +7,7 @@
- #ifndef __ARCH_BLACKFIN_CACHE_H
- #define __ARCH_BLACKFIN_CACHE_H
-
-+#include <linux/const.h>
- #include <linux/linkage.h> /* for asmlinkage */
-
- /*
-@@ -14,7 +15,7 @@
- * Blackfin loads 32 bytes for cache
- */
- #define L1_CACHE_SHIFT 5
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
- #define SMP_CACHE_BYTES L1_CACHE_BYTES
-
- #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
-diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
-index aea2718..3639a60 100644
---- a/arch/cris/include/arch-v10/arch/cache.h
-+++ b/arch/cris/include/arch-v10/arch/cache.h
-@@ -1,8 +1,9 @@
- #ifndef _ASM_ARCH_CACHE_H
- #define _ASM_ARCH_CACHE_H
-
-+#include <linux/const.h>
- /* Etrax 100LX have 32-byte cache-lines. */
--#define L1_CACHE_BYTES 32
- #define L1_CACHE_SHIFT 5
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #endif /* _ASM_ARCH_CACHE_H */
-diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
-index 7caf25d..ee65ac5 100644
---- a/arch/cris/include/arch-v32/arch/cache.h
-+++ b/arch/cris/include/arch-v32/arch/cache.h
-@@ -1,11 +1,12 @@
- #ifndef _ASM_CRIS_ARCH_CACHE_H
- #define _ASM_CRIS_ARCH_CACHE_H
-
-+#include <linux/const.h>
- #include <arch/hwregs/dma.h>
-
- /* A cache-line is 32 bytes. */
--#define L1_CACHE_BYTES 32
- #define L1_CACHE_SHIFT 5
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #define __read_mostly __attribute__((__section__(".data..read_mostly")))
-
-diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
-index 102190a..5334cea 100644
---- a/arch/frv/include/asm/atomic.h
-+++ b/arch/frv/include/asm/atomic.h
-@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
- #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
- #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
-
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
-+
- static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
- {
- int c, old;
-diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
-index 2797163..c2a401df9 100644
---- a/arch/frv/include/asm/cache.h
-+++ b/arch/frv/include/asm/cache.h
-@@ -12,10 +12,11 @@
- #ifndef __ASM_CACHE_H
- #define __ASM_CACHE_H
-
-+#include <linux/const.h>
-
- /* bytes per L1 cache line */
- #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
- #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
-diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
-index 43901f2..0d8b865 100644
---- a/arch/frv/include/asm/kmap_types.h
-+++ b/arch/frv/include/asm/kmap_types.h
-@@ -2,6 +2,6 @@
- #ifndef _ASM_KMAP_TYPES_H
- #define _ASM_KMAP_TYPES_H
-
--#define KM_TYPE_NR 17
-+#define KM_TYPE_NR 18
-
- #endif
-diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
-index 836f147..4cf23f5 100644
---- a/arch/frv/mm/elf-fdpic.c
-+++ b/arch/frv/mm/elf-fdpic.c
-@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- {
- struct vm_area_struct *vma;
- struct vm_unmapped_area_info info;
-+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
-
- if (len > TASK_SIZE)
- return -ENOMEM;
-@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma(current->mm, addr);
-- if (TASK_SIZE - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
- goto success;
- }
-
-@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- info.high_limit = (current->mm->start_stack - 0x00200000);
- info.align_mask = 0;
- info.align_offset = 0;
-+ info.threadstack_offset = offset;
- addr = vm_unmapped_area(&info);
- if (!(addr & ~PAGE_MASK))
- goto success;
-diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
-index 69952c1..4fa2908 100644
---- a/arch/hexagon/include/asm/cache.h
-+++ b/arch/hexagon/include/asm/cache.h
-@@ -21,9 +21,11 @@
- #ifndef __ASM_CACHE_H
- #define __ASM_CACHE_H
-
-+#include <linux/const.h>
-+
- /* Bytes per L1 cache line */
--#define L1_CACHE_SHIFT (5)
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_SHIFT 5
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
-
-diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
-index 074e52b..76afdac 100644
---- a/arch/ia64/Kconfig
-+++ b/arch/ia64/Kconfig
-@@ -548,6 +548,7 @@ source "drivers/sn/Kconfig"
- config KEXEC
- bool "kexec system call"
- depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
-+ depends on !GRKERNSEC_KMEM
- help
- kexec is a system call that implements the ability to shutdown your
- current kernel, and to start another kernel. It is like a reboot
-diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
-index 970d0bd..e750b9b 100644
---- a/arch/ia64/Makefile
-+++ b/arch/ia64/Makefile
-@@ -98,5 +98,6 @@ endef
- archprepare: make_nr_irqs_h FORCE
- PHONY += make_nr_irqs_h FORCE
-
-+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
- make_nr_irqs_h: FORCE
- $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
-diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
-index 0bf0350..2ad1957 100644
---- a/arch/ia64/include/asm/atomic.h
-+++ b/arch/ia64/include/asm/atomic.h
-@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
- #define atomic64_inc(v) atomic64_add(1, (v))
- #define atomic64_dec(v) atomic64_sub(1, (v))
-
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
-+
- #endif /* _ASM_IA64_ATOMIC_H */
-diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
-index f6769eb..1cdb590 100644
---- a/arch/ia64/include/asm/barrier.h
-+++ b/arch/ia64/include/asm/barrier.h
-@@ -66,7 +66,7 @@
- do { \
- compiletime_assert_atomic_type(*p); \
- barrier(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
-diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
-index 988254a..e1ee885 100644
---- a/arch/ia64/include/asm/cache.h
-+++ b/arch/ia64/include/asm/cache.h
-@@ -1,6 +1,7 @@
- #ifndef _ASM_IA64_CACHE_H
- #define _ASM_IA64_CACHE_H
-
-+#include <linux/const.h>
-
- /*
- * Copyright (C) 1998-2000 Hewlett-Packard Co
-@@ -9,7 +10,7 @@
-
- /* Bytes per L1 (data) cache line. */
- #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #ifdef CONFIG_SMP
- # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
-diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
-index 5a83c5c..4d7f553 100644
---- a/arch/ia64/include/asm/elf.h
-+++ b/arch/ia64/include/asm/elf.h
-@@ -42,6 +42,13 @@
- */
- #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
-+
-+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
-+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
-+#endif
-+
- #define PT_IA_64_UNWIND 0x70000001
-
- /* IA-64 relocations: */
-diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
-index 5767cdf..7462574 100644
---- a/arch/ia64/include/asm/pgalloc.h
-+++ b/arch/ia64/include/asm/pgalloc.h
-@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
- pgd_val(*pgd_entry) = __pa(pud);
- }
-
-+static inline void
-+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
-+{
-+ pgd_populate(mm, pgd_entry, pud);
-+}
-+
- static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
- {
- return quicklist_alloc(0, GFP_KERNEL, NULL);
-@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
- pud_val(*pud_entry) = __pa(pmd);
- }
-
-+static inline void
-+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
-+{
-+ pud_populate(mm, pud_entry, pmd);
-+}
-+
- static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
- {
- return quicklist_alloc(0, GFP_KERNEL, NULL);
-diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
-index 7935115..c0eca6a 100644
---- a/arch/ia64/include/asm/pgtable.h
-+++ b/arch/ia64/include/asm/pgtable.h
-@@ -12,7 +12,7 @@
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
--
-+#include <linux/const.h>
- #include <asm/mman.h>
- #include <asm/page.h>
- #include <asm/processor.h>
-@@ -142,6 +142,17 @@
- #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
- #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
- #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
-+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
-+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
-+#else
-+# define PAGE_SHARED_NOEXEC PAGE_SHARED
-+# define PAGE_READONLY_NOEXEC PAGE_READONLY
-+# define PAGE_COPY_NOEXEC PAGE_COPY
-+#endif
-+
- #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
- #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
- #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
-diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
-index 45698cd..e8e2dbc 100644
---- a/arch/ia64/include/asm/spinlock.h
-+++ b/arch/ia64/include/asm/spinlock.h
-@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
- unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
-
- asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
-- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
-+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
- }
-
- static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
-diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
-index 103bedc..0210597 100644
---- a/arch/ia64/include/asm/uaccess.h
-+++ b/arch/ia64/include/asm/uaccess.h
-@@ -70,6 +70,7 @@
- && ((segment).seg == KERNEL_DS.seg \
- || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
- })
-+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
- #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
-
- /*
-@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
- static inline unsigned long
- __copy_to_user (void __user *to, const void *from, unsigned long count)
- {
-+ if (count > INT_MAX)
-+ return count;
-+
-+ if (!__builtin_constant_p(count))
-+ check_object_size(from, count, true);
-+
- return __copy_user(to, (__force void __user *) from, count);
- }
-
- static inline unsigned long
- __copy_from_user (void *to, const void __user *from, unsigned long count)
- {
-+ if (count > INT_MAX)
-+ return count;
-+
-+ if (!__builtin_constant_p(count))
-+ check_object_size(to, count, false);
-+
- return __copy_user((__force void __user *) to, from, count);
- }
-
-@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
- ({ \
- void __user *__cu_to = (to); \
- const void *__cu_from = (from); \
-- long __cu_len = (n); \
-+ unsigned long __cu_len = (n); \
- \
-- if (__access_ok(__cu_to, __cu_len, get_fs())) \
-+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
-+ if (!__builtin_constant_p(n)) \
-+ check_object_size(__cu_from, __cu_len, true); \
- __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
-+ } \
- __cu_len; \
- })
-
-@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
- ({ \
- void *__cu_to = (to); \
- const void __user *__cu_from = (from); \
-- long __cu_len = (n); \
-+ unsigned long __cu_len = (n); \
- \
- __chk_user_ptr(__cu_from); \
-- if (__access_ok(__cu_from, __cu_len, get_fs())) \
-+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
-+ if (!__builtin_constant_p(n)) \
-+ check_object_size(__cu_to, __cu_len, false); \
- __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
-+ } \
- __cu_len; \
- })
-
-diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
-index 29754aa..06d2838 100644
---- a/arch/ia64/kernel/module.c
-+++ b/arch/ia64/kernel/module.c
-@@ -492,15 +492,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
- }
-
- static inline int
-+in_init_rx (const struct module *mod, uint64_t addr)
-+{
-+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
-+}
-+
-+static inline int
-+in_init_rw (const struct module *mod, uint64_t addr)
-+{
-+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
-+}
-+
-+static inline int
- in_init (const struct module *mod, uint64_t addr)
- {
-- return addr - (uint64_t) mod->module_init < mod->init_size;
-+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
-+}
-+
-+static inline int
-+in_core_rx (const struct module *mod, uint64_t addr)
-+{
-+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
-+}
-+
-+static inline int
-+in_core_rw (const struct module *mod, uint64_t addr)
-+{
-+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
- }
-
- static inline int
- in_core (const struct module *mod, uint64_t addr)
- {
-- return addr - (uint64_t) mod->module_core < mod->core_size;
-+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
- }
-
- static inline int
-@@ -683,7 +707,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
- break;
-
- case RV_BDREL:
-- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
-+ if (in_init_rx(mod, val))
-+ val -= (uint64_t) mod->module_init_rx;
-+ else if (in_init_rw(mod, val))
-+ val -= (uint64_t) mod->module_init_rw;
-+ else if (in_core_rx(mod, val))
-+ val -= (uint64_t) mod->module_core_rx;
-+ else if (in_core_rw(mod, val))
-+ val -= (uint64_t) mod->module_core_rw;
- break;
-
- case RV_LTV:
-@@ -818,15 +849,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
- * addresses have been selected...
- */
- uint64_t gp;
-- if (mod->core_size > MAX_LTOFF)
-+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
- /*
- * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
- * at the end of the module.
- */
-- gp = mod->core_size - MAX_LTOFF / 2;
-+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
- else
-- gp = mod->core_size / 2;
-- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
-+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
-+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
- mod->arch.gp = gp;
- DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
- }
-diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
-index c39c3cd..3c77738 100644
---- a/arch/ia64/kernel/palinfo.c
-+++ b/arch/ia64/kernel/palinfo.c
-@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __refdata palinfo_cpu_notifier =
-+static struct notifier_block palinfo_cpu_notifier =
- {
- .notifier_call = palinfo_cpu_callback,
- .priority = 0,
-diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
-index 41e33f8..65180b2a 100644
---- a/arch/ia64/kernel/sys_ia64.c
-+++ b/arch/ia64/kernel/sys_ia64.c
-@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
- unsigned long align_mask = 0;
- struct mm_struct *mm = current->mm;
- struct vm_unmapped_area_info info;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
-
- if (len > RGN_MAP_LIMIT)
- return -ENOMEM;
-@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
- if (REGION_NUMBER(addr) == RGN_HPAGE)
- addr = 0;
- #endif
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ addr = mm->free_area_cache;
-+ else
-+#endif
-+
- if (!addr)
- addr = TASK_UNMAPPED_BASE;
-
-@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
- info.high_limit = TASK_SIZE;
- info.align_mask = align_mask;
- info.align_offset = 0;
-+ info.threadstack_offset = offset;
- return vm_unmapped_area(&info);
- }
-
-diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
-index 84f8a52..7c76178 100644
---- a/arch/ia64/kernel/vmlinux.lds.S
-+++ b/arch/ia64/kernel/vmlinux.lds.S
-@@ -192,7 +192,7 @@ SECTIONS {
- /* Per-cpu data: */
- . = ALIGN(PERCPU_PAGE_SIZE);
- PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
-- __phys_per_cpu_start = __per_cpu_load;
-+ __phys_per_cpu_start = per_cpu_load;
- /*
- * ensure percpu data fits
- * into percpu page size
-diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
-index ba5ba7a..36e9d3a 100644
---- a/arch/ia64/mm/fault.c
-+++ b/arch/ia64/mm/fault.c
-@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
- return pte_present(pte);
- }
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ unsigned long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 8; i++) {
-+ unsigned int c;
-+ if (get_user(c, (unsigned int *)pc+i))
-+ printk(KERN_CONT "???????? ");
-+ else
-+ printk(KERN_CONT "%08x ", c);
-+ }
-+ printk("\n");
-+}
-+#endif
-+
- # define VM_READ_BIT 0
- # define VM_WRITE_BIT 1
- # define VM_EXEC_BIT 2
-@@ -151,8 +168,21 @@ retry:
- if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
- goto bad_area;
-
-- if ((vma->vm_flags & mask) != mask)
-+ if ((vma->vm_flags & mask) != mask) {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
-+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
-+ goto bad_area;
-+
-+ up_read(&mm->mmap_sem);
-+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
-+ do_group_exit(SIGKILL);
-+ }
-+#endif
-+
- goto bad_area;
-+ }
-
- /*
- * If for any reason at all we couldn't handle the fault, make
-diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
-index 52b7604b..455cb85 100644
---- a/arch/ia64/mm/hugetlbpage.c
-+++ b/arch/ia64/mm/hugetlbpage.c
-@@ -143,6 +143,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
- unsigned long pgoff, unsigned long flags)
- {
- struct vm_unmapped_area_info info;
-+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
-
- if (len > RGN_MAP_LIMIT)
- return -ENOMEM;
-@@ -166,6 +167,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
- info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
- info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
- info.align_offset = 0;
-+ info.threadstack_offset = offset;
- return vm_unmapped_area(&info);
- }
-
-diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
-index 6b33457..88b5124 100644
---- a/arch/ia64/mm/init.c
-+++ b/arch/ia64/mm/init.c
-@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
- vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
- vma->vm_end = vma->vm_start + PAGE_SIZE;
- vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
-+ vma->vm_flags &= ~VM_EXEC;
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
-+ vma->vm_flags &= ~VM_MAYEXEC;
-+#endif
-+
-+ }
-+#endif
-+
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- down_write(&current->mm->mmap_sem);
- if (insert_vm_struct(current->mm, vma)) {
-@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
- gate_vma.vm_start = FIXADDR_USER_START;
- gate_vma.vm_end = FIXADDR_USER_END;
- gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
-- gate_vma.vm_page_prot = __P101;
-+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
-
- return 0;
- }
-diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
-index 40b3ee98..8c2c112 100644
---- a/arch/m32r/include/asm/cache.h
-+++ b/arch/m32r/include/asm/cache.h
-@@ -1,8 +1,10 @@
- #ifndef _ASM_M32R_CACHE_H
- #define _ASM_M32R_CACHE_H
-
-+#include <linux/const.h>
-+
- /* L1 cache line size */
- #define L1_CACHE_SHIFT 4
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #endif /* _ASM_M32R_CACHE_H */
-diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
-index 82abd15..d95ae5d 100644
---- a/arch/m32r/lib/usercopy.c
-+++ b/arch/m32r/lib/usercopy.c
-@@ -14,6 +14,9 @@
- unsigned long
- __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
- {
-+ if ((long)n < 0)
-+ return n;
-+
- prefetch(from);
- if (access_ok(VERIFY_WRITE, to, n))
- __copy_user(to,from,n);
-@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
- unsigned long
- __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
- {
-+ if ((long)n < 0)
-+ return n;
-+
- prefetchw(to);
- if (access_ok(VERIFY_READ, from, n))
- __copy_user_zeroing(to,from,n);
-diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
-index 0395c51..5f26031 100644
---- a/arch/m68k/include/asm/cache.h
-+++ b/arch/m68k/include/asm/cache.h
-@@ -4,9 +4,11 @@
- #ifndef __ARCH_M68K_CACHE_H
- #define __ARCH_M68K_CACHE_H
-
-+#include <linux/const.h>
-+
- /* bytes per L1 cache line */
- #define L1_CACHE_SHIFT 4
--#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
-
-diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
-index d703d8e..a8e2d70 100644
---- a/arch/metag/include/asm/barrier.h
-+++ b/arch/metag/include/asm/barrier.h
-@@ -90,7 +90,7 @@ static inline void fence(void)
- do { \
- compiletime_assert_atomic_type(*p); \
- smp_mb(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
-diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
-index 7ca80ac..794ba72 100644
---- a/arch/metag/mm/hugetlbpage.c
-+++ b/arch/metag/mm/hugetlbpage.c
-@@ -194,6 +194,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
- info.high_limit = TASK_SIZE;
- info.align_mask = PAGE_MASK & HUGEPT_MASK;
- info.align_offset = 0;
-+ info.threadstack_offset = 0;
- return vm_unmapped_area(&info);
- }
-
-diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
-index 4efe96a..60e8699 100644
---- a/arch/microblaze/include/asm/cache.h
-+++ b/arch/microblaze/include/asm/cache.h
-@@ -13,11 +13,12 @@
- #ifndef _ASM_MICROBLAZE_CACHE_H
- #define _ASM_MICROBLAZE_CACHE_H
-
-+#include <linux/const.h>
- #include <asm/registers.h>
-
- #define L1_CACHE_SHIFT 5
- /* word-granular cache in microblaze */
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #define SMP_CACHE_BYTES L1_CACHE_BYTES
-
-diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
-index 843713c..b6a87b9 100644
---- a/arch/mips/Kconfig
-+++ b/arch/mips/Kconfig
-@@ -2439,6 +2439,7 @@ source "kernel/Kconfig.preempt"
-
- config KEXEC
- bool "Kexec system call"
-+ depends on !GRKERNSEC_KMEM
- help
- kexec is a system call that implements the ability to shutdown your
- current kernel, and to start another kernel. It is like a reboot
-diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
-index 3778655..1dff0a9 100644
---- a/arch/mips/cavium-octeon/dma-octeon.c
-+++ b/arch/mips/cavium-octeon/dma-octeon.c
-@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
- if (dma_release_from_coherent(dev, order, vaddr))
- return;
-
-- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
- }
-
- static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
-diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
-index 857da84..0fee5e2 100644
---- a/arch/mips/include/asm/atomic.h
-+++ b/arch/mips/include/asm/atomic.h
-@@ -22,15 +22,39 @@
- #include <asm/cmpxchg.h>
- #include <asm/war.h>
-
-+#ifdef CONFIG_GENERIC_ATOMIC64
-+#include <asm-generic/atomic64.h>
-+#endif
-+
- #define ATOMIC_INIT(i) { (i) }
-
-+#ifdef CONFIG_64BIT
-+#define _ASM_EXTABLE(from, to) \
-+" .section __ex_table,\"a\"\n" \
-+" .dword " #from ", " #to"\n" \
-+" .previous\n"
-+#else
-+#define _ASM_EXTABLE(from, to) \
-+" .section __ex_table,\"a\"\n" \
-+" .word " #from ", " #to"\n" \
-+" .previous\n"
-+#endif
-+
- /*
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
--#define atomic_read(v) ACCESS_ONCE((v)->counter)
-+static inline int atomic_read(const atomic_t *v)
-+{
-+ return ACCESS_ONCE(v->counter);
-+}
-+
-+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
-+{
-+ return ACCESS_ONCE(v->counter);
-+}
-
- /*
- * atomic_set - set atomic variable
-@@ -39,47 +63,77 @@
- *
- * Atomically sets the value of @v to @i.
- */
--#define atomic_set(v, i) ((v)->counter = (i))
-+static inline void atomic_set(atomic_t *v, int i)
-+{
-+ v->counter = i;
-+}
-
--#define ATOMIC_OP(op, c_op, asm_op) \
--static __inline__ void atomic_##op(int i, atomic_t * v) \
-+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
-+{
-+ v->counter = i;
-+}
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+#define __OVERFLOW_POST \
-+ " b 4f \n" \
-+ " .set noreorder \n" \
-+ "3: b 5f \n" \
-+ " move %0, %1 \n" \
-+ " .set reorder \n"
-+#define __OVERFLOW_EXTABLE \
-+ "3:\n" \
-+ _ASM_EXTABLE(2b, 3b)
-+#else
-+#define __OVERFLOW_POST
-+#define __OVERFLOW_EXTABLE
-+#endif
-+
-+#define __ATOMIC_OP(op, suffix, asm_op, extable) \
-+static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
- { \
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
- int temp; \
- \
- __asm__ __volatile__( \
-- " .set arch=r4000 \n" \
-- "1: ll %0, %1 # atomic_" #op " \n" \
-- " " #asm_op " %0, %2 \n" \
-+ " .set mips3 \n" \
-+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
-+ "2: " #asm_op " %0, %2 \n" \
- " sc %0, %1 \n" \
- " beqzl %0, 1b \n" \
-+ extable \
- " .set mips0 \n" \
- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
- : "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- int temp; \
- \
-- do { \
-- __asm__ __volatile__( \
-- " .set arch=r4000 \n" \
-- " ll %0, %1 # atomic_" #op "\n" \
-- " " #asm_op " %0, %2 \n" \
-- " sc %0, %1 \n" \
-- " .set mips0 \n" \
-- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
-- : "Ir" (i)); \
-- } while (unlikely(!temp)); \
-+ __asm__ __volatile__( \
-+ " .set mips3 \n" \
-+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
-+ "2: " #asm_op " %0, %2 \n" \
-+ " sc %0, %1 \n" \
-+ " beqz %0, 1b \n" \
-+ extable \
-+ " .set mips0 \n" \
-+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
-+ : "Ir" (i)); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
-- v->counter c_op i; \
-+ __asm__ __volatile__( \
-+ "2: " #asm_op " %0, %1 \n" \
-+ extable \
-+ : "+r" (v->counter) : "Ir" (i)); \
- raw_local_irq_restore(flags); \
- } \
- }
-
--#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
--static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
-+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, _unchecked, asm_op##u, ) \
-+ __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE)
-+
-+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
-+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
- { \
- int result; \
- \
-@@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
- int temp; \
- \
- __asm__ __volatile__( \
-- " .set arch=r4000 \n" \
-- "1: ll %1, %2 # atomic_" #op "_return \n" \
-- " " #asm_op " %0, %1, %3 \n" \
-+ " .set mips3 \n" \
-+ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
-+ "2: " #asm_op " %0, %1, %3 \n" \
- " sc %0, %2 \n" \
- " beqzl %0, 1b \n" \
-- " " #asm_op " %0, %1, %3 \n" \
-+ post_op \
-+ extable \
-+ "4: " #asm_op " %0, %1, %3 \n" \
-+ "5: \n" \
- " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF12_ASM() (v->counter) \
-@@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
- } else if (kernel_uses_llsc) { \
- int temp; \
- \
-- do { \
-- __asm__ __volatile__( \
-- " .set arch=r4000 \n" \
-- " ll %1, %2 # atomic_" #op "_return \n" \
-- " " #asm_op " %0, %1, %3 \n" \
-- " sc %0, %2 \n" \
-- " .set mips0 \n" \
-- : "=&r" (result), "=&r" (temp), \
-- "+" GCC_OFF12_ASM() (v->counter) \
-- : "Ir" (i)); \
-- } while (unlikely(!result)); \
-+ __asm__ __volatile__( \
-+ " .set mips3 \n" \
-+ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
-+ "2: " #asm_op " %0, %1, %3 \n" \
-+ " sc %0, %2 \n" \
-+ post_op \
-+ extable \
-+ "4: " #asm_op " %0, %1, %3 \n" \
-+ "5: \n" \
-+ " .set mips0 \n" \
-+ : "=&r" (result), "=&r" (temp), \
-+ "+" GCC_OFF12_ASM() (v->counter) \
-+ : "Ir" (i)); \
- \
- result = temp; result c_op i; \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
-- result = v->counter; \
-- result c_op i; \
-- v->counter = result; \
-+ __asm__ __volatile__( \
-+ " lw %0, %1 \n" \
-+ "2: " #asm_op " %0, %1, %2 \n" \
-+ " sw %0, %1 \n" \
-+ "3: \n" \
-+ extable \
-+ : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter) \
-+ : "Ir" (i)); \
- raw_local_irq_restore(flags); \
- } \
- \
-@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
- return result; \
- }
-
--#define ATOMIC_OPS(op, c_op, asm_op) \
-- ATOMIC_OP(op, c_op, asm_op) \
-- ATOMIC_OP_RETURN(op, c_op, asm_op)
-+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , ) \
-+ __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
-
--ATOMIC_OPS(add, +=, addu)
--ATOMIC_OPS(sub, -=, subu)
-+#define ATOMIC_OPS(op, asm_op) \
-+ ATOMIC_OP(op, asm_op) \
-+ ATOMIC_OP_RETURN(op, asm_op)
-+
-+ATOMIC_OPS(add, add)
-+ATOMIC_OPS(sub, sub)
-
- #undef ATOMIC_OPS
- #undef ATOMIC_OP_RETURN
-+#undef __ATOMIC_OP_RETURN
- #undef ATOMIC_OP
-+#undef __ATOMIC_OP
-
- /*
- * atomic_sub_if_positive - conditionally subtract integer from atomic variable
-@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
- * Atomically test @v and subtract @i if @v is greater or equal than @i.
- * The function returns the old value of @v minus @i.
- */
--static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
-+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
- {
- int result;
-
-@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
- return result;
- }
-
--#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
--#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
-+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-+{
-+ return cmpxchg(&v->counter, old, new);
-+}
-+
-+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
-+ int new)
-+{
-+ return cmpxchg(&(v->counter), old, new);
-+}
-+
-+static inline int atomic_xchg(atomic_t *v, int new)
-+{
-+ return xchg(&v->counter, new);
-+}
-+
-+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
-+{
-+ return xchg(&(v->counter), new);
-+}
-
- /**
- * __atomic_add_unless - add unless the number is a given value
-@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
-
- #define atomic_dec_return(v) atomic_sub_return(1, (v))
- #define atomic_inc_return(v) atomic_add_return(1, (v))
-+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
-+{
-+ return atomic_add_return_unchecked(1, v);
-+}
-
- /*
- * atomic_sub_and_test - subtract value from variable and test result
-@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
- * other cases.
- */
- #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
-+{
-+ return atomic_add_return_unchecked(1, v) == 0;
-+}
-
- /*
- * atomic_dec_and_test - decrement by 1 and test
-@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
- * Atomically increments @v by 1.
- */
- #define atomic_inc(v) atomic_add(1, (v))
-+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
-+{
-+ atomic_add_unchecked(1, v);
-+}
-
- /*
- * atomic_dec - decrement and test
-@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
- * Atomically decrements @v by 1.
- */
- #define atomic_dec(v) atomic_sub(1, (v))
-+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
-+{
-+ atomic_sub_unchecked(1, v);
-+}
-
- /*
- * atomic_add_negative - add and test if negative
-@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
- * @v: pointer of type atomic64_t
- *
- */
--#define atomic64_read(v) ACCESS_ONCE((v)->counter)
-+static inline long atomic64_read(const atomic64_t *v)
-+{
-+ return ACCESS_ONCE(v->counter);
-+}
-+
-+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
-+{
-+ return ACCESS_ONCE(v->counter);
-+}
-
- /*
- * atomic64_set - set atomic variable
- * @v: pointer of type atomic64_t
- * @i: required value
- */
--#define atomic64_set(v, i) ((v)->counter = (i))
-+static inline void atomic64_set(atomic64_t *v, long i)
-+{
-+ v->counter = i;
-+}
-
--#define ATOMIC64_OP(op, c_op, asm_op) \
--static __inline__ void atomic64_##op(long i, atomic64_t * v) \
-+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
-+{
-+ v->counter = i;
-+}
-+
-+#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
-+static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
- { \
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
- long temp; \
- \
- __asm__ __volatile__( \
-- " .set arch=r4000 \n" \
-- "1: lld %0, %1 # atomic64_" #op " \n" \
-- " " #asm_op " %0, %2 \n" \
-+ " .set mips3 \n" \
-+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
-+ "2: " #asm_op " %0, %2 \n" \
- " scd %0, %1 \n" \
- " beqzl %0, 1b \n" \
-+ extable \
- " .set mips0 \n" \
- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
- : "Ir" (i)); \
- } else if (kernel_uses_llsc) { \
- long temp; \
- \
-- do { \
-- __asm__ __volatile__( \
-- " .set arch=r4000 \n" \
-- " lld %0, %1 # atomic64_" #op "\n" \
-- " " #asm_op " %0, %2 \n" \
-- " scd %0, %1 \n" \
-- " .set mips0 \n" \
-- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
-- : "Ir" (i)); \
-- } while (unlikely(!temp)); \
-+ __asm__ __volatile__( \
-+ " .set mips3 \n" \
-+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
-+ "2: " #asm_op " %0, %2 \n" \
-+ " scd %0, %1 \n" \
-+ " beqz %0, 1b \n" \
-+ extable \
-+ " .set mips0 \n" \
-+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
-+ : "Ir" (i)); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
-- v->counter c_op i; \
-+ __asm__ __volatile__( \
-+ "2: " #asm_op " %0, %1 \n" \
-+ extable \
-+ : "+" GCC_OFF12_ASM() (v->counter) : "Ir" (i)); \
- raw_local_irq_restore(flags); \
- } \
- }
-
--#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
--static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
-+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, _unchecked, asm_op##u, ) \
-+ __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE)
-+
-+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
-+static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
- { \
- long result; \
- \
-@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
- long temp; \
- \
- __asm__ __volatile__( \
-- " .set arch=r4000 \n" \
-+ " .set mips3 \n" \
- "1: lld %1, %2 # atomic64_" #op "_return\n" \
-- " " #asm_op " %0, %1, %3 \n" \
-+ "2: " #asm_op " %0, %1, %3 \n" \
- " scd %0, %2 \n" \
- " beqzl %0, 1b \n" \
-- " " #asm_op " %0, %1, %3 \n" \
-+ post_op \
-+ extable \
-+ "4: " #asm_op " %0, %1, %3 \n" \
-+ "5: \n" \
- " .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF12_ASM() (v->counter) \
-@@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
- } else if (kernel_uses_llsc) { \
- long temp; \
- \
-- do { \
-- __asm__ __volatile__( \
-- " .set arch=r4000 \n" \
-- " lld %1, %2 # atomic64_" #op "_return\n" \
-- " " #asm_op " %0, %1, %3 \n" \
-- " scd %0, %2 \n" \
-- " .set mips0 \n" \
-- : "=&r" (result), "=&r" (temp), \
-- "=" GCC_OFF12_ASM() (v->counter) \
-- : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
-- : "memory"); \
-- } while (unlikely(!result)); \
-+ __asm__ __volatile__( \
-+ " .set mips3 \n" \
-+ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
-+ "2: " #asm_op " %0, %1, %3 \n" \
-+ " scd %0, %2 \n" \
-+ " beqz %0, 1b \n" \
-+ post_op \
-+ extable \
-+ "4: " #asm_op " %0, %1, %3 \n" \
-+ "5: \n" \
-+ " .set mips0 \n" \
-+ : "=&r" (result), "=&r" (temp), \
-+ "=" GCC_OFF12_ASM() (v->counter) \
-+ : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
-+ : "memory"); \
- \
- result = temp; result c_op i; \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
-- result = v->counter; \
-- result c_op i; \
-- v->counter = result; \
-+ __asm__ __volatile__( \
-+ " ld %0, %1 \n" \
-+ "2: " #asm_op " %0, %1, %2 \n" \
-+ " sd %0, %1 \n" \
-+ "3: \n" \
-+ extable \
-+ : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter) \
-+ : "Ir" (i)); \
- raw_local_irq_restore(flags); \
- } \
- \
-@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
- return result; \
- }
-
--#define ATOMIC64_OPS(op, c_op, asm_op) \
-- ATOMIC64_OP(op, c_op, asm_op) \
-- ATOMIC64_OP_RETURN(op, c_op, asm_op)
-+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , ) \
-+ __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
-
--ATOMIC64_OPS(add, +=, daddu)
--ATOMIC64_OPS(sub, -=, dsubu)
-+#define ATOMIC64_OPS(op, asm_op) \
-+ ATOMIC64_OP(op, asm_op) \
-+ ATOMIC64_OP_RETURN(op, asm_op)
-+
-+ATOMIC64_OPS(add, dadd)
-+ATOMIC64_OPS(sub, dsub)
-
- #undef ATOMIC64_OPS
- #undef ATOMIC64_OP_RETURN
-+#undef __ATOMIC64_OP_RETURN
- #undef ATOMIC64_OP
-+#undef __ATOMIC64_OP
-+#undef __OVERFLOW_EXTABLE
-+#undef __OVERFLOW_POST
-
- /*
- * atomic64_sub_if_positive - conditionally subtract integer from atomic
-@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
- * Atomically test @v and subtract @i if @v is greater or equal than @i.
- * The function returns the old value of @v minus @i.
- */
--static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
-+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
- {
- long result;
-
-@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
- return result;
- }
-
--#define atomic64_cmpxchg(v, o, n) \
-- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
--#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
-+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
-+{
-+ return cmpxchg(&v->counter, old, new);
-+}
-+
-+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
-+ long new)
-+{
-+ return cmpxchg(&(v->counter), old, new);
-+}
-+
-+static inline long atomic64_xchg(atomic64_t *v, long new)
-+{
-+ return xchg(&v->counter, new);
-+}
-+
-+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
-+{
-+ return xchg(&(v->counter), new);
-+}
-
- /**
- * atomic64_add_unless - add unless the number is a given value
-@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
-
- #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
- #define atomic64_inc_return(v) atomic64_add_return(1, (v))
-+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
-
- /*
- * atomic64_sub_and_test - subtract value from variable and test result
-@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
- * other cases.
- */
- #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
-
- /*
- * atomic64_dec_and_test - decrement by 1 and test
-@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
- * Atomically increments @v by 1.
- */
- #define atomic64_inc(v) atomic64_add(1, (v))
-+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
-
- /*
- * atomic64_dec - decrement and test
-@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
- * Atomically decrements @v by 1.
- */
- #define atomic64_dec(v) atomic64_sub(1, (v))
-+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
-
- /*
- * atomic64_add_negative - add and test if negative
-diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
-index 2b8bbbc..4556df6 100644
---- a/arch/mips/include/asm/barrier.h
-+++ b/arch/mips/include/asm/barrier.h
-@@ -133,7 +133,7 @@
- do { \
- compiletime_assert_atomic_type(*p); \
- smp_mb(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
-diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
-index b4db69f..8f3b093 100644
---- a/arch/mips/include/asm/cache.h
-+++ b/arch/mips/include/asm/cache.h
-@@ -9,10 +9,11 @@
- #ifndef _ASM_CACHE_H
- #define _ASM_CACHE_H
-
-+#include <linux/const.h>
- #include <kmalloc.h>
-
- #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
- #define SMP_CACHE_BYTES L1_CACHE_BYTES
-diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
-index eb4d95d..f2f7f93 100644
---- a/arch/mips/include/asm/elf.h
-+++ b/arch/mips/include/asm/elf.h
-@@ -405,15 +405,18 @@ extern const char *__elf_platform;
- #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
- #endif
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
-+
-+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
-+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
-+#endif
-+
- #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
- struct linux_binprm;
- extern int arch_setup_additional_pages(struct linux_binprm *bprm,
- int uses_interp);
-
--struct mm_struct;
--extern unsigned long arch_randomize_brk(struct mm_struct *mm);
--#define arch_randomize_brk arch_randomize_brk
--
- struct arch_elf_state {
- int fp_abi;
- int interp_fp_abi;
-diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
-index c1f6afa..38cc6e9 100644
---- a/arch/mips/include/asm/exec.h
-+++ b/arch/mips/include/asm/exec.h
-@@ -12,6 +12,6 @@
- #ifndef _ASM_EXEC_H
- #define _ASM_EXEC_H
-
--extern unsigned long arch_align_stack(unsigned long sp);
-+#define arch_align_stack(x) ((x) & ~0xfUL)
-
- #endif /* _ASM_EXEC_H */
-diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
-index 9e8ef59..1139d6b 100644
---- a/arch/mips/include/asm/hw_irq.h
-+++ b/arch/mips/include/asm/hw_irq.h
-@@ -10,7 +10,7 @@
-
- #include <linux/atomic.h>
-
--extern atomic_t irq_err_count;
-+extern atomic_unchecked_t irq_err_count;
-
- /*
- * interrupt-retrigger: NOP for now. This may not be appropriate for all
-diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
-index 46dfc3c..a16b13a 100644
---- a/arch/mips/include/asm/local.h
-+++ b/arch/mips/include/asm/local.h
-@@ -12,15 +12,25 @@ typedef struct
- atomic_long_t a;
- } local_t;
-
-+typedef struct {
-+ atomic_long_unchecked_t a;
-+} local_unchecked_t;
-+
- #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
-
- #define local_read(l) atomic_long_read(&(l)->a)
-+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
- #define local_set(l, i) atomic_long_set(&(l)->a, (i))
-+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
-
- #define local_add(i, l) atomic_long_add((i), (&(l)->a))
-+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
- #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
-+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
- #define local_inc(l) atomic_long_inc(&(l)->a)
-+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
- #define local_dec(l) atomic_long_dec(&(l)->a)
-+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
-
- /*
- * Same as above, but return the result value
-@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
- return result;
- }
-
-+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
-+{
-+ unsigned long result;
-+
-+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
-+ unsigned long temp;
-+
-+ __asm__ __volatile__(
-+ " .set mips3 \n"
-+ "1:" __LL "%1, %2 # local_add_return \n"
-+ " addu %0, %1, %3 \n"
-+ __SC "%0, %2 \n"
-+ " beqzl %0, 1b \n"
-+ " addu %0, %1, %3 \n"
-+ " .set mips0 \n"
-+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
-+ : "Ir" (i), "m" (l->a.counter)
-+ : "memory");
-+ } else if (kernel_uses_llsc) {
-+ unsigned long temp;
-+
-+ __asm__ __volatile__(
-+ " .set mips3 \n"
-+ "1:" __LL "%1, %2 # local_add_return \n"
-+ " addu %0, %1, %3 \n"
-+ __SC "%0, %2 \n"
-+ " beqz %0, 1b \n"
-+ " addu %0, %1, %3 \n"
-+ " .set mips0 \n"
-+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
-+ : "Ir" (i), "m" (l->a.counter)
-+ : "memory");
-+ } else {
-+ unsigned long flags;
-+
-+ local_irq_save(flags);
-+ result = l->a.counter;
-+ result += i;
-+ l->a.counter = result;
-+ local_irq_restore(flags);
-+ }
-+
-+ return result;
-+}
-+
- static __inline__ long local_sub_return(long i, local_t * l)
- {
- unsigned long result;
-@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
-
- #define local_cmpxchg(l, o, n) \
- ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
-+#define local_cmpxchg_unchecked(l, o, n) \
-+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
- #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
-
- /**
-diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
-index 154b70a..426ae3d 100644
---- a/arch/mips/include/asm/page.h
-+++ b/arch/mips/include/asm/page.h
-@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
- #ifdef CONFIG_CPU_MIPS32
- typedef struct { unsigned long pte_low, pte_high; } pte_t;
- #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
-- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
-+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
- #else
- typedef struct { unsigned long long pte; } pte_t;
- #define pte_val(x) ((x).pte)
-diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
-index b336037..5b874cc 100644
---- a/arch/mips/include/asm/pgalloc.h
-+++ b/arch/mips/include/asm/pgalloc.h
-@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
- {
- set_pud(pud, __pud((unsigned long)pmd));
- }
-+
-+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
-+{
-+ pud_populate(mm, pud, pmd);
-+}
- #endif
-
- /*
-diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
-index 845016d..3303268 100644
---- a/arch/mips/include/asm/pgtable.h
-+++ b/arch/mips/include/asm/pgtable.h
-@@ -20,6 +20,9 @@
- #include <asm/io.h>
- #include <asm/pgtable-bits.h>
-
-+#define ktla_ktva(addr) (addr)
-+#define ktva_ktla(addr) (addr)
-+
- struct mm_struct;
- struct vm_area_struct;
-
-diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
-index e4440f9..8fb0005 100644
---- a/arch/mips/include/asm/thread_info.h
-+++ b/arch/mips/include/asm/thread_info.h
-@@ -106,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
- #define TIF_SECCOMP 4 /* secure computing */
- #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
- #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
-+/* li takes a 32bit immediate */
-+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
-+
- #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
- #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
- #define TIF_NOHZ 19 /* in adaptive nohz mode */
-@@ -141,14 +144,16 @@ static inline struct thread_info *current_thread_info(void)
- #define _TIF_USEDMSA (1<<TIF_USEDMSA)
- #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
- #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
-+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
-
- #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
- _TIF_SYSCALL_AUDIT | \
-- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
-+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
-+ _TIF_GRSEC_SETXID)
-
- /* work to do in syscall_trace_leave() */
- #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
-- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
-+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
-
- /* work to do on interrupt/exception return */
- #define _TIF_WORK_MASK \
-@@ -156,7 +161,7 @@ static inline struct thread_info *current_thread_info(void)
- /* work to do on any return to u-space */
- #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
- _TIF_WORK_SYSCALL_EXIT | \
-- _TIF_SYSCALL_TRACEPOINT)
-+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
-
- /*
- * We stash processor id into a COP0 register to retrieve it fast
-diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
-index bf8b324..cec5705 100644
---- a/arch/mips/include/asm/uaccess.h
-+++ b/arch/mips/include/asm/uaccess.h
-@@ -130,6 +130,7 @@ extern u64 __ua_limit;
- __ok == 0; \
- })
-
-+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
- #define access_ok(type, addr, size) \
- likely(__access_ok((addr), (size), __access_mask))
-
-diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
-index 1188e00..41cf144 100644
---- a/arch/mips/kernel/binfmt_elfn32.c
-+++ b/arch/mips/kernel/binfmt_elfn32.c
-@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
- #undef ELF_ET_DYN_BASE
- #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
-+
-+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
-+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
-+#endif
-+
- #include <asm/processor.h>
- #include <linux/module.h>
- #include <linux/elfcore.h>
-diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
-index 9287678..f870e47 100644
---- a/arch/mips/kernel/binfmt_elfo32.c
-+++ b/arch/mips/kernel/binfmt_elfo32.c
-@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
- #undef ELF_ET_DYN_BASE
- #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
-+
-+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
-+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
-+#endif
-+
- #include <asm/processor.h>
-
- #include <linux/module.h>
-diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
-index a74ec3a..4f06f18 100644
---- a/arch/mips/kernel/i8259.c
-+++ b/arch/mips/kernel/i8259.c
-@@ -202,7 +202,7 @@ spurious_8259A_irq:
- printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
- spurious_irq_mask |= irqmask;
- }
-- atomic_inc(&irq_err_count);
-+ atomic_inc_unchecked(&irq_err_count);
- /*
- * Theoretically we do not have to handle this IRQ,
- * but in Linux this does not cause problems and is
-diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
-index 44a1f79..2bd6aa3 100644
---- a/arch/mips/kernel/irq-gt641xx.c
-+++ b/arch/mips/kernel/irq-gt641xx.c
-@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
- }
- }
-
-- atomic_inc(&irq_err_count);
-+ atomic_inc_unchecked(&irq_err_count);
- }
-
- void __init gt641xx_irq_init(void)
-diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
-index d2bfbc2..a8eacd2 100644
---- a/arch/mips/kernel/irq.c
-+++ b/arch/mips/kernel/irq.c
-@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
- printk("unexpected IRQ # %d\n", irq);
- }
-
--atomic_t irq_err_count;
-+atomic_unchecked_t irq_err_count;
-
- int arch_show_interrupts(struct seq_file *p, int prec)
- {
-- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
-+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
- return 0;
- }
-
- asmlinkage void spurious_interrupt(void)
- {
-- atomic_inc(&irq_err_count);
-+ atomic_inc_unchecked(&irq_err_count);
- }
-
- void __init init_IRQ(void)
-@@ -109,7 +109,10 @@ void __init init_IRQ(void)
- #endif
- }
-
-+
- #ifdef DEBUG_STACKOVERFLOW
-+extern void gr_handle_kernel_exploit(void);
-+
- static inline void check_stack_overflow(void)
- {
- unsigned long sp;
-@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
- printk("do_IRQ: stack overflow: %ld\n",
- sp - sizeof(struct thread_info));
- dump_stack();
-+ gr_handle_kernel_exploit();
- }
- }
- #else
-diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
-index 0614717..002fa43 100644
---- a/arch/mips/kernel/pm-cps.c
-+++ b/arch/mips/kernel/pm-cps.c
-@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
- nc_core_ready_count = nc_addr;
-
- /* Ensure ready_count is zero-initialised before the assembly runs */
-- ACCESS_ONCE(*nc_core_ready_count) = 0;
-+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
- coupled_barrier(&per_cpu(pm_barrier, core), online);
-
- /* Run the generated entry code */
-diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
-index 85bff5d..39bc202 100644
---- a/arch/mips/kernel/process.c
-+++ b/arch/mips/kernel/process.c
-@@ -534,18 +534,6 @@ out:
- return pc;
- }
-
--/*
-- * Don't forget that the stack pointer must be aligned on a 8 bytes
-- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
-- */
--unsigned long arch_align_stack(unsigned long sp)
--{
-- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-- sp -= get_random_int() & ~PAGE_MASK;
--
-- return sp & ALMASK;
--}
--
- static void arch_dump_stack(void *info)
- {
- struct pt_regs *regs;
-diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
-index 5104528..950bbdc 100644
---- a/arch/mips/kernel/ptrace.c
-+++ b/arch/mips/kernel/ptrace.c
-@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
- return ret;
- }
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+extern void gr_delayed_cred_worker(void);
-+#endif
-+
- /*
- * Notification of system call entry/exit
- * - triggered by current->work.syscall_trace
-@@ -779,6 +783,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
- tracehook_report_syscall_entry(regs))
- ret = -1;
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
-+ gr_delayed_cred_worker();
-+#endif
-+
- if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_enter(regs, regs->regs[2]);
-
-diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
-index 07fc524..b9d7f28 100644
---- a/arch/mips/kernel/reset.c
-+++ b/arch/mips/kernel/reset.c
-@@ -13,6 +13,7 @@
- #include <linux/reboot.h>
-
- #include <asm/reboot.h>
-+#include <asm/bug.h>
-
- /*
- * Urgs ... Too many MIPS machines to handle this in a generic way.
-@@ -29,16 +30,19 @@ void machine_restart(char *command)
- {
- if (_machine_restart)
- _machine_restart(command);
-+ BUG();
- }
-
- void machine_halt(void)
- {
- if (_machine_halt)
- _machine_halt();
-+ BUG();
- }
-
- void machine_power_off(void)
- {
- if (pm_power_off)
- pm_power_off();
-+ BUG();
- }
-diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
-index 2242bdd..b284048 100644
---- a/arch/mips/kernel/sync-r4k.c
-+++ b/arch/mips/kernel/sync-r4k.c
-@@ -18,8 +18,8 @@
- #include <asm/mipsregs.h>
-
- static atomic_t count_start_flag = ATOMIC_INIT(0);
--static atomic_t count_count_start = ATOMIC_INIT(0);
--static atomic_t count_count_stop = ATOMIC_INIT(0);
-+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
-+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
- static atomic_t count_reference = ATOMIC_INIT(0);
-
- #define COUNTON 100
-@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
-
- for (i = 0; i < NR_LOOPS; i++) {
- /* slaves loop on '!= 2' */
-- while (atomic_read(&count_count_start) != 1)
-+ while (atomic_read_unchecked(&count_count_start) != 1)
- mb();
-- atomic_set(&count_count_stop, 0);
-+ atomic_set_unchecked(&count_count_stop, 0);
- smp_wmb();
-
- /* this lets the slaves write their count register */
-- atomic_inc(&count_count_start);
-+ atomic_inc_unchecked(&count_count_start);
-
- /*
- * Everyone initialises count in the last loop:
-@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
- /*
- * Wait for all slaves to leave the synchronization point:
- */
-- while (atomic_read(&count_count_stop) != 1)
-+ while (atomic_read_unchecked(&count_count_stop) != 1)
- mb();
-- atomic_set(&count_count_start, 0);
-+ atomic_set_unchecked(&count_count_start, 0);
- smp_wmb();
-- atomic_inc(&count_count_stop);
-+ atomic_inc_unchecked(&count_count_stop);
- }
- /* Arrange for an interrupt in a short while */
- write_c0_compare(read_c0_count() + COUNTON);
-@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
- initcount = atomic_read(&count_reference);
-
- for (i = 0; i < NR_LOOPS; i++) {
-- atomic_inc(&count_count_start);
-- while (atomic_read(&count_count_start) != 2)
-+ atomic_inc_unchecked(&count_count_start);
-+ while (atomic_read_unchecked(&count_count_start) != 2)
- mb();
-
- /*
-@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
- if (i == NR_LOOPS-1)
- write_c0_count(initcount);
-
-- atomic_inc(&count_count_stop);
-- while (atomic_read(&count_count_stop) != 2)
-+ atomic_inc_unchecked(&count_count_stop);
-+ while (atomic_read_unchecked(&count_count_stop) != 2)
- mb();
- }
- /* Arrange for an interrupt in a short while */
-diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
-index c3b41e2..46c32e9 100644
---- a/arch/mips/kernel/traps.c
-+++ b/arch/mips/kernel/traps.c
-@@ -688,7 +688,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
- siginfo_t info;
-
- prev_state = exception_enter();
-- die_if_kernel("Integer overflow", regs);
-+ if (unlikely(!user_mode(regs))) {
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ if (fixup_exception(regs)) {
-+ pax_report_refcount_overflow(regs);
-+ exception_exit(prev_state);
-+ return;
-+ }
-+#endif
-+
-+ die("Integer overflow", regs);
-+ }
-
- info.si_code = FPE_INTOVF;
- info.si_signo = SIGFPE;
-diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
-index 270bbd4..c01932a 100644
---- a/arch/mips/kvm/mips.c
-+++ b/arch/mips/kvm/mips.c
-@@ -815,7 +815,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
- return r;
- }
-
--int kvm_arch_init(void *opaque)
-+int kvm_arch_init(const void *opaque)
- {
- if (kvm_mips_callbacks) {
- kvm_err("kvm: module already exists\n");
-diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
-index 70ab5d6..62940fe 100644
---- a/arch/mips/mm/fault.c
-+++ b/arch/mips/mm/fault.c
-@@ -28,6 +28,23 @@
- #include <asm/highmem.h> /* For VMALLOC_END */
- #include <linux/kdebug.h>
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ unsigned long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 5; i++) {
-+ unsigned int c;
-+ if (get_user(c, (unsigned int *)pc+i))
-+ printk(KERN_CONT "???????? ");
-+ else
-+ printk(KERN_CONT "%08x ", c);
-+ }
-+ printk("\n");
-+}
-+#endif
-+
- /*
- * This routine handles page faults. It determines the address,
- * and the problem, and then passes it off to one of the appropriate
-@@ -201,6 +218,14 @@ bad_area:
- bad_area_nosemaphore:
- /* User mode accesses just cause a SIGSEGV */
- if (user_mode(regs)) {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
-+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
-+ do_group_exit(SIGKILL);
-+ }
-+#endif
-+
- tsk->thread.cp0_badvaddr = address;
- tsk->thread.error_code = write;
- #if 0
-diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
-index f1baadd..5472dca 100644
---- a/arch/mips/mm/mmap.c
-+++ b/arch/mips/mm/mmap.c
-@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
- struct vm_area_struct *vma;
- unsigned long addr = addr0;
- int do_color_align;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
- struct vm_unmapped_area_info info;
-
- if (unlikely(len > TASK_SIZE))
-@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
- do_color_align = 1;
-
- /* requesting a specific address */
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (addr) {
- if (do_color_align)
- addr = COLOUR_ALIGN(addr, pgoff);
-@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma(mm, addr);
-- if (TASK_SIZE - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
- return addr;
- }
-
- info.length = len;
- info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
- info.align_offset = pgoff << PAGE_SHIFT;
-+ info.threadstack_offset = offset;
-
- if (dir == DOWN) {
- info.flags = VM_UNMAPPED_AREA_TOPDOWN;
-@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- {
- unsigned long random_factor = 0UL;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (current->flags & PF_RANDOMIZE) {
- random_factor = get_random_int();
- random_factor = random_factor << PAGE_SHIFT;
-@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
-
- if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
- mm->mmap_base = mmap_base(random_factor);
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- }
- }
-
--static inline unsigned long brk_rnd(void)
--{
-- unsigned long rnd = get_random_int();
--
-- rnd = rnd << PAGE_SHIFT;
-- /* 8MB for 32bit, 256MB for 64bit */
-- if (TASK_IS_32BIT_ADDR)
-- rnd = rnd & 0x7ffffful;
-- else
-- rnd = rnd & 0xffffffful;
--
-- return rnd;
--}
--
--unsigned long arch_randomize_brk(struct mm_struct *mm)
--{
-- unsigned long base = mm->brk;
-- unsigned long ret;
--
-- ret = PAGE_ALIGN(base + brk_rnd());
--
-- if (ret < mm->brk)
-- return mm->brk;
--
-- return ret;
--}
--
- int __virt_addr_valid(const volatile void *kaddr)
- {
- return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
-diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
-index d07e041..bedb72b 100644
---- a/arch/mips/pci/pci-octeon.c
-+++ b/arch/mips/pci/pci-octeon.c
-@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
-
-
- static struct pci_ops octeon_pci_ops = {
-- octeon_read_config,
-- octeon_write_config,
-+ .read = octeon_read_config,
-+ .write = octeon_write_config,
- };
-
- static struct resource octeon_pci_mem_resource = {
-diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
-index 5e36c33..eb4a17b 100644
---- a/arch/mips/pci/pcie-octeon.c
-+++ b/arch/mips/pci/pcie-octeon.c
-@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
- }
-
- static struct pci_ops octeon_pcie0_ops = {
-- octeon_pcie0_read_config,
-- octeon_pcie0_write_config,
-+ .read = octeon_pcie0_read_config,
-+ .write = octeon_pcie0_write_config,
- };
-
- static struct resource octeon_pcie0_mem_resource = {
-@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
- };
-
- static struct pci_ops octeon_pcie1_ops = {
-- octeon_pcie1_read_config,
-- octeon_pcie1_write_config,
-+ .read = octeon_pcie1_read_config,
-+ .write = octeon_pcie1_write_config,
- };
-
- static struct resource octeon_pcie1_mem_resource = {
-@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
- };
-
- static struct pci_ops octeon_dummy_ops = {
-- octeon_dummy_read_config,
-- octeon_dummy_write_config,
-+ .read = octeon_dummy_read_config,
-+ .write = octeon_dummy_write_config,
- };
-
- static struct resource octeon_dummy_mem_resource = {
-diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
-index a2358b4..7cead4f 100644
---- a/arch/mips/sgi-ip27/ip27-nmi.c
-+++ b/arch/mips/sgi-ip27/ip27-nmi.c
-@@ -187,9 +187,9 @@ void
- cont_nmi_dump(void)
- {
- #ifndef REAL_NMI_SIGNAL
-- static atomic_t nmied_cpus = ATOMIC_INIT(0);
-+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
-
-- atomic_inc(&nmied_cpus);
-+ atomic_inc_unchecked(&nmied_cpus);
- #endif
- /*
- * Only allow 1 cpu to proceed
-@@ -233,7 +233,7 @@ cont_nmi_dump(void)
- udelay(10000);
- }
- #else
-- while (atomic_read(&nmied_cpus) != num_online_cpus());
-+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
- #endif
-
- /*
-diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
-index a046b30..6799527 100644
---- a/arch/mips/sni/rm200.c
-+++ b/arch/mips/sni/rm200.c
-@@ -270,7 +270,7 @@ spurious_8259A_irq:
- "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
- spurious_irq_mask |= irqmask;
- }
-- atomic_inc(&irq_err_count);
-+ atomic_inc_unchecked(&irq_err_count);
- /*
- * Theoretically we do not have to handle this IRQ,
- * but in Linux this does not cause problems and is
-diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
-index 41e873b..34d33a7 100644
---- a/arch/mips/vr41xx/common/icu.c
-+++ b/arch/mips/vr41xx/common/icu.c
-@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
-
- printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
-
-- atomic_inc(&irq_err_count);
-+ atomic_inc_unchecked(&irq_err_count);
-
- return -1;
- }
-diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
-index ae0e4ee..e8f0692 100644
---- a/arch/mips/vr41xx/common/irq.c
-+++ b/arch/mips/vr41xx/common/irq.c
-@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
- irq_cascade_t *cascade;
-
- if (irq >= NR_IRQS) {
-- atomic_inc(&irq_err_count);
-+ atomic_inc_unchecked(&irq_err_count);
- return;
- }
-
-@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
- ret = cascade->get_irq(irq);
- irq = ret;
- if (ret < 0)
-- atomic_inc(&irq_err_count);
-+ atomic_inc_unchecked(&irq_err_count);
- else
- irq_dispatch(irq);
- if (!irqd_irq_disabled(idata) && chip->irq_unmask)
-diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
-index 967d144..db12197 100644
---- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
-+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
-@@ -11,12 +11,14 @@
- #ifndef _ASM_PROC_CACHE_H
- #define _ASM_PROC_CACHE_H
-
-+#include <linux/const.h>
-+
- /* L1 cache */
-
- #define L1_CACHE_NWAYS 4 /* number of ways in caches */
- #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
--#define L1_CACHE_BYTES 16 /* bytes per entry */
- #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
- #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
-
- #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
-diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
-index bcb5df2..84fabd2 100644
---- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
-+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
-@@ -16,13 +16,15 @@
- #ifndef _ASM_PROC_CACHE_H
- #define _ASM_PROC_CACHE_H
-
-+#include <linux/const.h>
-+
- /*
- * L1 cache
- */
- #define L1_CACHE_NWAYS 4 /* number of ways in caches */
- #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
--#define L1_CACHE_BYTES 32 /* bytes per entry */
- #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
- #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
-
- #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
-diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
-index 4ce7a01..449202a 100644
---- a/arch/openrisc/include/asm/cache.h
-+++ b/arch/openrisc/include/asm/cache.h
-@@ -19,11 +19,13 @@
- #ifndef __ASM_OPENRISC_CACHE_H
- #define __ASM_OPENRISC_CACHE_H
-
-+#include <linux/const.h>
-+
- /* FIXME: How can we replace these with values from the CPU...
- * they shouldn't be hard-coded!
- */
-
--#define L1_CACHE_BYTES 16
- #define L1_CACHE_SHIFT 4
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #endif /* __ASM_OPENRISC_CACHE_H */
-diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
-index 226f8ca..9d9b87d 100644
---- a/arch/parisc/include/asm/atomic.h
-+++ b/arch/parisc/include/asm/atomic.h
-@@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
- return dec;
- }
-
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
-+
- #endif /* !CONFIG_64BIT */
-
-
-diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
-index 47f11c7..3420df2 100644
---- a/arch/parisc/include/asm/cache.h
-+++ b/arch/parisc/include/asm/cache.h
-@@ -5,6 +5,7 @@
- #ifndef __ARCH_PARISC_CACHE_H
- #define __ARCH_PARISC_CACHE_H
-
-+#include <linux/const.h>
-
- /*
- * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
-@@ -15,13 +16,13 @@
- * just ruin performance.
- */
- #ifdef CONFIG_PA20
--#define L1_CACHE_BYTES 64
- #define L1_CACHE_SHIFT 6
- #else
--#define L1_CACHE_BYTES 32
- #define L1_CACHE_SHIFT 5
- #endif
-
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-+
- #ifndef __ASSEMBLY__
-
- #define SMP_CACHE_BYTES L1_CACHE_BYTES
-diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
-index 3391d06..c23a2cc 100644
---- a/arch/parisc/include/asm/elf.h
-+++ b/arch/parisc/include/asm/elf.h
-@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
-
- #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE 0x10000UL
-+
-+#define PAX_DELTA_MMAP_LEN 16
-+#define PAX_DELTA_STACK_LEN 16
-+#endif
-+
- /* This yields a mask that user programs can use to figure out what
- instruction set this CPU supports. This could be done in user space,
- but it's not easy, and we've already done it here. */
-diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
-index f213f5b..0af3e8e 100644
---- a/arch/parisc/include/asm/pgalloc.h
-+++ b/arch/parisc/include/asm/pgalloc.h
-@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
- (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
- }
-
-+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
-+{
-+ pgd_populate(mm, pgd, pmd);
-+}
-+
- static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
- {
- pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
-@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
- #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
- #define pmd_free(mm, x) do { } while (0)
- #define pgd_populate(mm, pmd, pte) BUG()
-+#define pgd_populate_kernel(mm, pmd, pte) BUG()
-
- #endif
-
-diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
-index 22b89d1..ce34230 100644
---- a/arch/parisc/include/asm/pgtable.h
-+++ b/arch/parisc/include/asm/pgtable.h
-@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
- #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
- #define PAGE_COPY PAGE_EXECREAD
- #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
-+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
-+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
-+#else
-+# define PAGE_SHARED_NOEXEC PAGE_SHARED
-+# define PAGE_COPY_NOEXEC PAGE_COPY
-+# define PAGE_READONLY_NOEXEC PAGE_READONLY
-+#endif
-+
- #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
- #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
- #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
-diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
-index a5cb070..8604ddc 100644
---- a/arch/parisc/include/asm/uaccess.h
-+++ b/arch/parisc/include/asm/uaccess.h
-@@ -243,10 +243,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
- const void __user *from,
- unsigned long n)
- {
-- int sz = __compiletime_object_size(to);
-+ size_t sz = __compiletime_object_size(to);
- int ret = -EFAULT;
-
-- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
-+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
- ret = __copy_from_user(to, from, n);
- else
- copy_from_user_overflow();
-diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
-index 5822e8e..bc5e638 100644
---- a/arch/parisc/kernel/module.c
-+++ b/arch/parisc/kernel/module.c
-@@ -98,16 +98,38 @@
-
- /* three functions to determine where in the module core
- * or init pieces the location is */
-+static inline int in_init_rx(struct module *me, void *loc)
-+{
-+ return (loc >= me->module_init_rx &&
-+ loc < (me->module_init_rx + me->init_size_rx));
-+}
-+
-+static inline int in_init_rw(struct module *me, void *loc)
-+{
-+ return (loc >= me->module_init_rw &&
-+ loc < (me->module_init_rw + me->init_size_rw));
-+}
-+
- static inline int in_init(struct module *me, void *loc)
- {
-- return (loc >= me->module_init &&
-- loc <= (me->module_init + me->init_size));
-+ return in_init_rx(me, loc) || in_init_rw(me, loc);
-+}
-+
-+static inline int in_core_rx(struct module *me, void *loc)
-+{
-+ return (loc >= me->module_core_rx &&
-+ loc < (me->module_core_rx + me->core_size_rx));
-+}
-+
-+static inline int in_core_rw(struct module *me, void *loc)
-+{
-+ return (loc >= me->module_core_rw &&
-+ loc < (me->module_core_rw + me->core_size_rw));
- }
-
- static inline int in_core(struct module *me, void *loc)
- {
-- return (loc >= me->module_core &&
-- loc <= (me->module_core + me->core_size));
-+ return in_core_rx(me, loc) || in_core_rw(me, loc);
- }
-
- static inline int in_local(struct module *me, void *loc)
-@@ -367,13 +389,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
- }
-
- /* align things a bit */
-- me->core_size = ALIGN(me->core_size, 16);
-- me->arch.got_offset = me->core_size;
-- me->core_size += gots * sizeof(struct got_entry);
-+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
-+ me->arch.got_offset = me->core_size_rw;
-+ me->core_size_rw += gots * sizeof(struct got_entry);
-
-- me->core_size = ALIGN(me->core_size, 16);
-- me->arch.fdesc_offset = me->core_size;
-- me->core_size += fdescs * sizeof(Elf_Fdesc);
-+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
-+ me->arch.fdesc_offset = me->core_size_rw;
-+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
-
- me->arch.got_max = gots;
- me->arch.fdesc_max = fdescs;
-@@ -391,7 +413,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
-
- BUG_ON(value == 0);
-
-- got = me->module_core + me->arch.got_offset;
-+ got = me->module_core_rw + me->arch.got_offset;
- for (i = 0; got[i].addr; i++)
- if (got[i].addr == value)
- goto out;
-@@ -409,7 +431,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
- #ifdef CONFIG_64BIT
- static Elf_Addr get_fdesc(struct module *me, unsigned long value)
- {
-- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
-+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
-
- if (!value) {
- printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
-@@ -427,7 +449,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
-
- /* Create new one */
- fdesc->addr = value;
-- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
-+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
- return (Elf_Addr)fdesc;
- }
- #endif /* CONFIG_64BIT */
-@@ -839,7 +861,7 @@ register_unwind_table(struct module *me,
-
- table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
- end = table + sechdrs[me->arch.unwind_section].sh_size;
-- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
-+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
-
- DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
- me->arch.unwind_section, table, end, gp);
-diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
-index e1ffea2..46ed66e 100644
---- a/arch/parisc/kernel/sys_parisc.c
-+++ b/arch/parisc/kernel/sys_parisc.c
-@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long task_size = TASK_SIZE;
- int do_color_align, last_mmap;
- struct vm_unmapped_area_info info;
-+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
-
- if (len > task_size)
- return -ENOMEM;
-@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- goto found_addr;
- }
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (addr) {
- if (do_color_align && last_mmap)
- addr = COLOR_ALIGN(addr, last_mmap, pgoff);
-@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- info.high_limit = mmap_upper_limit();
- info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
- info.align_offset = shared_align_offset(last_mmap, pgoff);
-+ info.threadstack_offset = offset;
- addr = vm_unmapped_area(&info);
-
- found_addr:
-@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- unsigned long addr = addr0;
- int do_color_align, last_mmap;
- struct vm_unmapped_area_info info;
-+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
-
- #ifdef CONFIG_64BIT
- /* This should only ever run for 32-bit processes. */
-@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- }
-
- /* requesting a specific address */
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (addr) {
- if (do_color_align && last_mmap)
- addr = COLOR_ALIGN(addr, last_mmap, pgoff);
-@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- info.high_limit = mm->mmap_base;
- info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
- info.align_offset = shared_align_offset(last_mmap, pgoff);
-+ info.threadstack_offset = offset;
- addr = vm_unmapped_area(&info);
- if (!(addr & ~PAGE_MASK))
- goto found_addr;
-@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- mm->mmap_legacy_base = mmap_legacy_base();
- mm->mmap_base = mmap_upper_limit();
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
-+ mm->mmap_legacy_base += mm->delta_mmap;
-+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+ }
-+#endif
-+
- if (mmap_is_legacy()) {
- mm->mmap_base = mm->mmap_legacy_base;
- mm->get_unmapped_area = arch_get_unmapped_area;
-diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
-index 47ee620..1107387 100644
---- a/arch/parisc/kernel/traps.c
-+++ b/arch/parisc/kernel/traps.c
-@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
-
- down_read(&current->mm->mmap_sem);
- vma = find_vma(current->mm,regs->iaoq[0]);
-- if (vma && (regs->iaoq[0] >= vma->vm_start)
-- && (vma->vm_flags & VM_EXEC)) {
--
-+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
- fault_address = regs->iaoq[0];
- fault_space = regs->iasq[0];
-
-diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
-index e5120e6..8ddb5cc 100644
---- a/arch/parisc/mm/fault.c
-+++ b/arch/parisc/mm/fault.c
-@@ -15,6 +15,7 @@
- #include <linux/sched.h>
- #include <linux/interrupt.h>
- #include <linux/module.h>
-+#include <linux/unistd.h>
-
- #include <asm/uaccess.h>
- #include <asm/traps.h>
-@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
- static unsigned long
- parisc_acctyp(unsigned long code, unsigned int inst)
- {
-- if (code == 6 || code == 16)
-+ if (code == 6 || code == 7 || code == 16)
- return VM_EXEC;
-
- switch (inst & 0xf0000000) {
-@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
- }
- #endif
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+/*
-+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ * 2 when rt_sigreturn trampoline was detected
-+ * 3 when unpatched PLT trampoline was detected
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+ int err;
-+
-+ do { /* PaX: unpatched PLT emulation */
-+ unsigned int bl, depwi;
-+
-+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
-+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
-+
-+ if (err)
-+ break;
-+
-+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
-+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
-+
-+ err = get_user(ldw, (unsigned int *)addr);
-+ err |= get_user(bv, (unsigned int *)(addr+4));
-+ err |= get_user(ldw2, (unsigned int *)(addr+8));
-+
-+ if (err)
-+ break;
-+
-+ if (ldw == 0x0E801096U &&
-+ bv == 0xEAC0C000U &&
-+ ldw2 == 0x0E881095U)
-+ {
-+ unsigned int resolver, map;
-+
-+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
-+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
-+ if (err)
-+ break;
-+
-+ regs->gr[20] = instruction_pointer(regs)+8;
-+ regs->gr[21] = map;
-+ regs->gr[22] = resolver;
-+ regs->iaoq[0] = resolver | 3UL;
-+ regs->iaoq[1] = regs->iaoq[0] + 4;
-+ return 3;
-+ }
-+ }
-+ } while (0);
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+
-+#ifndef CONFIG_PAX_EMUSIGRT
-+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
-+ return 1;
-+#endif
-+
-+ do { /* PaX: rt_sigreturn emulation */
-+ unsigned int ldi1, ldi2, bel, nop;
-+
-+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
-+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
-+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
-+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
-+
-+ if (err)
-+ break;
-+
-+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
-+ ldi2 == 0x3414015AU &&
-+ bel == 0xE4008200U &&
-+ nop == 0x08000240U)
-+ {
-+ regs->gr[25] = (ldi1 & 2) >> 1;
-+ regs->gr[20] = __NR_rt_sigreturn;
-+ regs->gr[31] = regs->iaoq[1] + 16;
-+ regs->sr[0] = regs->iasq[1];
-+ regs->iaoq[0] = 0x100UL;
-+ regs->iaoq[1] = regs->iaoq[0] + 4;
-+ regs->iasq[0] = regs->sr[2];
-+ regs->iasq[1] = regs->sr[2];
-+ return 2;
-+ }
-+ } while (0);
-+#endif
-+
-+ return 1;
-+}
-+
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ unsigned long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 5; i++) {
-+ unsigned int c;
-+ if (get_user(c, (unsigned int *)pc+i))
-+ printk(KERN_CONT "???????? ");
-+ else
-+ printk(KERN_CONT "%08x ", c);
-+ }
-+ printk("\n");
-+}
-+#endif
-+
- int fixup_exception(struct pt_regs *regs)
- {
- const struct exception_table_entry *fix;
-@@ -234,8 +345,33 @@ retry:
-
- good_area:
-
-- if ((vma->vm_flags & acc_type) != acc_type)
-+ if ((vma->vm_flags & acc_type) != acc_type) {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
-+ (address & ~3UL) == instruction_pointer(regs))
-+ {
-+ up_read(&mm->mmap_sem);
-+ switch (pax_handle_fetch_fault(regs)) {
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+ case 3:
-+ return;
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+ case 2:
-+ return;
-+#endif
-+
-+ }
-+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
-+ do_group_exit(SIGKILL);
-+ }
-+#endif
-+
- goto bad_area;
-+ }
-
- /*
- * If for any reason at all we couldn't handle the fault, make
-diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
-index a2a168e..e484682 100644
---- a/arch/powerpc/Kconfig
-+++ b/arch/powerpc/Kconfig
-@@ -408,6 +408,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
- config KEXEC
- bool "kexec system call"
- depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
-+ depends on !GRKERNSEC_KMEM
- help
- kexec is a system call that implements the ability to shutdown your
- current kernel, and to start another kernel. It is like a reboot
-diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
-index 512d278..d31fadd 100644
---- a/arch/powerpc/include/asm/atomic.h
-+++ b/arch/powerpc/include/asm/atomic.h
-@@ -12,6 +12,11 @@
-
- #define ATOMIC_INIT(i) { (i) }
-
-+#define _ASM_EXTABLE(from, to) \
-+" .section __ex_table,\"a\"\n" \
-+ PPC_LONG" " #from ", " #to"\n" \
-+" .previous\n"
-+
- static __inline__ int atomic_read(const atomic_t *v)
- {
- int t;
-@@ -21,39 +26,80 @@ static __inline__ int atomic_read(const atomic_t *v)
- return t;
- }
-
-+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
-+{
-+ int t;
-+
-+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
-+
-+ return t;
-+}
-+
- static __inline__ void atomic_set(atomic_t *v, int i)
- {
- __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
- }
-
--#define ATOMIC_OP(op, asm_op) \
--static __inline__ void atomic_##op(int a, atomic_t *v) \
-+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
-+{
-+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
-+}
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+#define __REFCOUNT_OP(op) op##o.
-+#define __OVERFLOW_PRE \
-+ " mcrxr cr0\n"
-+#define __OVERFLOW_POST \
-+ " bf 4*cr0+so, 3f\n" \
-+ "2: .long 0x00c00b00\n" \
-+ "3:\n"
-+#define __OVERFLOW_EXTABLE \
-+ "\n4:\n"
-+ _ASM_EXTABLE(2b, 4b)
-+#else
-+#define __REFCOUNT_OP(op) op
-+#define __OVERFLOW_PRE
-+#define __OVERFLOW_POST
-+#define __OVERFLOW_EXTABLE
-+#endif
-+
-+#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
-+static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
- { \
- int t; \
- \
- __asm__ __volatile__( \
--"1: lwarx %0,0,%3 # atomic_" #op "\n" \
-+"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
-+ pre_op \
- #asm_op " %0,%2,%0\n" \
-+ post_op \
- PPC405_ERR77(0,%3) \
- " stwcx. %0,0,%3 \n" \
- " bne- 1b\n" \
-+ extable \
- : "=&r" (t), "+m" (v->counter) \
- : "r" (a), "r" (&v->counter) \
- : "cc"); \
- } \
-
--#define ATOMIC_OP_RETURN(op, asm_op) \
--static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
-+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
-+ __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
-+
-+#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
-+static inline int atomic_##op##_return##suffix(int a, atomic##suffix##_t *v)\
- { \
- int t; \
- \
- __asm__ __volatile__( \
- PPC_ATOMIC_ENTRY_BARRIER \
--"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
-+"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "\n" \
-+ pre_op \
- #asm_op " %0,%1,%0\n" \
-+ post_op \
- PPC405_ERR77(0,%2) \
- " stwcx. %0,0,%2 \n" \
- " bne- 1b\n" \
-+ extable \
- PPC_ATOMIC_EXIT_BARRIER \
- : "=&r" (t) \
- : "r" (a), "r" (&v->counter) \
-@@ -62,6 +108,9 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
- return t; \
- }
-
-+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
-+ __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
-+
- #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
-
- ATOMIC_OPS(add, add)
-@@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
-
- #undef ATOMIC_OPS
- #undef ATOMIC_OP_RETURN
-+#undef __ATOMIC_OP_RETURN
- #undef ATOMIC_OP
-+#undef __ATOMIC_OP
-
- #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
--static __inline__ void atomic_inc(atomic_t *v)
--{
-- int t;
-+/*
-+ * atomic_inc - increment atomic variable
-+ * @v: pointer of type atomic_t
-+ *
-+ * Atomically increments @v by 1
-+ */
-+#define atomic_inc(v) atomic_add(1, (v))
-+#define atomic_inc_return(v) atomic_add_return(1, (v))
-
-- __asm__ __volatile__(
--"1: lwarx %0,0,%2 # atomic_inc\n\
-- addic %0,%0,1\n"
-- PPC405_ERR77(0,%2)
--" stwcx. %0,0,%2 \n\
-- bne- 1b"
-- : "=&r" (t), "+m" (v->counter)
-- : "r" (&v->counter)
-- : "cc", "xer");
-+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
-+{
-+ atomic_add_unchecked(1, v);
- }
-
--static __inline__ int atomic_inc_return(atomic_t *v)
-+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
- {
-- int t;
--
-- __asm__ __volatile__(
-- PPC_ATOMIC_ENTRY_BARRIER
--"1: lwarx %0,0,%1 # atomic_inc_return\n\
-- addic %0,%0,1\n"
-- PPC405_ERR77(0,%1)
--" stwcx. %0,0,%1 \n\
-- bne- 1b"
-- PPC_ATOMIC_EXIT_BARRIER
-- : "=&r" (t)
-- : "r" (&v->counter)
-- : "cc", "xer", "memory");
--
-- return t;
-+ return atomic_add_return_unchecked(1, v);
- }
-
- /*
-@@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
- */
- #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
--static __inline__ void atomic_dec(atomic_t *v)
-+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
- {
-- int t;
--
-- __asm__ __volatile__(
--"1: lwarx %0,0,%2 # atomic_dec\n\
-- addic %0,%0,-1\n"
-- PPC405_ERR77(0,%2)\
--" stwcx. %0,0,%2\n\
-- bne- 1b"
-- : "=&r" (t), "+m" (v->counter)
-- : "r" (&v->counter)
-- : "cc", "xer");
-+ return atomic_add_return_unchecked(1, v) == 0;
- }
-
--static __inline__ int atomic_dec_return(atomic_t *v)
-+/*
-+ * atomic_dec - decrement atomic variable
-+ * @v: pointer of type atomic_t
-+ *
-+ * Atomically decrements @v by 1
-+ */
-+#define atomic_dec(v) atomic_sub(1, (v))
-+#define atomic_dec_return(v) atomic_sub_return(1, (v))
-+
-+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
- {
-- int t;
--
-- __asm__ __volatile__(
-- PPC_ATOMIC_ENTRY_BARRIER
--"1: lwarx %0,0,%1 # atomic_dec_return\n\
-- addic %0,%0,-1\n"
-- PPC405_ERR77(0,%1)
--" stwcx. %0,0,%1\n\
-- bne- 1b"
-- PPC_ATOMIC_EXIT_BARRIER
-- : "=&r" (t)
-- : "r" (&v->counter)
-- : "cc", "xer", "memory");
--
-- return t;
-+ atomic_sub_unchecked(1, v);
- }
-
- #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
- #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
-+{
-+ return cmpxchg(&(v->counter), old, new);
-+}
-+
-+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
-+{
-+ return xchg(&(v->counter), new);
-+}
-+
- /**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
-@@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
- PPC_ATOMIC_ENTRY_BARRIER
- "1: lwarx %0,0,%1 # __atomic_add_unless\n\
- cmpw 0,%0,%3 \n\
-- beq- 2f \n\
-- add %0,%2,%0 \n"
-+ beq- 2f \n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" mcrxr cr0\n"
-+" addo. %0,%2,%0\n"
-+" bf 4*cr0+so, 4f\n"
-+"3:.long " "0x00c00b00""\n"
-+"4:\n"
-+#else
-+ "add %0,%2,%0 \n"
-+#endif
-+
- PPC405_ERR77(0,%2)
- " stwcx. %0,0,%1 \n\
- bne- 1b \n"
-+"5:"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ _ASM_EXTABLE(3b, 5b)
-+#endif
-+
- PPC_ATOMIC_EXIT_BARRIER
- " subf %0,%2,%0 \n\
- 2:"
-@@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
- }
- #define atomic_dec_if_positive atomic_dec_if_positive
-
-+#define smp_mb__before_atomic_dec() smp_mb()
-+#define smp_mb__after_atomic_dec() smp_mb()
-+#define smp_mb__before_atomic_inc() smp_mb()
-+#define smp_mb__after_atomic_inc() smp_mb()
-+
- #ifdef __powerpc64__
-
- #define ATOMIC64_INIT(i) { (i) }
-@@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
- return t;
- }
-
-+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
-+{
-+ long t;
-+
-+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
-+
-+ return t;
-+}
-+
- static __inline__ void atomic64_set(atomic64_t *v, long i)
- {
- __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
- }
-
--#define ATOMIC64_OP(op, asm_op) \
--static __inline__ void atomic64_##op(long a, atomic64_t *v) \
-+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
-+{
-+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
-+}
-+
-+#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
-+static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
- { \
- long t; \
- \
- __asm__ __volatile__( \
- "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
-+ pre_op \
- #asm_op " %0,%2,%0\n" \
-+ post_op \
- " stdcx. %0,0,%3 \n" \
- " bne- 1b\n" \
-+ extable \
- : "=&r" (t), "+m" (v->counter) \
- : "r" (a), "r" (&v->counter) \
- : "cc"); \
- }
-
--#define ATOMIC64_OP_RETURN(op, asm_op) \
--static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
-+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
-+ __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
-+
-+#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
-+static inline long atomic64_##op##_return##suffix(long a, atomic64##suffix##_t *v)\
- { \
- long t; \
- \
- __asm__ __volatile__( \
- PPC_ATOMIC_ENTRY_BARRIER \
- "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
-+ pre_op \
- #asm_op " %0,%1,%0\n" \
-+ post_op \
- " stdcx. %0,0,%2 \n" \
- " bne- 1b\n" \
-+ extable \
- PPC_ATOMIC_EXIT_BARRIER \
- : "=&r" (t) \
- : "r" (a), "r" (&v->counter) \
-@@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
- return t; \
- }
-
-+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
-+ __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
-+
- #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
-
- ATOMIC64_OPS(add, add)
-@@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
-
- #undef ATOMIC64_OPS
- #undef ATOMIC64_OP_RETURN
-+#undef __ATOMIC64_OP_RETURN
- #undef ATOMIC64_OP
-+#undef __ATOMIC64_OP
-+#undef __OVERFLOW_EXTABLE
-+#undef __OVERFLOW_POST
-+#undef __OVERFLOW_PRE
-+#undef __REFCOUNT_OP
-
- #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-
--static __inline__ void atomic64_inc(atomic64_t *v)
--{
-- long t;
-+/*
-+ * atomic64_inc - increment atomic variable
-+ * @v: pointer of type atomic64_t
-+ *
-+ * Atomically increments @v by 1
-+ */
-+#define atomic64_inc(v) atomic64_add(1, (v))
-+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
-
-- __asm__ __volatile__(
--"1: ldarx %0,0,%2 # atomic64_inc\n\
-- addic %0,%0,1\n\
-- stdcx. %0,0,%2 \n\
-- bne- 1b"
-- : "=&r" (t), "+m" (v->counter)
-- : "r" (&v->counter)
-- : "cc", "xer");
-+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
-+{
-+ atomic64_add_unchecked(1, v);
- }
-
--static __inline__ long atomic64_inc_return(atomic64_t *v)
-+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
- {
-- long t;
--
-- __asm__ __volatile__(
-- PPC_ATOMIC_ENTRY_BARRIER
--"1: ldarx %0,0,%1 # atomic64_inc_return\n\
-- addic %0,%0,1\n\
-- stdcx. %0,0,%1 \n\
-- bne- 1b"
-- PPC_ATOMIC_EXIT_BARRIER
-- : "=&r" (t)
-- : "r" (&v->counter)
-- : "cc", "xer", "memory");
--
-- return t;
-+ return atomic64_add_return_unchecked(1, v);
- }
-
- /*
-@@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
- */
- #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-
--static __inline__ void atomic64_dec(atomic64_t *v)
-+/*
-+ * atomic64_dec - decrement atomic variable
-+ * @v: pointer of type atomic64_t
-+ *
-+ * Atomically decrements @v by 1
-+ */
-+#define atomic64_dec(v) atomic64_sub(1, (v))
-+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
-+
-+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
- {
-- long t;
--
-- __asm__ __volatile__(
--"1: ldarx %0,0,%2 # atomic64_dec\n\
-- addic %0,%0,-1\n\
-- stdcx. %0,0,%2\n\
-- bne- 1b"
-- : "=&r" (t), "+m" (v->counter)
-- : "r" (&v->counter)
-- : "cc", "xer");
--}
--
--static __inline__ long atomic64_dec_return(atomic64_t *v)
--{
-- long t;
--
-- __asm__ __volatile__(
-- PPC_ATOMIC_ENTRY_BARRIER
--"1: ldarx %0,0,%1 # atomic64_dec_return\n\
-- addic %0,%0,-1\n\
-- stdcx. %0,0,%1\n\
-- bne- 1b"
-- PPC_ATOMIC_EXIT_BARRIER
-- : "=&r" (t)
-- : "r" (&v->counter)
-- : "cc", "xer", "memory");
--
-- return t;
-+ atomic64_sub_unchecked(1, v);
- }
-
- #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-@@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
- #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
- #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-
-+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
-+{
-+ return cmpxchg(&(v->counter), old, new);
-+}
-+
-+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
-+{
-+ return xchg(&(v->counter), new);
-+}
-+
- /**
- * atomic64_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
-@@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
-
- __asm__ __volatile__ (
- PPC_ATOMIC_ENTRY_BARRIER
--"1: ldarx %0,0,%1 # __atomic_add_unless\n\
-+"1: ldarx %0,0,%1 # atomic64_add_unless\n\
- cmpd 0,%0,%3 \n\
-- beq- 2f \n\
-- add %0,%2,%0 \n"
-+ beq- 2f \n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" mcrxr cr0\n"
-+" addo. %0,%2,%0\n"
-+" bf 4*cr0+so, 4f\n"
-+"3:.long " "0x00c00b00""\n"
-+"4:\n"
-+#else
-+ "add %0,%2,%0 \n"
-+#endif
-+
- " stdcx. %0,0,%1 \n\
- bne- 1b \n"
- PPC_ATOMIC_EXIT_BARRIER
-+"5:"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ _ASM_EXTABLE(3b, 5b)
-+#endif
-+
- " subf %0,%2,%0 \n\
- 2:"
- : "=&r" (t)
-diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
-index a3bf5be..e03ba81 100644
---- a/arch/powerpc/include/asm/barrier.h
-+++ b/arch/powerpc/include/asm/barrier.h
-@@ -76,7 +76,7 @@
- do { \
- compiletime_assert_atomic_type(*p); \
- smp_lwsync(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
-diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
-index 34a05a1..a1f2c67 100644
---- a/arch/powerpc/include/asm/cache.h
-+++ b/arch/powerpc/include/asm/cache.h
-@@ -4,6 +4,7 @@
- #ifdef __KERNEL__
-
- #include <asm/reg.h>
-+#include <linux/const.h>
-
- /* bytes per L1 cache line */
- #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
-@@ -23,7 +24,7 @@
- #define L1_CACHE_SHIFT 7
- #endif
-
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #define SMP_CACHE_BYTES L1_CACHE_BYTES
-
-diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
-index 57d289a..b36c98c 100644
---- a/arch/powerpc/include/asm/elf.h
-+++ b/arch/powerpc/include/asm/elf.h
-@@ -30,6 +30,18 @@
-
- #define ELF_ET_DYN_BASE 0x20000000
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
-+
-+#ifdef __powerpc64__
-+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
-+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
-+#else
-+#define PAX_DELTA_MMAP_LEN 15
-+#define PAX_DELTA_STACK_LEN 15
-+#endif
-+#endif
-+
- #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
-
- /*
-@@ -128,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
- (0x7ff >> (PAGE_SHIFT - 12)) : \
- (0x3ffff >> (PAGE_SHIFT - 12)))
-
--extern unsigned long arch_randomize_brk(struct mm_struct *mm);
--#define arch_randomize_brk arch_randomize_brk
--
--
- #ifdef CONFIG_SPU_BASE
- /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
- #define NT_SPU 1
-diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
-index 8196e9c..d83a9f3 100644
---- a/arch/powerpc/include/asm/exec.h
-+++ b/arch/powerpc/include/asm/exec.h
-@@ -4,6 +4,6 @@
- #ifndef _ASM_POWERPC_EXEC_H
- #define _ASM_POWERPC_EXEC_H
-
--extern unsigned long arch_align_stack(unsigned long sp);
-+#define arch_align_stack(x) ((x) & ~0xfUL)
-
- #endif /* _ASM_POWERPC_EXEC_H */
-diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
-index 5acabbd..7ea14fa 100644
---- a/arch/powerpc/include/asm/kmap_types.h
-+++ b/arch/powerpc/include/asm/kmap_types.h
-@@ -10,7 +10,7 @@
- * 2 of the License, or (at your option) any later version.
- */
-
--#define KM_TYPE_NR 16
-+#define KM_TYPE_NR 17
-
- #endif /* __KERNEL__ */
- #endif /* _ASM_POWERPC_KMAP_TYPES_H */
-diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
-index b8da913..c02b593 100644
---- a/arch/powerpc/include/asm/local.h
-+++ b/arch/powerpc/include/asm/local.h
-@@ -9,21 +9,65 @@ typedef struct
- atomic_long_t a;
- } local_t;
-
-+typedef struct
-+{
-+ atomic_long_unchecked_t a;
-+} local_unchecked_t;
-+
- #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
-
- #define local_read(l) atomic_long_read(&(l)->a)
-+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
- #define local_set(l,i) atomic_long_set(&(l)->a, (i))
-+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
-
- #define local_add(i,l) atomic_long_add((i),(&(l)->a))
-+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
- #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
-+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
- #define local_inc(l) atomic_long_inc(&(l)->a)
-+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
- #define local_dec(l) atomic_long_dec(&(l)->a)
-+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
-
- static __inline__ long local_add_return(long a, local_t *l)
- {
- long t;
-
- __asm__ __volatile__(
-+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" mcrxr cr0\n"
-+" addo. %0,%1,%0\n"
-+" bf 4*cr0+so, 3f\n"
-+"2:.long " "0x00c00b00""\n"
-+#else
-+" add %0,%1,%0\n"
-+#endif
-+
-+"3:\n"
-+ PPC405_ERR77(0,%2)
-+ PPC_STLCX "%0,0,%2 \n\
-+ bne- 1b"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+"\n4:\n"
-+ _ASM_EXTABLE(2b, 4b)
-+#endif
-+
-+ : "=&r" (t)
-+ : "r" (a), "r" (&(l->a.counter))
-+ : "cc", "memory");
-+
-+ return t;
-+}
-+
-+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
-+{
-+ long t;
-+
-+ __asm__ __volatile__(
- "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
- add %0,%1,%0\n"
- PPC405_ERR77(0,%2)
-@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
-
- #define local_cmpxchg(l, o, n) \
- (cmpxchg_local(&((l)->a.counter), (o), (n)))
-+#define local_cmpxchg_unchecked(l, o, n) \
-+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
- #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
-
- /**
-diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
-index 8565c25..2865190 100644
---- a/arch/powerpc/include/asm/mman.h
-+++ b/arch/powerpc/include/asm/mman.h
-@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
- }
- #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
-
--static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
-+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
- {
- return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
- }
-diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
-index 69c0598..2c56964 100644
---- a/arch/powerpc/include/asm/page.h
-+++ b/arch/powerpc/include/asm/page.h
-@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
- * and needs to be executable. This means the whole heap ends
- * up being executable.
- */
--#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
-- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-+#define VM_DATA_DEFAULT_FLAGS32 \
-+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
-+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
- #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
- #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
- #endif
-
-+#define ktla_ktva(addr) (addr)
-+#define ktva_ktla(addr) (addr)
-+
- #ifndef CONFIG_PPC_BOOK3S_64
- /*
- * Use the top bit of the higher-level page table entries to indicate whether
-diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
-index d908a46..3753f71 100644
---- a/arch/powerpc/include/asm/page_64.h
-+++ b/arch/powerpc/include/asm/page_64.h
-@@ -172,15 +172,18 @@ do { \
- * stack by default, so in the absence of a PT_GNU_STACK program header
- * we turn execute permission off.
- */
--#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
-- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-+#define VM_STACK_DEFAULT_FLAGS32 \
-+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
-+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
- #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-+#ifndef CONFIG_PAX_PAGEEXEC
- #define VM_STACK_DEFAULT_FLAGS \
- (is_32bit_task() ? \
- VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
-+#endif
-
- #include <asm-generic/getorder.h>
-
-diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
-index 4b0be20..c15a27d 100644
---- a/arch/powerpc/include/asm/pgalloc-64.h
-+++ b/arch/powerpc/include/asm/pgalloc-64.h
-@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
- #ifndef CONFIG_PPC_64K_PAGES
-
- #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
-+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
-
- static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
- {
-@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
- pud_set(pud, (unsigned long)pmd);
- }
-
-+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
-+{
-+ pud_populate(mm, pud, pmd);
-+}
-+
- #define pmd_populate(mm, pmd, pte_page) \
- pmd_populate_kernel(mm, pmd, page_address(pte_page))
- #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
-@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
- #endif
-
- #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
-+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
-
- static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
- pte_t *pte)
-diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
-index a8805fe..6d69617 100644
---- a/arch/powerpc/include/asm/pgtable.h
-+++ b/arch/powerpc/include/asm/pgtable.h
-@@ -2,6 +2,7 @@
- #define _ASM_POWERPC_PGTABLE_H
- #ifdef __KERNEL__
-
-+#include <linux/const.h>
- #ifndef __ASSEMBLY__
- #include <linux/mmdebug.h>
- #include <linux/mmzone.h>
-diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
-index 4aad413..85d86bf 100644
---- a/arch/powerpc/include/asm/pte-hash32.h
-+++ b/arch/powerpc/include/asm/pte-hash32.h
-@@ -21,6 +21,7 @@
- #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
- #define _PAGE_USER 0x004 /* usermode access allowed */
- #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
-+#define _PAGE_EXEC _PAGE_GUARDED
- #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
- #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
- #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
-diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
-index 1c874fb..e8480a4 100644
---- a/arch/powerpc/include/asm/reg.h
-+++ b/arch/powerpc/include/asm/reg.h
-@@ -253,6 +253,7 @@
- #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
- #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
- #define DSISR_NOHPTE 0x40000000 /* no translation found */
-+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
- #define DSISR_PROTFAULT 0x08000000 /* protection fault */
- #define DSISR_ISSTORE 0x02000000 /* access was a store */
- #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
-diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
-index 5a6614a..d89995d1 100644
---- a/arch/powerpc/include/asm/smp.h
-+++ b/arch/powerpc/include/asm/smp.h
-@@ -51,7 +51,7 @@ struct smp_ops_t {
- int (*cpu_disable)(void);
- void (*cpu_die)(unsigned int nr);
- int (*cpu_bootable)(unsigned int nr);
--};
-+} __no_const;
-
- extern void smp_send_debugger_break(void);
- extern void start_secondary_resume(void);
-diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
-index 4dbe072..b803275 100644
---- a/arch/powerpc/include/asm/spinlock.h
-+++ b/arch/powerpc/include/asm/spinlock.h
-@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
- __asm__ __volatile__(
- "1: " PPC_LWARX(%0,0,%1,1) "\n"
- __DO_SIGN_EXTEND
--" addic. %0,%0,1\n\
-- ble- 2f\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" mcrxr cr0\n"
-+" addico. %0,%0,1\n"
-+" bf 4*cr0+so, 3f\n"
-+"2:.long " "0x00c00b00""\n"
-+#else
-+" addic. %0,%0,1\n"
-+#endif
-+
-+"3:\n"
-+ "ble- 4f\n"
- PPC405_ERR77(0,%1)
- " stwcx. %0,0,%1\n\
- bne- 1b\n"
- PPC_ACQUIRE_BARRIER
--"2:" : "=&r" (tmp)
-+"4:"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ _ASM_EXTABLE(2b,4b)
-+#endif
-+
-+ : "=&r" (tmp)
- : "r" (&rw->lock)
- : "cr0", "xer", "memory");
-
-@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
- __asm__ __volatile__(
- "# read_unlock\n\t"
- PPC_RELEASE_BARRIER
--"1: lwarx %0,0,%1\n\
-- addic %0,%0,-1\n"
-+"1: lwarx %0,0,%1\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" mcrxr cr0\n"
-+" addico. %0,%0,-1\n"
-+" bf 4*cr0+so, 3f\n"
-+"2:.long " "0x00c00b00""\n"
-+#else
-+" addic. %0,%0,-1\n"
-+#endif
-+
-+"3:\n"
- PPC405_ERR77(0,%1)
- " stwcx. %0,0,%1\n\
- bne- 1b"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+"\n4:\n"
-+ _ASM_EXTABLE(2b, 4b)
-+#endif
-+
- : "=&r"(tmp)
- : "r"(&rw->lock)
- : "cr0", "xer", "memory");
-diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
-index 0be6c68..9c3c6ee 100644
---- a/arch/powerpc/include/asm/thread_info.h
-+++ b/arch/powerpc/include/asm/thread_info.h
-@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
- #if defined(CONFIG_PPC64)
- #define TIF_ELF2ABI 18 /* function descriptors must die! */
- #endif
-+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
-+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
-
- /* as above, but as bit values */
- #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
- #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
- #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
- #define _TIF_NOHZ (1<<TIF_NOHZ)
-+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
- #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
-- _TIF_NOHZ)
-+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
-
- #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
- _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
-diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
-index a0c071d..49cdc7f 100644
---- a/arch/powerpc/include/asm/uaccess.h
-+++ b/arch/powerpc/include/asm/uaccess.h
-@@ -58,6 +58,7 @@
-
- #endif
-
-+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
- #define access_ok(type, addr, size) \
- (__chk_user_ptr(addr), \
- __access_ok((__force unsigned long)(addr), (size), get_fs()))
-@@ -318,52 +319,6 @@ do { \
- extern unsigned long __copy_tofrom_user(void __user *to,
- const void __user *from, unsigned long size);
-
--#ifndef __powerpc64__
--
--static inline unsigned long copy_from_user(void *to,
-- const void __user *from, unsigned long n)
--{
-- unsigned long over;
--
-- if (access_ok(VERIFY_READ, from, n))
-- return __copy_tofrom_user((__force void __user *)to, from, n);
-- if ((unsigned long)from < TASK_SIZE) {
-- over = (unsigned long)from + n - TASK_SIZE;
-- return __copy_tofrom_user((__force void __user *)to, from,
-- n - over) + over;
-- }
-- return n;
--}
--
--static inline unsigned long copy_to_user(void __user *to,
-- const void *from, unsigned long n)
--{
-- unsigned long over;
--
-- if (access_ok(VERIFY_WRITE, to, n))
-- return __copy_tofrom_user(to, (__force void __user *)from, n);
-- if ((unsigned long)to < TASK_SIZE) {
-- over = (unsigned long)to + n - TASK_SIZE;
-- return __copy_tofrom_user(to, (__force void __user *)from,
-- n - over) + over;
-- }
-- return n;
--}
--
--#else /* __powerpc64__ */
--
--#define __copy_in_user(to, from, size) \
-- __copy_tofrom_user((to), (from), (size))
--
--extern unsigned long copy_from_user(void *to, const void __user *from,
-- unsigned long n);
--extern unsigned long copy_to_user(void __user *to, const void *from,
-- unsigned long n);
--extern unsigned long copy_in_user(void __user *to, const void __user *from,
-- unsigned long n);
--
--#endif /* __powerpc64__ */
--
- static inline unsigned long __copy_from_user_inatomic(void *to,
- const void __user *from, unsigned long n)
- {
-@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
- if (ret == 0)
- return 0;
- }
-+
-+ if (!__builtin_constant_p(n))
-+ check_object_size(to, n, false);
-+
- return __copy_tofrom_user((__force void __user *)to, from, n);
- }
-
-@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
- if (ret == 0)
- return 0;
- }
-+
-+ if (!__builtin_constant_p(n))
-+ check_object_size(from, n, true);
-+
- return __copy_tofrom_user(to, (__force const void __user *)from, n);
- }
-
-@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
- return __copy_to_user_inatomic(to, from, size);
- }
-
-+#ifndef __powerpc64__
-+
-+static inline unsigned long __must_check copy_from_user(void *to,
-+ const void __user *from, unsigned long n)
-+{
-+ unsigned long over;
-+
-+ if ((long)n < 0)
-+ return n;
-+
-+ if (access_ok(VERIFY_READ, from, n)) {
-+ if (!__builtin_constant_p(n))
-+ check_object_size(to, n, false);
-+ return __copy_tofrom_user((__force void __user *)to, from, n);
-+ }
-+ if ((unsigned long)from < TASK_SIZE) {
-+ over = (unsigned long)from + n - TASK_SIZE;
-+ if (!__builtin_constant_p(n - over))
-+ check_object_size(to, n - over, false);
-+ return __copy_tofrom_user((__force void __user *)to, from,
-+ n - over) + over;
-+ }
-+ return n;
-+}
-+
-+static inline unsigned long __must_check copy_to_user(void __user *to,
-+ const void *from, unsigned long n)
-+{
-+ unsigned long over;
-+
-+ if ((long)n < 0)
-+ return n;
-+
-+ if (access_ok(VERIFY_WRITE, to, n)) {
-+ if (!__builtin_constant_p(n))
-+ check_object_size(from, n, true);
-+ return __copy_tofrom_user(to, (__force void __user *)from, n);
-+ }
-+ if ((unsigned long)to < TASK_SIZE) {
-+ over = (unsigned long)to + n - TASK_SIZE;
-+ if (!__builtin_constant_p(n - over))
-+ check_object_size(from, n - over, true);
-+ return __copy_tofrom_user(to, (__force void __user *)from,
-+ n - over) + over;
-+ }
-+ return n;
-+}
-+
-+#else /* __powerpc64__ */
-+
-+#define __copy_in_user(to, from, size) \
-+ __copy_tofrom_user((to), (from), (size))
-+
-+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
-+{
-+ if ((long)n < 0 || n > INT_MAX)
-+ return n;
-+
-+ if (!__builtin_constant_p(n))
-+ check_object_size(to, n, false);
-+
-+ if (likely(access_ok(VERIFY_READ, from, n)))
-+ n = __copy_from_user(to, from, n);
-+ else
-+ memset(to, 0, n);
-+ return n;
-+}
-+
-+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
-+{
-+ if ((long)n < 0 || n > INT_MAX)
-+ return n;
-+
-+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
-+ if (!__builtin_constant_p(n))
-+ check_object_size(from, n, true);
-+ n = __copy_to_user(to, from, n);
-+ }
-+ return n;
-+}
-+
-+extern unsigned long copy_in_user(void __user *to, const void __user *from,
-+ unsigned long n);
-+
-+#endif /* __powerpc64__ */
-+
- extern unsigned long __clear_user(void __user *addr, unsigned long size);
-
- static inline unsigned long clear_user(void __user *addr, unsigned long size)
-diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
-index 502cf69..53936a1 100644
---- a/arch/powerpc/kernel/Makefile
-+++ b/arch/powerpc/kernel/Makefile
-@@ -15,6 +15,11 @@ CFLAGS_prom_init.o += -fPIC
- CFLAGS_btext.o += -fPIC
- endif
-
-+CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
-+CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
-+CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
-+CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
-+
- ifdef CONFIG_FUNCTION_TRACER
- # Do not trace early boot code
- CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
-@@ -27,6 +32,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
- CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
- endif
-
-+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
-+
- obj-y := cputable.o ptrace.o syscalls.o \
- irq.o align.o signal_32.o pmc.o vdso.o \
- process.o systbl.o idle.o \
-diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
-index 3e68d1c..72a5ee6 100644
---- a/arch/powerpc/kernel/exceptions-64e.S
-+++ b/arch/powerpc/kernel/exceptions-64e.S
-@@ -1010,6 +1010,7 @@ storage_fault_common:
- std r14,_DAR(r1)
- std r15,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
-+ bl save_nvgprs
- mr r4,r14
- mr r5,r15
- ld r14,PACA_EXGEN+EX_R14(r13)
-@@ -1018,8 +1019,7 @@ storage_fault_common:
- cmpdi r3,0
- bne- 1f
- b ret_from_except_lite
--1: bl save_nvgprs
-- mr r5,r3
-+1: mr r5,r3
- addi r3,r1,STACK_FRAME_OVERHEAD
- ld r4,_DAR(r1)
- bl bad_page_fault
-diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
-index 9519e6b..13f6c38 100644
---- a/arch/powerpc/kernel/exceptions-64s.S
-+++ b/arch/powerpc/kernel/exceptions-64s.S
-@@ -1599,10 +1599,10 @@ handle_page_fault:
- 11: ld r4,_DAR(r1)
- ld r5,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
-+ bl save_nvgprs
- bl do_page_fault
- cmpdi r3,0
- beq+ 12f
-- bl save_nvgprs
- mr r5,r3
- addi r3,r1,STACK_FRAME_OVERHEAD
- lwz r4,_DAR(r1)
-diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
-index 4509603..cdb491f 100644
---- a/arch/powerpc/kernel/irq.c
-+++ b/arch/powerpc/kernel/irq.c
-@@ -460,6 +460,8 @@ void migrate_irqs(void)
- }
- #endif
-
-+extern void gr_handle_kernel_exploit(void);
-+
- static inline void check_stack_overflow(void)
- {
- #ifdef CONFIG_DEBUG_STACKOVERFLOW
-@@ -472,6 +474,7 @@ static inline void check_stack_overflow(void)
- pr_err("do_IRQ: stack overflow: %ld\n",
- sp - sizeof(struct thread_info));
- dump_stack();
-+ gr_handle_kernel_exploit();
- }
- #endif
- }
-diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
-index c94d2e0..992a9ce 100644
---- a/arch/powerpc/kernel/module_32.c
-+++ b/arch/powerpc/kernel/module_32.c
-@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
- me->arch.core_plt_section = i;
- }
- if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
-- pr_err("Module doesn't contain .plt or .init.plt sections.\n");
-+ pr_err("Module $s doesn't contain .plt or .init.plt sections.\n", me->name);
- return -ENOEXEC;
- }
-
-@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
-
- pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
- /* Init, or core PLT? */
-- if (location >= mod->module_core
-- && location < mod->module_core + mod->core_size)
-+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
-+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
- entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
-- else
-+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
-+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
- entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
-+ else {
-+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
-+ return ~0UL;
-+ }
-
- /* Find this entry, or if that fails, the next avail. entry */
- while (entry->jump[0]) {
-@@ -296,7 +301,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
- }
- #ifdef CONFIG_DYNAMIC_FTRACE
- module->arch.tramp =
-- do_plt_call(module->module_core,
-+ do_plt_call(module->module_core_rx,
- (unsigned long)ftrace_caller,
- sechdrs, module);
- #endif
-diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
-index b4cc7be..1fe8bb3 100644
---- a/arch/powerpc/kernel/process.c
-+++ b/arch/powerpc/kernel/process.c
-@@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
- * Lookup NIP late so we have the best change of getting the
- * above info out without failing
- */
-- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
-- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
-+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
-+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
- #endif
- show_stack(current, (unsigned long *) regs->gpr[1]);
- if (!user_mode(regs))
-@@ -1549,10 +1549,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
- newsp = stack[0];
- ip = stack[STACK_FRAME_LR_SAVE];
- if (!firstframe || ip != lr) {
-- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
-+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
- #ifdef CONFIG_FUNCTION_GRAPH_TRACER
- if ((ip == rth) && curr_frame >= 0) {
-- printk(" (%pS)",
-+ printk(" (%pA)",
- (void *)current->ret_stack[curr_frame].ret);
- curr_frame--;
- }
-@@ -1572,7 +1572,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
- struct pt_regs *regs = (struct pt_regs *)
- (sp + STACK_FRAME_OVERHEAD);
- lr = regs->link;
-- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
-+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
- regs->trap, (void *)regs->nip, (void *)lr);
- firstframe = 1;
- }
-@@ -1608,49 +1608,3 @@ void notrace __ppc64_runlatch_off(void)
- mtspr(SPRN_CTRLT, ctrl);
- }
- #endif /* CONFIG_PPC64 */
--
--unsigned long arch_align_stack(unsigned long sp)
--{
-- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-- sp -= get_random_int() & ~PAGE_MASK;
-- return sp & ~0xf;
--}
--
--static inline unsigned long brk_rnd(void)
--{
-- unsigned long rnd = 0;
--
-- /* 8MB for 32bit, 1GB for 64bit */
-- if (is_32bit_task())
-- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
-- else
-- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
--
-- return rnd << PAGE_SHIFT;
--}
--
--unsigned long arch_randomize_brk(struct mm_struct *mm)
--{
-- unsigned long base = mm->brk;
-- unsigned long ret;
--
--#ifdef CONFIG_PPC_STD_MMU_64
-- /*
-- * If we are using 1TB segments and we are allowed to randomise
-- * the heap, we can put it above 1TB so it is backed by a 1TB
-- * segment. Otherwise the heap will be in the bottom 1TB
-- * which always uses 256MB segments and this may result in a
-- * performance penalty.
-- */
-- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
-- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
--#endif
--
-- ret = PAGE_ALIGN(base + brk_rnd());
--
-- if (ret < mm->brk)
-- return mm->brk;
--
-- return ret;
--}
--
-diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
-index f21897b..28c0428 100644
---- a/arch/powerpc/kernel/ptrace.c
-+++ b/arch/powerpc/kernel/ptrace.c
-@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
- return ret;
- }
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+extern void gr_delayed_cred_worker(void);
-+#endif
-+
- /*
- * We must return the syscall number to actually look up in the table.
- * This can be -1L to skip running any syscall at all.
-@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
-
- secure_computing_strict(regs->gpr[0]);
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
-+ gr_delayed_cred_worker();
-+#endif
-+
- if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs))
- /*
-@@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
- {
- int step;
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
-+ gr_delayed_cred_worker();
-+#endif
-+
- audit_syscall_exit(regs);
-
- if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
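
The two hooks above wire grsecurity's delayed-credential update into the powerpc syscall path: any thread flagged with TIF_GRSEC_SETXID commits its pending uid/gid change at the next syscall entry or exit. A hedged sketch of the producer side, which is not shown in this hunk (the flagging loop lives in the generic credential changes elsewhere in this patch; names here are illustrative):

        /* sketch: the thread that called set*uid() marks its siblings
         * so each one picks up the new credentials at a safe point */
        static void sketch_mark_siblings(struct task_struct *leader)
        {
                struct task_struct *t;

                for_each_thread(leader, t)
                        set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
        }
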
-diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
-index b171001..4ac7ac5 100644
---- a/arch/powerpc/kernel/signal_32.c
-+++ b/arch/powerpc/kernel/signal_32.c
-@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
- /* Save user registers on the stack */
- frame = &rt_sf->uc.uc_mcontext;
- addr = frame;
-- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
-+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
- sigret = 0;
- tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
- } else {
-diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
-index 2cb0c94..c0c0bc9 100644
---- a/arch/powerpc/kernel/signal_64.c
-+++ b/arch/powerpc/kernel/signal_64.c
-@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
- current->thread.fp_state.fpscr = 0;
-
- /* Set up to return from userspace. */
-- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
-+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
- regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
- } else {
- err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
-diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
-index e6595b7..24bde6e 100644
---- a/arch/powerpc/kernel/traps.c
-+++ b/arch/powerpc/kernel/traps.c
-@@ -36,6 +36,7 @@
- #include <linux/debugfs.h>
- #include <linux/ratelimit.h>
- #include <linux/context_tracking.h>
-+#include <linux/uaccess.h>
-
- #include <asm/emulated_ops.h>
- #include <asm/pgtable.h>
-@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
- return flags;
- }
-
-+extern void gr_handle_kernel_exploit(void);
-+
- static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
- int signr)
- {
-@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
- panic("Fatal exception in interrupt");
- if (panic_on_oops)
- panic("Fatal exception");
-+
-+ gr_handle_kernel_exploit();
-+
- do_exit(signr);
- }
-
-@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
- enum ctx_state prev_state = exception_enter();
- unsigned int reason = get_reason(regs);
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+ unsigned int bkpt;
-+ const struct exception_table_entry *entry;
-+
-+ if (reason & REASON_ILLEGAL) {
-+ /* Check if PaX bad instruction */
-+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
-+ current->thread.trap_nr = 0;
-+ pax_report_refcount_overflow(regs);
-+ /* fixup_exception() for PowerPC does not exist, simulate its job */
-+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
-+ regs->nip = entry->fixup;
-+ return;
-+ }
-+ /* fixup_exception() could not handle */
-+ goto bail;
-+ }
-+ }
-+#endif
-+
- /* We can now get here via a FP Unavailable exception if the core
- * has no FPU, in that case the reason flags will be 0 */
-
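
Two things land in traps.c above: gr_handle_kernel_exploit() runs from oops_end() so a kernel oops can feed grsecurity's exploit-response logic, and the REASON_ILLEGAL branch recognizes the PaX refcount trap word (0xc00b00) at the faulting NIP and reports the overflow. The hand-rolled fixup walk exists because powerpc has no fixup_exception(); condensed, it does:

        /* fault-safe resume at the exception table's landing pad */
        const struct exception_table_entry *e;

        e = search_exception_tables(regs->nip);
        if (e)
                regs->nip = e->fixup;   /* skip past the trapping insn */
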
-diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
-index 305eb0d..accc5b40 100644
---- a/arch/powerpc/kernel/vdso.c
-+++ b/arch/powerpc/kernel/vdso.c
-@@ -34,6 +34,7 @@
- #include <asm/vdso.h>
- #include <asm/vdso_datapage.h>
- #include <asm/setup.h>
-+#include <asm/mman.h>
-
- #undef DEBUG
-
-@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
- vdso_base = VDSO32_MBASE;
- #endif
-
-- current->mm->context.vdso_base = 0;
-+ current->mm->context.vdso_base = ~0UL;
-
- /* vDSO has a problem and was disabled, just don't "enable" it for the
- * process
-@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
- vdso_base = get_unmapped_area(NULL, vdso_base,
- (vdso_pages << PAGE_SHIFT) +
- ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
-- 0, 0);
-+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
- if (IS_ERR_VALUE(vdso_base)) {
- rc = vdso_base;
- goto fail_mmapsem;
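
The vdso.c and signal_*.c hunks above change the "no vDSO" sentinel from 0 to ~0UL: with the mmap base now shifted by PaX randomization, 0 is a poor flag value, while ~0UL can never be a valid mapping base. A minimal sketch of the convention (VDSO_UNMAPPED is an illustrative name, not from the patch):

        #define VDSO_UNMAPPED   (~0UL)

        static inline int vdso_is_mapped(struct mm_struct *mm)
        {
                return mm->context.vdso_base != VDSO_UNMAPPED;
        }
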
-diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
-index c45eaab..5f41b57 100644
---- a/arch/powerpc/kvm/powerpc.c
-+++ b/arch/powerpc/kvm/powerpc.c
-@@ -1403,7 +1403,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
- }
- EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
-
--int kvm_arch_init(void *opaque)
-+int kvm_arch_init(const void *opaque)
- {
- return 0;
- }
-diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
-index 5eea6f3..5d10396 100644
---- a/arch/powerpc/lib/usercopy_64.c
-+++ b/arch/powerpc/lib/usercopy_64.c
-@@ -9,22 +9,6 @@
- #include <linux/module.h>
- #include <asm/uaccess.h>
-
--unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
--{
-- if (likely(access_ok(VERIFY_READ, from, n)))
-- n = __copy_from_user(to, from, n);
-- else
-- memset(to, 0, n);
-- return n;
--}
--
--unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
--{
-- if (likely(access_ok(VERIFY_WRITE, to, n)))
-- n = __copy_to_user(to, from, n);
-- return n;
--}
--
- unsigned long copy_in_user(void __user *to, const void __user *from,
- unsigned long n)
- {
-@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
- return n;
- }
-
--EXPORT_SYMBOL(copy_from_user);
--EXPORT_SYMBOL(copy_to_user);
- EXPORT_SYMBOL(copy_in_user);
-
-diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
-index 6154b0a..4de2b19 100644
---- a/arch/powerpc/mm/fault.c
-+++ b/arch/powerpc/mm/fault.c
-@@ -33,6 +33,10 @@
- #include <linux/ratelimit.h>
- #include <linux/context_tracking.h>
- #include <linux/hugetlb.h>
-+#include <linux/slab.h>
-+#include <linux/pagemap.h>
-+#include <linux/compiler.h>
-+#include <linux/unistd.h>
-
- #include <asm/firmware.h>
- #include <asm/page.h>
-@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
- }
- #endif
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+/*
-+ * PaX: decide what to do with offenders (regs->nip = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+ return 1;
-+}
-+
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ unsigned long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 5; i++) {
-+ unsigned int c;
-+ if (get_user(c, (unsigned int __user *)pc+i))
-+ printk(KERN_CONT "???????? ");
-+ else
-+ printk(KERN_CONT "%08x ", c);
-+ }
-+ printk("\n");
-+}
-+#endif
-+
- /*
- * Check whether the instruction at regs->nip is a store using
- * an update addressing form which will update r1.
-@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
- * indicate errors in DSISR but can validly be set in SRR1.
- */
- if (trap == 0x400)
-- error_code &= 0x48200000;
-+ error_code &= 0x58200000;
- else
- is_write = error_code & DSISR_ISSTORE;
- #else
-@@ -383,7 +414,7 @@ good_area:
- * "undefined". Of those that can be set, this is the only
- * one which seems bad.
- */
-- if (error_code & 0x10000000)
-+ if (error_code & DSISR_GUARDED)
- /* Guarded storage error. */
- goto bad_area;
- #endif /* CONFIG_8xx */
-@@ -398,7 +429,7 @@ good_area:
- * processors use the same I/D cache coherency mechanism
- * as embedded.
- */
-- if (error_code & DSISR_PROTFAULT)
-+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
- goto bad_area;
- #endif /* CONFIG_PPC_STD_MMU */
-
-@@ -490,6 +521,23 @@ bad_area:
- bad_area_nosemaphore:
- /* User mode accesses cause a SIGSEGV */
- if (user_mode(regs)) {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
-+#ifdef CONFIG_PPC_STD_MMU
-+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
-+#else
-+ if (is_exec && regs->nip == address) {
-+#endif
-+ switch (pax_handle_fetch_fault(regs)) {
-+ }
-+
-+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
-+ do_group_exit(SIGKILL);
-+ }
-+ }
-+#endif
-+
- _exception(SIGSEGV, regs, code, address);
- goto bail;
- }
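
The PAGEEXEC block above turns an instruction fetch from a non-executable page into a fatal signal: classic-MMU builds detect the fetch via DSISR_PROTFAULT/DSISR_GUARDED, other builds by comparing the fault address against the NIP. Condensed sketch of the kill path (simplified from the hunk, not a second implementation):

        if (is_exec && regs->nip == address) {  /* fetch faulted */
                pax_report_fault(regs, (void *)regs->nip,
                                 (void *)regs->gpr[PT_R1]);
                do_group_exit(SIGKILL);         /* kill the whole group */
        }
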
-diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
-index cb8bdbe..cde4bc7 100644
---- a/arch/powerpc/mm/mmap.c
-+++ b/arch/powerpc/mm/mmap.c
-@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
- return sysctl_legacy_va_layout;
- }
-
--static unsigned long mmap_rnd(void)
-+static unsigned long mmap_rnd(struct mm_struct *mm)
- {
- unsigned long rnd = 0;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (current->flags & PF_RANDOMIZE) {
- /* 8MB for 32bit, 1GB for 64bit */
- if (is_32bit_task())
-@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
- return rnd << PAGE_SHIFT;
- }
-
--static inline unsigned long mmap_base(void)
-+static inline unsigned long mmap_base(struct mm_struct *mm)
- {
- unsigned long gap = rlimit(RLIMIT_STACK);
-
-@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
-- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
-+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
- }
-
- /*
-@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
-- mm->mmap_base = mmap_base();
-+ mm->mmap_base = mmap_base(mm);
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- }
- }
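
Net effect of the mmap.c changes above, as a sketch (delta_mmap and delta_stack are per-mm random offsets added by the PaX core elsewhere in this patch): the legacy bottom-up base moves up by delta_mmap, and the top-down base moves down by delta_mmap + delta_stack, so both layouts gain a randomized gap.

        if (mmap_is_legacy())
                base = TASK_UNMAPPED_BASE + mm->delta_mmap;
        else
                base = mmap_base(mm) - mm->delta_mmap - mm->delta_stack;
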
-diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
-index ded0ea1..f213a9b 100644
---- a/arch/powerpc/mm/slice.c
-+++ b/arch/powerpc/mm/slice.c
-@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
- if ((mm->task_size - len) < addr)
- return 0;
- vma = find_vma(mm, addr);
-- return (!vma || (addr + len) <= vma->vm_start);
-+ return check_heap_stack_gap(vma, addr, len, 0);
- }
-
- static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
-@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
- info.align_offset = 0;
-
- addr = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ addr += mm->delta_mmap;
-+#endif
-+
- while (addr < TASK_SIZE) {
- info.low_limit = addr;
- if (!slice_scan_available(addr, available, 1, &addr))
-@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
- if (fixed && addr > (mm->task_size - len))
- return -ENOMEM;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
-+ addr = 0;
-+#endif
-+
- /* If hint, make sure it matches our alignment restrictions */
- if (!fixed && addr) {
- addr = _ALIGN_UP(addr, 1ul << pshift);
-diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
-index f223875..94170e4 100644
---- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
-+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
-@@ -399,8 +399,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
- }
-
- static struct pci_ops scc_pciex_pci_ops = {
-- scc_pciex_read_config,
-- scc_pciex_write_config,
-+ .read = scc_pciex_read_config,
-+ .write = scc_pciex_write_config,
- };
-
- static void pciex_clear_intr_all(unsigned int __iomem *base)
-diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
-index d966bbe..372124a 100644
---- a/arch/powerpc/platforms/cell/spufs/file.c
-+++ b/arch/powerpc/platforms/cell/spufs/file.c
-@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
- return VM_FAULT_NOPAGE;
- }
-
--static int spufs_mem_mmap_access(struct vm_area_struct *vma,
-+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
- unsigned long address,
-- void *buf, int len, int write)
-+ void *buf, size_t len, int write)
- {
- struct spu_context *ctx = vma->vm_file->private_data;
- unsigned long offset = address - vma->vm_start;
-diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
-index fa934fe..c296056 100644
---- a/arch/s390/include/asm/atomic.h
-+++ b/arch/s390/include/asm/atomic.h
-@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
- #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
- #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
-+
- #endif /* __ARCH_S390_ATOMIC__ */
-diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
-index 8d72471..5322500 100644
---- a/arch/s390/include/asm/barrier.h
-+++ b/arch/s390/include/asm/barrier.h
-@@ -42,7 +42,7 @@
- do { \
- compiletime_assert_atomic_type(*p); \
- barrier(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
-diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
-index 4d7ccac..d03d0ad 100644
---- a/arch/s390/include/asm/cache.h
-+++ b/arch/s390/include/asm/cache.h
-@@ -9,8 +9,10 @@
- #ifndef __ARCH_S390_CACHE_H
- #define __ARCH_S390_CACHE_H
-
--#define L1_CACHE_BYTES 256
-+#include <linux/const.h>
-+
- #define L1_CACHE_SHIFT 8
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
- #define NET_SKB_PAD 32
-
- #define __read_mostly __attribute__((__section__(".data..read_mostly")))
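
The cache.h rewrite above derives L1_CACHE_BYTES from the shift instead of hard-coding 256. _AC() from linux/const.h pastes the UL suffix only when compiling C and drops it under __ASSEMBLY__, so the same header stays usable from assembler, where "1UL" would not parse:

        #include <linux/const.h>

        #define L1_CACHE_SHIFT  8
        #define L1_CACHE_BYTES  (_AC(1,UL) << L1_CACHE_SHIFT)   /* 256 */
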
-diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
-index f6e43d3..5f57681 100644
---- a/arch/s390/include/asm/elf.h
-+++ b/arch/s390/include/asm/elf.h
-@@ -163,8 +163,14 @@ extern unsigned int vdso_enabled;
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
--extern unsigned long randomize_et_dyn(unsigned long base);
--#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
-+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
-+
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
-+
-+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
-+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
-+#endif
-
- /* This yields a mask that user programs can use to figure out what
- instruction set this CPU supports. */
-@@ -223,9 +229,6 @@ struct linux_binprm;
- #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
- int arch_setup_additional_pages(struct linux_binprm *, int);
-
--extern unsigned long arch_randomize_brk(struct mm_struct *mm);
--#define arch_randomize_brk arch_randomize_brk
--
- void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
-
- #endif
-diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
-index c4a93d6..4d2a9b4 100644
---- a/arch/s390/include/asm/exec.h
-+++ b/arch/s390/include/asm/exec.h
-@@ -7,6 +7,6 @@
- #ifndef __ASM_EXEC_H
- #define __ASM_EXEC_H
-
--extern unsigned long arch_align_stack(unsigned long sp);
-+#define arch_align_stack(x) ((x) & ~0xfUL)
-
- #endif /* __ASM_EXEC_H */
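
With the randomizing body deleted from process.c later in this patch, the macro above is all that remains of arch_align_stack(): align the starting stack pointer down to 16 bytes. Equivalent function form, for clarity:

        static inline unsigned long align_stack_16(unsigned long sp)
        {
                return sp & ~0xfUL;     /* clear the low four bits */
        }
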
-diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
-index cd4c68e..6764641 100644
---- a/arch/s390/include/asm/uaccess.h
-+++ b/arch/s390/include/asm/uaccess.h
-@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
- __range_ok((unsigned long)(addr), (size)); \
- })
-
-+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
- #define access_ok(type, addr, size) __access_ok(addr, size)
-
- /*
-@@ -275,6 +276,10 @@ static inline unsigned long __must_check
- copy_to_user(void __user *to, const void *from, unsigned long n)
- {
- might_fault();
-+
-+ if ((long)n < 0)
-+ return n;
-+
- return __copy_to_user(to, from, n);
- }
-
-@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
- static inline unsigned long __must_check
- copy_from_user(void *to, const void __user *from, unsigned long n)
- {
-- unsigned int sz = __compiletime_object_size(to);
-+ size_t sz = __compiletime_object_size(to);
-
- might_fault();
-- if (unlikely(sz != -1 && sz < n)) {
-+
-+ if ((long)n < 0)
-+ return n;
-+
-+ if (unlikely(sz != (size_t)-1 && sz < n)) {
- copy_from_user_overflow();
- return n;
- }
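
The (long)n < 0 guard added above is a cheap net for size underflow: a length that wrapped on an unsigned type becomes a huge value whose top bit is set, so reinterpreted as signed it is negative and the copy is refused before any work is done. Illustrative trigger:

        size_t want = 0;
        unsigned long n = want - 1;     /* wraps to ULONG_MAX */

        if ((long)n < 0)                /* top bit set => refuse */
                return n;
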
-diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
-index 409d152..d90d368 100644
---- a/arch/s390/kernel/module.c
-+++ b/arch/s390/kernel/module.c
-@@ -165,11 +165,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
-
- /* Increase core size by size of got & plt and set start
- offsets for got and plt. */
-- me->core_size = ALIGN(me->core_size, 4);
-- me->arch.got_offset = me->core_size;
-- me->core_size += me->arch.got_size;
-- me->arch.plt_offset = me->core_size;
-- me->core_size += me->arch.plt_size;
-+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
-+ me->arch.got_offset = me->core_size_rw;
-+ me->core_size_rw += me->arch.got_size;
-+ me->arch.plt_offset = me->core_size_rx;
-+ me->core_size_rx += me->arch.plt_size;
- return 0;
- }
-
-@@ -285,7 +285,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- if (info->got_initialized == 0) {
- Elf_Addr *gotent;
-
-- gotent = me->module_core + me->arch.got_offset +
-+ gotent = me->module_core_rw + me->arch.got_offset +
- info->got_offset;
- *gotent = val;
- info->got_initialized = 1;
-@@ -308,7 +308,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- rc = apply_rela_bits(loc, val, 0, 64, 0);
- else if (r_type == R_390_GOTENT ||
- r_type == R_390_GOTPLTENT) {
-- val += (Elf_Addr) me->module_core - loc;
-+ val += (Elf_Addr) me->module_core_rw - loc;
- rc = apply_rela_bits(loc, val, 1, 32, 1);
- }
- break;
-@@ -321,7 +321,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
- if (info->plt_initialized == 0) {
- unsigned int *ip;
-- ip = me->module_core + me->arch.plt_offset +
-+ ip = me->module_core_rx + me->arch.plt_offset +
- info->plt_offset;
- #ifndef CONFIG_64BIT
- ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
-@@ -346,7 +346,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- val - loc + 0xffffUL < 0x1ffffeUL) ||
- (r_type == R_390_PLT32DBL &&
- val - loc + 0xffffffffULL < 0x1fffffffeULL)))
-- val = (Elf_Addr) me->module_core +
-+ val = (Elf_Addr) me->module_core_rx +
- me->arch.plt_offset +
- info->plt_offset;
- val += rela->r_addend - loc;
-@@ -368,7 +368,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- case R_390_GOTOFF32: /* 32 bit offset to GOT. */
- case R_390_GOTOFF64: /* 64 bit offset to GOT. */
- val = val + rela->r_addend -
-- ((Elf_Addr) me->module_core + me->arch.got_offset);
-+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
- if (r_type == R_390_GOTOFF16)
- rc = apply_rela_bits(loc, val, 0, 16, 0);
- else if (r_type == R_390_GOTOFF32)
-@@ -378,7 +378,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- break;
- case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
- case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
-- val = (Elf_Addr) me->module_core + me->arch.got_offset +
-+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
- rela->r_addend - loc;
- if (r_type == R_390_GOTPC)
- rc = apply_rela_bits(loc, val, 1, 32, 0);
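
The module.c hunks above split the module core budget in two: the GOT, which the relocator writes at runtime, is sized into core_size_rw, while the PLT stubs go into core_size_rx. The assumed payoff (the allocation and protection side lives in the generic module changes elsewhere in this patch; helper names below follow the x86-style set_memory_* API and are illustrative) is that each half can carry the strictest protection it tolerates:

        /* sketch only: code half loses write, data half loses exec */
        set_memory_ro((unsigned long)me->module_core_rx, rx_pages);
        set_memory_nx((unsigned long)me->module_core_rw, rw_pages);
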
-diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
-index aa7a839..6c2a916 100644
---- a/arch/s390/kernel/process.c
-+++ b/arch/s390/kernel/process.c
-@@ -219,37 +219,3 @@ unsigned long get_wchan(struct task_struct *p)
- }
- return 0;
- }
--
--unsigned long arch_align_stack(unsigned long sp)
--{
-- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-- sp -= get_random_int() & ~PAGE_MASK;
-- return sp & ~0xf;
--}
--
--static inline unsigned long brk_rnd(void)
--{
-- /* 8MB for 32bit, 1GB for 64bit */
-- if (is_32bit_task())
-- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
-- else
-- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
--}
--
--unsigned long arch_randomize_brk(struct mm_struct *mm)
--{
-- unsigned long ret;
--
-- ret = PAGE_ALIGN(mm->brk + brk_rnd());
-- return (ret > mm->brk) ? ret : mm->brk;
--}
--
--unsigned long randomize_et_dyn(unsigned long base)
--{
-- unsigned long ret;
--
-- if (!(current->flags & PF_RANDOMIZE))
-- return base;
-- ret = PAGE_ALIGN(base + brk_rnd());
-- return (ret > base) ? ret : base;
--}
-diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
-index 9b436c2..54fbf0a 100644
---- a/arch/s390/mm/mmap.c
-+++ b/arch/s390/mm/mmap.c
-@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = mmap_base_legacy();
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
- mm->mmap_base = mmap_base();
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- }
- }
-@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = mmap_base_legacy();
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- mm->get_unmapped_area = s390_get_unmapped_area;
- } else {
- mm->mmap_base = mmap_base();
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+#endif
-+
- mm->get_unmapped_area = s390_get_unmapped_area_topdown;
- }
- }
-diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
-index ae3d59f..f65f075 100644
---- a/arch/score/include/asm/cache.h
-+++ b/arch/score/include/asm/cache.h
-@@ -1,7 +1,9 @@
- #ifndef _ASM_SCORE_CACHE_H
- #define _ASM_SCORE_CACHE_H
-
-+#include <linux/const.h>
-+
- #define L1_CACHE_SHIFT 4
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #endif /* _ASM_SCORE_CACHE_H */
-diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
-index f9f3cd5..58ff438 100644
---- a/arch/score/include/asm/exec.h
-+++ b/arch/score/include/asm/exec.h
-@@ -1,6 +1,6 @@
- #ifndef _ASM_SCORE_EXEC_H
- #define _ASM_SCORE_EXEC_H
-
--extern unsigned long arch_align_stack(unsigned long sp);
-+#define arch_align_stack(x) (x)
-
- #endif /* _ASM_SCORE_EXEC_H */
-diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
-index a1519ad3..e8ac1ff 100644
---- a/arch/score/kernel/process.c
-+++ b/arch/score/kernel/process.c
-@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
-
- return task_pt_regs(task)->cp0_epc;
- }
--
--unsigned long arch_align_stack(unsigned long sp)
--{
-- return sp;
--}
-diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
-index ef9e555..331bd29 100644
---- a/arch/sh/include/asm/cache.h
-+++ b/arch/sh/include/asm/cache.h
-@@ -9,10 +9,11 @@
- #define __ASM_SH_CACHE_H
- #ifdef __KERNEL__
-
-+#include <linux/const.h>
- #include <linux/init.h>
- #include <cpu/cache.h>
-
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #define __read_mostly __attribute__((__section__(".data..read_mostly")))
-
-diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
-index 6777177..cb5e44f 100644
---- a/arch/sh/mm/mmap.c
-+++ b/arch/sh/mm/mmap.c
-@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- int do_colour_align;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
- struct vm_unmapped_area_info info;
-
- if (flags & MAP_FIXED) {
-@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- if (filp || (flags & MAP_SHARED))
- do_colour_align = 1;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (addr) {
- if (do_colour_align)
- addr = COLOUR_ALIGN(addr, pgoff);
-@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma(mm, addr);
-- if (TASK_SIZE - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
- return addr;
- }
-
- info.flags = 0;
- info.length = len;
-- info.low_limit = TASK_UNMAPPED_BASE;
-+ info.low_limit = mm->mmap_base;
- info.high_limit = TASK_SIZE;
- info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
- info.align_offset = pgoff << PAGE_SHIFT;
-@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- struct mm_struct *mm = current->mm;
- unsigned long addr = addr0;
- int do_colour_align;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
- struct vm_unmapped_area_info info;
-
- if (flags & MAP_FIXED) {
-@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- if (filp || (flags & MAP_SHARED))
- do_colour_align = 1;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- /* requesting a specific address */
- if (addr) {
- if (do_colour_align)
-@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma(mm, addr);
-- if (TASK_SIZE - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
- return addr;
- }
-
-@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- VM_BUG_ON(addr != -ENOMEM);
- info.flags = 0;
- info.low_limit = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ info.low_limit += mm->delta_mmap;
-+#endif
-+
- info.high_limit = TASK_SIZE;
- addr = vm_unmapped_area(&info);
- }
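
Both arch_get_unmapped_area variants above replace the open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(), threading through a per-mapping random offset from gr_rand_threadstack_offset(). A simplified sketch of what the helper enforces (the real one in this patch also special-cases stack VMAs and the configured guard gap):

        static int sketch_gap_ok(const struct vm_area_struct *vma,
                                 unsigned long addr, unsigned long len,
                                 unsigned long offset)
        {
                if (!vma)
                        return 1;
                /* keep [addr, addr + len + offset) below the next VMA */
                return addr + len + offset <= vma->vm_start;
        }
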
-diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
-index 4082749..fd97781 100644
---- a/arch/sparc/include/asm/atomic_64.h
-+++ b/arch/sparc/include/asm/atomic_64.h
-@@ -15,18 +15,38 @@
- #define ATOMIC64_INIT(i) { (i) }
-
- #define atomic_read(v) ACCESS_ONCE((v)->counter)
-+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
-+{
-+ return ACCESS_ONCE(v->counter);
-+}
- #define atomic64_read(v) ACCESS_ONCE((v)->counter)
-+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
-+{
-+ return ACCESS_ONCE(v->counter);
-+}
-
- #define atomic_set(v, i) (((v)->counter) = i)
-+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
-+{
-+ v->counter = i;
-+}
- #define atomic64_set(v, i) (((v)->counter) = i)
-+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
-+{
-+ v->counter = i;
-+}
-
--#define ATOMIC_OP(op) \
--void atomic_##op(int, atomic_t *); \
--void atomic64_##op(long, atomic64_t *);
-+#define __ATOMIC_OP(op, suffix) \
-+void atomic_##op##suffix(int, atomic##suffix##_t *); \
-+void atomic64_##op##suffix(long, atomic64##suffix##_t *);
-
--#define ATOMIC_OP_RETURN(op) \
--int atomic_##op##_return(int, atomic_t *); \
--long atomic64_##op##_return(long, atomic64_t *);
-+#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
-+
-+#define __ATOMIC_OP_RETURN(op, suffix) \
-+int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
-+long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
-+
-+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
-
- #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
-
-@@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
-
- #undef ATOMIC_OPS
- #undef ATOMIC_OP_RETURN
-+#undef __ATOMIC_OP_RETURN
- #undef ATOMIC_OP
-+#undef __ATOMIC_OP
-
- #define atomic_dec_return(v) atomic_sub_return(1, v)
- #define atomic64_dec_return(v) atomic64_sub_return(1, v)
-
- #define atomic_inc_return(v) atomic_add_return(1, v)
-+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
-+{
-+ return atomic_add_return_unchecked(1, v);
-+}
- #define atomic64_inc_return(v) atomic64_add_return(1, v)
-+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
-+{
-+ return atomic64_add_return_unchecked(1, v);
-+}
-
- /*
- * atomic_inc_and_test - increment and test
-@@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
- * other cases.
- */
- #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
-+{
-+ return atomic_inc_return_unchecked(v) == 0;
-+}
- #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-
- #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-@@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
- #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
-
- #define atomic_inc(v) atomic_add(1, v)
-+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
-+{
-+ atomic_add_unchecked(1, v);
-+}
- #define atomic64_inc(v) atomic64_add(1, v)
-+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
-+{
-+ atomic64_add_unchecked(1, v);
-+}
-
- #define atomic_dec(v) atomic_sub(1, v)
-+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
-+{
-+ atomic_sub_unchecked(1, v);
-+}
- #define atomic64_dec(v) atomic64_sub(1, v)
-+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
-+{
-+ atomic64_sub_unchecked(1, v);
-+}
-
- #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
- #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
-
- #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
-+{
-+ return cmpxchg(&v->counter, old, new);
-+}
- #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
-+{
-+ return xchg(&v->counter, new);
-+}
-
- static inline int __atomic_add_unless(atomic_t *v, int a, int u)
- {
-- int c, old;
-+ int c, old, new;
- c = atomic_read(v);
- for (;;) {
-- if (unlikely(c == (u)))
-+ if (unlikely(c == u))
- break;
-- old = atomic_cmpxchg((v), c, c + (a));
-+
-+ asm volatile("addcc %2, %0, %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "tvs %%icc, 6\n"
-+#endif
-+
-+ : "=r" (new)
-+ : "0" (c), "ir" (a)
-+ : "cc");
-+
-+ old = atomic_cmpxchg(v, c, new);
- if (likely(old == c))
- break;
- c = old;
-@@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
- #define atomic64_cmpxchg(v, o, n) \
- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
- #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
-+{
-+ return xchg(&v->counter, new);
-+}
-
- static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
- {
-- long c, old;
-+ long c, old, new;
- c = atomic64_read(v);
- for (;;) {
-- if (unlikely(c == (u)))
-+ if (unlikely(c == u))
- break;
-- old = atomic64_cmpxchg((v), c, c + (a));
-+
-+ asm volatile("addcc %2, %0, %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "tvs %%xcc, 6\n"
-+#endif
-+
-+ : "=r" (new)
-+ : "0" (c), "ir" (a)
-+ : "cc");
-+
-+ old = atomic64_cmpxchg(v, c, new);
- if (likely(old == c))
- break;
- c = old;
- }
-- return c != (u);
-+ return c != u;
- }
-
- #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
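
The inline-asm rewrites above are the core of PAX_REFCOUNT on sparc64: addcc sets the condition codes and "tvs %icc, 6" (%xcc for 64-bit values) traps if the signed add overflowed, instead of letting a reference count silently wrap. A portable C analogue of the checked add, as a sketch rather than the kernel code:

        static inline int add_or_trap(int c, int a)
        {
                int sum;

                /* GCC/Clang builtin; trap instead of wrapping */
                if (__builtin_add_overflow(c, a, &sum))
                        __builtin_trap();       /* like "tvs %icc, 6" */
                return sum;
        }
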
-diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
-index 7664894..45a974b 100644
---- a/arch/sparc/include/asm/barrier_64.h
-+++ b/arch/sparc/include/asm/barrier_64.h
-@@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
- do { \
- compiletime_assert_atomic_type(*p); \
- barrier(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
-diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
-index 5bb6991..5c2132e 100644
---- a/arch/sparc/include/asm/cache.h
-+++ b/arch/sparc/include/asm/cache.h
-@@ -7,10 +7,12 @@
- #ifndef _SPARC_CACHE_H
- #define _SPARC_CACHE_H
-
-+#include <linux/const.h>
-+
- #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-
- #define L1_CACHE_SHIFT 5
--#define L1_CACHE_BYTES 32
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #ifdef CONFIG_SPARC32
- #define SMP_CACHE_BYTES_SHIFT 5
-diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
-index a24e41f..47677ff 100644
---- a/arch/sparc/include/asm/elf_32.h
-+++ b/arch/sparc/include/asm/elf_32.h
-@@ -114,6 +114,13 @@ typedef struct {
-
- #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE 0x10000UL
-+
-+#define PAX_DELTA_MMAP_LEN 16
-+#define PAX_DELTA_STACK_LEN 16
-+#endif
-+
- /* This yields a mask that user programs can use to figure out what
- instruction set this cpu supports. This can NOT be done in userspace
- on Sparc. */
-diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
-index 370ca1e..d4f4a98 100644
---- a/arch/sparc/include/asm/elf_64.h
-+++ b/arch/sparc/include/asm/elf_64.h
-@@ -189,6 +189,13 @@ typedef struct {
- #define ELF_ET_DYN_BASE 0x0000010000000000UL
- #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
-+
-+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
-+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
-+#endif
-+
- extern unsigned long sparc64_elf_hwcap;
- #define ELF_HWCAP sparc64_elf_hwcap
-
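
The PAX_DELTA_*_LEN values above give the bit width of the ASLR deltas for this architecture. A hedged sketch of how such a width is consumed (assumed to match the PaX binfmt_elf.c usage elsewhere in this patch):

        /* a random, page-aligned offset of PAX_DELTA_MMAP_LEN bits */
        mm->delta_mmap = (pax_get_random_long() &
                          ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
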
-diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
-index a3890da..f6a408e 100644
---- a/arch/sparc/include/asm/pgalloc_32.h
-+++ b/arch/sparc/include/asm/pgalloc_32.h
-@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
- }
-
- #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
-+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
-
- static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
- unsigned long address)
-diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
-index 5e31871..13469c6 100644
---- a/arch/sparc/include/asm/pgalloc_64.h
-+++ b/arch/sparc/include/asm/pgalloc_64.h
-@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
- }
-
- #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
-+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
-
- static inline pgd_t *pgd_alloc(struct mm_struct *mm)
- {
-@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
- }
-
- #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
-+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
-
- static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
- {
-diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
-index 59ba6f6..4518128 100644
---- a/arch/sparc/include/asm/pgtable.h
-+++ b/arch/sparc/include/asm/pgtable.h
-@@ -5,4 +5,8 @@
- #else
- #include <asm/pgtable_32.h>
- #endif
-+
-+#define ktla_ktva(addr) (addr)
-+#define ktva_ktla(addr) (addr)
-+
- #endif
-diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
-index b9b91ae..950b91e 100644
---- a/arch/sparc/include/asm/pgtable_32.h
-+++ b/arch/sparc/include/asm/pgtable_32.h
-@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
- #define PAGE_SHARED SRMMU_PAGE_SHARED
- #define PAGE_COPY SRMMU_PAGE_COPY
- #define PAGE_READONLY SRMMU_PAGE_RDONLY
-+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
-+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
-+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
- #define PAGE_KERNEL SRMMU_PAGE_KERNEL
-
- /* Top-level page directory - dummy used by init-mm.
-@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
-
- /* xwr */
- #define __P000 PAGE_NONE
--#define __P001 PAGE_READONLY
--#define __P010 PAGE_COPY
--#define __P011 PAGE_COPY
-+#define __P001 PAGE_READONLY_NOEXEC
-+#define __P010 PAGE_COPY_NOEXEC
-+#define __P011 PAGE_COPY_NOEXEC
- #define __P100 PAGE_READONLY
- #define __P101 PAGE_READONLY
- #define __P110 PAGE_COPY
- #define __P111 PAGE_COPY
-
- #define __S000 PAGE_NONE
--#define __S001 PAGE_READONLY
--#define __S010 PAGE_SHARED
--#define __S011 PAGE_SHARED
-+#define __S001 PAGE_READONLY_NOEXEC
-+#define __S010 PAGE_SHARED_NOEXEC
-+#define __S011 PAGE_SHARED_NOEXEC
- #define __S100 PAGE_READONLY
- #define __S101 PAGE_READONLY
- #define __S110 PAGE_SHARED
-diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
-index 79da178..c2eede8 100644
---- a/arch/sparc/include/asm/pgtsrmmu.h
-+++ b/arch/sparc/include/asm/pgtsrmmu.h
-@@ -115,6 +115,11 @@
- SRMMU_EXEC | SRMMU_REF)
- #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
- SRMMU_EXEC | SRMMU_REF)
-+
-+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
-+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
-+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
-+
- #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
- SRMMU_DIRTY | SRMMU_REF)
-
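
The pgtsrmmu.h additions above define the no-execute protections that pgtable_32.h plugs into the __P/__S tables: identical to their plain counterparts minus SRMMU_EXEC. Conceptually (a sketch; the real selection goes through the xwr protection tables), a mapping created without PROT_EXEC now gets a PTE whose instruction fetch faults:

        pgprot_t prot = (vm_flags & VM_EXEC) ? PAGE_SHARED
                                             : PAGE_SHARED_NOEXEC;
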
-diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
-index 29d64b1..4272fe8 100644
---- a/arch/sparc/include/asm/setup.h
-+++ b/arch/sparc/include/asm/setup.h
-@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
- void handle_ld_nf(u32 insn, struct pt_regs *regs);
-
- /* init_64.c */
--extern atomic_t dcpage_flushes;
--extern atomic_t dcpage_flushes_xcall;
-+extern atomic_unchecked_t dcpage_flushes;
-+extern atomic_unchecked_t dcpage_flushes_xcall;
-
- extern int sysctl_tsb_ratio;
- #endif
-diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
-index 9689176..63c18ea 100644
---- a/arch/sparc/include/asm/spinlock_64.h
-+++ b/arch/sparc/include/asm/spinlock_64.h
-@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
-
- /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-
--static void inline arch_read_lock(arch_rwlock_t *lock)
-+static inline void arch_read_lock(arch_rwlock_t *lock)
- {
- unsigned long tmp1, tmp2;
-
- __asm__ __volatile__ (
- "1: ldsw [%2], %0\n"
- " brlz,pn %0, 2f\n"
--"4: add %0, 1, %1\n"
-+"4: addcc %0, 1, %1\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" tvs %%icc, 6\n"
-+#endif
-+
- " cas [%2], %0, %1\n"
- " cmp %0, %1\n"
- " bne,pn %%icc, 1b\n"
-@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
- " .previous"
- : "=&r" (tmp1), "=&r" (tmp2)
- : "r" (lock)
-- : "memory");
-+ : "memory", "cc");
- }
-
--static int inline arch_read_trylock(arch_rwlock_t *lock)
-+static inline int arch_read_trylock(arch_rwlock_t *lock)
- {
- int tmp1, tmp2;
-
-@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
- "1: ldsw [%2], %0\n"
- " brlz,a,pn %0, 2f\n"
- " mov 0, %0\n"
--" add %0, 1, %1\n"
-+" addcc %0, 1, %1\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" tvs %%icc, 6\n"
-+#endif
-+
- " cas [%2], %0, %1\n"
- " cmp %0, %1\n"
- " bne,pn %%icc, 1b\n"
-@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
- return tmp1;
- }
-
--static void inline arch_read_unlock(arch_rwlock_t *lock)
-+static inline void arch_read_unlock(arch_rwlock_t *lock)
- {
- unsigned long tmp1, tmp2;
-
- __asm__ __volatile__(
- "1: lduw [%2], %0\n"
--" sub %0, 1, %1\n"
-+" subcc %0, 1, %1\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" tvs %%icc, 6\n"
-+#endif
-+
- " cas [%2], %0, %1\n"
- " cmp %0, %1\n"
- " bne,pn %%xcc, 1b\n"
-@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
- : "memory");
- }
-
--static void inline arch_write_lock(arch_rwlock_t *lock)
-+static inline void arch_write_lock(arch_rwlock_t *lock)
- {
- unsigned long mask, tmp1, tmp2;
-
-@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
- : "memory");
- }
-
--static void inline arch_write_unlock(arch_rwlock_t *lock)
-+static inline void arch_write_unlock(arch_rwlock_t *lock)
- {
- __asm__ __volatile__(
- " stw %%g0, [%0]"
-@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
- : "memory");
- }
-
--static int inline arch_write_trylock(arch_rwlock_t *lock)
-+static inline int arch_write_trylock(arch_rwlock_t *lock)
- {
- unsigned long mask, tmp1, tmp2, result;
-
-diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
-index 025c984..a216504 100644
---- a/arch/sparc/include/asm/thread_info_32.h
-+++ b/arch/sparc/include/asm/thread_info_32.h
-@@ -49,6 +49,8 @@ struct thread_info {
- unsigned long w_saved;
-
- struct restart_block restart_block;
-+
-+ unsigned long lowest_stack;
- };
-
- /*
-diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
-index 798f027..b009941 100644
---- a/arch/sparc/include/asm/thread_info_64.h
-+++ b/arch/sparc/include/asm/thread_info_64.h
-@@ -63,6 +63,8 @@ struct thread_info {
- struct pt_regs *kern_una_regs;
- unsigned int kern_una_insn;
-
-+ unsigned long lowest_stack;
-+
- unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
- __attribute__ ((aligned(64)));
- };
-@@ -190,12 +192,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
- #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
- /* flag bit 4 is available */
- #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
--/* flag bit 6 is available */
-+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
- #define TIF_32BIT 7 /* 32-bit binary */
- #define TIF_NOHZ 8 /* in adaptive nohz mode */
- #define TIF_SECCOMP 9 /* secure computing */
- #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
- #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
-+
- /* NOTE: Thread flags >= 12 should be ones we have no interest
- * in using in assembly, else we can't use the mask as
- * an immediate value in instructions such as andcc.
-@@ -215,12 +218,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
- #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
- #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
- #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
-
- #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
- _TIF_DO_NOTIFY_RESUME_MASK | \
- _TIF_NEED_RESCHED)
- #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
-
-+#define _TIF_WORK_SYSCALL \
-+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
-+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
-+
- #define is_32bit_task() (test_thread_flag(TIF_32BIT))
-
- /*
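
_TIF_WORK_SYSCALL above folds the new TIF_GRSEC_SETXID bit in with the existing trace/seccomp/audit/tracepoint bits, and the header's own note explains the numbering constraint: flags used from assembly must fit an andcc immediate. C rendering of the intended entry-path test (the real check is assembly; sketch only):

        if (current_thread_info()->flags & _TIF_WORK_SYSCALL)
                syscall_trace_enter(regs);      /* take the slow path */
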
-diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
-index bd56c28..4b63d83 100644
---- a/arch/sparc/include/asm/uaccess.h
-+++ b/arch/sparc/include/asm/uaccess.h
-@@ -1,5 +1,6 @@
- #ifndef ___ASM_SPARC_UACCESS_H
- #define ___ASM_SPARC_UACCESS_H
-+
- #if defined(__sparc__) && defined(__arch64__)
- #include <asm/uaccess_64.h>
- #else
-diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
-index 9634d08..020b2dc 100644
---- a/arch/sparc/include/asm/uaccess_32.h
-+++ b/arch/sparc/include/asm/uaccess_32.h
-@@ -47,6 +47,7 @@
- #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
- #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
- #define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
-+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
- #define access_ok(type, addr, size) \
- ({ (void)(type); __access_ok((unsigned long)(addr), size); })
-
-@@ -250,27 +251,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
-
- static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
- {
-- if (n && __access_ok((unsigned long) to, n))
-+ if ((long)n < 0)
-+ return n;
-+
-+ if (n && __access_ok((unsigned long) to, n)) {
-+ if (!__builtin_constant_p(n))
-+ check_object_size(from, n, true);
- return __copy_user(to, (__force void __user *) from, n);
-- else
-+ } else
- return n;
- }
-
- static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
- {
-+ if ((long)n < 0)
-+ return n;
-+
-+ if (!__builtin_constant_p(n))
-+ check_object_size(from, n, true);
-+
- return __copy_user(to, (__force void __user *) from, n);
- }
-
- static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
- {
-- if (n && __access_ok((unsigned long) from, n))
-+ if ((long)n < 0)
-+ return n;
-+
-+ if (n && __access_ok((unsigned long) from, n)) {
-+ if (!__builtin_constant_p(n))
-+ check_object_size(to, n, false);
- return __copy_user((__force void __user *) to, from, n);
-- else
-+ } else
- return n;
- }
-
- static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
- {
-+ if ((long)n < 0)
-+ return n;
-+
- return __copy_user((__force void __user *) to, from, n);
- }
-
-diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
-index c990a5e..7384856 100644
---- a/arch/sparc/include/asm/uaccess_64.h
-+++ b/arch/sparc/include/asm/uaccess_64.h
-@@ -10,6 +10,7 @@
- #include <linux/compiler.h>
- #include <linux/string.h>
- #include <linux/thread_info.h>
-+#include <linux/kernel.h>
- #include <asm/asi.h>
- #include <asm/spitfire.h>
- #include <asm-generic/uaccess-unaligned.h>
-@@ -54,6 +55,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
- return 1;
- }
-
-+static inline int access_ok_noprefault(int type, const void __user * addr, unsigned long size)
-+{
-+ return 1;
-+}
-+
- static inline int access_ok(int type, const void __user * addr, unsigned long size)
- {
- return 1;
-@@ -214,8 +220,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
- static inline unsigned long __must_check
- copy_from_user(void *to, const void __user *from, unsigned long size)
- {
-- unsigned long ret = ___copy_from_user(to, from, size);
-+ unsigned long ret;
-
-+ if ((long)size < 0 || size > INT_MAX)
-+ return size;
-+
-+ if (!__builtin_constant_p(size))
-+ check_object_size(to, size, false);
-+
-+ ret = ___copy_from_user(to, from, size);
- if (unlikely(ret))
- ret = copy_from_user_fixup(to, from, size);
-
-@@ -231,8 +244,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
- static inline unsigned long __must_check
- copy_to_user(void __user *to, const void *from, unsigned long size)
- {
-- unsigned long ret = ___copy_to_user(to, from, size);
-+ unsigned long ret;
-
-+ if ((long)size < 0 || size > INT_MAX)
-+ return size;
-+
-+ if (!__builtin_constant_p(size))
-+ check_object_size(from, size, true);
-+
-+ ret = ___copy_to_user(to, from, size);
- if (unlikely(ret))
- ret = copy_to_user_fixup(to, from, size);
- return ret;
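
The copy_{from,to}_user wrappers above gain two layers: implausible sizes (negative when viewed as signed, or above INT_MAX) are refused outright, and sizes not known at compile time are handed to check_object_size(), the PAX_USERCOPY check defined elsewhere in this patch, to verify the kernel buffer spans a single object. Constant sizes skip the runtime check since the __compiletime_object_size() path already covers them. Condensed:

        if ((long)size < 0 || size > INT_MAX)   /* implausible length */
                return size;
        if (!__builtin_constant_p(size))        /* runtime sizes only */
                check_object_size(to, size, false);
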
-diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
-index 7cf9c6e..6206648 100644
---- a/arch/sparc/kernel/Makefile
-+++ b/arch/sparc/kernel/Makefile
-@@ -4,7 +4,7 @@
- #
-
- asflags-y := -ansi
--ccflags-y := -Werror
-+#ccflags-y := -Werror
-
- extra-y := head_$(BITS).o
-
-diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
-index 50e7b62..79fae35 100644
---- a/arch/sparc/kernel/process_32.c
-+++ b/arch/sparc/kernel/process_32.c
-@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
-
- printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
- r->psr, r->pc, r->npc, r->y, print_tainted());
-- printk("PC: <%pS>\n", (void *) r->pc);
-+ printk("PC: <%pA>\n", (void *) r->pc);
- printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
- r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
- r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
- printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
- r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
- r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
-- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
-+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
-
- printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
- rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
-@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
- rw = (struct reg_window32 *) fp;
- pc = rw->ins[7];
- printk("[%08lx : ", pc);
-- printk("%pS ] ", (void *) pc);
-+ printk("%pA ] ", (void *) pc);
- fp = rw->ins[6];
- } while (++count < 16);
- printk("\n");
-diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
-index 46a5964..a35c62c 100644
---- a/arch/sparc/kernel/process_64.c
-+++ b/arch/sparc/kernel/process_64.c
-@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
- printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
- rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
- if (regs->tstate & TSTATE_PRIV)
-- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
-+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
- }
-
- void show_regs(struct pt_regs *regs)
-@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
-
- printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
- regs->tpc, regs->tnpc, regs->y, print_tainted());
-- printk("TPC: <%pS>\n", (void *) regs->tpc);
-+ printk("TPC: <%pA>\n", (void *) regs->tpc);
- printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
- regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
- regs->u_regs[3]);
-@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
- printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
- regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
- regs->u_regs[15]);
-- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
-+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
- show_regwindow(regs);
- show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
- }
-@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
- ((tp && tp->task) ? tp->task->pid : -1));
-
- if (gp->tstate & TSTATE_PRIV) {
-- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
-+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
- (void *) gp->tpc,
- (void *) gp->o7,
- (void *) gp->i7,
-diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
-index 79cc0d1..ec62734 100644
---- a/arch/sparc/kernel/prom_common.c
-+++ b/arch/sparc/kernel/prom_common.c
-@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
-
- unsigned int prom_early_allocated __initdata;
-
--static struct of_pdt_ops prom_sparc_ops __initdata = {
-+static struct of_pdt_ops prom_sparc_ops __initconst = {
- .nextprop = prom_common_nextprop,
- .getproplen = prom_getproplen,
- .getproperty = prom_getproperty,
-diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
-index 9ddc492..27a5619 100644
---- a/arch/sparc/kernel/ptrace_64.c
-+++ b/arch/sparc/kernel/ptrace_64.c
-@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
- return ret;
- }
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+extern void gr_delayed_cred_worker(void);
-+#endif
-+
- asmlinkage int syscall_trace_enter(struct pt_regs *regs)
- {
- int ret = 0;
-@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
- if (test_thread_flag(TIF_NOHZ))
- user_exit();
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
-+ gr_delayed_cred_worker();
-+#endif
-+
- if (test_thread_flag(TIF_SYSCALL_TRACE))
- ret = tracehook_report_syscall_entry(regs);
-
-@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
- if (test_thread_flag(TIF_NOHZ))
- user_exit();
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
-+ gr_delayed_cred_worker();
-+#endif
-+
- audit_syscall_exit(regs);
-
- if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
-diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
-index da6f1a7..e5dea8f 100644
---- a/arch/sparc/kernel/smp_64.c
-+++ b/arch/sparc/kernel/smp_64.c
-@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
- return;
-
- #ifdef CONFIG_DEBUG_DCFLUSH
-- atomic_inc(&dcpage_flushes);
-+ atomic_inc_unchecked(&dcpage_flushes);
- #endif
-
- this_cpu = get_cpu();
-@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
- xcall_deliver(data0, __pa(pg_addr),
- (u64) pg_addr, cpumask_of(cpu));
- #ifdef CONFIG_DEBUG_DCFLUSH
-- atomic_inc(&dcpage_flushes_xcall);
-+ atomic_inc_unchecked(&dcpage_flushes_xcall);
- #endif
- }
- }
-@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
- preempt_disable();
-
- #ifdef CONFIG_DEBUG_DCFLUSH
-- atomic_inc(&dcpage_flushes);
-+ atomic_inc_unchecked(&dcpage_flushes);
- #endif
- data0 = 0;
- pg_addr = page_address(page);
-@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
- xcall_deliver(data0, __pa(pg_addr),
- (u64) pg_addr, cpu_online_mask);
- #ifdef CONFIG_DEBUG_DCFLUSH
-- atomic_inc(&dcpage_flushes_xcall);
-+ atomic_inc_unchecked(&dcpage_flushes_xcall);
- #endif
- }
- __local_flush_dcache_page(page);
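
dcpage_flushes and dcpage_flushes_xcall are debug statistics (see the setup.h hunk above), so wrap-around is harmless; converting them to atomic_unchecked_t opts them out of the REFCOUNT overflow trap that now guards ordinary atomic_t. The pattern, sketched:

        /* stats counter: overflow is benign, use the unchecked op */
        static atomic_unchecked_t flushes = ATOMIC_INIT(0);

        atomic_inc_unchecked(&flushes);         /* never traps */
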
-diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
-index 646988d..b88905f 100644
---- a/arch/sparc/kernel/sys_sparc_32.c
-+++ b/arch/sparc/kernel/sys_sparc_32.c
-@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- if (len > TASK_SIZE - PAGE_SIZE)
- return -ENOMEM;
- if (!addr)
-- addr = TASK_UNMAPPED_BASE;
-+ addr = current->mm->mmap_base;
-
- info.flags = 0;
- info.length = len;
-diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
-index 30e7ddb..266a3b0 100644
---- a/arch/sparc/kernel/sys_sparc_64.c
-+++ b/arch/sparc/kernel/sys_sparc_64.c
-@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- struct vm_area_struct * vma;
- unsigned long task_size = TASK_SIZE;
- int do_color_align;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
- struct vm_unmapped_area_info info;
-
- if (flags & MAP_FIXED) {
- /* We do not accept a shared mapping if it would violate
- * cache aliasing constraints.
- */
-- if ((flags & MAP_SHARED) &&
-+ if ((filp || (flags & MAP_SHARED)) &&
- ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
- return -EINVAL;
- return addr;
-@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- if (filp || (flags & MAP_SHARED))
- do_color_align = 1;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (addr) {
- if (do_color_align)
- addr = COLOR_ALIGN(addr, pgoff);
-@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma(mm, addr);
-- if (task_size - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
- return addr;
- }
-
- info.flags = 0;
- info.length = len;
-- info.low_limit = TASK_UNMAPPED_BASE;
-+ info.low_limit = mm->mmap_base;
- info.high_limit = min(task_size, VA_EXCLUDE_START);
- info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
- info.align_offset = pgoff << PAGE_SHIFT;
-+ info.threadstack_offset = offset;
- addr = vm_unmapped_area(&info);
-
- if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
- VM_BUG_ON(addr != -ENOMEM);
- info.low_limit = VA_EXCLUDE_END;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ info.low_limit += mm->delta_mmap;
-+#endif
-+
- info.high_limit = task_size;
- addr = vm_unmapped_area(&info);
- }
-@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- unsigned long task_size = STACK_TOP32;
- unsigned long addr = addr0;
- int do_color_align;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
- struct vm_unmapped_area_info info;
-
- /* This should only ever run for 32-bit processes. */
-@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- /* We do not accept a shared mapping if it would violate
- * cache aliasing constraints.
- */
-- if ((flags & MAP_SHARED) &&
-+ if ((filp || (flags & MAP_SHARED)) &&
- ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
- return -EINVAL;
- return addr;
-@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- if (filp || (flags & MAP_SHARED))
- do_color_align = 1;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- /* requesting a specific address */
- if (addr) {
- if (do_color_align)
-@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma(mm, addr);
-- if (task_size - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
- return addr;
- }
-
-@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- info.high_limit = mm->mmap_base;
- info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
- info.align_offset = pgoff << PAGE_SHIFT;
-+ info.threadstack_offset = offset;
- addr = vm_unmapped_area(&info);
-
- /*
-@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- VM_BUG_ON(addr != -ENOMEM);
- info.flags = 0;
- info.low_limit = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ info.low_limit += mm->delta_mmap;
-+#endif
-+
- info.high_limit = STACK_TOP32;
- addr = vm_unmapped_area(&info);
- }
-@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
- EXPORT_SYMBOL(get_fb_unmapped_area);
-
- /* Essentially the same as PowerPC. */
--static unsigned long mmap_rnd(void)
-+static unsigned long mmap_rnd(struct mm_struct *mm)
- {
- unsigned long rnd = 0UL;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (current->flags & PF_RANDOMIZE) {
- unsigned long val = get_random_int();
- if (test_thread_flag(TIF_32BIT))
-@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
-
- void arch_pick_mmap_layout(struct mm_struct *mm)
- {
-- unsigned long random_factor = mmap_rnd();
-+ unsigned long random_factor = mmap_rnd(mm);
- unsigned long gap;
-
- /*
-@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- gap == RLIM_INFINITY ||
- sysctl_legacy_va_layout) {
- mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
- /* We know it's 32-bit */
-@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- gap = (task_size / 6 * 5);
-
- mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- }
- }
-diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
-index bb00089..e0ea580 100644
---- a/arch/sparc/kernel/syscalls.S
-+++ b/arch/sparc/kernel/syscalls.S
-@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
- #endif
- .align 32
- 1: ldx [%g6 + TI_FLAGS], %l5
-- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
-+ andcc %l5, _TIF_WORK_SYSCALL, %g0
- be,pt %icc, rtrap
- nop
- call syscall_trace_leave
-@@ -194,7 +194,7 @@ linux_sparc_syscall32:
-
- srl %i3, 0, %o3 ! IEU0
- srl %i2, 0, %o2 ! IEU0 Group
-- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
-+ andcc %l0, _TIF_WORK_SYSCALL, %g0
- bne,pn %icc, linux_syscall_trace32 ! CTI
- mov %i0, %l5 ! IEU1
- 5: call %l7 ! CTI Group brk forced
-@@ -218,7 +218,7 @@ linux_sparc_syscall:
-
- mov %i3, %o3 ! IEU1
- mov %i4, %o4 ! IEU0 Group
-- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
-+ andcc %l0, _TIF_WORK_SYSCALL, %g0
- bne,pn %icc, linux_syscall_trace ! CTI Group
- mov %i0, %l5 ! IEU0
- 2: call %l7 ! CTI Group brk forced
-@@ -233,7 +233,7 @@ ret_sys_call:
-
- cmp %o0, -ERESTART_RESTARTBLOCK
- bgeu,pn %xcc, 1f
-- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
-+ andcc %l0, _TIF_WORK_SYSCALL, %g0
- ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
-
- 2:
-diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
-index 6fd386c5..6907d81 100644
---- a/arch/sparc/kernel/traps_32.c
-+++ b/arch/sparc/kernel/traps_32.c
-@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
- #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
- #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
-
-+extern void gr_handle_kernel_exploit(void);
-+
- void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
- {
- static int die_counter;
-@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
- count++ < 30 &&
- (((unsigned long) rw) >= PAGE_OFFSET) &&
- !(((unsigned long) rw) & 0x7)) {
-- printk("Caller[%08lx]: %pS\n", rw->ins[7],
-+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
- (void *) rw->ins[7]);
- rw = (struct reg_window32 *)rw->ins[6];
- }
- }
- printk("Instruction DUMP:");
- instruction_dump ((unsigned long *) regs->pc);
-- if(regs->psr & PSR_PS)
-+ if(regs->psr & PSR_PS) {
-+ gr_handle_kernel_exploit();
- do_exit(SIGKILL);
-+ }
- do_exit(SIGSEGV);
- }
-
-diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
-index 981a769..d906eda 100644
---- a/arch/sparc/kernel/traps_64.c
-+++ b/arch/sparc/kernel/traps_64.c
-@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
- i + 1,
- p->trapstack[i].tstate, p->trapstack[i].tpc,
- p->trapstack[i].tnpc, p->trapstack[i].tt);
-- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
-+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
- }
- }
-
-@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
-
- lvl -= 0x100;
- if (regs->tstate & TSTATE_PRIV) {
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ if (lvl == 6)
-+ pax_report_refcount_overflow(regs);
-+#endif
-+
- sprintf(buffer, "Kernel bad sw trap %lx", lvl);
- die_if_kernel(buffer, regs);
- }
-@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
- void bad_trap_tl1(struct pt_regs *regs, long lvl)
- {
- char buffer[32];
--
-+
- if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
- 0, lvl, SIGTRAP) == NOTIFY_STOP)
- return;
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+ if (lvl == 6)
-+ pax_report_refcount_overflow(regs);
-+#endif
-+
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
-
- sprintf (buffer, "Bad trap %lx at tl>0", lvl);
-@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
- regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
- printk("%s" "ERROR(%d): ",
- (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
-- printk("TPC<%pS>\n", (void *) regs->tpc);
-+ printk("TPC<%pA>\n", (void *) regs->tpc);
- printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
- (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
- (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
-@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
- smp_processor_id(),
- (type & 0x1) ? 'I' : 'D',
- regs->tpc);
-- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
-+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
- panic("Irrecoverable Cheetah+ parity error.");
- }
-
-@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
- smp_processor_id(),
- (type & 0x1) ? 'I' : 'D',
- regs->tpc);
-- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
-+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
- }
-
- struct sun4v_error_entry {
-@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
- /*0x38*/u64 reserved_5;
- };
-
--static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
--static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
-+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
-+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
-
- static const char *sun4v_err_type_to_str(u8 type)
- {
-@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
- }
-
- static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
-- int cpu, const char *pfx, atomic_t *ocnt)
-+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
- {
- u64 *raw_ptr = (u64 *) ent;
- u32 attrs;
-@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
-
- show_regs(regs);
-
-- if ((cnt = atomic_read(ocnt)) != 0) {
-- atomic_set(ocnt, 0);
-+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
-+ atomic_set_unchecked(ocnt, 0);
- wmb();
- printk("%s: Queue overflowed %d times.\n",
- pfx, cnt);
-@@ -2048,7 +2059,7 @@ out:
- */
- void sun4v_resum_overflow(struct pt_regs *regs)
- {
-- atomic_inc(&sun4v_resum_oflow_cnt);
-+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
- }
-
- /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
-@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
- /* XXX Actually even this can make not that much sense. Perhaps
- * XXX we should just pull the plug and panic directly from here?
- */
-- atomic_inc(&sun4v_nonresum_oflow_cnt);
-+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
- }
-
- static void sun4v_tlb_error(struct pt_regs *regs)
-@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
-
- printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
- regs->tpc, tl);
-- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
-+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
- printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
-- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
-+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
- (void *) regs->u_regs[UREG_I7]);
- printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
- "pte[%lx] error[%lx]\n",
-@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
-
- printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
- regs->tpc, tl);
-- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
-+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
- printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
-- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
-+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
- (void *) regs->u_regs[UREG_I7]);
- printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
- "pte[%lx] error[%lx]\n",
-@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
- fp = (unsigned long)sf->fp + STACK_BIAS;
- }
-
-- printk(" [%016lx] %pS\n", pc, (void *) pc);
-+ printk(" [%016lx] %pA\n", pc, (void *) pc);
- #ifdef CONFIG_FUNCTION_GRAPH_TRACER
- if ((pc + 8UL) == (unsigned long) &return_to_handler) {
- int index = tsk->curr_ret_stack;
- if (tsk->ret_stack && index >= graph) {
- pc = tsk->ret_stack[index - graph].ret;
-- printk(" [%016lx] %pS\n", pc, (void *) pc);
-+ printk(" [%016lx] %pA\n", pc, (void *) pc);
- graph++;
- }
- }
-@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
- return (struct reg_window *) (fp + STACK_BIAS);
- }
-
-+extern void gr_handle_kernel_exploit(void);
-+
- void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
- {
- static int die_counter;
-@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
- while (rw &&
- count++ < 30 &&
- kstack_valid(tp, (unsigned long) rw)) {
-- printk("Caller[%016lx]: %pS\n", rw->ins[7],
-+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
- (void *) rw->ins[7]);
-
- rw = kernel_stack_up(rw);
-@@ -2427,8 +2440,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
- }
- user_instruction_dump ((unsigned int __user *) regs->tpc);
- }
-- if (regs->tstate & TSTATE_PRIV)
-+ if (regs->tstate & TSTATE_PRIV) {
-+ gr_handle_kernel_exploit();
- do_exit(SIGKILL);
-+ }
- do_exit(SIGSEGV);
- }
- EXPORT_SYMBOL(die_if_kernel);
-diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
-index 62098a8..547ab2c 100644
---- a/arch/sparc/kernel/unaligned_64.c
-+++ b/arch/sparc/kernel/unaligned_64.c
-@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
- static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
-
- if (__ratelimit(&ratelimit)) {
-- printk("Kernel unaligned access at TPC[%lx] %pS\n",
-+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
- regs->tpc, (void *) regs->tpc);
- }
- }
-diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
-index 3269b02..64f5231 100644
---- a/arch/sparc/lib/Makefile
-+++ b/arch/sparc/lib/Makefile
-@@ -2,7 +2,7 @@
- #
-
- asflags-y := -ansi -DST_DIV0=0x02
--ccflags-y := -Werror
-+#ccflags-y := -Werror
-
- lib-$(CONFIG_SPARC32) += ashrdi3.o
- lib-$(CONFIG_SPARC32) += memcpy.o memset.o
-diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
-index 05dac43..76f8ed4 100644
---- a/arch/sparc/lib/atomic_64.S
-+++ b/arch/sparc/lib/atomic_64.S
-@@ -15,11 +15,22 @@
- * a value and does the barriers.
- */
-
--#define ATOMIC_OP(op) \
--ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
-+#ifdef CONFIG_PAX_REFCOUNT
-+#define __REFCOUNT_OP(op) op##cc
-+#define __OVERFLOW_IOP tvs %icc, 6;
-+#define __OVERFLOW_XOP tvs %xcc, 6;
-+#else
-+#define __REFCOUNT_OP(op) op
-+#define __OVERFLOW_IOP
-+#define __OVERFLOW_XOP
-+#endif
-+
-+#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
-+ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
- BACKOFF_SETUP(%o2); \
- 1: lduw [%o1], %g1; \
-- op %g1, %o0, %g7; \
-+ asm_op %g1, %o0, %g7; \
-+ post_op \
- cas [%o1], %g1, %g7; \
- cmp %g1, %g7; \
- bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
-@@ -29,11 +40,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
- 2: BACKOFF_SPIN(%o2, %o3, 1b); \
- ENDPROC(atomic_##op); \
-
--#define ATOMIC_OP_RETURN(op) \
--ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
-+#define ATOMIC_OP(op) __ATOMIC_OP(op, , op, ) \
-+ __ATOMIC_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
-+
-+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
-+ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
- BACKOFF_SETUP(%o2); \
- 1: lduw [%o1], %g1; \
-- op %g1, %o0, %g7; \
-+ asm_op %g1, %o0, %g7; \
-+ post_op \
- cas [%o1], %g1, %g7; \
- cmp %g1, %g7; \
- bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
-@@ -43,6 +58,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
- 2: BACKOFF_SPIN(%o2, %o3, 1b); \
- ENDPROC(atomic_##op##_return);
-
-+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \
-+ __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
-+
- #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
-
- ATOMIC_OPS(add)
-@@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
-
- #undef ATOMIC_OPS
- #undef ATOMIC_OP_RETURN
-+#undef __ATOMIC_OP_RETURN
- #undef ATOMIC_OP
-+#undef __ATOMIC_OP
-
--#define ATOMIC64_OP(op) \
--ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
-+#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
-+ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
- BACKOFF_SETUP(%o2); \
- 1: ldx [%o1], %g1; \
-- op %g1, %o0, %g7; \
-+ asm_op %g1, %o0, %g7; \
-+ post_op \
- casx [%o1], %g1, %g7; \
- cmp %g1, %g7; \
- bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
-@@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
- 2: BACKOFF_SPIN(%o2, %o3, 1b); \
- ENDPROC(atomic64_##op); \
-
--#define ATOMIC64_OP_RETURN(op) \
--ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
-+#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , op, ) \
-+ __ATOMIC64_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
-+
-+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
-+ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
- BACKOFF_SETUP(%o2); \
- 1: ldx [%o1], %g1; \
-- op %g1, %o0, %g7; \
-+ asm_op %g1, %o0, %g7; \
-+ post_op \
- casx [%o1], %g1, %g7; \
- cmp %g1, %g7; \
- bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
-@@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
- 2: BACKOFF_SPIN(%o2, %o3, 1b); \
- ENDPROC(atomic64_##op##_return);
-
-+#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \
-+	__ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
-+
- #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
-
- ATOMIC64_OPS(add)
-@@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
-
- #undef ATOMIC64_OPS
- #undef ATOMIC64_OP_RETURN
-+#undef __ATOMIC64_OP_RETURN
- #undef ATOMIC64_OP
-+#undef __ATOMIC64_OP
-+#undef __OVERFLOW_XOP
-+#undef __OVERFLOW_IOP
-+#undef __REFCOUNT_OP
-
- ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
- BACKOFF_SETUP(%o2)
-diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
-index 1d649a9..fbc5bfc 100644
---- a/arch/sparc/lib/ksyms.c
-+++ b/arch/sparc/lib/ksyms.c
-@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
- /* Atomic counter implementation. */
- #define ATOMIC_OP(op) \
- EXPORT_SYMBOL(atomic_##op); \
--EXPORT_SYMBOL(atomic64_##op);
-+EXPORT_SYMBOL(atomic_##op##_unchecked); \
-+EXPORT_SYMBOL(atomic64_##op); \
-+EXPORT_SYMBOL(atomic64_##op##_unchecked);
-
- #define ATOMIC_OP_RETURN(op) \
- EXPORT_SYMBOL(atomic_##op##_return); \
-@@ -110,6 +112,8 @@ EXPORT_SYMBOL(atomic64_##op##_return);
- #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
-
- ATOMIC_OPS(add)
-+EXPORT_SYMBOL(atomic_add_ret_unchecked);
-+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
- ATOMIC_OPS(sub)
-
- #undef ATOMIC_OPS
-diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
-index 30c3ecc..736f015 100644
---- a/arch/sparc/mm/Makefile
-+++ b/arch/sparc/mm/Makefile
-@@ -2,7 +2,7 @@
- #
-
- asflags-y := -ansi
--ccflags-y := -Werror
-+#ccflags-y := -Werror
-
- obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
- obj-y += fault_$(BITS).o
-diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
-index 70d8171..274c6c0 100644
---- a/arch/sparc/mm/fault_32.c
-+++ b/arch/sparc/mm/fault_32.c
-@@ -21,6 +21,9 @@
- #include <linux/perf_event.h>
- #include <linux/interrupt.h>
- #include <linux/kdebug.h>
-+#include <linux/slab.h>
-+#include <linux/pagemap.h>
-+#include <linux/compiler.h>
-
- #include <asm/page.h>
- #include <asm/pgtable.h>
-@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
- return safe_compute_effective_address(regs, insn);
- }
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+#ifdef CONFIG_PAX_DLRESOLVE
-+static void pax_emuplt_close(struct vm_area_struct *vma)
-+{
-+ vma->vm_mm->call_dl_resolve = 0UL;
-+}
-+
-+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-+{
-+ unsigned int *kaddr;
-+
-+ vmf->page = alloc_page(GFP_HIGHUSER);
-+ if (!vmf->page)
-+ return VM_FAULT_OOM;
-+
-+ kaddr = kmap(vmf->page);
-+ memset(kaddr, 0, PAGE_SIZE);
-+ kaddr[0] = 0x9DE3BFA8U; /* save */
-+ flush_dcache_page(vmf->page);
-+ kunmap(vmf->page);
-+ return VM_FAULT_MAJOR;
-+}
-+
-+static const struct vm_operations_struct pax_vm_ops = {
-+ .close = pax_emuplt_close,
-+ .fault = pax_emuplt_fault
-+};
-+
-+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
-+{
-+ int ret;
-+
-+ INIT_LIST_HEAD(&vma->anon_vma_chain);
-+ vma->vm_mm = current->mm;
-+ vma->vm_start = addr;
-+ vma->vm_end = addr + PAGE_SIZE;
-+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
-+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-+ vma->vm_ops = &pax_vm_ops;
-+
-+ ret = insert_vm_struct(current->mm, vma);
-+ if (ret)
-+ return ret;
-+
-+ ++current->mm->total_vm;
-+ return 0;
-+}
-+#endif
-+
-+/*
-+ * PaX: decide what to do with offenders (regs->pc = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ * 2 when patched PLT trampoline was detected
-+ * 3 when unpatched PLT trampoline was detected
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+ int err;
-+
-+ do { /* PaX: patched PLT emulation #1 */
-+ unsigned int sethi1, sethi2, jmpl;
-+
-+ err = get_user(sethi1, (unsigned int *)regs->pc);
-+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
-+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
-+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
-+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
-+ {
-+ unsigned int addr;
-+
-+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
-+ addr = regs->u_regs[UREG_G1];
-+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
-+ regs->pc = addr;
-+ regs->npc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: patched PLT emulation #2 */
-+ unsigned int ba;
-+
-+ err = get_user(ba, (unsigned int *)regs->pc);
-+
-+ if (err)
-+ break;
-+
-+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
-+ unsigned int addr;
-+
-+ if ((ba & 0xFFC00000U) == 0x30800000U)
-+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
-+ else
-+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
-+ regs->pc = addr;
-+ regs->npc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: patched PLT emulation #3 */
-+ unsigned int sethi, bajmpl, nop;
-+
-+ err = get_user(sethi, (unsigned int *)regs->pc);
-+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
-+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
-+ nop == 0x01000000U)
-+ {
-+ unsigned int addr;
-+
-+ addr = (sethi & 0x003FFFFFU) << 10;
-+ regs->u_regs[UREG_G1] = addr;
-+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
-+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
-+ else
-+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
-+ regs->pc = addr;
-+ regs->npc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: unpatched PLT emulation step 1 */
-+ unsigned int sethi, ba, nop;
-+
-+ err = get_user(sethi, (unsigned int *)regs->pc);
-+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
-+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
-+ nop == 0x01000000U)
-+ {
-+ unsigned int addr, save, call;
-+
-+ if ((ba & 0xFFC00000U) == 0x30800000U)
-+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
-+ else
-+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
-+
-+ err = get_user(save, (unsigned int *)addr);
-+ err |= get_user(call, (unsigned int *)(addr+4));
-+ err |= get_user(nop, (unsigned int *)(addr+8));
-+ if (err)
-+ break;
-+
-+#ifdef CONFIG_PAX_DLRESOLVE
-+ if (save == 0x9DE3BFA8U &&
-+ (call & 0xC0000000U) == 0x40000000U &&
-+ nop == 0x01000000U)
-+ {
-+ struct vm_area_struct *vma;
-+ unsigned long call_dl_resolve;
-+
-+ down_read(&current->mm->mmap_sem);
-+ call_dl_resolve = current->mm->call_dl_resolve;
-+ up_read(&current->mm->mmap_sem);
-+ if (likely(call_dl_resolve))
-+ goto emulate;
-+
-+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-+
-+ down_write(&current->mm->mmap_sem);
-+ if (current->mm->call_dl_resolve) {
-+ call_dl_resolve = current->mm->call_dl_resolve;
-+ up_write(&current->mm->mmap_sem);
-+ if (vma)
-+ kmem_cache_free(vm_area_cachep, vma);
-+ goto emulate;
-+ }
-+
-+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
-+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
-+ up_write(&current->mm->mmap_sem);
-+ if (vma)
-+ kmem_cache_free(vm_area_cachep, vma);
-+ return 1;
-+ }
-+
-+ if (pax_insert_vma(vma, call_dl_resolve)) {
-+ up_write(&current->mm->mmap_sem);
-+ kmem_cache_free(vm_area_cachep, vma);
-+ return 1;
-+ }
-+
-+ current->mm->call_dl_resolve = call_dl_resolve;
-+ up_write(&current->mm->mmap_sem);
-+
-+emulate:
-+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
-+ regs->pc = call_dl_resolve;
-+ regs->npc = addr+4;
-+ return 3;
-+ }
-+#endif
-+
-+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
-+ if ((save & 0xFFC00000U) == 0x05000000U &&
-+ (call & 0xFFFFE000U) == 0x85C0A000U &&
-+ nop == 0x01000000U)
-+ {
-+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
-+ regs->u_regs[UREG_G2] = addr + 4;
-+ addr = (save & 0x003FFFFFU) << 10;
-+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
-+ regs->pc = addr;
-+ regs->npc = addr+4;
-+ return 3;
-+ }
-+ }
-+ } while (0);
-+
-+ do { /* PaX: unpatched PLT emulation step 2 */
-+ unsigned int save, call, nop;
-+
-+ err = get_user(save, (unsigned int *)(regs->pc-4));
-+ err |= get_user(call, (unsigned int *)regs->pc);
-+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
-+ if (err)
-+ break;
-+
-+ if (save == 0x9DE3BFA8U &&
-+ (call & 0xC0000000U) == 0x40000000U &&
-+ nop == 0x01000000U)
-+ {
-+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
-+
-+ regs->u_regs[UREG_RETPC] = regs->pc;
-+ regs->pc = dl_resolve;
-+ regs->npc = dl_resolve+4;
-+ return 3;
-+ }
-+ } while (0);
-+#endif
-+
-+ return 1;
-+}
-+
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ unsigned long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 8; i++) {
-+ unsigned int c;
-+ if (get_user(c, (unsigned int *)pc+i))
-+ printk(KERN_CONT "???????? ");
-+ else
-+ printk(KERN_CONT "%08x ", c);
-+ }
-+ printk("\n");
-+}
-+#endif
-+
- static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
- int text_fault)
- {
-@@ -226,6 +500,24 @@ good_area:
- if (!(vma->vm_flags & VM_WRITE))
- goto bad_area;
- } else {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
-+ up_read(&mm->mmap_sem);
-+ switch (pax_handle_fetch_fault(regs)) {
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+ case 2:
-+ case 3:
-+ return;
-+#endif
-+
-+ }
-+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
-+ do_group_exit(SIGKILL);
-+ }
-+#endif
-+
- /* Allow reads even for write-only mappings */
- if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
- goto bad_area;
-diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
-index 4798232..f76e3aa 100644
---- a/arch/sparc/mm/fault_64.c
-+++ b/arch/sparc/mm/fault_64.c
-@@ -22,6 +22,9 @@
- #include <linux/kdebug.h>
- #include <linux/percpu.h>
- #include <linux/context_tracking.h>
-+#include <linux/slab.h>
-+#include <linux/pagemap.h>
-+#include <linux/compiler.h>
-
- #include <asm/page.h>
- #include <asm/pgtable.h>
-@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
- printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
- regs->tpc);
- printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
-- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
-+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
- printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
- dump_stack();
- unhandled_fault(regs->tpc, current, regs);
-@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
- show_regs(regs);
- }
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+#ifdef CONFIG_PAX_DLRESOLVE
-+static void pax_emuplt_close(struct vm_area_struct *vma)
-+{
-+ vma->vm_mm->call_dl_resolve = 0UL;
-+}
-+
-+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-+{
-+ unsigned int *kaddr;
-+
-+ vmf->page = alloc_page(GFP_HIGHUSER);
-+ if (!vmf->page)
-+ return VM_FAULT_OOM;
-+
-+ kaddr = kmap(vmf->page);
-+ memset(kaddr, 0, PAGE_SIZE);
-+ kaddr[0] = 0x9DE3BFA8U; /* save */
-+ flush_dcache_page(vmf->page);
-+ kunmap(vmf->page);
-+ return VM_FAULT_MAJOR;
-+}
-+
-+static const struct vm_operations_struct pax_vm_ops = {
-+ .close = pax_emuplt_close,
-+ .fault = pax_emuplt_fault
-+};
-+
-+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
-+{
-+ int ret;
-+
-+ INIT_LIST_HEAD(&vma->anon_vma_chain);
-+ vma->vm_mm = current->mm;
-+ vma->vm_start = addr;
-+ vma->vm_end = addr + PAGE_SIZE;
-+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
-+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-+ vma->vm_ops = &pax_vm_ops;
-+
-+ ret = insert_vm_struct(current->mm, vma);
-+ if (ret)
-+ return ret;
-+
-+ ++current->mm->total_vm;
-+ return 0;
-+}
-+#endif
-+
-+/*
-+ * PaX: decide what to do with offenders (regs->tpc = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ * 2 when patched PLT trampoline was detected
-+ * 3 when unpatched PLT trampoline was detected
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+ int err;
-+
-+ do { /* PaX: patched PLT emulation #1 */
-+ unsigned int sethi1, sethi2, jmpl;
-+
-+ err = get_user(sethi1, (unsigned int *)regs->tpc);
-+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
-+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
-+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
-+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
-+ {
-+ unsigned long addr;
-+
-+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
-+ addr = regs->u_regs[UREG_G1];
-+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
-+
-+ if (test_thread_flag(TIF_32BIT))
-+ addr &= 0xFFFFFFFFUL;
-+
-+ regs->tpc = addr;
-+ regs->tnpc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: patched PLT emulation #2 */
-+ unsigned int ba;
-+
-+ err = get_user(ba, (unsigned int *)regs->tpc);
-+
-+ if (err)
-+ break;
-+
-+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
-+ unsigned long addr;
-+
-+ if ((ba & 0xFFC00000U) == 0x30800000U)
-+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
-+ else
-+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
-+
-+ if (test_thread_flag(TIF_32BIT))
-+ addr &= 0xFFFFFFFFUL;
-+
-+ regs->tpc = addr;
-+ regs->tnpc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: patched PLT emulation #3 */
-+ unsigned int sethi, bajmpl, nop;
-+
-+ err = get_user(sethi, (unsigned int *)regs->tpc);
-+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
-+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
-+ nop == 0x01000000U)
-+ {
-+ unsigned long addr;
-+
-+ addr = (sethi & 0x003FFFFFU) << 10;
-+ regs->u_regs[UREG_G1] = addr;
-+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
-+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
-+ else
-+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
-+
-+ if (test_thread_flag(TIF_32BIT))
-+ addr &= 0xFFFFFFFFUL;
-+
-+ regs->tpc = addr;
-+ regs->tnpc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: patched PLT emulation #4 */
-+ unsigned int sethi, mov1, call, mov2;
-+
-+ err = get_user(sethi, (unsigned int *)regs->tpc);
-+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
-+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
-+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ mov1 == 0x8210000FU &&
-+ (call & 0xC0000000U) == 0x40000000U &&
-+ mov2 == 0x9E100001U)
-+ {
-+ unsigned long addr;
-+
-+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
-+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
-+
-+ if (test_thread_flag(TIF_32BIT))
-+ addr &= 0xFFFFFFFFUL;
-+
-+ regs->tpc = addr;
-+ regs->tnpc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: patched PLT emulation #5 */
-+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
-+
-+ err = get_user(sethi, (unsigned int *)regs->tpc);
-+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
-+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
-+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
-+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
-+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
-+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
-+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
-+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
-+ (or1 & 0xFFFFE000U) == 0x82106000U &&
-+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
-+ sllx == 0x83287020U &&
-+ jmpl == 0x81C04005U &&
-+ nop == 0x01000000U)
-+ {
-+ unsigned long addr;
-+
-+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
-+ regs->u_regs[UREG_G1] <<= 32;
-+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
-+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
-+ regs->tpc = addr;
-+ regs->tnpc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: patched PLT emulation #6 */
-+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
-+
-+ err = get_user(sethi, (unsigned int *)regs->tpc);
-+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
-+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
-+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
-+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
-+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
-+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
-+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
-+ sllx == 0x83287020U &&
-+ (or & 0xFFFFE000U) == 0x8A116000U &&
-+ jmpl == 0x81C04005U &&
-+ nop == 0x01000000U)
-+ {
-+ unsigned long addr;
-+
-+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
-+ regs->u_regs[UREG_G1] <<= 32;
-+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
-+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
-+ regs->tpc = addr;
-+ regs->tnpc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+ do { /* PaX: unpatched PLT emulation step 1 */
-+ unsigned int sethi, ba, nop;
-+
-+ err = get_user(sethi, (unsigned int *)regs->tpc);
-+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
-+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
-+ nop == 0x01000000U)
-+ {
-+ unsigned long addr;
-+ unsigned int save, call;
-+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
-+
-+ if ((ba & 0xFFC00000U) == 0x30800000U)
-+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
-+ else
-+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
-+
-+ if (test_thread_flag(TIF_32BIT))
-+ addr &= 0xFFFFFFFFUL;
-+
-+ err = get_user(save, (unsigned int *)addr);
-+ err |= get_user(call, (unsigned int *)(addr+4));
-+ err |= get_user(nop, (unsigned int *)(addr+8));
-+ if (err)
-+ break;
-+
-+#ifdef CONFIG_PAX_DLRESOLVE
-+ if (save == 0x9DE3BFA8U &&
-+ (call & 0xC0000000U) == 0x40000000U &&
-+ nop == 0x01000000U)
-+ {
-+ struct vm_area_struct *vma;
-+ unsigned long call_dl_resolve;
-+
-+ down_read(&current->mm->mmap_sem);
-+ call_dl_resolve = current->mm->call_dl_resolve;
-+ up_read(&current->mm->mmap_sem);
-+ if (likely(call_dl_resolve))
-+ goto emulate;
-+
-+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-+
-+ down_write(&current->mm->mmap_sem);
-+ if (current->mm->call_dl_resolve) {
-+ call_dl_resolve = current->mm->call_dl_resolve;
-+ up_write(&current->mm->mmap_sem);
-+ if (vma)
-+ kmem_cache_free(vm_area_cachep, vma);
-+ goto emulate;
-+ }
-+
-+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
-+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
-+ up_write(&current->mm->mmap_sem);
-+ if (vma)
-+ kmem_cache_free(vm_area_cachep, vma);
-+ return 1;
-+ }
-+
-+ if (pax_insert_vma(vma, call_dl_resolve)) {
-+ up_write(&current->mm->mmap_sem);
-+ kmem_cache_free(vm_area_cachep, vma);
-+ return 1;
-+ }
-+
-+ current->mm->call_dl_resolve = call_dl_resolve;
-+ up_write(&current->mm->mmap_sem);
-+
-+emulate:
-+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
-+ regs->tpc = call_dl_resolve;
-+ regs->tnpc = addr+4;
-+ return 3;
-+ }
-+#endif
-+
-+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
-+ if ((save & 0xFFC00000U) == 0x05000000U &&
-+ (call & 0xFFFFE000U) == 0x85C0A000U &&
-+ nop == 0x01000000U)
-+ {
-+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
-+ regs->u_regs[UREG_G2] = addr + 4;
-+ addr = (save & 0x003FFFFFU) << 10;
-+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
-+
-+ if (test_thread_flag(TIF_32BIT))
-+ addr &= 0xFFFFFFFFUL;
-+
-+ regs->tpc = addr;
-+ regs->tnpc = addr+4;
-+ return 3;
-+ }
-+
-+ /* PaX: 64-bit PLT stub */
-+ err = get_user(sethi1, (unsigned int *)addr);
-+ err |= get_user(sethi2, (unsigned int *)(addr+4));
-+ err |= get_user(or1, (unsigned int *)(addr+8));
-+ err |= get_user(or2, (unsigned int *)(addr+12));
-+ err |= get_user(sllx, (unsigned int *)(addr+16));
-+ err |= get_user(add, (unsigned int *)(addr+20));
-+ err |= get_user(jmpl, (unsigned int *)(addr+24));
-+ err |= get_user(nop, (unsigned int *)(addr+28));
-+ if (err)
-+ break;
-+
-+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
-+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
-+ (or1 & 0xFFFFE000U) == 0x88112000U &&
-+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
-+ sllx == 0x89293020U &&
-+ add == 0x8A010005U &&
-+ jmpl == 0x89C14000U &&
-+ nop == 0x01000000U)
-+ {
-+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
-+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
-+ regs->u_regs[UREG_G4] <<= 32;
-+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
-+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
-+ regs->u_regs[UREG_G4] = addr + 24;
-+ addr = regs->u_regs[UREG_G5];
-+ regs->tpc = addr;
-+ regs->tnpc = addr+4;
-+ return 3;
-+ }
-+ }
-+ } while (0);
-+
-+#ifdef CONFIG_PAX_DLRESOLVE
-+ do { /* PaX: unpatched PLT emulation step 2 */
-+ unsigned int save, call, nop;
-+
-+ err = get_user(save, (unsigned int *)(regs->tpc-4));
-+ err |= get_user(call, (unsigned int *)regs->tpc);
-+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
-+ if (err)
-+ break;
-+
-+ if (save == 0x9DE3BFA8U &&
-+ (call & 0xC0000000U) == 0x40000000U &&
-+ nop == 0x01000000U)
-+ {
-+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
-+
-+ if (test_thread_flag(TIF_32BIT))
-+ dl_resolve &= 0xFFFFFFFFUL;
-+
-+ regs->u_regs[UREG_RETPC] = regs->tpc;
-+ regs->tpc = dl_resolve;
-+ regs->tnpc = dl_resolve+4;
-+ return 3;
-+ }
-+ } while (0);
-+#endif
-+
-+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
-+ unsigned int sethi, ba, nop;
-+
-+ err = get_user(sethi, (unsigned int *)regs->tpc);
-+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
-+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
-+
-+ if (err)
-+ break;
-+
-+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+ (ba & 0xFFF00000U) == 0x30600000U &&
-+ nop == 0x01000000U)
-+ {
-+ unsigned long addr;
-+
-+ addr = (sethi & 0x003FFFFFU) << 10;
-+ regs->u_regs[UREG_G1] = addr;
-+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
-+
-+ if (test_thread_flag(TIF_32BIT))
-+ addr &= 0xFFFFFFFFUL;
-+
-+ regs->tpc = addr;
-+ regs->tnpc = addr+4;
-+ return 2;
-+ }
-+ } while (0);
-+
-+#endif
-+
-+ return 1;
-+}
-+
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ unsigned long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 8; i++) {
-+ unsigned int c;
-+ if (get_user(c, (unsigned int *)pc+i))
-+ printk(KERN_CONT "???????? ");
-+ else
-+ printk(KERN_CONT "%08x ", c);
-+ }
-+ printk("\n");
-+}
-+#endif
-+
- asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
- {
- enum ctx_state prev_state = exception_enter();
-@@ -353,6 +816,29 @@ retry:
- if (!vma)
- goto bad_area;
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ /* PaX: detect ITLB misses on non-exec pages */
-+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
-+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
-+ {
-+ if (address != regs->tpc)
-+ goto good_area;
-+
-+ up_read(&mm->mmap_sem);
-+ switch (pax_handle_fetch_fault(regs)) {
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+ case 2:
-+ case 3:
-+ return;
-+#endif
-+
-+ }
-+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
-+ do_group_exit(SIGKILL);
-+ }
-+#endif
-+
- /* Pure DTLB misses do not tell us whether the fault causing
- * load/store/atomic was a write or not, it only says that there
- * was no match. So in such a case we (carefully) read the
-diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
-index 4242eab..9ae6360 100644
---- a/arch/sparc/mm/hugetlbpage.c
-+++ b/arch/sparc/mm/hugetlbpage.c
-@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
- unsigned long addr,
- unsigned long len,
- unsigned long pgoff,
-- unsigned long flags)
-+ unsigned long flags,
-+ unsigned long offset)
- {
-+ struct mm_struct *mm = current->mm;
- unsigned long task_size = TASK_SIZE;
- struct vm_unmapped_area_info info;
-
-@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
-
- info.flags = 0;
- info.length = len;
-- info.low_limit = TASK_UNMAPPED_BASE;
-+ info.low_limit = mm->mmap_base;
- info.high_limit = min(task_size, VA_EXCLUDE_START);
- info.align_mask = PAGE_MASK & ~HPAGE_MASK;
- info.align_offset = 0;
-+ info.threadstack_offset = offset;
- addr = vm_unmapped_area(&info);
-
- if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
- VM_BUG_ON(addr != -ENOMEM);
- info.low_limit = VA_EXCLUDE_END;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ info.low_limit += mm->delta_mmap;
-+#endif
-+
- info.high_limit = task_size;
- addr = vm_unmapped_area(&info);
- }
-@@ -55,7 +64,8 @@ static unsigned long
- hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- const unsigned long len,
- const unsigned long pgoff,
-- const unsigned long flags)
-+ const unsigned long flags,
-+ const unsigned long offset)
- {
- struct mm_struct *mm = current->mm;
- unsigned long addr = addr0;
-@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- info.high_limit = mm->mmap_base;
- info.align_mask = PAGE_MASK & ~HPAGE_MASK;
- info.align_offset = 0;
-+ info.threadstack_offset = offset;
- addr = vm_unmapped_area(&info);
-
- /*
-@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- VM_BUG_ON(addr != -ENOMEM);
- info.flags = 0;
- info.low_limit = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ info.low_limit += mm->delta_mmap;
-+#endif
-+
- info.high_limit = STACK_TOP32;
- addr = vm_unmapped_area(&info);
- }
-@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long task_size = TASK_SIZE;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
-
- if (test_thread_flag(TIF_32BIT))
- task_size = STACK_TOP32;
-@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- return addr;
- }
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (addr) {
- addr = ALIGN(addr, HPAGE_SIZE);
- vma = find_vma(mm, addr);
-- if (task_size - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
- return addr;
- }
- if (mm->get_unmapped_area == arch_get_unmapped_area)
- return hugetlb_get_unmapped_area_bottomup(file, addr, len,
-- pgoff, flags);
-+ pgoff, flags, offset);
- else
- return hugetlb_get_unmapped_area_topdown(file, addr, len,
-- pgoff, flags);
-+ pgoff, flags, offset);
- }
-
- pte_t *huge_pte_alloc(struct mm_struct *mm,
-diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
-index 3ea267c..93f0659 100644
---- a/arch/sparc/mm/init_64.c
-+++ b/arch/sparc/mm/init_64.c
-@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
- int num_kernel_image_mappings;
-
- #ifdef CONFIG_DEBUG_DCFLUSH
--atomic_t dcpage_flushes = ATOMIC_INIT(0);
-+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
- #ifdef CONFIG_SMP
--atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
-+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
- #endif
- #endif
-
-@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
- {
- BUG_ON(tlb_type == hypervisor);
- #ifdef CONFIG_DEBUG_DCFLUSH
-- atomic_inc(&dcpage_flushes);
-+ atomic_inc_unchecked(&dcpage_flushes);
- #endif
-
- #ifdef DCACHE_ALIASING_POSSIBLE
-@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
-
- #ifdef CONFIG_DEBUG_DCFLUSH
- seq_printf(m, "DCPageFlushes\t: %d\n",
-- atomic_read(&dcpage_flushes));
-+ atomic_read_unchecked(&dcpage_flushes));
- #ifdef CONFIG_SMP
- seq_printf(m, "DCPageFlushesXC\t: %d\n",
-- atomic_read(&dcpage_flushes_xcall));
-+ atomic_read_unchecked(&dcpage_flushes_xcall));
- #endif /* CONFIG_SMP */
- #endif /* CONFIG_DEBUG_DCFLUSH */
- }
-diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
-index 7cca418..53fc030 100644
---- a/arch/tile/Kconfig
-+++ b/arch/tile/Kconfig
-@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
-
- config KEXEC
- bool "kexec system call"
-+ depends on !GRKERNSEC_KMEM
- ---help---
- kexec is a system call that implements the ability to shutdown your
- current kernel, and to start another kernel. It is like a reboot
-diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
-index 7b11c5f..755a026 100644
---- a/arch/tile/include/asm/atomic_64.h
-+++ b/arch/tile/include/asm/atomic_64.h
-@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
-
- #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
-+
- /* Define this to indicate that cmpxchg is an efficient operation. */
- #define __HAVE_ARCH_CMPXCHG
-
-diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
-index 6160761..00cac88 100644
---- a/arch/tile/include/asm/cache.h
-+++ b/arch/tile/include/asm/cache.h
-@@ -15,11 +15,12 @@
- #ifndef _ASM_TILE_CACHE_H
- #define _ASM_TILE_CACHE_H
-
-+#include <linux/const.h>
- #include <arch/chip.h>
-
- /* bytes per L1 data cache line */
- #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- /* bytes per L2 cache line */
- #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
-diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
-index b6cde32..c0cb736 100644
---- a/arch/tile/include/asm/uaccess.h
-+++ b/arch/tile/include/asm/uaccess.h
-@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
- const void __user *from,
- unsigned long n)
- {
-- int sz = __compiletime_object_size(to);
-+ size_t sz = __compiletime_object_size(to);
-
-- if (likely(sz == -1 || sz >= n))
-+ if (likely(sz == (size_t)-1 || sz >= n))
- n = _copy_from_user(to, from, n);
- else
- copy_from_user_overflow();
-diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
-index 8416240..a012fb7 100644
---- a/arch/tile/mm/hugetlbpage.c
-+++ b/arch/tile/mm/hugetlbpage.c
-@@ -179,6 +179,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
- info.high_limit = TASK_SIZE;
- info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
-+ info.threadstack_offset = 0;
- return vm_unmapped_area(&info);
- }
-
-@@ -196,6 +197,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
- info.high_limit = current->mm->mmap_base;
- info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
-+ info.threadstack_offset = 0;
- addr = vm_unmapped_area(&info);
-
- /*
-diff --git a/arch/um/Makefile b/arch/um/Makefile
-index e4b1a96..16162f8 100644
---- a/arch/um/Makefile
-+++ b/arch/um/Makefile
-@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
- $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
- $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
-
-+ifdef CONSTIFY_PLUGIN
-+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
-+endif
-+
- #This will adjust *FLAGS accordingly to the platform.
- include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
-
-diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
-index 19e1bdd..3665b77 100644
---- a/arch/um/include/asm/cache.h
-+++ b/arch/um/include/asm/cache.h
-@@ -1,6 +1,7 @@
- #ifndef __UM_CACHE_H
- #define __UM_CACHE_H
-
-+#include <linux/const.h>
-
- #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
- # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
-@@ -12,6 +13,6 @@
- # define L1_CACHE_SHIFT 5
- #endif
-
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #endif
-diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
-index 2e0a6b1..a64d0f5 100644
---- a/arch/um/include/asm/kmap_types.h
-+++ b/arch/um/include/asm/kmap_types.h
-@@ -8,6 +8,6 @@
-
- /* No more #include "asm/arch/kmap_types.h" ! */
-
--#define KM_TYPE_NR 14
-+#define KM_TYPE_NR 15
-
- #endif
-diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
-index 71c5d13..4c7b9f1 100644
---- a/arch/um/include/asm/page.h
-+++ b/arch/um/include/asm/page.h
-@@ -14,6 +14,9 @@
- #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
- #define PAGE_MASK (~(PAGE_SIZE-1))
-
-+#define ktla_ktva(addr) (addr)
-+#define ktva_ktla(addr) (addr)
-+
- #ifndef __ASSEMBLY__
-
- struct page;
-diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
-index 0032f92..cd151e0 100644
---- a/arch/um/include/asm/pgtable-3level.h
-+++ b/arch/um/include/asm/pgtable-3level.h
-@@ -58,6 +58,7 @@
- #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
- #define pud_populate(mm, pud, pmd) \
- set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
-+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
-
- #ifdef CONFIG_64BIT
- #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
-diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
-index f17bca8..48adb87 100644
---- a/arch/um/kernel/process.c
-+++ b/arch/um/kernel/process.c
-@@ -356,22 +356,6 @@ int singlestepping(void * t)
- return 2;
- }
-
--/*
-- * Only x86 and x86_64 have an arch_align_stack().
-- * All other arches have "#define arch_align_stack(x) (x)"
-- * in their asm/exec.h
-- * As this is included in UML from asm-um/system-generic.h,
-- * we can use it to behave as the subarch does.
-- */
--#ifndef arch_align_stack
--unsigned long arch_align_stack(unsigned long sp)
--{
-- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-- sp -= get_random_int() % 8192;
-- return sp & ~0xf;
--}
--#endif
--
- unsigned long get_wchan(struct task_struct *p)
- {
- unsigned long stack_page, sp, ip;
-diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
-index ad8f795..2c7eec6 100644
---- a/arch/unicore32/include/asm/cache.h
-+++ b/arch/unicore32/include/asm/cache.h
-@@ -12,8 +12,10 @@
- #ifndef __UNICORE_CACHE_H__
- #define __UNICORE_CACHE_H__
-
--#define L1_CACHE_SHIFT (5)
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#include <linux/const.h>
-+
-+#define L1_CACHE_SHIFT 5
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- /*
- * Memory returned by kmalloc() may be used for DMA, so we must make
-diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 0dc9d01..98df103 100644
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -130,7 +130,7 @@ config X86
- select RTC_LIB
- select HAVE_DEBUG_STACKOVERFLOW
- select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
-- select HAVE_CC_STACKPROTECTOR
-+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
- select GENERIC_CPU_AUTOPROBE
- select HAVE_ARCH_AUDITSYSCALL
- select ARCH_SUPPORTS_ATOMIC_RMW
-@@ -263,7 +263,7 @@ config X86_HT
-
- config X86_32_LAZY_GS
- def_bool y
-- depends on X86_32 && !CC_STACKPROTECTOR
-+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
-
- config ARCH_HWEIGHT_CFLAGS
- string
-@@ -601,6 +601,7 @@ config SCHED_OMIT_FRAME_POINTER
-
- menuconfig HYPERVISOR_GUEST
- bool "Linux guest support"
-+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
- ---help---
- Say Y here to enable options for running Linux under various hyper-
- visors. This option enables basic hypervisor detection and platform
-@@ -978,6 +979,7 @@ config VM86
-
- config X86_16BIT
- bool "Enable support for 16-bit segments" if EXPERT
-+ depends on !GRKERNSEC
- default y
- ---help---
- This option is required by programs like Wine to run 16-bit
-@@ -1151,6 +1153,7 @@ choice
-
- config NOHIGHMEM
- bool "off"
-+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
- ---help---
- Linux can use up to 64 Gigabytes of physical memory on x86 systems.
- However, the address space of 32-bit x86 processors is only 4
-@@ -1187,6 +1190,7 @@ config NOHIGHMEM
-
- config HIGHMEM4G
- bool "4GB"
-+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
- ---help---
- Select this if you have a 32-bit processor and between 1 and 4
- gigabytes of physical RAM.
-@@ -1239,7 +1243,7 @@ config PAGE_OFFSET
- hex
- default 0xB0000000 if VMSPLIT_3G_OPT
- default 0x80000000 if VMSPLIT_2G
-- default 0x78000000 if VMSPLIT_2G_OPT
-+ default 0x70000000 if VMSPLIT_2G_OPT
- default 0x40000000 if VMSPLIT_1G
- default 0xC0000000
- depends on X86_32
-@@ -1680,6 +1684,7 @@ source kernel/Kconfig.hz
-
- config KEXEC
- bool "kexec system call"
-+ depends on !GRKERNSEC_KMEM
- ---help---
- kexec is a system call that implements the ability to shutdown your
- current kernel, and to start another kernel. It is like a reboot
-@@ -1865,7 +1870,9 @@ config X86_NEED_RELOCS
-
- config PHYSICAL_ALIGN
- hex "Alignment value to which kernel should be aligned"
-- default "0x200000"
-+ default "0x1000000"
-+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
-+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
- range 0x2000 0x1000000 if X86_32
- range 0x200000 0x1000000 if X86_64
- ---help---
-@@ -1948,6 +1955,7 @@ config COMPAT_VDSO
- def_bool n
- prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
- depends on X86_32 || IA32_EMULATION
-+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
- ---help---
- Certain buggy versions of glibc will crash if they are
- presented with a 32-bit vDSO that is not mapped at the address
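
A note on the PHYSICAL_ALIGN hunk above: it raises the default kernel alignment to 16 MiB and adds PAX_KERNEXEC-dependent lower bounds, presumably so the kernel image lands on boundaries that KERNEXEC can cover with fixed large-page permissions. For reference, upstream derives the physical load address from these options in arch/x86/include/asm/boot.h:

    /* upstream: CONFIG_PHYSICAL_START rounded up to the configured
     * alignment */
    #define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
                    + (CONFIG_PHYSICAL_ALIGN - 1)) \
                    & ~(CONFIG_PHYSICAL_ALIGN - 1))

The ____LOAD_PHYSICAL_ADDR spelling used by the boot-code hunks further down appears to be PaX's name for this raw, unadjusted constant once LOAD_PHYSICAL_ADDR itself acquires KERNEXEC-specific adjustments; treat that reading as inferred from the patch rather than documented.
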
-diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
-index 6983314..54ad7e8 100644
---- a/arch/x86/Kconfig.cpu
-+++ b/arch/x86/Kconfig.cpu
-@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
-
- config X86_F00F_BUG
- def_bool y
-- depends on M586MMX || M586TSC || M586 || M486
-+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
-
- config X86_INVD_BUG
- def_bool y
-@@ -327,7 +327,7 @@ config X86_INVD_BUG
-
- config X86_ALIGNMENT_16
- def_bool y
-- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
-+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
-
- config X86_INTEL_USERCOPY
- def_bool y
-@@ -369,7 +369,7 @@ config X86_CMPXCHG64
- # generates cmov.
- config X86_CMOV
- def_bool y
-- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
-+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
-
- config X86_MINIMUM_CPU_FAMILY
- int
-diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
-index 61bd2ad..50b625d 100644
---- a/arch/x86/Kconfig.debug
-+++ b/arch/x86/Kconfig.debug
-@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
- config DEBUG_RODATA
- bool "Write protect kernel read-only data structures"
- default y
-- depends on DEBUG_KERNEL
-+ depends on DEBUG_KERNEL && BROKEN
- ---help---
- Mark the kernel read-only data as write-protected in the pagetables,
- in order to catch accidental (and incorrect) writes to such const
-@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
-
- config DEBUG_SET_MODULE_RONX
- bool "Set loadable kernel module data as NX and text as RO"
-- depends on MODULES
-+ depends on MODULES && BROKEN
- ---help---
- This option helps catch unintended modifications to loadable
- kernel module's text and read-only data. It also prevents execution
-diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index 920e616..ac3d4df 100644
---- a/arch/x86/Makefile
-+++ b/arch/x86/Makefile
-@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
- # CPU-specific tuning. Anything which can be shared with UML should go here.
- include $(srctree)/arch/x86/Makefile_32.cpu
- KBUILD_CFLAGS += $(cflags-y)
--
-- # temporary until string.h is fixed
-- KBUILD_CFLAGS += -ffreestanding
- else
- BITS := 64
- UTS_MACHINE := x86_64
-@@ -107,6 +104,9 @@ else
- KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
- endif
-
-+# temporary until string.h is fixed
-+KBUILD_CFLAGS += -ffreestanding
-+
- # Make sure compiler does not have buggy stack-protector support.
- ifdef CONFIG_CC_STACKPROTECTOR
- cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
-@@ -180,6 +180,7 @@ archheaders:
- $(Q)$(MAKE) $(build)=arch/x86/syscalls all
-
- archprepare:
-+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
- ifeq ($(CONFIG_KEXEC_FILE),y)
- $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
- endif
-@@ -263,3 +264,9 @@ define archhelp
- echo ' FDARGS="..." arguments for the booted kernel'
- echo ' FDINITRD=file initrd for the booted kernel'
- endef
-+
-+define OLD_LD
-+
-+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
-+*** Please upgrade your binutils to 2.18 or newer
-+endef
-diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
-index 3db07f3..9d81d0f 100644
---- a/arch/x86/boot/Makefile
-+++ b/arch/x86/boot/Makefile
-@@ -56,6 +56,9 @@ clean-files += cpustr.h
- # ---------------------------------------------------------------------------
-
- KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
-+ifdef CONSTIFY_PLUGIN
-+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
-+endif
- KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
- GCOV_PROFILE := n
-
-diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
-index 878e4b9..20537ab 100644
---- a/arch/x86/boot/bitops.h
-+++ b/arch/x86/boot/bitops.h
-@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
- u8 v;
- const u32 *p = (const u32 *)addr;
-
-- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
-+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
- return v;
- }
-
-@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
-
- static inline void set_bit(int nr, void *addr)
- {
-- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
-+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
- }
-
- #endif /* BOOT_BITOPS_H */
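
GCC treats an asm statement that has output operands as a pure computation: if the outputs look unused it may delete the asm outright, and two textually identical asms can be merged into one. That is harmless for a genuine bit test but wrong for anything with side effects or externally changing inputs, which is why the patch converts these boot-time asms (and the rdmsr/wrmsr sequences in cpucheck.c below) to asm volatile. A self-contained sketch of the safe form, not kernel code:

    /* Without "volatile", two back-to-back reads of the same MSR could
     * be folded into one, and a read whose result is ignored could be
     * deleted entirely. */
    static inline unsigned long long rdmsr_sketch(unsigned int msr)
    {
            unsigned int lo, hi;

            asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
            return ((unsigned long long)hi << 32) | lo;
    }
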
-diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
-index bd49ec6..94c7f58 100644
---- a/arch/x86/boot/boot.h
-+++ b/arch/x86/boot/boot.h
-@@ -84,7 +84,7 @@ static inline void io_delay(void)
- static inline u16 ds(void)
- {
- u16 seg;
-- asm("movw %%ds,%0" : "=rm" (seg));
-+ asm volatile("movw %%ds,%0" : "=rm" (seg));
- return seg;
- }
-
-diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
-index 8bd44e8..6b111e9 100644
---- a/arch/x86/boot/compressed/Makefile
-+++ b/arch/x86/boot/compressed/Makefile
-@@ -28,6 +28,9 @@ KBUILD_CFLAGS += $(cflags-y)
- KBUILD_CFLAGS += -mno-mmx -mno-sse
- KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
- KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
-+ifdef CONSTIFY_PLUGIN
-+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
-+endif
-
- KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
- GCOV_PROFILE := n
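
The boot and decompressor Makefiles reset KBUILD_CFLAGS, so the constify plugin arguments added at the top level are lost while the plugin itself stays loaded; these hunks explicitly pass -fplugin-arg-constify_plugin-no-constify to stand it down, presumably because the constified layout assumes page protections that do not exist this early. For context, what the constify plugin does, in spirit (the opt-out attribute name follows grsecurity's compiler.h and is shown only as an illustration):

    /* Structures consisting solely of function pointers are treated as
     * const and placed in read-only memory by the plugin... */
    struct ops {
            void (*handler)(void);
    };                                      /* implicitly constified */

    /* ...unless explicitly opted out for the rare writable cases. */
    struct ops_writable {
            void (*handler)(void);
    } __attribute__((no_const));            /* left writable */
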
-diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
-index a53440e..c3dbf1e 100644
---- a/arch/x86/boot/compressed/efi_stub_32.S
-+++ b/arch/x86/boot/compressed/efi_stub_32.S
-@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
- * parameter 2, ..., param n. To make things easy, we save the return
- * address of efi_call_phys in a global variable.
- */
-- popl %ecx
-- movl %ecx, saved_return_addr(%edx)
-- /* get the function pointer into ECX*/
-- popl %ecx
-- movl %ecx, efi_rt_function_ptr(%edx)
-+ popl saved_return_addr(%edx)
-+ popl efi_rt_function_ptr(%edx)
-
- /*
- * 3. Call the physical function.
- */
-- call *%ecx
-+ call *efi_rt_function_ptr(%edx)
-
- /*
- * 4. Balance the stack. And because EAX contain the return value,
-@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
- 1: popl %edx
- subl $1b, %edx
-
-- movl efi_rt_function_ptr(%edx), %ecx
-- pushl %ecx
-+ pushl efi_rt_function_ptr(%edx)
-
- /*
- * 10. Push the saved return address onto the stack and return.
- */
-- movl saved_return_addr(%edx), %ecx
-- pushl %ecx
-- ret
-+ jmpl *saved_return_addr(%edx)
- ENDPROC(efi_call_phys)
- .previous
-
-diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
-index 630384a..278e788 100644
---- a/arch/x86/boot/compressed/efi_thunk_64.S
-+++ b/arch/x86/boot/compressed/efi_thunk_64.S
-@@ -189,8 +189,8 @@ efi_gdt64:
- .long 0 /* Filled out by user */
- .word 0
- .quad 0x0000000000000000 /* NULL descriptor */
-- .quad 0x00af9a000000ffff /* __KERNEL_CS */
-- .quad 0x00cf92000000ffff /* __KERNEL_DS */
-+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
-+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
- .quad 0x0080890000000000 /* TS descriptor */
- .quad 0x0000000000000000 /* TS continued */
- efi_gdt64_end:
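
The GDT hunks here and in head_64.S below change only bit 40 of each descriptor: 0x9a becomes 0x9b and 0x92 becomes 0x93, pre-setting the "accessed" type bit. When a segment register is loaded from a descriptor whose accessed bit is clear, the CPU writes the bit back into the descriptor; with KERNEXEC the GDT can live in read-only memory, so setting the bit in advance avoids that hardware write. Decoding byte 5 (bits 40-47) of a descriptor:

    #define SEG_ACCESSED  (1 << 0)  /* set by the CPU on first load     */
    #define SEG_READABLE  (1 << 1)  /* readable code / writable data    */
    #define SEG_EXEC      (1 << 3)
    #define SEG_S         (1 << 4)  /* code/data, not a system segment  */
    #define SEG_PRESENT   (1 << 7)  /* DPL occupies bits 5-6            */

    /* 0x9a = PRESENT|S|EXEC|READABLE; 0x9b is the same descriptor with
     * ACCESSED pre-set, so the (possibly read-only) GDT is never written. */
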
-diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
-index 1d7fbbc..36ecd58 100644
---- a/arch/x86/boot/compressed/head_32.S
-+++ b/arch/x86/boot/compressed/head_32.S
-@@ -140,10 +140,10 @@ preferred_addr:
- addl %eax, %ebx
- notl %eax
- andl %eax, %ebx
-- cmpl $LOAD_PHYSICAL_ADDR, %ebx
-+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
- jge 1f
- #endif
-- movl $LOAD_PHYSICAL_ADDR, %ebx
-+ movl $____LOAD_PHYSICAL_ADDR, %ebx
- 1:
-
- /* Target address to relocate to for decompression */
-diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
-index 6b1766c..ad465c9 100644
---- a/arch/x86/boot/compressed/head_64.S
-+++ b/arch/x86/boot/compressed/head_64.S
-@@ -94,10 +94,10 @@ ENTRY(startup_32)
- addl %eax, %ebx
- notl %eax
- andl %eax, %ebx
-- cmpl $LOAD_PHYSICAL_ADDR, %ebx
-+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
- jge 1f
- #endif
-- movl $LOAD_PHYSICAL_ADDR, %ebx
-+ movl $____LOAD_PHYSICAL_ADDR, %ebx
- 1:
-
- /* Target address to relocate to for decompression */
-@@ -322,10 +322,10 @@ preferred_addr:
- addq %rax, %rbp
- notq %rax
- andq %rax, %rbp
-- cmpq $LOAD_PHYSICAL_ADDR, %rbp
-+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
- jge 1f
- #endif
-- movq $LOAD_PHYSICAL_ADDR, %rbp
-+ movq $____LOAD_PHYSICAL_ADDR, %rbp
- 1:
-
- /* Target address to relocate to for decompression */
-@@ -434,8 +434,8 @@ gdt:
- .long gdt
- .word 0
- .quad 0x0000000000000000 /* NULL descriptor */
-- .quad 0x00af9a000000ffff /* __KERNEL_CS */
-- .quad 0x00cf92000000ffff /* __KERNEL_DS */
-+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
-+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
- .quad 0x0080890000000000 /* TS descriptor */
- .quad 0x0000000000000000 /* TS continued */
- gdt_end:
-diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
-index a950864..c710239 100644
---- a/arch/x86/boot/compressed/misc.c
-+++ b/arch/x86/boot/compressed/misc.c
-@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
- * Calculate the delta between where vmlinux was linked to load
- * and where it was actually loaded.
- */
-- delta = min_addr - LOAD_PHYSICAL_ADDR;
-+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
- if (!delta) {
- debug_putstr("No relocation needed... ");
- return;
-@@ -324,7 +324,7 @@ static void parse_elf(void *output)
- Elf32_Ehdr ehdr;
- Elf32_Phdr *phdrs, *phdr;
- #endif
-- void *dest;
-+ void *dest, *prev;
- int i;
-
- memcpy(&ehdr, output, sizeof(ehdr));
-@@ -351,13 +351,16 @@ static void parse_elf(void *output)
- case PT_LOAD:
- #ifdef CONFIG_RELOCATABLE
- dest = output;
-- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
-+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
- #else
- dest = (void *)(phdr->p_paddr);
- #endif
- memcpy(dest,
- output + phdr->p_offset,
- phdr->p_filesz);
-+ if (i)
-+ memset(prev, 0xff, dest - prev);
-+ prev = dest + phdr->p_filesz;
- break;
- default: /* Ignore other PT_* */ break;
- }
-@@ -416,7 +419,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
- error("Destination address too large");
- #endif
- #ifndef CONFIG_RELOCATABLE
-- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
-+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
- error("Wrong destination address");
- #endif
-
-diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
-index 1fd7d57..0f7d096 100644
---- a/arch/x86/boot/cpucheck.c
-+++ b/arch/x86/boot/cpucheck.c
-@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
- u32 ecx = MSR_K7_HWCR;
- u32 eax, edx;
-
-- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
-+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
- eax &= ~(1 << 15);
-- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
-+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
-
- get_cpuflags(); /* Make sure it really did something */
- err = check_cpuflags();
-@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
- u32 ecx = MSR_VIA_FCR;
- u32 eax, edx;
-
-- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
-+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
- eax |= (1<<1)|(1<<7);
-- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
-+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
-
- set_bit(X86_FEATURE_CX8, cpu.flags);
- err = check_cpuflags();
-@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
- u32 eax, edx;
- u32 level = 1;
-
-- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
-- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
-- asm("cpuid"
-+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
-+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
-+ asm volatile("cpuid"
- : "+a" (level), "=d" (cpu.flags[0])
- : : "ecx", "ebx");
-- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
-+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
-
- err = check_cpuflags();
- } else if (err == 0x01 &&
-diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
-index 16ef025..91e033b 100644
---- a/arch/x86/boot/header.S
-+++ b/arch/x86/boot/header.S
-@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
- # single linked list of
- # struct setup_data
-
--pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
-+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
-
- #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
-+#else
- #define VO_INIT_SIZE (VO__end - VO__text)
-+#endif
- #if ZO_INIT_SIZE > VO_INIT_SIZE
- #define INIT_SIZE ZO_INIT_SIZE
- #else
-diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
-index db75d07..8e6d0af 100644
---- a/arch/x86/boot/memory.c
-+++ b/arch/x86/boot/memory.c
-@@ -19,7 +19,7 @@
-
- static int detect_memory_e820(void)
- {
-- int count = 0;
-+ unsigned int count = 0;
- struct biosregs ireg, oreg;
- struct e820entry *desc = boot_params.e820_map;
- static struct e820entry buf; /* static so it is zeroed */
-diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
-index ba3e100..6501b8f 100644
---- a/arch/x86/boot/video-vesa.c
-+++ b/arch/x86/boot/video-vesa.c
-@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
-
- boot_params.screen_info.vesapm_seg = oreg.es;
- boot_params.screen_info.vesapm_off = oreg.di;
-+ boot_params.screen_info.vesapm_size = oreg.cx;
- }
-
- /*
-diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
-index 43eda28..5ab5fdb 100644
---- a/arch/x86/boot/video.c
-+++ b/arch/x86/boot/video.c
-@@ -96,7 +96,7 @@ static void store_mode_params(void)
- static unsigned int get_entry(void)
- {
- char entry_buf[4];
-- int i, len = 0;
-+ unsigned int i, len = 0;
- int key;
- unsigned int v;
-
-diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
-index 9105655..41779c1 100644
---- a/arch/x86/crypto/aes-x86_64-asm_64.S
-+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
-@@ -8,6 +8,8 @@
- * including this sentence is retained in full.
- */
-
-+#include <asm/alternative-asm.h>
-+
- .extern crypto_ft_tab
- .extern crypto_it_tab
- .extern crypto_fl_tab
-@@ -70,6 +72,8 @@
- je B192; \
- leaq 32(r9),r9;
-
-+#define ret pax_force_retaddr; ret
-+
- #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
- movq r1,r2; \
- movq r3,r4; \
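
aes-x86_64-asm_64.S is patched differently from the files that follow: instead of inserting pax_force_retaddr before each return, it redefines ret itself so every return in the file picks up the protection. The macro comes from PaX's asm/alternative-asm.h and, under the KERNEXEC "OR" method, masks the saved return address so that even an overwritten one cannot point outside the kernel half of the address space. A sketch of the idea only; the real macro is applied through the alternatives mechanism and its exact form is not shown in this excerpt:

    /* Illustrative only: used immediately before "ret" in .S files,
     * where (%rsp) holds the return address; %r12 holds the PaX mask
     * (see the note after the aesni file below). */
    #define pax_force_retaddr_sketch() \
            asm volatile("orq %%r12, (%%rsp)" ::: "memory", "cc")
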
-diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
-index 477e9d7..c92c7d8 100644
---- a/arch/x86/crypto/aesni-intel_asm.S
-+++ b/arch/x86/crypto/aesni-intel_asm.S
-@@ -31,6 +31,7 @@
-
- #include <linux/linkage.h>
- #include <asm/inst.h>
-+#include <asm/alternative-asm.h>
-
- #ifdef __x86_64__
- .data
-@@ -205,7 +206,7 @@ enc: .octa 0x2
- * num_initial_blocks = b mod 4
- * encrypt the initial num_initial_blocks blocks and apply ghash on
- * the ciphertext
--* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
-+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
- * are clobbered
- * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
- */
-@@ -214,8 +215,8 @@ enc: .octa 0x2
- .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
- XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
- mov arg7, %r10 # %r10 = AAD
-- mov arg8, %r12 # %r12 = aadLen
-- mov %r12, %r11
-+ mov arg8, %r15 # %r15 = aadLen
-+ mov %r15, %r11
- pxor %xmm\i, %xmm\i
- _get_AAD_loop\num_initial_blocks\operation:
- movd (%r10), \TMP1
-@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
- psrldq $4, %xmm\i
- pxor \TMP1, %xmm\i
- add $4, %r10
-- sub $4, %r12
-+ sub $4, %r15
- jne _get_AAD_loop\num_initial_blocks\operation
- cmp $16, %r11
- je _get_AAD_loop2_done\num_initial_blocks\operation
-- mov $16, %r12
-+ mov $16, %r15
- _get_AAD_loop2\num_initial_blocks\operation:
- psrldq $4, %xmm\i
-- sub $4, %r12
-- cmp %r11, %r12
-+ sub $4, %r15
-+ cmp %r11, %r15
- jne _get_AAD_loop2\num_initial_blocks\operation
- _get_AAD_loop2_done\num_initial_blocks\operation:
- movdqa SHUF_MASK(%rip), %xmm14
-@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
- * num_initial_blocks = b mod 4
- * encrypt the initial num_initial_blocks blocks and apply ghash on
- * the ciphertext
--* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
-+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
- * are clobbered
- * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
- */
-@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
- .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
- XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
- mov arg7, %r10 # %r10 = AAD
-- mov arg8, %r12 # %r12 = aadLen
-- mov %r12, %r11
-+ mov arg8, %r15 # %r15 = aadLen
-+ mov %r15, %r11
- pxor %xmm\i, %xmm\i
- _get_AAD_loop\num_initial_blocks\operation:
- movd (%r10), \TMP1
-@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
- psrldq $4, %xmm\i
- pxor \TMP1, %xmm\i
- add $4, %r10
-- sub $4, %r12
-+ sub $4, %r15
- jne _get_AAD_loop\num_initial_blocks\operation
- cmp $16, %r11
- je _get_AAD_loop2_done\num_initial_blocks\operation
-- mov $16, %r12
-+ mov $16, %r15
- _get_AAD_loop2\num_initial_blocks\operation:
- psrldq $4, %xmm\i
-- sub $4, %r12
-- cmp %r11, %r12
-+ sub $4, %r15
-+ cmp %r11, %r15
- jne _get_AAD_loop2\num_initial_blocks\operation
- _get_AAD_loop2_done\num_initial_blocks\operation:
- movdqa SHUF_MASK(%rip), %xmm14
-@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
- *
- *****************************************************************************/
- ENTRY(aesni_gcm_dec)
-- push %r12
-+ push %r15
- push %r13
- push %r14
- mov %rsp, %r14
-@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
- */
- sub $VARIABLE_OFFSET, %rsp
- and $~63, %rsp # align rsp to 64 bytes
-- mov %arg6, %r12
-- movdqu (%r12), %xmm13 # %xmm13 = HashKey
-+ mov %arg6, %r15
-+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
- movdqa SHUF_MASK(%rip), %xmm2
- PSHUFB_XMM %xmm2, %xmm13
-
-@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
- movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
- mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
- and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
-- mov %r13, %r12
-- and $(3<<4), %r12
-+ mov %r13, %r15
-+ and $(3<<4), %r15
- jz _initial_num_blocks_is_0_decrypt
-- cmp $(2<<4), %r12
-+ cmp $(2<<4), %r15
- jb _initial_num_blocks_is_1_decrypt
- je _initial_num_blocks_is_2_decrypt
- _initial_num_blocks_is_3_decrypt:
-@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
- sub $16, %r11
- add %r13, %r11
- movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
-- lea SHIFT_MASK+16(%rip), %r12
-- sub %r13, %r12
-+ lea SHIFT_MASK+16(%rip), %r15
-+ sub %r13, %r15
- # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
- # (%r13 is the number of bytes in plaintext mod 16)
-- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
-+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
- PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 bytes
-
- movdqa %xmm1, %xmm2
- pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
-- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
-+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
- # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
- pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
- pand %xmm1, %xmm2
-@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
- sub $1, %r13
- jne _less_than_8_bytes_left_decrypt
- _multiple_of_16_bytes_decrypt:
-- mov arg8, %r12 # %r13 = aadLen (number of bytes)
-- shl $3, %r12 # convert into number of bits
-- movd %r12d, %xmm15 # len(A) in %xmm15
-+ mov arg8, %r15 # %r15 = aadLen (number of bytes)
-+ shl $3, %r15 # convert into number of bits
-+ movd %r15d, %xmm15 # len(A) in %xmm15
- shl $3, %arg4 # len(C) in bits (*128)
- MOVQ_R64_XMM %arg4, %xmm1
- pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
-@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
- mov %r14, %rsp
- pop %r14
- pop %r13
-- pop %r12
-+ pop %r15
-+ pax_force_retaddr
- ret
- ENDPROC(aesni_gcm_dec)
-
-@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
- * poly = x^128 + x^127 + x^126 + x^121 + 1
- ***************************************************************************/
- ENTRY(aesni_gcm_enc)
-- push %r12
-+ push %r15
- push %r13
- push %r14
- mov %rsp, %r14
-@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
- #
- sub $VARIABLE_OFFSET, %rsp
- and $~63, %rsp
-- mov %arg6, %r12
-- movdqu (%r12), %xmm13
-+ mov %arg6, %r15
-+ movdqu (%r15), %xmm13
- movdqa SHUF_MASK(%rip), %xmm2
- PSHUFB_XMM %xmm2, %xmm13
-
-@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
- movdqa %xmm13, HashKey(%rsp)
- mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
- and $-16, %r13
-- mov %r13, %r12
-+ mov %r13, %r15
-
- # Encrypt first few blocks
-
-- and $(3<<4), %r12
-+ and $(3<<4), %r15
- jz _initial_num_blocks_is_0_encrypt
-- cmp $(2<<4), %r12
-+ cmp $(2<<4), %r15
- jb _initial_num_blocks_is_1_encrypt
- je _initial_num_blocks_is_2_encrypt
- _initial_num_blocks_is_3_encrypt:
-@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
- sub $16, %r11
- add %r13, %r11
- movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
-- lea SHIFT_MASK+16(%rip), %r12
-- sub %r13, %r12
-+ lea SHIFT_MASK+16(%rip), %r15
-+ sub %r13, %r15
- # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
- # (%r13 is the number of bytes in plaintext mod 16)
-- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
-+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
- PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
- pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
-- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
-+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
- # get the appropriate mask to mask out top 16-r13 bytes of xmm0
- pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
- movdqa SHUF_MASK(%rip), %xmm10
-@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
- sub $1, %r13
- jne _less_than_8_bytes_left_encrypt
- _multiple_of_16_bytes_encrypt:
-- mov arg8, %r12 # %r12 = addLen (number of bytes)
-- shl $3, %r12
-- movd %r12d, %xmm15 # len(A) in %xmm15
-+ mov arg8, %r15 # %r15 = aadLen (number of bytes)
-+ shl $3, %r15
-+ movd %r15d, %xmm15 # len(A) in %xmm15
- shl $3, %arg4 # len(C) in bits (*128)
- MOVQ_R64_XMM %arg4, %xmm1
- pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
-@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
- mov %r14, %rsp
- pop %r14
- pop %r13
-- pop %r12
-+ pop %r15
-+ pax_force_retaddr
- ret
- ENDPROC(aesni_gcm_enc)
-
-@@ -1722,6 +1725,7 @@ _key_expansion_256a:
- pxor %xmm1, %xmm0
- movaps %xmm0, (TKEYP)
- add $0x10, TKEYP
-+ pax_force_retaddr
- ret
- ENDPROC(_key_expansion_128)
- ENDPROC(_key_expansion_256a)
-@@ -1748,6 +1752,7 @@ _key_expansion_192a:
- shufps $0b01001110, %xmm2, %xmm1
- movaps %xmm1, 0x10(TKEYP)
- add $0x20, TKEYP
-+ pax_force_retaddr
- ret
- ENDPROC(_key_expansion_192a)
-
-@@ -1768,6 +1773,7 @@ _key_expansion_192b:
-
- movaps %xmm0, (TKEYP)
- add $0x10, TKEYP
-+ pax_force_retaddr
- ret
- ENDPROC(_key_expansion_192b)
-
-@@ -1781,6 +1787,7 @@ _key_expansion_256b:
- pxor %xmm1, %xmm2
- movaps %xmm2, (TKEYP)
- add $0x10, TKEYP
-+ pax_force_retaddr
- ret
- ENDPROC(_key_expansion_256b)
-
-@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
- #ifndef __x86_64__
- popl KEYP
- #endif
-+ pax_force_retaddr
- ret
- ENDPROC(aesni_set_key)
-
-@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
- popl KLEN
- popl KEYP
- #endif
-+ pax_force_retaddr
- ret
- ENDPROC(aesni_enc)
-
-@@ -1974,6 +1983,7 @@ _aesni_enc1:
- AESENC KEY STATE
- movaps 0x70(TKEYP), KEY
- AESENCLAST KEY STATE
-+ pax_force_retaddr
- ret
- ENDPROC(_aesni_enc1)
-
-@@ -2083,6 +2093,7 @@ _aesni_enc4:
- AESENCLAST KEY STATE2
- AESENCLAST KEY STATE3
- AESENCLAST KEY STATE4
-+ pax_force_retaddr
- ret
- ENDPROC(_aesni_enc4)
-
-@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
- popl KLEN
- popl KEYP
- #endif
-+ pax_force_retaddr
- ret
- ENDPROC(aesni_dec)
-
-@@ -2164,6 +2176,7 @@ _aesni_dec1:
- AESDEC KEY STATE
- movaps 0x70(TKEYP), KEY
- AESDECLAST KEY STATE
-+ pax_force_retaddr
- ret
- ENDPROC(_aesni_dec1)
-
-@@ -2273,6 +2286,7 @@ _aesni_dec4:
- AESDECLAST KEY STATE2
- AESDECLAST KEY STATE3
- AESDECLAST KEY STATE4
-+ pax_force_retaddr
- ret
- ENDPROC(_aesni_dec4)
-
-@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
- popl KEYP
- popl LEN
- #endif
-+ pax_force_retaddr
- ret
- ENDPROC(aesni_ecb_enc)
-
-@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
- popl KEYP
- popl LEN
- #endif
-+ pax_force_retaddr
- ret
- ENDPROC(aesni_ecb_dec)
-
-@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
- popl LEN
- popl IVP
- #endif
-+ pax_force_retaddr
- ret
- ENDPROC(aesni_cbc_enc)
-
-@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
- popl LEN
- popl IVP
- #endif
-+ pax_force_retaddr
- ret
- ENDPROC(aesni_cbc_dec)
-
-@@ -2550,6 +2568,7 @@ _aesni_inc_init:
- mov $1, TCTR_LOW
- MOVQ_R64_XMM TCTR_LOW INC
- MOVQ_R64_XMM CTR TCTR_LOW
-+ pax_force_retaddr
- ret
- ENDPROC(_aesni_inc_init)
-
-@@ -2579,6 +2598,7 @@ _aesni_inc:
- .Linc_low:
- movaps CTR, IV
- PSHUFB_XMM BSWAP_MASK IV
-+ pax_force_retaddr
- ret
- ENDPROC(_aesni_inc)
-
-@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
- .Lctr_enc_ret:
- movups IV, (IVP)
- .Lctr_enc_just_ret:
-+ pax_force_retaddr
- ret
- ENDPROC(aesni_ctr_enc)
-
-@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
- pxor INC, STATE4
- movdqu STATE4, 0x70(OUTP)
-
-+ pax_force_retaddr
- ret
- ENDPROC(aesni_xts_crypt8)
-
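
The %r12 -> %r15 (and, in later files, %r12 -> %r14) renames running through these crypto routines follow from the same mechanism: under the KERNEXEC plugin %r12 is reserved to hold the mask that pax_force_retaddr ORs into return addresses, so hand-written assembly, which the compiler plugin cannot rewrite, has to be moved off that register by hand. In GNU C terms the reservation is comparable to:

    /* Sketch: the kernel is effectively built as though this file-scope
     * declaration pinned %r12 everywhere (name hypothetical). */
    register unsigned long pax_retaddr_mask asm("r12");

    /* Any .S routine that clobbered %r12 would corrupt the mask, hence
     * the wholesale switch to free callee-saved registers. */
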
-diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
-index 246c670..466e2d6 100644
---- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
-+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
-@@ -21,6 +21,7 @@
- */
-
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
-
- .file "blowfish-x86_64-asm.S"
- .text
-@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
- jnz .L__enc_xor;
-
- write_block();
-+ pax_force_retaddr
- ret;
- .L__enc_xor:
- xor_block();
-+ pax_force_retaddr
- ret;
- ENDPROC(__blowfish_enc_blk)
-
-@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
-
- movq %r11, %rbp;
-
-+ pax_force_retaddr
- ret;
- ENDPROC(blowfish_dec_blk)
-
-@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
-
- popq %rbx;
- popq %rbp;
-+ pax_force_retaddr
- ret;
-
- .L__enc_xor4:
-@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
-
- popq %rbx;
- popq %rbp;
-+ pax_force_retaddr
- ret;
- ENDPROC(__blowfish_enc_blk_4way)
-
-@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
- popq %rbx;
- popq %rbp;
-
-+ pax_force_retaddr
- ret;
- ENDPROC(blowfish_dec_blk_4way)
-diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
-index ce71f92..1dce7ec 100644
---- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
-+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
-@@ -16,6 +16,7 @@
- */
-
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
-
- #define CAMELLIA_TABLE_BYTE_LEN 272
-
-@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
- roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
- %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
- %rcx, (%r9));
-+ pax_force_retaddr
- ret;
- ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
-
-@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
- roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
- %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
- %rax, (%r9));
-+ pax_force_retaddr
- ret;
- ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
-
-@@ -780,6 +783,7 @@ __camellia_enc_blk16:
- %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
- %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
-
-+ pax_force_retaddr
- ret;
-
- .align 8
-@@ -865,6 +869,7 @@ __camellia_dec_blk16:
- %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
- %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
-
-+ pax_force_retaddr
- ret;
-
- .align 8
-@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
- %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
- %xmm8, %rsi);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(camellia_ecb_enc_16way)
-
-@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
- %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
- %xmm8, %rsi);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(camellia_ecb_dec_16way)
-
-@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
- %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
- %xmm8, %rsi);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(camellia_cbc_dec_16way)
-
-@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
- %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
- %xmm8, %rsi);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(camellia_ctr_16way)
-
-@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
- %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
- %xmm8, %rsi);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(camellia_xts_crypt_16way)
-
-diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
-index 0e0b886..5a3123c 100644
---- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
-+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
-@@ -11,6 +11,7 @@
- */
-
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
-
- #define CAMELLIA_TABLE_BYTE_LEN 272
-
-@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
- roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
- %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
- %rcx, (%r9));
-+ pax_force_retaddr
- ret;
- ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
-
-@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
- roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
- %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
- %rax, (%r9));
-+ pax_force_retaddr
- ret;
- ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
-
-@@ -820,6 +823,7 @@ __camellia_enc_blk32:
- %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
- %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
-
-+ pax_force_retaddr
- ret;
-
- .align 8
-@@ -905,6 +909,7 @@ __camellia_dec_blk32:
- %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
- %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
-
-+ pax_force_retaddr
- ret;
-
- .align 8
-@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
-
- vzeroupper;
-
-+ pax_force_retaddr
- ret;
- ENDPROC(camellia_ecb_enc_32way)
-
-@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
-
- vzeroupper;
-
-+ pax_force_retaddr
- ret;
- ENDPROC(camellia_ecb_dec_32way)
-
-@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
-
- vzeroupper;
-
-+ pax_force_retaddr
- ret;
- ENDPROC(camellia_cbc_dec_32way)
-
-@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
-
- vzeroupper;
-
-+ pax_force_retaddr
- ret;
- ENDPROC(camellia_ctr_32way)
-
-@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
-
- vzeroupper;
-
-+ pax_force_retaddr
- ret;
- ENDPROC(camellia_xts_crypt_32way)
-
-diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
-index 310319c..db3d7b5 100644
---- a/arch/x86/crypto/camellia-x86_64-asm_64.S
-+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
-@@ -21,6 +21,7 @@
- */
-
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
-
- .file "camellia-x86_64-asm_64.S"
- .text
-@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
- enc_outunpack(mov, RT1);
-
- movq RRBP, %rbp;
-+ pax_force_retaddr
- ret;
-
- .L__enc_xor:
- enc_outunpack(xor, RT1);
-
- movq RRBP, %rbp;
-+ pax_force_retaddr
- ret;
- ENDPROC(__camellia_enc_blk)
-
-@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
- dec_outunpack();
-
- movq RRBP, %rbp;
-+ pax_force_retaddr
- ret;
- ENDPROC(camellia_dec_blk)
-
-@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
-
- movq RRBP, %rbp;
- popq %rbx;
-+ pax_force_retaddr
- ret;
-
- .L__enc2_xor:
-@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
-
- movq RRBP, %rbp;
- popq %rbx;
-+ pax_force_retaddr
- ret;
- ENDPROC(__camellia_enc_blk_2way)
-
-@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
-
- movq RRBP, %rbp;
- movq RXOR, %rbx;
-+ pax_force_retaddr
- ret;
- ENDPROC(camellia_dec_blk_2way)
-diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
-index c35fd5d..2d8c7db 100644
---- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
-+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
-@@ -24,6 +24,7 @@
- */
-
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
-
- .file "cast5-avx-x86_64-asm_64.S"
-
-@@ -281,6 +282,7 @@ __cast5_enc_blk16:
- outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
- outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(__cast5_enc_blk16)
-
-@@ -352,6 +354,7 @@ __cast5_dec_blk16:
- outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
- outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
-
-+ pax_force_retaddr
- ret;
-
- .L__skip_dec:
-@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
- vmovdqu RR4, (6*4*4)(%r11);
- vmovdqu RL4, (7*4*4)(%r11);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(cast5_ecb_enc_16way)
-
-@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
- vmovdqu RR4, (6*4*4)(%r11);
- vmovdqu RL4, (7*4*4)(%r11);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(cast5_ecb_dec_16way)
-
-@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
- * %rdx: src
- */
-
-- pushq %r12;
-+ pushq %r14;
-
- movq %rsi, %r11;
-- movq %rdx, %r12;
-+ movq %rdx, %r14;
-
- vmovdqu (0*16)(%rdx), RL1;
- vmovdqu (1*16)(%rdx), RR1;
-@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
- call __cast5_dec_blk16;
-
- /* xor with src */
-- vmovq (%r12), RX;
-+ vmovq (%r14), RX;
- vpshufd $0x4f, RX, RX;
- vpxor RX, RR1, RR1;
-- vpxor 0*16+8(%r12), RL1, RL1;
-- vpxor 1*16+8(%r12), RR2, RR2;
-- vpxor 2*16+8(%r12), RL2, RL2;
-- vpxor 3*16+8(%r12), RR3, RR3;
-- vpxor 4*16+8(%r12), RL3, RL3;
-- vpxor 5*16+8(%r12), RR4, RR4;
-- vpxor 6*16+8(%r12), RL4, RL4;
-+ vpxor 0*16+8(%r14), RL1, RL1;
-+ vpxor 1*16+8(%r14), RR2, RR2;
-+ vpxor 2*16+8(%r14), RL2, RL2;
-+ vpxor 3*16+8(%r14), RR3, RR3;
-+ vpxor 4*16+8(%r14), RL3, RL3;
-+ vpxor 5*16+8(%r14), RR4, RR4;
-+ vpxor 6*16+8(%r14), RL4, RL4;
-
- vmovdqu RR1, (0*16)(%r11);
- vmovdqu RL1, (1*16)(%r11);
-@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
- vmovdqu RR4, (6*16)(%r11);
- vmovdqu RL4, (7*16)(%r11);
-
-- popq %r12;
-+ popq %r14;
-
-+ pax_force_retaddr
- ret;
- ENDPROC(cast5_cbc_dec_16way)
-
-@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
- * %rcx: iv (big endian, 64bit)
- */
-
-- pushq %r12;
-+ pushq %r14;
-
- movq %rsi, %r11;
-- movq %rdx, %r12;
-+ movq %rdx, %r14;
-
- vpcmpeqd RTMP, RTMP, RTMP;
- vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
-@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
- call __cast5_enc_blk16;
-
- /* dst = src ^ iv */
-- vpxor (0*16)(%r12), RR1, RR1;
-- vpxor (1*16)(%r12), RL1, RL1;
-- vpxor (2*16)(%r12), RR2, RR2;
-- vpxor (3*16)(%r12), RL2, RL2;
-- vpxor (4*16)(%r12), RR3, RR3;
-- vpxor (5*16)(%r12), RL3, RL3;
-- vpxor (6*16)(%r12), RR4, RR4;
-- vpxor (7*16)(%r12), RL4, RL4;
-+ vpxor (0*16)(%r14), RR1, RR1;
-+ vpxor (1*16)(%r14), RL1, RL1;
-+ vpxor (2*16)(%r14), RR2, RR2;
-+ vpxor (3*16)(%r14), RL2, RL2;
-+ vpxor (4*16)(%r14), RR3, RR3;
-+ vpxor (5*16)(%r14), RL3, RL3;
-+ vpxor (6*16)(%r14), RR4, RR4;
-+ vpxor (7*16)(%r14), RL4, RL4;
- vmovdqu RR1, (0*16)(%r11);
- vmovdqu RL1, (1*16)(%r11);
- vmovdqu RR2, (2*16)(%r11);
-@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
- vmovdqu RR4, (6*16)(%r11);
- vmovdqu RL4, (7*16)(%r11);
-
-- popq %r12;
-+ popq %r14;
-
-+ pax_force_retaddr
- ret;
- ENDPROC(cast5_ctr_16way)
-diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
-index e3531f8..e123f35 100644
---- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
-+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
-@@ -24,6 +24,7 @@
- */
-
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
- #include "glue_helper-asm-avx.S"
-
- .file "cast6-avx-x86_64-asm_64.S"
-@@ -295,6 +296,7 @@ __cast6_enc_blk8:
- outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
- outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(__cast6_enc_blk8)
-
-@@ -340,6 +342,7 @@ __cast6_dec_blk8:
- outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
- outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(__cast6_dec_blk8)
-
-@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
-
- store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(cast6_ecb_enc_8way)
-
-@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
-
- store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(cast6_ecb_dec_8way)
-
-@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
- * %rdx: src
- */
-
-- pushq %r12;
-+ pushq %r14;
-
- movq %rsi, %r11;
-- movq %rdx, %r12;
-+ movq %rdx, %r14;
-
- load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
- call __cast6_dec_blk8;
-
-- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
-- popq %r12;
-+ popq %r14;
-
-+ pax_force_retaddr
- ret;
- ENDPROC(cast6_cbc_dec_8way)
-
-@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
- * %rcx: iv (little endian, 128bit)
- */
-
-- pushq %r12;
-+ pushq %r14;
-
- movq %rsi, %r11;
-- movq %rdx, %r12;
-+ movq %rdx, %r14;
-
- load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
- RD2, RX, RKR, RKM);
-
- call __cast6_enc_blk8;
-
-- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
-- popq %r12;
-+ popq %r14;
-
-+ pax_force_retaddr
- ret;
- ENDPROC(cast6_ctr_8way)
-
-@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
- /* dst <= regs xor IVs(in dst) */
- store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(cast6_xts_enc_8way)
-
-@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
- /* dst <= regs xor IVs(in dst) */
- store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(cast6_xts_dec_8way)
-diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
-index 26d49eb..c0a8c84 100644
---- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
-+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
-@@ -45,6 +45,7 @@
-
- #include <asm/inst.h>
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
-
- ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
-
-@@ -309,6 +310,7 @@ do_return:
- popq %rsi
- popq %rdi
- popq %rbx
-+ pax_force_retaddr
- ret
-
- ################################################################
-diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
-index 5d1e007..098cb4f 100644
---- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
-+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
-@@ -18,6 +18,7 @@
-
- #include <linux/linkage.h>
- #include <asm/inst.h>
-+#include <asm/alternative-asm.h>
-
- .data
-
-@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
- psrlq $1, T2
- pxor T2, T1
- pxor T1, DATA
-+ pax_force_retaddr
- ret
- ENDPROC(__clmul_gf128mul_ble)
-
-@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
- call __clmul_gf128mul_ble
- PSHUFB_XMM BSWAP DATA
- movups DATA, (%rdi)
-+ pax_force_retaddr
- ret
- ENDPROC(clmul_ghash_mul)
-
-@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
- PSHUFB_XMM BSWAP DATA
- movups DATA, (%rdi)
- .Lupdate_just_ret:
-+ pax_force_retaddr
- ret
- ENDPROC(clmul_ghash_update)
-diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
-index 9279e0b..c4b3d2c 100644
---- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
-+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
-@@ -1,4 +1,5 @@
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
-
- # enter salsa20_encrypt_bytes
- ENTRY(salsa20_encrypt_bytes)
-@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
- add %r11,%rsp
- mov %rdi,%rax
- mov %rsi,%rdx
-+ pax_force_retaddr
- ret
- # bytesatleast65:
- ._bytesatleast65:
-@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
- add %r11,%rsp
- mov %rdi,%rax
- mov %rsi,%rdx
-+ pax_force_retaddr
- ret
- ENDPROC(salsa20_keysetup)
-
-@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
- add %r11,%rsp
- mov %rdi,%rax
- mov %rsi,%rdx
-+ pax_force_retaddr
- ret
- ENDPROC(salsa20_ivsetup)
-diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
-index 2f202f4..d9164d6 100644
---- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
-+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
-@@ -24,6 +24,7 @@
- */
-
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
- #include "glue_helper-asm-avx.S"
-
- .file "serpent-avx-x86_64-asm_64.S"
-@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
- write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
- write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(__serpent_enc_blk8_avx)
-
-@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
- write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
- write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(__serpent_dec_blk8_avx)
-
-@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
-
- store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(serpent_ecb_enc_8way_avx)
-
-@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
-
- store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(serpent_ecb_dec_8way_avx)
-
-@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
-
- store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(serpent_cbc_dec_8way_avx)
-
-@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
-
- store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(serpent_ctr_8way_avx)
-
-@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
- /* dst <= regs xor IVs(in dst) */
- store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(serpent_xts_enc_8way_avx)
-
-@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
- /* dst <= regs xor IVs(in dst) */
- store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(serpent_xts_dec_8way_avx)
-diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
-index b222085..abd483c 100644
---- a/arch/x86/crypto/serpent-avx2-asm_64.S
-+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
-@@ -15,6 +15,7 @@
- */
-
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
- #include "glue_helper-asm-avx2.S"
-
- .file "serpent-avx2-asm_64.S"
-@@ -610,6 +611,7 @@ __serpent_enc_blk16:
- write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
- write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(__serpent_enc_blk16)
-
-@@ -664,6 +666,7 @@ __serpent_dec_blk16:
- write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
- write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(__serpent_dec_blk16)
-
-@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
-
- vzeroupper;
-
-+ pax_force_retaddr
- ret;
- ENDPROC(serpent_ecb_enc_16way)
-
-@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
-
- vzeroupper;
-
-+ pax_force_retaddr
- ret;
- ENDPROC(serpent_ecb_dec_16way)
-
-@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
-
- vzeroupper;
-
-+ pax_force_retaddr
- ret;
- ENDPROC(serpent_cbc_dec_16way)
-
-@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
-
- vzeroupper;
-
-+ pax_force_retaddr
- ret;
- ENDPROC(serpent_ctr_16way)
-
-@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
-
- vzeroupper;
-
-+ pax_force_retaddr
- ret;
- ENDPROC(serpent_xts_enc_16way)
-
-@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
-
- vzeroupper;
-
-+ pax_force_retaddr
- ret;
- ENDPROC(serpent_xts_dec_16way)
-diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
-index acc066c..1559cc4 100644
---- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
-+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
-@@ -25,6 +25,7 @@
- */
-
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
-
- .file "serpent-sse2-x86_64-asm_64.S"
- .text
-@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
- write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
- write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
-
-+ pax_force_retaddr
- ret;
-
- .L__enc_xor8:
- xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
- xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(__serpent_enc_blk_8way)
-
-@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
- write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
- write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(serpent_dec_blk_8way)
-diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
-index a410950..9dfe7ad 100644
---- a/arch/x86/crypto/sha1_ssse3_asm.S
-+++ b/arch/x86/crypto/sha1_ssse3_asm.S
-@@ -29,6 +29,7 @@
- */
-
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
-
- #define CTX %rdi // arg1
- #define BUF %rsi // arg2
-@@ -75,9 +76,9 @@
-
- push %rbx
- push %rbp
-- push %r12
-+ push %r14
-
-- mov %rsp, %r12
-+ mov %rsp, %r14
- sub $64, %rsp # allocate workspace
- and $~15, %rsp # align stack
-
-@@ -99,11 +100,12 @@
- xor %rax, %rax
- rep stosq
-
-- mov %r12, %rsp # deallocate workspace
-+ mov %r14, %rsp # deallocate workspace
-
-- pop %r12
-+ pop %r14
- pop %rbp
- pop %rbx
-+ pax_force_retaddr
- ret
-
- ENDPROC(\name)
-diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
-index 642f156..51a513c 100644
---- a/arch/x86/crypto/sha256-avx-asm.S
-+++ b/arch/x86/crypto/sha256-avx-asm.S
-@@ -49,6 +49,7 @@
-
- #ifdef CONFIG_AS_AVX
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
-
- ## assume buffers not aligned
- #define VMOVDQ vmovdqu
-@@ -460,6 +461,7 @@ done_hash:
- popq %r13
- popq %rbp
- popq %rbx
-+ pax_force_retaddr
- ret
- ENDPROC(sha256_transform_avx)
-
-diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
-index 9e86944..3795e6a 100644
---- a/arch/x86/crypto/sha256-avx2-asm.S
-+++ b/arch/x86/crypto/sha256-avx2-asm.S
-@@ -50,6 +50,7 @@
-
- #ifdef CONFIG_AS_AVX2
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
-
- ## assume buffers not aligned
- #define VMOVDQ vmovdqu
-@@ -720,6 +721,7 @@ done_hash:
- popq %r12
- popq %rbp
- popq %rbx
-+ pax_force_retaddr
- ret
- ENDPROC(sha256_transform_rorx)
-
-diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
-index f833b74..8c62a9e 100644
---- a/arch/x86/crypto/sha256-ssse3-asm.S
-+++ b/arch/x86/crypto/sha256-ssse3-asm.S
-@@ -47,6 +47,7 @@
- ########################################################################
-
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
-
- ## assume buffers not aligned
- #define MOVDQ movdqu
-@@ -471,6 +472,7 @@ done_hash:
- popq %rbp
- popq %rbx
-
-+ pax_force_retaddr
- ret
- ENDPROC(sha256_transform_ssse3)
-
-diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
-index 974dde9..a823ff9 100644
---- a/arch/x86/crypto/sha512-avx-asm.S
-+++ b/arch/x86/crypto/sha512-avx-asm.S
-@@ -49,6 +49,7 @@
-
- #ifdef CONFIG_AS_AVX
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
-
- .text
-
-@@ -364,6 +365,7 @@ updateblock:
- mov frame_RSPSAVE(%rsp), %rsp
-
- nowork:
-+ pax_force_retaddr
- ret
- ENDPROC(sha512_transform_avx)
-
-diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
-index 568b961..ed20c37 100644
---- a/arch/x86/crypto/sha512-avx2-asm.S
-+++ b/arch/x86/crypto/sha512-avx2-asm.S
-@@ -51,6 +51,7 @@
-
- #ifdef CONFIG_AS_AVX2
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
-
- .text
-
-@@ -678,6 +679,7 @@ done_hash:
-
- # Restore Stack Pointer
- mov frame_RSPSAVE(%rsp), %rsp
-+ pax_force_retaddr
- ret
- ENDPROC(sha512_transform_rorx)
-
-diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
-index fb56855..6edd768 100644
---- a/arch/x86/crypto/sha512-ssse3-asm.S
-+++ b/arch/x86/crypto/sha512-ssse3-asm.S
-@@ -48,6 +48,7 @@
- ########################################################################
-
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
-
- .text
-
-@@ -363,6 +364,7 @@ updateblock:
- mov frame_RSPSAVE(%rsp), %rsp
-
- nowork:
-+ pax_force_retaddr
- ret
- ENDPROC(sha512_transform_ssse3)
-
-diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
-index 0505813..b067311 100644
---- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
-+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
-@@ -24,6 +24,7 @@
- */
-
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
- #include "glue_helper-asm-avx.S"
-
- .file "twofish-avx-x86_64-asm_64.S"
-@@ -284,6 +285,7 @@ __twofish_enc_blk8:
- outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
- outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(__twofish_enc_blk8)
-
-@@ -324,6 +326,7 @@ __twofish_dec_blk8:
- outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
- outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(__twofish_dec_blk8)
-
-@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
-
- store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(twofish_ecb_enc_8way)
-
-@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
-
- store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(twofish_ecb_dec_8way)
-
-@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
- * %rdx: src
- */
-
-- pushq %r12;
-+ pushq %r14;
-
- movq %rsi, %r11;
-- movq %rdx, %r12;
-+ movq %rdx, %r14;
-
- load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
-
- call __twofish_dec_blk8;
-
-- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
-- popq %r12;
-+ popq %r14;
-
-+ pax_force_retaddr
- ret;
- ENDPROC(twofish_cbc_dec_8way)
-
-@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
- * %rcx: iv (little endian, 128bit)
- */
-
-- pushq %r12;
-+ pushq %r14;
-
- movq %rsi, %r11;
-- movq %rdx, %r12;
-+ movq %rdx, %r14;
-
- load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
- RD2, RX0, RX1, RY0);
-
- call __twofish_enc_blk8;
-
-- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
-+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
-
-- popq %r12;
-+ popq %r14;
-
-+ pax_force_retaddr
- ret;
- ENDPROC(twofish_ctr_8way)
-
-@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
- /* dst <= regs xor IVs(in dst) */
- store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(twofish_xts_enc_8way)
-
-@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
- /* dst <= regs xor IVs(in dst) */
- store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
-+ pax_force_retaddr
- ret;
- ENDPROC(twofish_xts_dec_8way)
-diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
-index 1c3b7ce..02f578d 100644
---- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
-+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
-@@ -21,6 +21,7 @@
- */
-
- #include <linux/linkage.h>
-+#include <asm/alternative-asm.h>
-
- .file "twofish-x86_64-asm-3way.S"
- .text
-@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
- popq %r13;
- popq %r14;
- popq %r15;
-+ pax_force_retaddr
- ret;
-
- .L__enc_xor3:
-@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
- popq %r13;
- popq %r14;
- popq %r15;
-+ pax_force_retaddr
- ret;
- ENDPROC(__twofish_enc_blk_3way)
-
-@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
- popq %r13;
- popq %r14;
- popq %r15;
-+ pax_force_retaddr
- ret;
- ENDPROC(twofish_dec_blk_3way)
-diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
-index a039d21..524b8b2 100644
---- a/arch/x86/crypto/twofish-x86_64-asm_64.S
-+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
-@@ -22,6 +22,7 @@
-
- #include <linux/linkage.h>
- #include <asm/asm-offsets.h>
-+#include <asm/alternative-asm.h>
-
- #define a_offset 0
- #define b_offset 4
-@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
-
- popq R1
- movq $1,%rax
-+ pax_force_retaddr
- ret
- ENDPROC(twofish_enc_blk)
-
-@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
-
- popq R1
- movq $1,%rax
-+ pax_force_retaddr
- ret
- ENDPROC(twofish_dec_blk)
-diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
-index ae6aad1..719d6d9 100644
---- a/arch/x86/ia32/ia32_aout.c
-+++ b/arch/x86/ia32/ia32_aout.c
-@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
- unsigned long dump_start, dump_size;
- struct user32 dump;
-
-+ memset(&dump, 0, sizeof(dump));
-+
- fs = get_fs();
- set_fs(KERNEL_DS);
- has_dumped = 1;
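
aout_core_dump() builds struct user32 on the stack and copies it into the core file; any field or padding byte the dump path never writes would otherwise carry stale kernel stack contents into a user-readable file. Zeroing the whole struct first is the standard fix for this class of infoleak. A userspace analogue of the pattern, all names hypothetical:

    #include <string.h>

    struct rec {
            int  id;          /* written below            */
            char name[8];     /* never written            */
            long spare;       /* never written, + padding */
    };

    static void emit(void *out)
    {
            struct rec r;

            memset(&r, 0, sizeof(r));  /* without this, stack garbage
                                          leaks via name/spare/padding */
            r.id = 1;
            memcpy(out, &r, sizeof(r));
    }
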
-diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
-index f9e181a..300544c 100644
---- a/arch/x86/ia32/ia32_signal.c
-+++ b/arch/x86/ia32/ia32_signal.c
-@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
- if (__get_user(set.sig[0], &frame->sc.oldmask)
- || (_COMPAT_NSIG_WORDS > 1
- && __copy_from_user((((char *) &set.sig) + 4),
-- &frame->extramask,
-+ frame->extramask,
- sizeof(frame->extramask))))
- goto badframe;
-
-@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
- sp -= frame_size;
- /* Align the stack pointer according to the i386 ABI,
- * i.e. so that on function entry ((sp + 4) & 15) == 0. */
-- sp = ((sp + 4) & -16ul) - 4;
-+ sp = ((sp - 12) & -16ul) - 4;
- return (void __user *) sp;
- }
-
-@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
- } else {
- /* Return stub is in 32bit vsyscall page */
- if (current->mm->context.vdso)
-- restorer = current->mm->context.vdso +
-- selected_vdso32->sym___kernel_sigreturn;
-+ restorer = (void __force_user *)(current->mm->context.vdso +
-+ selected_vdso32->sym___kernel_sigreturn);
- else
-- restorer = &frame->retcode;
-+ restorer = frame->retcode;
- }
-
- put_user_try {
-@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
- * These are actually not used anymore, but left because some
- * gdb versions depend on them as a marker.
- */
-- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
-+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
- } put_user_catch(err);
-
- if (err)
-@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
- 0xb8,
- __NR_ia32_rt_sigreturn,
- 0x80cd,
-- 0,
-+ 0
- };
-
- frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
-@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
-
- if (ksig->ka.sa.sa_flags & SA_RESTORER)
- restorer = ksig->ka.sa.sa_restorer;
-+ else if (current->mm->context.vdso)
-+ /* Return stub is in 32bit vsyscall page */
-+ restorer = (void __force_user *)(current->mm->context.vdso +
-+ selected_vdso32->sym___kernel_rt_sigreturn);
- else
-- restorer = current->mm->context.vdso +
-- selected_vdso32->sym___kernel_rt_sigreturn;
-+ restorer = frame->retcode;
- put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
-
- /*
- * Not actually used anymore, but left because some gdb
- * versions need it.
- */
-- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
-+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
- } put_user_catch(err);
-
- err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
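
The restorer hunks reorder the fallbacks: an application-supplied SA_RESTORER still wins, the vDSO's __kernel_rt_sigreturn stub is preferred next, and only when no vDSO is mapped does the return address fall back to the retcode bytes on the signal frame itself, a path that needs an executable stack and is therefore the last resort under PaX. The retcode initializer a few lines up encodes a two-instruction trampoline; 0x80cd is little-endian for the bytes cd 80:

    /* What frame->retcode contains once written:
     *
     *   b8 ad 00 00 00   movl $__NR_rt_sigreturn, %eax   ; 173 == 0xad
     *   cd 80            int  $0x80
     */
    static const struct {
            unsigned char  mov_eax;  /* 0xb8 */
            unsigned int   nr;       /* syscall number, 173 on ia32 */
            unsigned short int80;    /* 0x80cd */
    } __attribute__((packed)) retcode_sketch = { 0xb8, 173, 0x80cd };
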
-diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
-index 82e8a1d..4e998d5 100644
---- a/arch/x86/ia32/ia32entry.S
-+++ b/arch/x86/ia32/ia32entry.S
-@@ -15,8 +15,10 @@
- #include <asm/irqflags.h>
- #include <asm/asm.h>
- #include <asm/smap.h>
-+#include <asm/pgtable.h>
- #include <linux/linkage.h>
- #include <linux/err.h>
-+#include <asm/alternative-asm.h>
-
- /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
- #include <linux/elf-em.h>
-@@ -62,12 +64,12 @@
- */
- .macro LOAD_ARGS32 offset, _r9=0
- .if \_r9
-- movl \offset+16(%rsp),%r9d
-+ movl \offset+R9(%rsp),%r9d
- .endif
-- movl \offset+40(%rsp),%ecx
-- movl \offset+48(%rsp),%edx
-- movl \offset+56(%rsp),%esi
-- movl \offset+64(%rsp),%edi
-+ movl \offset+RCX(%rsp),%ecx
-+ movl \offset+RDX(%rsp),%edx
-+ movl \offset+RSI(%rsp),%esi
-+ movl \offset+RDI(%rsp),%edi
- movl %eax,%eax /* zero extension */
- .endm
-
-@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
- ENDPROC(native_irq_enable_sysexit)
- #endif
-
-+ .macro pax_enter_kernel_user
-+ pax_set_fptr_mask
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ call pax_enter_kernel_user
-+#endif
-+ .endm
-+
-+ .macro pax_exit_kernel_user
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ call pax_exit_kernel_user
-+#endif
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ pushq %rax
-+ pushq %r11
-+ call pax_randomize_kstack
-+ popq %r11
-+ popq %rax
-+#endif
-+ .endm
-+
-+ .macro pax_erase_kstack
-+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+ call pax_erase_kstack
-+#endif
-+ .endm
-+
- /*
- * 32bit SYSENTER instruction entry.
- *
-@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
- CFI_REGISTER rsp,rbp
- SWAPGS_UNSAFE_STACK
- movq PER_CPU_VAR(kernel_stack), %rsp
-- addq $(KERNEL_STACK_OFFSET),%rsp
-- /*
-- * No need to follow this irqs on/off section: the syscall
-- * disabled irqs, here we enable it straight after entry:
-- */
-- ENABLE_INTERRUPTS(CLBR_NONE)
- movl %ebp,%ebp /* zero extension */
- pushq_cfi $__USER32_DS
- /*CFI_REL_OFFSET ss,0*/
-@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
- CFI_REL_OFFSET rsp,0
- pushfq_cfi
- /*CFI_REL_OFFSET rflags,0*/
-- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
-- CFI_REGISTER rip,r10
-+ orl $X86_EFLAGS_IF,(%rsp)
-+ GET_THREAD_INFO(%r11)
-+ movl TI_sysenter_return(%r11), %r11d
-+ CFI_REGISTER rip,r11
- pushq_cfi $__USER32_CS
- /*CFI_REL_OFFSET cs,0*/
- movl %eax, %eax
-- pushq_cfi %r10
-+ pushq_cfi %r11
- CFI_REL_OFFSET rip,0
- pushq_cfi %rax
- cld
- SAVE_ARGS 0,1,0
-+ pax_enter_kernel_user
-+
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ pax_erase_kstack
-+#endif
-+
-+ /*
-+ * No need to follow this irqs on/off section: the syscall
-+ * disabled irqs, here we enable it straight after entry:
-+ */
-+ ENABLE_INTERRUPTS(CLBR_NONE)
- /* no need to do an access_ok check here because rbp has been
- 32bit zero extended */
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ addq pax_user_shadow_base,%rbp
-+ ASM_PAX_OPEN_USERLAND
-+#endif
-+
- ASM_STAC
- 1: movl (%rbp),%ebp
- _ASM_EXTABLE(1b,ia32_badarg)
- ASM_CLAC
-
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ ASM_PAX_CLOSE_USERLAND
-+#endif
-+
- /*
- * Sysenter doesn't filter flags, so we need to clear NT
- * ourselves. To save a few cycles, we can check whether
-@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
- jnz sysenter_fix_flags
- sysenter_flags_fixed:
-
-- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-+ GET_THREAD_INFO(%r11)
-+ orl $TS_COMPAT,TI_status(%r11)
-+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
- CFI_REMEMBER_STATE
- jnz sysenter_tracesys
- cmpq $(IA32_NR_syscalls-1),%rax
-@@ -172,15 +218,18 @@ sysenter_do_call:
- sysenter_dispatch:
- call *ia32_sys_call_table(,%rax,8)
- movq %rax,RAX-ARGOFFSET(%rsp)
-+ GET_THREAD_INFO(%r11)
- DISABLE_INTERRUPTS(CLBR_NONE)
- TRACE_IRQS_OFF
-- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
- jnz sysexit_audit
- sysexit_from_sys_call:
-- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-+ pax_exit_kernel_user
-+ pax_erase_kstack
-+ andl $~TS_COMPAT,TI_status(%r11)
- /* clear IF, that popfq doesn't enable interrupts early */
-- andl $~0x200,EFLAGS-R11(%rsp)
-- movl RIP-R11(%rsp),%edx /* User %eip */
-+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
-+ movl RIP(%rsp),%edx /* User %eip */
- CFI_REGISTER rip,rdx
- RESTORE_ARGS 0,24,0,0,0,0
- xorq %r8,%r8
-@@ -205,6 +254,9 @@ sysexit_from_sys_call:
- movl %ebx,%esi /* 2nd arg: 1st syscall arg */
- movl %eax,%edi /* 1st arg: syscall number */
- call __audit_syscall_entry
-+
-+ pax_erase_kstack
-+
- movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
- cmpq $(IA32_NR_syscalls-1),%rax
- ja ia32_badsys
-@@ -216,7 +268,7 @@ sysexit_from_sys_call:
- .endm
-
- .macro auditsys_exit exit
-- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
- jnz ia32_ret_from_sys_call
- TRACE_IRQS_ON
- ENABLE_INTERRUPTS(CLBR_NONE)
-@@ -227,11 +279,12 @@ sysexit_from_sys_call:
- 1: setbe %al /* 1 if error, 0 if not */
- movzbl %al,%edi /* zero-extend that into %edi */
- call __audit_syscall_exit
-+ GET_THREAD_INFO(%r11)
- movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
- movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
- DISABLE_INTERRUPTS(CLBR_NONE)
- TRACE_IRQS_OFF
-- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-+ testl %edi,TI_flags(%r11)
- jz \exit
- CLEAR_RREGS -ARGOFFSET
- jmp int_with_check
-@@ -253,7 +306,7 @@ sysenter_fix_flags:
-
- sysenter_tracesys:
- #ifdef CONFIG_AUDITSYSCALL
-- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
- jz sysenter_auditsys
- #endif
- SAVE_REST
-@@ -265,6 +318,9 @@ sysenter_tracesys:
- RESTORE_REST
- cmpq $(IA32_NR_syscalls-1),%rax
- ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
-+
-+ pax_erase_kstack
-+
- jmp sysenter_do_call
- CFI_ENDPROC
- ENDPROC(ia32_sysenter_target)
-@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
- ENTRY(ia32_cstar_target)
- CFI_STARTPROC32 simple
- CFI_SIGNAL_FRAME
-- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
-+ CFI_DEF_CFA rsp,0
- CFI_REGISTER rip,rcx
- /*CFI_REGISTER rflags,r11*/
- SWAPGS_UNSAFE_STACK
- movl %esp,%r8d
- CFI_REGISTER rsp,r8
- movq PER_CPU_VAR(kernel_stack),%rsp
-+ SAVE_ARGS 8*6,0,0
-+ pax_enter_kernel_user
-+
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ pax_erase_kstack
-+#endif
-+
- /*
- * No need to follow this irqs on/off section: the syscall
- * disabled irqs and here we enable it straight after entry:
- */
- ENABLE_INTERRUPTS(CLBR_NONE)
-- SAVE_ARGS 8,0,0
- movl %eax,%eax /* zero extension */
- movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
- movq %rcx,RIP-ARGOFFSET(%rsp)
-@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
- /* no need to do an access_ok check here because r8 has been
- 32bit zero extended */
- /* hardware stack frame is complete now */
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ ASM_PAX_OPEN_USERLAND
-+ movq pax_user_shadow_base,%r8
-+ addq RSP-ARGOFFSET(%rsp),%r8
-+#endif
-+
- ASM_STAC
- 1: movl (%r8),%r9d
- _ASM_EXTABLE(1b,ia32_badarg)
- ASM_CLAC
-- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ ASM_PAX_CLOSE_USERLAND
-+#endif
-+
-+ GET_THREAD_INFO(%r11)
-+ orl $TS_COMPAT,TI_status(%r11)
-+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
- CFI_REMEMBER_STATE
- jnz cstar_tracesys
- cmpq $IA32_NR_syscalls-1,%rax
-@@ -335,13 +410,16 @@ cstar_do_call:
- cstar_dispatch:
- call *ia32_sys_call_table(,%rax,8)
- movq %rax,RAX-ARGOFFSET(%rsp)
-+ GET_THREAD_INFO(%r11)
- DISABLE_INTERRUPTS(CLBR_NONE)
- TRACE_IRQS_OFF
-- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
- jnz sysretl_audit
- sysretl_from_sys_call:
-- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
-+ pax_exit_kernel_user
-+ pax_erase_kstack
-+ andl $~TS_COMPAT,TI_status(%r11)
-+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
- movl RIP-ARGOFFSET(%rsp),%ecx
- CFI_REGISTER rip,rcx
- movl EFLAGS-ARGOFFSET(%rsp),%r11d
-@@ -368,7 +446,7 @@ sysretl_audit:
-
- cstar_tracesys:
- #ifdef CONFIG_AUDITSYSCALL
-- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
- jz cstar_auditsys
- #endif
- xchgl %r9d,%ebp
-@@ -382,11 +460,19 @@ cstar_tracesys:
- xchgl %ebp,%r9d
- cmpq $(IA32_NR_syscalls-1),%rax
- ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
-+
-+ pax_erase_kstack
-+
- jmp cstar_do_call
- END(ia32_cstar_target)
-
- ia32_badarg:
- ASM_CLAC
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ ASM_PAX_CLOSE_USERLAND
-+#endif
-+
- movq $-EFAULT,%rax
- jmp ia32_sysret
- CFI_ENDPROC
-@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
- CFI_REL_OFFSET rip,RIP-RIP
- PARAVIRT_ADJUST_EXCEPTION_FRAME
- SWAPGS
-- /*
-- * No need to follow this irqs on/off section: the syscall
-- * disabled irqs and here we enable it straight after entry:
-- */
-- ENABLE_INTERRUPTS(CLBR_NONE)
- movl %eax,%eax
- pushq_cfi %rax
- cld
- /* note the registers are not zero extended to the sf.
- this could be a problem. */
- SAVE_ARGS 0,1,0
-- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-+ pax_enter_kernel_user
-+
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ pax_erase_kstack
-+#endif
-+
-+ /*
-+ * No need to follow this irqs on/off section: the syscall
-+ * disabled irqs and here we enable it straight after entry:
-+ */
-+ ENABLE_INTERRUPTS(CLBR_NONE)
-+ GET_THREAD_INFO(%r11)
-+ orl $TS_COMPAT,TI_status(%r11)
-+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
- jnz ia32_tracesys
- cmpq $(IA32_NR_syscalls-1),%rax
- ja ia32_badsys
-@@ -458,6 +551,9 @@ ia32_tracesys:
- RESTORE_REST
- cmpq $(IA32_NR_syscalls-1),%rax
- ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
-+
-+ pax_erase_kstack
-+
- jmp ia32_do_call
- END(ia32_syscall)
-
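Across these entry paths the pattern is uniform: pax_enter_kernel_user/pax_exit_kernel_user toggle UDEREF state around the syscall, RANDKSTACK re-randomizes the kernel stack offset per syscall, and pax_erase_kstack (STACKLEAK) clears the used portion of the kernel stack so a later syscall cannot read stale data out of "uninitialized" stack slots. A userspace sketch of the erase step only (illustrative; the kernel does this in assembly over the thread stack):

    #define POISON 0xdeadbeefUL

    /* Fill a spent stack range with a poison pattern so a later frame
     * reading "uninitialized" stack sees no stale data. */
    static void erase_stack_range(unsigned long *lo, unsigned long *hi)
    {
        while (lo < hi)
            *lo++ = POISON;
    }

    int main(void)
    {
        unsigned long buf[64];

        erase_stack_range(buf, buf + 64);
        return buf[0] == POISON && buf[63] == POISON ? 0 : 1;
    }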
-diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
-index 8e0ceec..af13504 100644
---- a/arch/x86/ia32/sys_ia32.c
-+++ b/arch/x86/ia32/sys_ia32.c
-@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
- */
- static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
- {
-- typeof(ubuf->st_uid) uid = 0;
-- typeof(ubuf->st_gid) gid = 0;
-+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
-+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
- SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
- SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
- if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
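The cp_stat64() change sidesteps a sparse/checker complaint: typeof(ubuf->st_uid) names the member type through a __user pointer, while typeof(((struct stat64 *)0)->st_uid) gets the same type from an unevaluated null-pointer expression, so no user pointer is formally dereferenced. The idiom in isolation (struct name is illustrative):

    #include <stdio.h>

    struct stat_like {
        unsigned int st_uid;
        unsigned int st_gid;
    };

    /* typeof of a member reached through a null pointer: the operand of
     * __typeof__ is never evaluated, so nothing is dereferenced and no
     * object is needed. */
    typedef __typeof__(((struct stat_like *)0)->st_uid) uid_type;

    int main(void)
    {
        uid_type uid = 0;
        printf("sizeof(uid_type) = %zu\n", sizeof(uid));
        return 0;
    }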
-diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
-index 372231c..51b537d 100644
---- a/arch/x86/include/asm/alternative-asm.h
-+++ b/arch/x86/include/asm/alternative-asm.h
-@@ -18,6 +18,45 @@
- .endm
- #endif
-
-+#ifdef KERNEXEC_PLUGIN
-+ .macro pax_force_retaddr_bts rip=0
-+ btsq $63,\rip(%rsp)
-+ .endm
-+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
-+ .macro pax_force_retaddr rip=0, reload=0
-+ btsq $63,\rip(%rsp)
-+ .endm
-+ .macro pax_force_fptr ptr
-+ btsq $63,\ptr
-+ .endm
-+ .macro pax_set_fptr_mask
-+ .endm
-+#endif
-+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ .macro pax_force_retaddr rip=0, reload=0
-+ .if \reload
-+ pax_set_fptr_mask
-+ .endif
-+ orq %r12,\rip(%rsp)
-+ .endm
-+ .macro pax_force_fptr ptr
-+ orq %r12,\ptr
-+ .endm
-+ .macro pax_set_fptr_mask
-+ movabs $0x8000000000000000,%r12
-+ .endm
-+#endif
-+#else
-+ .macro pax_force_retaddr rip=0, reload=0
-+ .endm
-+ .macro pax_force_fptr ptr
-+ .endm
-+ .macro pax_force_retaddr_bts rip=0
-+ .endm
-+ .macro pax_set_fptr_mask
-+ .endm
-+#endif
-+
- .macro altinstruction_entry orig alt feature orig_len alt_len
- .long \orig - .
- .long \alt - .
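pax_force_retaddr is the KERNEXEC return-address protection: the BTS method sets bit 63 of the saved return address directly, while the OR method ORs in a mask that pax_set_fptr_mask preloads into %r12. Either way, a return address that an attacker redirects into userland becomes non-canonical and faults instead of executing. A sketch of the bit-63 tagging (addresses are illustrative, LP64 assumed):

    #include <stdio.h>

    #define RETADDR_MASK (1UL << 63)   /* bit 63, the BTS method's target */

    /* Kernel-half addresses already have bit 63 set and pass through
     * unchanged; a smuggled userland address becomes non-canonical. */
    static unsigned long force_retaddr(unsigned long addr)
    {
        return addr | RETADDR_MASK;
    }

    int main(void)
    {
        unsigned long kernel_text = 0xffffffff81000000UL; /* illustrative */
        unsigned long user_text   = 0x0000000000401000UL; /* illustrative */

        printf("kernel %#lx -> %#lx (unchanged)\n",
               kernel_text, force_retaddr(kernel_text));
        printf("user   %#lx -> %#lx (non-canonical)\n",
               user_text, force_retaddr(user_text));
        return 0;
    }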
-diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
-index 473bdbe..b1e3377 100644
---- a/arch/x86/include/asm/alternative.h
-+++ b/arch/x86/include/asm/alternative.h
-@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
- ".pushsection .discard,\"aw\",@progbits\n" \
- DISCARD_ENTRY(1) \
- ".popsection\n" \
-- ".pushsection .altinstr_replacement, \"ax\"\n" \
-+ ".pushsection .altinstr_replacement, \"a\"\n" \
- ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
- ".popsection"
-
-@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
- DISCARD_ENTRY(1) \
- DISCARD_ENTRY(2) \
- ".popsection\n" \
-- ".pushsection .altinstr_replacement, \"ax\"\n" \
-+ ".pushsection .altinstr_replacement, \"a\"\n" \
- ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
- ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
- ".popsection"
-diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
-index 465b309..ab7e51f 100644
---- a/arch/x86/include/asm/apic.h
-+++ b/arch/x86/include/asm/apic.h
-@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
-
- #ifdef CONFIG_X86_LOCAL_APIC
-
--extern unsigned int apic_verbosity;
-+extern int apic_verbosity;
- extern int local_apic_timer_c2_ok;
-
- extern int disable_apic;
-diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
-index 20370c6..a2eb9b0 100644
---- a/arch/x86/include/asm/apm.h
-+++ b/arch/x86/include/asm/apm.h
-@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
- __asm__ __volatile__(APM_DO_ZERO_SEGS
- "pushl %%edi\n\t"
- "pushl %%ebp\n\t"
-- "lcall *%%cs:apm_bios_entry\n\t"
-+ "lcall *%%ss:apm_bios_entry\n\t"
- "setc %%al\n\t"
- "popl %%ebp\n\t"
- "popl %%edi\n\t"
-@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
- __asm__ __volatile__(APM_DO_ZERO_SEGS
- "pushl %%edi\n\t"
- "pushl %%ebp\n\t"
-- "lcall *%%cs:apm_bios_entry\n\t"
-+ "lcall *%%ss:apm_bios_entry\n\t"
- "setc %%bl\n\t"
- "popl %%ebp\n\t"
- "popl %%edi\n\t"
-diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
-index 5e5cd12..51cdc93 100644
---- a/arch/x86/include/asm/atomic.h
-+++ b/arch/x86/include/asm/atomic.h
-@@ -28,6 +28,17 @@ static inline int atomic_read(const atomic_t *v)
- }
-
- /**
-+ * atomic_read_unchecked - read atomic variable
-+ * @v: pointer of type atomic_unchecked_t
-+ *
-+ * Atomically reads the value of @v.
-+ */
-+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
-+{
-+ return ACCESS_ONCE((v)->counter);
-+}
-+
-+/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
-@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
- }
-
- /**
-+ * atomic_set_unchecked - set atomic variable
-+ * @v: pointer of type atomic_unchecked_t
-+ * @i: required value
-+ *
-+ * Atomically sets the value of @v to @i.
-+ */
-+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
-+{
-+ v->counter = i;
-+}
-+
-+/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
-@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
- */
- static inline void atomic_add(int i, atomic_t *v)
- {
-- asm volatile(LOCK_PREFIX "addl %1,%0"
-+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "subl %1,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "+m" (v->counter)
-+ : "ir" (i));
-+}
-+
-+/**
-+ * atomic_add_unchecked - add integer to atomic variable
-+ * @i: integer value to add
-+ * @v: pointer of type atomic_unchecked_t
-+ *
-+ * Atomically adds @i to @v.
-+ */
-+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
- : "+m" (v->counter)
- : "ir" (i));
- }
-@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
- */
- static inline void atomic_sub(int i, atomic_t *v)
- {
-- asm volatile(LOCK_PREFIX "subl %1,%0"
-+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "addl %1,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "+m" (v->counter)
-+ : "ir" (i));
-+}
-+
-+/**
-+ * atomic_sub_unchecked - subtract integer from atomic variable
-+ * @i: integer value to subtract
-+ * @v: pointer of type atomic_unchecked_t
-+ *
-+ * Atomically subtracts @i from @v.
-+ */
-+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
- : "+m" (v->counter)
- : "ir" (i));
- }
-@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
- */
- static inline int atomic_sub_and_test(int i, atomic_t *v)
- {
-- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
-+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
- }
-
- /**
-@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
- */
- static inline void atomic_inc(atomic_t *v)
- {
-- asm volatile(LOCK_PREFIX "incl %0"
-+ asm volatile(LOCK_PREFIX "incl %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "decl %0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "+m" (v->counter));
-+}
-+
-+/**
-+ * atomic_inc_unchecked - increment atomic variable
-+ * @v: pointer of type atomic_unchecked_t
-+ *
-+ * Atomically increments @v by 1.
-+ */
-+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "incl %0\n"
- : "+m" (v->counter));
- }
-
-@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
- */
- static inline void atomic_dec(atomic_t *v)
- {
-- asm volatile(LOCK_PREFIX "decl %0"
-+ asm volatile(LOCK_PREFIX "decl %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "incl %0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "+m" (v->counter));
-+}
-+
-+/**
-+ * atomic_dec_unchecked - decrement atomic variable
-+ * @v: pointer of type atomic_unchecked_t
-+ *
-+ * Atomically decrements @v by 1.
-+ */
-+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "decl %0\n"
- : "+m" (v->counter));
- }
-
-@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
- */
- static inline int atomic_dec_and_test(atomic_t *v)
- {
-- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
-+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
- }
-
- /**
-@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
- */
- static inline int atomic_inc_and_test(atomic_t *v)
- {
-- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
-+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
-+}
-+
-+/**
-+ * atomic_inc_and_test_unchecked - increment and test
-+ * @v: pointer of type atomic_unchecked_t
-+ *
-+ * Atomically increments @v by 1
-+ * and returns true if the result is zero, or false for all
-+ * other cases.
-+ */
-+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
-+{
-+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
- }
-
- /**
-@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
- */
- static inline int atomic_add_negative(int i, atomic_t *v)
- {
-- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
-+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
- }
-
- /**
-@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
- *
- * Atomically adds @i to @v and returns @i + @v
- */
--static inline int atomic_add_return(int i, atomic_t *v)
-+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
-+{
-+ return i + xadd_check_overflow(&v->counter, i);
-+}
-+
-+/**
-+ * atomic_add_return_unchecked - add integer and return
-+ * @i: integer value to add
-+ * @v: pointer of type atomic_unchecked_t
-+ *
-+ * Atomically adds @i to @v and returns @i + @v
-+ */
-+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
- {
- return i + xadd(&v->counter, i);
- }
-@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
- *
- * Atomically subtracts @i from @v and returns @v - @i
- */
--static inline int atomic_sub_return(int i, atomic_t *v)
-+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
- {
- return atomic_add_return(-i, v);
- }
-
- #define atomic_inc_return(v) (atomic_add_return(1, v))
-+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
-+{
-+ return atomic_add_return_unchecked(1, v);
-+}
- #define atomic_dec_return(v) (atomic_sub_return(1, v))
-
--static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
-+{
-+ return cmpxchg(&v->counter, old, new);
-+}
-+
-+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
- {
- return cmpxchg(&v->counter, old, new);
- }
-@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
- return xchg(&v->counter, new);
- }
-
-+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
-+{
-+ return xchg(&v->counter, new);
-+}
-+
- /**
- * __atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
-@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
- */
- static inline int __atomic_add_unless(atomic_t *v, int a, int u)
- {
-- int c, old;
-+ int c, old, new;
- c = atomic_read(v);
- for (;;) {
-- if (unlikely(c == (u)))
-+ if (unlikely(c == u))
- break;
-- old = atomic_cmpxchg((v), c, c + (a));
-+
-+ asm volatile("addl %2,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ "subl %2,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "=r" (new)
-+ : "0" (c), "ir" (a));
-+
-+ old = atomic_cmpxchg(v, c, new);
- if (likely(old == c))
- break;
- c = old;
-@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
- }
-
- /**
-+ * atomic_inc_not_zero_hint - increment if not null
-+ * @v: pointer of type atomic_t
-+ * @hint: probable value of the atomic before the increment
-+ *
-+ * This version of atomic_inc_not_zero() gives a hint of the probable
-+ * value of the atomic. This helps the processor avoid reading the
-+ * memory before doing the atomic read/modify/write cycle, lowering
-+ * the number of bus transactions on some arches.
-+ *
-+ * Returns: 0 if increment was not done, 1 otherwise.
-+ */
-+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
-+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
-+{
-+ int val, c = hint, new;
-+
-+ /* sanity test, should be removed by compiler if hint is a constant */
-+ if (!hint)
-+ return __atomic_add_unless(v, 1, 0);
-+
-+ do {
-+ asm volatile("incl %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ "decl %0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "=r" (new)
-+ : "0" (c));
-+
-+ val = atomic_cmpxchg(v, c, new);
-+ if (val == c)
-+ return 1;
-+ c = val;
-+ } while (c);
-+
-+ return 0;
-+}
-+
-+/**
- * atomic_inc_short - increment of a short integer
- * @v: pointer to type short int
- *
-@@ -220,14 +422,37 @@ static inline short int atomic_inc_short(short int *v)
- }
-
- /* These are x86-specific, used by some header files */
--#define atomic_clear_mask(mask, addr) \
-- asm volatile(LOCK_PREFIX "andl %0,%1" \
-- : : "r" (~(mask)), "m" (*(addr)) : "memory")
-+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "andl %1,%0"
-+ : "+m" (v->counter)
-+ : "r" (~(mask))
-+ : "memory");
-+}
-
--#define atomic_set_mask(mask, addr) \
-- asm volatile(LOCK_PREFIX "orl %0,%1" \
-- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
-- : "memory")
-+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "andl %1,%0"
-+ : "+m" (v->counter)
-+ : "r" (~(mask))
-+ : "memory");
-+}
-+
-+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "orl %1,%0"
-+ : "+m" (v->counter)
-+ : "r" (mask)
-+ : "memory");
-+}
-+
-+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "orl %1,%0"
-+ : "+m" (v->counter)
-+ : "r" (mask)
-+ : "memory");
-+}
-
- #ifdef CONFIG_X86_32
- # include <asm/atomic64_32.h>
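The PAX_REFCOUNT instrumentation repeated through this file follows one template: perform the locked operation, jno past the handler if no signed overflow occurred, otherwise undo the operation and raise int $4 so the kernel's overflow handler can act; the parallel _unchecked variants opt out for counters that may legitimately wrap. A userspace analogue of the check-and-trap step, assuming GCC/Clang's __builtin_add_overflow, with abort() standing in for the int $4 trap:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Analogue of "addl; jno 0f; subl; int $4": detect the signed
     * overflow the jno branch tests, and trap instead of wrapping. */
    static int refcount_add(int *counter, int i)
    {
        int newval;

        if (__builtin_add_overflow(*counter, i, &newval)) {
            fprintf(stderr, "refcount overflow, trapping\n");
            abort();
        }
        *counter = newval;
        return newval;
    }

    int main(void)
    {
        int c = INT_MAX - 1;

        printf("%d\n", refcount_add(&c, 1)); /* INT_MAX, fine */
        refcount_add(&c, 1);                 /* overflows: aborts */
        return 0;
    }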
-diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
-index b154de7..bf18a5a 100644
---- a/arch/x86/include/asm/atomic64_32.h
-+++ b/arch/x86/include/asm/atomic64_32.h
-@@ -12,6 +12,14 @@ typedef struct {
- u64 __aligned(8) counter;
- } atomic64_t;
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+typedef struct {
-+ u64 __aligned(8) counter;
-+} atomic64_unchecked_t;
-+#else
-+typedef atomic64_t atomic64_unchecked_t;
-+#endif
-+
- #define ATOMIC64_INIT(val) { (val) }
-
- #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
-@@ -37,21 +45,31 @@ typedef struct {
- ATOMIC64_DECL_ONE(sym##_386)
-
- ATOMIC64_DECL_ONE(add_386);
-+ATOMIC64_DECL_ONE(add_unchecked_386);
- ATOMIC64_DECL_ONE(sub_386);
-+ATOMIC64_DECL_ONE(sub_unchecked_386);
- ATOMIC64_DECL_ONE(inc_386);
-+ATOMIC64_DECL_ONE(inc_unchecked_386);
- ATOMIC64_DECL_ONE(dec_386);
-+ATOMIC64_DECL_ONE(dec_unchecked_386);
- #endif
-
- #define alternative_atomic64(f, out, in...) \
- __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
-
- ATOMIC64_DECL(read);
-+ATOMIC64_DECL(read_unchecked);
- ATOMIC64_DECL(set);
-+ATOMIC64_DECL(set_unchecked);
- ATOMIC64_DECL(xchg);
- ATOMIC64_DECL(add_return);
-+ATOMIC64_DECL(add_return_unchecked);
- ATOMIC64_DECL(sub_return);
-+ATOMIC64_DECL(sub_return_unchecked);
- ATOMIC64_DECL(inc_return);
-+ATOMIC64_DECL(inc_return_unchecked);
- ATOMIC64_DECL(dec_return);
-+ATOMIC64_DECL(dec_return_unchecked);
- ATOMIC64_DECL(dec_if_positive);
- ATOMIC64_DECL(inc_not_zero);
- ATOMIC64_DECL(add_unless);
-@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
- }
-
- /**
-+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
-+ * @v: pointer to type atomic64_unchecked_t
-+ * @o: expected value
-+ * @n: new value
-+ *
-+ * Atomically sets @v to @n if it was equal to @o and returns
-+ * the old value.
-+ */
-+
-+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
-+{
-+ return cmpxchg64(&v->counter, o, n);
-+}
-+
-+/**
- * atomic64_xchg - xchg atomic64 variable
- * @v: pointer to type atomic64_t
- * @n: value to assign
-@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
- }
-
- /**
-+ * atomic64_set_unchecked - set atomic64 variable
-+ * @v: pointer to type atomic64_unchecked_t
-+ * @n: value to assign
-+ *
-+ * Atomically sets the value of @v to @n.
-+ */
-+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
-+{
-+ unsigned high = (unsigned)(i >> 32);
-+ unsigned low = (unsigned)i;
-+ alternative_atomic64(set, /* no output */,
-+ "S" (v), "b" (low), "c" (high)
-+ : "eax", "edx", "memory");
-+}
-+
-+/**
- * atomic64_read - read atomic64 variable
- * @v: pointer to type atomic64_t
- *
-@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
- }
-
- /**
-+ * atomic64_read_unchecked - read atomic64 variable
-+ * @v: pointer to type atomic64_unchecked_t
-+ *
-+ * Atomically reads the value of @v and returns it.
-+ */
-+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
-+{
-+ long long r;
-+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
-+ return r;
-+}
-+
-+/**
- * atomic64_add_return - add and return
- * @i: integer value to add
- * @v: pointer to type atomic64_t
-@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
- return i;
- }
-
-+/**
-+ * atomic64_add_return_unchecked - add and return
-+ * @i: integer value to add
-+ * @v: pointer to type atomic64_unchecked_t
-+ *
-+ * Atomically adds @i to @v and returns @i + *@v
-+ */
-+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
-+{
-+ alternative_atomic64(add_return_unchecked,
-+ ASM_OUTPUT2("+A" (i), "+c" (v)),
-+ ASM_NO_INPUT_CLOBBER("memory"));
-+ return i;
-+}
-+
- /*
- * Other variants with different arithmetic operators:
- */
-@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
- return a;
- }
-
-+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
-+{
-+ long long a;
-+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
-+ "S" (v) : "memory", "ecx");
-+ return a;
-+}
-+
- static inline long long atomic64_dec_return(atomic64_t *v)
- {
- long long a;
-@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
- }
-
- /**
-+ * atomic64_add_unchecked - add integer to atomic64 variable
-+ * @i: integer value to add
-+ * @v: pointer to type atomic64_unchecked_t
-+ *
-+ * Atomically adds @i to @v.
-+ */
-+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
-+{
-+ __alternative_atomic64(add_unchecked, add_return_unchecked,
-+ ASM_OUTPUT2("+A" (i), "+c" (v)),
-+ ASM_NO_INPUT_CLOBBER("memory"));
-+ return i;
-+}
-+
-+/**
- * atomic64_sub - subtract the atomic64 variable
- * @i: integer value to subtract
- * @v: pointer to type atomic64_t
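On 32-bit x86 the 64-bit atomics are out-of-line helpers selected at runtime (cmpxchg8b where available, a fallback on 386-class CPUs), which is why the patch has to declare a parallel set of _unchecked entry points rather than wrapping inline asm. The compare-exchange retry loop those helpers implement, written with C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Retry until the 64-bit counter is swapped atomically
     * (cmpxchg8b on 32-bit x86). */
    static long long atomic64_add_return_c11(_Atomic long long *v, long long i)
    {
        long long old = atomic_load(v), new;

        do {
            new = old + i;
        } while (!atomic_compare_exchange_weak(v, &old, new));
        return new;
    }

    int main(void)
    {
        _Atomic long long v = 1;

        printf("%lld\n", atomic64_add_return_c11(&v, 41)); /* 42 */
        return 0;
    }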
-diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
-index f8d273e..02f39f3 100644
---- a/arch/x86/include/asm/atomic64_64.h
-+++ b/arch/x86/include/asm/atomic64_64.h
-@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
- }
-
- /**
-+ * atomic64_read_unchecked - read atomic64 variable
-+ * @v: pointer of type atomic64_unchecked_t
-+ *
-+ * Atomically reads the value of @v.
-+ * Doesn't imply a read memory barrier.
-+ */
-+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
-+{
-+ return ACCESS_ONCE((v)->counter);
-+}
-+
-+/**
- * atomic64_set - set atomic64 variable
- * @v: pointer to type atomic64_t
- * @i: required value
-@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
- }
-
- /**
-+ * atomic64_set_unchecked - set atomic64 variable
-+ * @v: pointer to type atomic64_unchecked_t
-+ * @i: required value
-+ *
-+ * Atomically sets the value of @v to @i.
-+ */
-+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
-+{
-+ v->counter = i;
-+}
-+
-+/**
- * atomic64_add - add integer to atomic64 variable
- * @i: integer value to add
- * @v: pointer to type atomic64_t
-@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
- */
- static inline void atomic64_add(long i, atomic64_t *v)
- {
-+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "subq %1,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "=m" (v->counter)
-+ : "er" (i), "m" (v->counter));
-+}
-+
-+/**
-+ * atomic64_add_unchecked - add integer to atomic64 variable
-+ * @i: integer value to add
-+ * @v: pointer to type atomic64_unchecked_t
-+ *
-+ * Atomically adds @i to @v.
-+ */
-+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
-+{
- asm volatile(LOCK_PREFIX "addq %1,%0"
- : "=m" (v->counter)
- : "er" (i), "m" (v->counter));
-@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
- */
- static inline void atomic64_sub(long i, atomic64_t *v)
- {
-- asm volatile(LOCK_PREFIX "subq %1,%0"
-+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "addq %1,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "=m" (v->counter)
-+ : "er" (i), "m" (v->counter));
-+}
-+
-+/**
-+ * atomic64_sub_unchecked - subtract the atomic64 variable
-+ * @i: integer value to subtract
-+ * @v: pointer to type atomic64_unchecked_t
-+ *
-+ * Atomically subtracts @i from @v.
-+ */
-+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
- : "=m" (v->counter)
- : "er" (i), "m" (v->counter));
- }
-@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
- */
- static inline int atomic64_sub_and_test(long i, atomic64_t *v)
- {
-- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
-+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
- }
-
- /**
-@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
- */
- static inline void atomic64_inc(atomic64_t *v)
- {
-+ asm volatile(LOCK_PREFIX "incq %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "decq %0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "=m" (v->counter)
-+ : "m" (v->counter));
-+}
-+
-+/**
-+ * atomic64_inc_unchecked - increment atomic64 variable
-+ * @v: pointer to type atomic64_unchecked_t
-+ *
-+ * Atomically increments @v by 1.
-+ */
-+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
-+{
- asm volatile(LOCK_PREFIX "incq %0"
- : "=m" (v->counter)
- : "m" (v->counter));
-@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
- */
- static inline void atomic64_dec(atomic64_t *v)
- {
-- asm volatile(LOCK_PREFIX "decq %0"
-+ asm volatile(LOCK_PREFIX "decq %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ LOCK_PREFIX "incq %0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "=m" (v->counter)
-+ : "m" (v->counter));
-+}
-+
-+/**
-+ * atomic64_dec_unchecked - decrement atomic64 variable
-+ * @v: pointer to type atomic64_unchecked_t
-+ *
-+ * Atomically decrements @v by 1.
-+ */
-+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "decq %0\n"
- : "=m" (v->counter)
- : "m" (v->counter));
- }
-@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
- */
- static inline int atomic64_dec_and_test(atomic64_t *v)
- {
-- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
-+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
- }
-
- /**
-@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
- */
- static inline int atomic64_inc_and_test(atomic64_t *v)
- {
-- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
-+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
- }
-
- /**
-@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
- */
- static inline int atomic64_add_negative(long i, atomic64_t *v)
- {
-- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
-+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
- }
-
- /**
-@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
- */
- static inline long atomic64_add_return(long i, atomic64_t *v)
- {
-+ return i + xadd_check_overflow(&v->counter, i);
-+}
-+
-+/**
-+ * atomic64_add_return_unchecked - add and return
-+ * @i: integer value to add
-+ * @v: pointer to type atomic64_unchecked_t
-+ *
-+ * Atomically adds @i to @v and returns @i + @v
-+ */
-+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
-+{
- return i + xadd(&v->counter, i);
- }
-
-@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
- }
-
- #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
-+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
-+{
-+ return atomic64_add_return_unchecked(1, v);
-+}
- #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
-
- static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
-@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
- return cmpxchg(&v->counter, old, new);
- }
-
-+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
-+{
-+ return cmpxchg(&v->counter, old, new);
-+}
-+
- static inline long atomic64_xchg(atomic64_t *v, long new)
- {
- return xchg(&v->counter, new);
-@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
- */
- static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
- {
-- long c, old;
-+ long c, old, new;
- c = atomic64_read(v);
- for (;;) {
-- if (unlikely(c == (u)))
-+ if (unlikely(c == u))
- break;
-- old = atomic64_cmpxchg((v), c, c + (a));
-+
-+ asm volatile("add %2,%0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "jno 0f\n"
-+ "sub %2,%0\n"
-+ "int $4\n0:\n"
-+ _ASM_EXTABLE(0b, 0b)
-+#endif
-+
-+ : "=r" (new)
-+ : "0" (c), "ir" (a));
-+
-+ old = atomic64_cmpxchg(v, c, new);
- if (likely(old == c))
- break;
- c = old;
- }
-- return c != (u);
-+ return c != u;
- }
-
- #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
-index 2ab1eb3..1e8cc5d 100644
---- a/arch/x86/include/asm/barrier.h
-+++ b/arch/x86/include/asm/barrier.h
-@@ -57,7 +57,7 @@
- do { \
- compiletime_assert_atomic_type(*p); \
- smp_mb(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
-@@ -74,7 +74,7 @@ do { \
- do { \
- compiletime_assert_atomic_type(*p); \
- barrier(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
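grsecurity constifies ACCESS_ONCE() so that plain loads go through a const volatile lvalue; stores, as in the smp_store_release() bodies here, must use the writable ACCESS_ONCE_RW() variant. Approximately, the two macros are:

    #include <stdio.h>

    /* Loads go through a const volatile lvalue (a write through it is a
     * compile error); stores need the RW form. */
    #define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

    int main(void)
    {
        int flag = 0;

        ACCESS_ONCE_RW(flag) = 1;           /* store: RW variant */
        printf("%d\n", ACCESS_ONCE(flag));  /* load: const variant */
        return 0;
    }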
-diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
-index cfe3b95..d01b118 100644
---- a/arch/x86/include/asm/bitops.h
-+++ b/arch/x86/include/asm/bitops.h
-@@ -50,7 +50,7 @@
- * a mask operation on a byte.
- */
- #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
--#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
-+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
- #define CONST_MASK(nr) (1 << ((nr) & 7))
-
- /**
-@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
- */
- static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
- {
-- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
-+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
- }
-
- /**
-@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
- */
- static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
- {
-- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
-+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
- }
-
- /**
-@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
- */
- static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
- {
-- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
-+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
- }
-
- static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
-@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
--static inline unsigned long __ffs(unsigned long word)
-+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
- {
- asm("rep; bsf %1,%0"
- : "=r" (word)
-@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
--static inline unsigned long ffz(unsigned long word)
-+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
- {
- asm("rep; bsf %1,%0"
- : "=r" (word)
-@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
- *
- * Undefined if no set bit exists, so code should check against 0 first.
- */
--static inline unsigned long __fls(unsigned long word)
-+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
- {
- asm("bsr %1,%0"
- : "=r" (word)
-@@ -434,7 +434,7 @@ static inline int ffs(int x)
- * set bit if value is nonzero. The last (most significant) bit is
- * at position 32.
- */
--static inline int fls(int x)
-+static inline int __intentional_overflow(-1) fls(int x)
- {
- int r;
-
-@@ -476,7 +476,7 @@ static inline int fls(int x)
- * at position 64.
- */
- #ifdef CONFIG_X86_64
--static __always_inline int fls64(__u64 x)
-+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
- {
- int bitpos = -1;
- /*
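__intentional_overflow(-1) is an annotation consumed by the size_overflow GCC plugin, marking functions (here the bit-scan helpers, whose results are undefined for zero input anyway) whose arithmetic is exempt from overflow instrumentation. Outside plugin builds it must expand to nothing; a plausible definition and use (the guard macro SIZE_OVERFLOW_PLUGIN is hypothetical, the attribute name mirrors the plugin's):

    /* Must vanish when the plugin is not active. */
    #ifdef SIZE_OVERFLOW_PLUGIN
    #define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)
    #endif

    /* Like __ffs(): result is undefined for word == 0, and the internal
     * arithmetic is deliberately exempt from overflow checking. */
    static inline unsigned long __intentional_overflow(-1)
    my_ffs(unsigned long word)
    {
        return __builtin_ctzl(word);
    }

    int main(void)
    {
        return my_ffs(8) == 3 ? 0 : 1;
    }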
-diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
-index 4fa687a..60f2d39 100644
---- a/arch/x86/include/asm/boot.h
-+++ b/arch/x86/include/asm/boot.h
-@@ -6,10 +6,15 @@
- #include <uapi/asm/boot.h>
-
- /* Physical address where kernel should be loaded. */
--#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
-+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
- + (CONFIG_PHYSICAL_ALIGN - 1)) \
- & ~(CONFIG_PHYSICAL_ALIGN - 1))
-
-+#ifndef __ASSEMBLY__
-+extern unsigned char __LOAD_PHYSICAL_ADDR[];
-+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
-+#endif
-+
- /* Minimum kernel alignment, as a power of two */
- #ifdef CONFIG_X86_64
- #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
-diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
-index 48f99f1..d78ebf9 100644
---- a/arch/x86/include/asm/cache.h
-+++ b/arch/x86/include/asm/cache.h
-@@ -5,12 +5,13 @@
-
- /* L1 cache line size */
- #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #define __read_mostly __attribute__((__section__(".data..read_mostly")))
-+#define __read_only __attribute__((__section__(".data..read_only")))
-
- #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
--#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
-+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
-
- #ifdef CONFIG_X86_VSMP
- #ifdef CONFIG_SMP
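__read_only places a variable in the .data..read_only section, which the hardened kernel write-protects after init so that set-once-at-boot data cannot be flipped by an attacker later. A userspace sketch of the section placement only (the write protection itself is a kernel-side mapping decision, not shown):

    #include <stdio.h>

    /* Section placement; the kernel additionally remaps
     * .data..read_only read-only once init is done. */
    #define __read_only __attribute__((__section__(".data..read_only")))

    static int boot_tunable __read_only = 42;

    int main(void)
    {
        printf("%d\n", boot_tunable);
        return 0;
    }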
-diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
-index 76659b6..72b8439 100644
---- a/arch/x86/include/asm/calling.h
-+++ b/arch/x86/include/asm/calling.h
-@@ -82,107 +82,117 @@ For 32-bit we have the following conventions - kernel is built with
- #define RSP 152
- #define SS 160
-
--#define ARGOFFSET R11
--#define SWFRAME ORIG_RAX
-+#define ARGOFFSET R15
-
- .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
-- subq $9*8+\addskip, %rsp
-- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
-- movq_cfi rdi, 8*8
-- movq_cfi rsi, 7*8
-- movq_cfi rdx, 6*8
-+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
-+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
-+ movq_cfi rdi, RDI
-+ movq_cfi rsi, RSI
-+ movq_cfi rdx, RDX
-
- .if \save_rcx
-- movq_cfi rcx, 5*8
-+ movq_cfi rcx, RCX
- .endif
-
- .if \rax_enosys
-- movq $-ENOSYS, 4*8(%rsp)
-+ movq $-ENOSYS, RAX(%rsp)
- .else
-- movq_cfi rax, 4*8
-+ movq_cfi rax, RAX
- .endif
-
- .if \save_r891011
-- movq_cfi r8, 3*8
-- movq_cfi r9, 2*8
-- movq_cfi r10, 1*8
-- movq_cfi r11, 0*8
-+ movq_cfi r8, R8
-+ movq_cfi r9, R9
-+ movq_cfi r10, R10
-+ movq_cfi r11, R11
- .endif
-
-+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ movq_cfi r12, R12
-+#endif
-+
- .endm
-
--#define ARG_SKIP (9*8)
-+#define ARG_SKIP ORIG_RAX
-
- .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
- rstor_r8910=1, rstor_rdx=1
-+
-+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ movq_cfi_restore R12, r12
-+#endif
-+
- .if \rstor_r11
-- movq_cfi_restore 0*8, r11
-+ movq_cfi_restore R11, r11
- .endif
-
- .if \rstor_r8910
-- movq_cfi_restore 1*8, r10
-- movq_cfi_restore 2*8, r9
-- movq_cfi_restore 3*8, r8
-+ movq_cfi_restore R10, r10
-+ movq_cfi_restore R9, r9
-+ movq_cfi_restore R8, r8
- .endif
-
- .if \rstor_rax
-- movq_cfi_restore 4*8, rax
-+ movq_cfi_restore RAX, rax
- .endif
-
- .if \rstor_rcx
-- movq_cfi_restore 5*8, rcx
-+ movq_cfi_restore RCX, rcx
- .endif
-
- .if \rstor_rdx
-- movq_cfi_restore 6*8, rdx
-+ movq_cfi_restore RDX, rdx
- .endif
-
-- movq_cfi_restore 7*8, rsi
-- movq_cfi_restore 8*8, rdi
-+ movq_cfi_restore RSI, rsi
-+ movq_cfi_restore RDI, rdi
-
-- .if ARG_SKIP+\addskip > 0
-- addq $ARG_SKIP+\addskip, %rsp
-- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
-+ .if ORIG_RAX+\addskip > 0
-+ addq $ORIG_RAX+\addskip, %rsp
-+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
- .endif
- .endm
-
-- .macro LOAD_ARGS offset, skiprax=0
-- movq \offset(%rsp), %r11
-- movq \offset+8(%rsp), %r10
-- movq \offset+16(%rsp), %r9
-- movq \offset+24(%rsp), %r8
-- movq \offset+40(%rsp), %rcx
-- movq \offset+48(%rsp), %rdx
-- movq \offset+56(%rsp), %rsi
-- movq \offset+64(%rsp), %rdi
-+ .macro LOAD_ARGS skiprax=0
-+ movq R11(%rsp), %r11
-+ movq R10(%rsp), %r10
-+ movq R9(%rsp), %r9
-+ movq R8(%rsp), %r8
-+ movq RCX(%rsp), %rcx
-+ movq RDX(%rsp), %rdx
-+ movq RSI(%rsp), %rsi
-+ movq RDI(%rsp), %rdi
- .if \skiprax
- .else
-- movq \offset+72(%rsp), %rax
-+ movq ORIG_RAX(%rsp), %rax
- .endif
- .endm
-
--#define REST_SKIP (6*8)
--
- .macro SAVE_REST
-- subq $REST_SKIP, %rsp
-- CFI_ADJUST_CFA_OFFSET REST_SKIP
-- movq_cfi rbx, 5*8
-- movq_cfi rbp, 4*8
-- movq_cfi r12, 3*8
-- movq_cfi r13, 2*8
-- movq_cfi r14, 1*8
-- movq_cfi r15, 0*8
-+ movq_cfi rbx, RBX
-+ movq_cfi rbp, RBP
-+
-+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ movq_cfi r12, R12
-+#endif
-+
-+ movq_cfi r13, R13
-+ movq_cfi r14, R14
-+ movq_cfi r15, R15
- .endm
-
- .macro RESTORE_REST
-- movq_cfi_restore 0*8, r15
-- movq_cfi_restore 1*8, r14
-- movq_cfi_restore 2*8, r13
-- movq_cfi_restore 3*8, r12
-- movq_cfi_restore 4*8, rbp
-- movq_cfi_restore 5*8, rbx
-- addq $REST_SKIP, %rsp
-- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
-+ movq_cfi_restore R15, r15
-+ movq_cfi_restore R14, r14
-+ movq_cfi_restore R13, r13
-+
-+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ movq_cfi_restore R12, r12
-+#endif
-+
-+ movq_cfi_restore RBP, rbp
-+ movq_cfi_restore RBX, rbx
- .endm
-
- .macro SAVE_ALL
-diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
-index f50de69..2b0a458 100644
---- a/arch/x86/include/asm/checksum_32.h
-+++ b/arch/x86/include/asm/checksum_32.h
-@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
- int len, __wsum sum,
- int *src_err_ptr, int *dst_err_ptr);
-
-+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
-+ int len, __wsum sum,
-+ int *src_err_ptr, int *dst_err_ptr);
-+
-+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
-+ int len, __wsum sum,
-+ int *src_err_ptr, int *dst_err_ptr);
-+
- /*
- * Note: when you get a NULL pointer exception here this means someone
- * passed in an incorrect kernel address to one of these functions.
-@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
-
- might_sleep();
- stac();
-- ret = csum_partial_copy_generic((__force void *)src, dst,
-+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
- len, sum, err_ptr, NULL);
- clac();
-
-@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
- might_sleep();
- if (access_ok(VERIFY_WRITE, dst, len)) {
- stac();
-- ret = csum_partial_copy_generic(src, (__force void *)dst,
-+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
- len, sum, NULL, err_ptr);
- clac();
- return ret;
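Splitting csum_partial_copy_generic() into _from_user/_to_user variants lets the assembly bracket only the user-side pointer with the UDEREF open/close-userland primitives. The shape of the split as a C sketch (open_userland, close_userland, and checksum_and_copy are hypothetical stand-ins, and the byte sum is a toy, not the real Internet checksum):

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-ins for ASM_PAX_OPEN_USERLAND / CLOSE_USERLAND. */
    static void open_userland(void)  { /* stac + UDEREF open */ }
    static void close_userland(void) { /* clac + UDEREF close */ }

    static unsigned int checksum_and_copy(const void *src, void *dst, int len)
    {
        const unsigned char *s = src;
        unsigned int sum = 0;

        memcpy(dst, src, len);
        for (int i = 0; i < len; i++)
            sum += s[i];
        return sum;
    }

    /* The from-user flavor brackets only the access to the user pointer: */
    static unsigned int csum_copy_from_user_sketch(const void *usrc,
                                                   void *kdst, int len)
    {
        unsigned int sum;

        open_userland();
        sum = checksum_and_copy(usrc, kdst, len);
        close_userland();
        return sum;
    }

    int main(void)
    {
        char src[4] = {1, 2, 3, 4}, dst[4];

        printf("%u\n", csum_copy_from_user_sketch(src, dst, 4)); /* 10 */
        return 0;
    }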
-diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
-index 99c105d7..2f667ac 100644
---- a/arch/x86/include/asm/cmpxchg.h
-+++ b/arch/x86/include/asm/cmpxchg.h
-@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
- __compiletime_error("Bad argument size for cmpxchg");
- extern void __xadd_wrong_size(void)
- __compiletime_error("Bad argument size for xadd");
-+extern void __xadd_check_overflow_wrong_size(void)
-+ __compiletime_error("Bad argument size for xadd_check_overflow");
- extern void __add_wrong_size(void)
- __compiletime_error("Bad argument size for add");
-+extern void __add_check_overflow_wrong_size(void)
-+ __compiletime_error("Bad argument size for add_check_overflow");
-
- /*
- * Constants for operation sizes. On 32-bit, the 64-bit size is set to
-@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
- __ret; \
- })
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
-+ ({ \
-+ __typeof__ (*(ptr)) __ret = (arg); \
-+ switch (sizeof(*(ptr))) { \
-+ case __X86_CASE_L: \
-+ asm volatile (lock #op "l %0, %1\n" \
-+ "jno 0f\n" \
-+ "mov %0,%1\n" \
-+ "int $4\n0:\n" \
-+ _ASM_EXTABLE(0b, 0b) \
-+ : "+r" (__ret), "+m" (*(ptr)) \
-+ : : "memory", "cc"); \
-+ break; \
-+ case __X86_CASE_Q: \
-+ asm volatile (lock #op "q %q0, %1\n" \
-+ "jno 0f\n" \
-+ "mov %0,%1\n" \
-+ "int $4\n0:\n" \
-+ _ASM_EXTABLE(0b, 0b) \
-+ : "+r" (__ret), "+m" (*(ptr)) \
-+ : : "memory", "cc"); \
-+ break; \
-+ default: \
-+ __ ## op ## _check_overflow_wrong_size(); \
-+ } \
-+ __ret; \
-+ })
-+#else
-+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
-+#endif
-+
- /*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
- * Since this is generally used to protect other memory information, we
-@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
- #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
- #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
-
-+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
-+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
-+
- #define __add(ptr, inc, lock) \
- ({ \
- __typeof__ (*(ptr)) __ret = (inc); \
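xadd_check_overflow() is the xadd counterpart of the atomic_add instrumentation: exchange-and-add, jno on success, otherwise restore the old value and int $4; atomic_add_return() then becomes i + xadd_check_overflow(&v->counter, i). The underlying xadd primitive in userspace inline asm (x86 only; single-threaded here, so the lock prefix is omitted):

    #include <stdio.h>

    /* xadd: atomically exchange-and-add, returning the old value. */
    static int xadd(int *ptr, int inc)
    {
        asm volatile("xaddl %0, %1"
                     : "+r" (inc), "+m" (*ptr)
                     : : "memory", "cc");
        return inc;   /* xadd leaves the old *ptr in the source register */
    }

    int main(void)
    {
        int v = 40;
        int old = xadd(&v, 2);

        printf("old=%d new=%d\n", old, v); /* old=40 new=42 */
        return 0;
    }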
-diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
-index 59c6c40..5e0b22c 100644
---- a/arch/x86/include/asm/compat.h
-+++ b/arch/x86/include/asm/compat.h
-@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
- typedef u32 compat_uint_t;
- typedef u32 compat_ulong_t;
- typedef u64 __attribute__((aligned(4))) compat_u64;
--typedef u32 compat_uptr_t;
-+typedef u32 __user compat_uptr_t;
-
- struct compat_timespec {
- compat_time_t tv_sec;
-diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
-index aede2c3..40d7a8f 100644
---- a/arch/x86/include/asm/cpufeature.h
-+++ b/arch/x86/include/asm/cpufeature.h
-@@ -212,7 +212,7 @@
- #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
- #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
- #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
--
-+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
-
- /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
- #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
-@@ -220,7 +220,7 @@
- #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
- #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
- #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
--#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
-+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
- #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
- #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
- #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
-@@ -388,6 +388,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
- #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
- #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
- #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
-+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
-
- #if __GNUC__ >= 4
- extern void warn_pre_alternatives(void);
-@@ -439,7 +440,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
-
- #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
- t_warn:
-- warn_pre_alternatives();
-+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
-+ warn_pre_alternatives();
- return false;
- #endif
-
-@@ -459,7 +461,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
- ".section .discard,\"aw\",@progbits\n"
- " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
- ".previous\n"
-- ".section .altinstr_replacement,\"ax\"\n"
-+ ".section .altinstr_replacement,\"a\"\n"
- "3: movb $1,%0\n"
- "4:\n"
- ".previous\n"
-@@ -496,7 +498,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
- " .byte 2b - 1b\n" /* src len */
- " .byte 4f - 3f\n" /* repl len */
- ".previous\n"
-- ".section .altinstr_replacement,\"ax\"\n"
-+ ".section .altinstr_replacement,\"a\"\n"
- "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
- "4:\n"
- ".previous\n"
-@@ -529,7 +531,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
- ".section .discard,\"aw\",@progbits\n"
- " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
- ".previous\n"
-- ".section .altinstr_replacement,\"ax\"\n"
-+ ".section .altinstr_replacement,\"a\"\n"
- "3: movb $0,%0\n"
- "4:\n"
- ".previous\n"
-@@ -543,7 +545,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
- ".section .discard,\"aw\",@progbits\n"
- " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
- ".previous\n"
-- ".section .altinstr_replacement,\"ax\"\n"
-+ ".section .altinstr_replacement,\"a\"\n"
- "5: movb $1,%0\n"
- "6:\n"
- ".previous\n"
-diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
-index a94b82e..59ecefa 100644
---- a/arch/x86/include/asm/desc.h
-+++ b/arch/x86/include/asm/desc.h
-@@ -4,6 +4,7 @@
- #include <asm/desc_defs.h>
- #include <asm/ldt.h>
- #include <asm/mmu.h>
-+#include <asm/pgtable.h>
-
- #include <linux/smp.h>
- #include <linux/percpu.h>
-@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
-
- desc->type = (info->read_exec_only ^ 1) << 1;
- desc->type |= info->contents << 2;
-+ desc->type |= info->seg_not_present ^ 1;
-
- desc->s = 1;
- desc->dpl = 0x3;
-@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
- }
-
- extern struct desc_ptr idt_descr;
--extern gate_desc idt_table[];
--extern struct desc_ptr debug_idt_descr;
--extern gate_desc debug_idt_table[];
--
--struct gdt_page {
-- struct desc_struct gdt[GDT_ENTRIES];
--} __attribute__((aligned(PAGE_SIZE)));
--
--DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
-+extern gate_desc idt_table[IDT_ENTRIES];
-+extern const struct desc_ptr debug_idt_descr;
-+extern gate_desc debug_idt_table[IDT_ENTRIES];
-
-+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
- static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
- {
-- return per_cpu(gdt_page, cpu).gdt;
-+ return cpu_gdt_table[cpu];
- }
-
- #ifdef CONFIG_X86_64
-@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
- unsigned long base, unsigned dpl, unsigned flags,
- unsigned short seg)
- {
-- gate->a = (seg << 16) | (base & 0xffff);
-- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
-+ gate->gate.offset_low = base;
-+ gate->gate.seg = seg;
-+ gate->gate.reserved = 0;
-+ gate->gate.type = type;
-+ gate->gate.s = 0;
-+ gate->gate.dpl = dpl;
-+ gate->gate.p = 1;
-+ gate->gate.offset_high = base >> 16;
- }
-
- #endif
-@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
-
- static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
- {
-+ pax_open_kernel();
- memcpy(&idt[entry], gate, sizeof(*gate));
-+ pax_close_kernel();
- }
-
- static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
- {
-+ pax_open_kernel();
- memcpy(&ldt[entry], desc, 8);
-+ pax_close_kernel();
- }
-
- static inline void
-@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
- default: size = sizeof(*gdt); break;
- }
-
-+ pax_open_kernel();
- memcpy(&gdt[entry], desc, size);
-+ pax_close_kernel();
- }
-
- static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
-@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
-
- static inline void native_load_tr_desc(void)
- {
-+ pax_open_kernel();
- asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
-+ pax_close_kernel();
- }
-
- static inline void native_load_gdt(const struct desc_ptr *dtr)
-@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
- struct desc_struct *gdt = get_cpu_gdt_table(cpu);
- unsigned int i;
-
-+ pax_open_kernel();
- for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
- gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
-+ pax_close_kernel();
- }
-
- /* This intentionally ignores lm, since 32-bit apps don't have that field. */
-@@ -295,7 +308,7 @@ static inline void load_LDT(mm_context_t *pc)
- preempt_enable();
- }
-
--static inline unsigned long get_desc_base(const struct desc_struct *desc)
-+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
- {
- return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
- }
-@@ -319,7 +332,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
- }
-
- #ifdef CONFIG_X86_64
--static inline void set_nmi_gate(int gate, void *addr)
-+static inline void set_nmi_gate(int gate, const void *addr)
- {
- gate_desc s;
-
-@@ -329,14 +342,14 @@ static inline void set_nmi_gate(int gate, void *addr)
- #endif
-
- #ifdef CONFIG_TRACING
--extern struct desc_ptr trace_idt_descr;
--extern gate_desc trace_idt_table[];
-+extern const struct desc_ptr trace_idt_descr;
-+extern gate_desc trace_idt_table[IDT_ENTRIES];
- static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
- {
- write_idt_entry(trace_idt_table, entry, gate);
- }
-
--static inline void _trace_set_gate(int gate, unsigned type, void *addr,
-+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
- unsigned dpl, unsigned ist, unsigned seg)
- {
- gate_desc s;
-@@ -356,7 +369,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
- #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
- #endif
-
--static inline void _set_gate(int gate, unsigned type, void *addr,
-+static inline void _set_gate(int gate, unsigned type, const void *addr,
- unsigned dpl, unsigned ist, unsigned seg)
- {
- gate_desc s;
-@@ -379,9 +392,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
- #define set_intr_gate(n, addr) \
- do { \
- BUG_ON((unsigned)n > 0xFF); \
-- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
-+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
- __KERNEL_CS); \
-- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
-+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
- 0, 0, __KERNEL_CS); \
- } while (0)
-
-@@ -409,19 +422,19 @@ static inline void alloc_system_vector(int vector)
- /*
- * This routine sets up an interrupt gate at directory privilege level 3.
- */
--static inline void set_system_intr_gate(unsigned int n, void *addr)
-+static inline void set_system_intr_gate(unsigned int n, const void *addr)
- {
- BUG_ON((unsigned)n > 0xFF);
- _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
- }
-
--static inline void set_system_trap_gate(unsigned int n, void *addr)
-+static inline void set_system_trap_gate(unsigned int n, const void *addr)
- {
- BUG_ON((unsigned)n > 0xFF);
- _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
- }
-
--static inline void set_trap_gate(unsigned int n, void *addr)
-+static inline void set_trap_gate(unsigned int n, const void *addr)
- {
- BUG_ON((unsigned)n > 0xFF);
- _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
-@@ -430,16 +443,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
- static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
- {
- BUG_ON((unsigned)n > 0xFF);
-- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
-+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
- }
-
--static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
-+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
- {
- BUG_ON((unsigned)n > 0xFF);
- _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
- }
-
--static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
-+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
- {
- BUG_ON((unsigned)n > 0xFF);
- _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
-@@ -511,4 +524,17 @@ static inline void load_current_idt(void)
- else
- load_idt((const struct desc_ptr *)&idt_descr);
- }
-+
-+#ifdef CONFIG_X86_32
-+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
-+{
-+ struct desc_struct d;
-+
-+ if (likely(limit))
-+ limit = (limit - 1UL) >> PAGE_SHIFT;
-+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
-+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
-+}
-+#endif
-+
- #endif /* _ASM_X86_DESC_H */
-diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
-index 278441f..b95a174 100644
---- a/arch/x86/include/asm/desc_defs.h
-+++ b/arch/x86/include/asm/desc_defs.h
-@@ -31,6 +31,12 @@ struct desc_struct {
- unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
- unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
- };
-+ struct {
-+ u16 offset_low;
-+ u16 seg;
-+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
-+ unsigned offset_high: 16;
-+ } gate;
- };
- } __attribute__((packed));
-
-diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
-index ced283a..ffe04cc 100644
---- a/arch/x86/include/asm/div64.h
-+++ b/arch/x86/include/asm/div64.h
-@@ -39,7 +39,7 @@
- __mod; \
- })
-
--static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
-+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
- {
- union {
- u64 v64;
-diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
-index ca3347a..1a5082a 100644
---- a/arch/x86/include/asm/elf.h
-+++ b/arch/x86/include/asm/elf.h
-@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
-
- #include <asm/vdso.h>
-
--#ifdef CONFIG_X86_64
--extern unsigned int vdso64_enabled;
--#endif
- #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
- extern unsigned int vdso32_enabled;
- #endif
-@@ -249,7 +246,25 @@ extern int force_personality32;
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-+#ifdef CONFIG_PAX_SEGMEXEC
-+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
-+#else
- #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
-+#endif
-+
-+#ifdef CONFIG_PAX_ASLR
-+#ifdef CONFIG_X86_32
-+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
-+
-+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
-+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
-+#else
-+#define PAX_ELF_ET_DYN_BASE 0x400000UL
-+
-+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
-+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
-+#endif
-+#endif
-
- /* This yields a mask that user programs can use to figure out what
- instruction set this CPU supports. This could be done in user space,
-@@ -298,17 +313,13 @@ do { \
-
- #define ARCH_DLINFO \
- do { \
-- if (vdso64_enabled) \
-- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
-- (unsigned long __force)current->mm->context.vdso); \
-+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
- } while (0)
-
- /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
- #define ARCH_DLINFO_X32 \
- do { \
-- if (vdso64_enabled) \
-- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
-- (unsigned long __force)current->mm->context.vdso); \
-+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
- } while (0)
-
- #define AT_SYSINFO 32
-@@ -323,10 +334,10 @@ else \
-
- #endif /* !CONFIG_X86_32 */
-
--#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
-+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
-
- #define VDSO_ENTRY \
-- ((unsigned long)current->mm->context.vdso + \
-+ (current->mm->context.vdso + \
- selected_vdso32->sym___kernel_vsyscall)
-
- struct linux_binprm;
-@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
- int uses_interp);
- #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
-
--extern unsigned long arch_randomize_brk(struct mm_struct *mm);
--#define arch_randomize_brk arch_randomize_brk
--
- /*
- * True on X86_32 or when emulating IA32 on X86_64
- */
-diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
-index 77a99ac..39ff7f5 100644
---- a/arch/x86/include/asm/emergency-restart.h
-+++ b/arch/x86/include/asm/emergency-restart.h
-@@ -1,6 +1,6 @@
- #ifndef _ASM_X86_EMERGENCY_RESTART_H
- #define _ASM_X86_EMERGENCY_RESTART_H
-
--extern void machine_emergency_restart(void);
-+extern void machine_emergency_restart(void) __noreturn;
-
- #endif /* _ASM_X86_EMERGENCY_RESTART_H */
-diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
-index 1c7eefe..d0e4702 100644
---- a/arch/x86/include/asm/floppy.h
-+++ b/arch/x86/include/asm/floppy.h
-@@ -229,18 +229,18 @@ static struct fd_routine_l {
- int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
- } fd_routine[] = {
- {
-- request_dma,
-- free_dma,
-- get_dma_residue,
-- dma_mem_alloc,
-- hard_dma_setup
-+ ._request_dma = request_dma,
-+ ._free_dma = free_dma,
-+ ._get_dma_residue = get_dma_residue,
-+ ._dma_mem_alloc = dma_mem_alloc,
-+ ._dma_setup = hard_dma_setup
- },
- {
-- vdma_request_dma,
-- vdma_nop,
-- vdma_get_dma_residue,
-- vdma_mem_alloc,
-- vdma_dma_setup
-+ ._request_dma = vdma_request_dma,
-+ ._free_dma = vdma_nop,
-+ ._get_dma_residue = vdma_get_dma_residue,
-+ ._dma_mem_alloc = vdma_mem_alloc,
-+ ._dma_setup = vdma_dma_setup
- }
- };
-
-diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
-index f895358..800c60d 100644
---- a/arch/x86/include/asm/fpu-internal.h
-+++ b/arch/x86/include/asm/fpu-internal.h
-@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
- #define user_insn(insn, output, input...) \
- ({ \
- int err; \
-+ pax_open_userland(); \
- asm volatile(ASM_STAC "\n" \
-- "1:" #insn "\n\t" \
-+ "1:" \
-+ __copyuser_seg \
-+ #insn "\n\t" \
- "2: " ASM_CLAC "\n" \
- ".section .fixup,\"ax\"\n" \
- "3: movl $-1,%[err]\n" \
-@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
- _ASM_EXTABLE(1b, 3b) \
- : [err] "=r" (err), output \
- : "0"(0), input); \
-+ pax_close_userland(); \
- err; \
- })
-
-@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
- "fnclex\n\t"
- "emms\n\t"
- "fildl %P[addr]" /* set F?P to defined value */
-- : : [addr] "m" (tsk->thread.fpu.has_fpu));
-+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
- }
-
- return fpu_restore_checking(&tsk->thread.fpu);
-diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
-index b4c1f54..e290c08 100644
---- a/arch/x86/include/asm/futex.h
-+++ b/arch/x86/include/asm/futex.h
-@@ -12,6 +12,7 @@
- #include <asm/smap.h>
-
- #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
-+ typecheck(u32 __user *, uaddr); \
- asm volatile("\t" ASM_STAC "\n" \
- "1:\t" insn "\n" \
- "2:\t" ASM_CLAC "\n" \
-@@ -20,15 +21,16 @@
- "\tjmp\t2b\n" \
- "\t.previous\n" \
- _ASM_EXTABLE(1b, 3b) \
-- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
-+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
- : "i" (-EFAULT), "0" (oparg), "1" (0))
-
- #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
-+ typecheck(u32 __user *, uaddr); \
- asm volatile("\t" ASM_STAC "\n" \
- "1:\tmovl %2, %0\n" \
- "\tmovl\t%0, %3\n" \
- "\t" insn "\n" \
-- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
-+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
- "\tjnz\t1b\n" \
- "3:\t" ASM_CLAC "\n" \
- "\t.section .fixup,\"ax\"\n" \
-@@ -38,7 +40,7 @@
- _ASM_EXTABLE(1b, 4b) \
- _ASM_EXTABLE(2b, 4b) \
- : "=&a" (oldval), "=&r" (ret), \
-- "+m" (*uaddr), "=&r" (tem) \
-+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
- : "r" (oparg), "i" (-EFAULT), "1" (0))
-
- static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
-@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
-
- pagefault_disable();
-
-+ pax_open_userland();
- switch (op) {
- case FUTEX_OP_SET:
-- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
-+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
- break;
- case FUTEX_OP_ADD:
-- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
-+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
- uaddr, oparg);
- break;
- case FUTEX_OP_OR:
-@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
- default:
- ret = -ENOSYS;
- }
-+ pax_close_userland();
-
- pagefault_enable();
-
-diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
-index 9662290..49ca5e5 100644
---- a/arch/x86/include/asm/hw_irq.h
-+++ b/arch/x86/include/asm/hw_irq.h
-@@ -160,8 +160,8 @@ static inline void unlock_vector_lock(void) {}
- #endif /* CONFIG_X86_LOCAL_APIC */
-
- /* Statistics */
--extern atomic_t irq_err_count;
--extern atomic_t irq_mis_count;
-+extern atomic_unchecked_t irq_err_count;
-+extern atomic_unchecked_t irq_mis_count;
-
- /* EISA */
- extern void eisa_set_level_irq(unsigned int irq);
-diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
-index ccffa53..3c90c87 100644
---- a/arch/x86/include/asm/i8259.h
-+++ b/arch/x86/include/asm/i8259.h
-@@ -62,7 +62,7 @@ struct legacy_pic {
- void (*init)(int auto_eoi);
- int (*irq_pending)(unsigned int irq);
- void (*make_irq)(unsigned int irq);
--};
-+} __do_const;
-
- extern struct legacy_pic *legacy_pic;
- extern struct legacy_pic null_legacy_pic;
-diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
-index 34a5b93..27e40a6 100644
---- a/arch/x86/include/asm/io.h
-+++ b/arch/x86/include/asm/io.h
-@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
- "m" (*(volatile type __force *)addr) barrier); }
-
- build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
--build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
--build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
-+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
-+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
-
- build_mmio_read(__readb, "b", unsigned char, "=q", )
--build_mmio_read(__readw, "w", unsigned short, "=r", )
--build_mmio_read(__readl, "l", unsigned int, "=r", )
-+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
-+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
-