Diffstat (limited to '4.9.9')
-rw-r--r--  4.9.9/0000_README                                        60
-rw-r--r--  4.9.9/1007_linux-4.9.8.patch                           2048
-rw-r--r--  4.9.9/1008_linux-4.9.9.patch                           2333
-rw-r--r--  4.9.9/4420_grsecurity-3.1-4.9.9-201702122044.patch   225786
-rw-r--r--  4.9.9/4425_grsec_remove_EI_PAX.patch                     19
-rw-r--r--  4.9.9/4426_default_XATTR_PAX_FLAGS.patch                 36
-rw-r--r--  4.9.9/4427_force_XATTR_PAX_tmpfs.patch                   48
-rw-r--r--  4.9.9/4430_grsec-remove-localversion-grsec.patch          9
-rw-r--r--  4.9.9/4435_grsec-mute-warnings.patch                     43
-rw-r--r--  4.9.9/4440_grsec-remove-protected-paths.patch            20
-rw-r--r--  4.9.9/4450_grsec-kconfig-default-gids.patch             111
-rw-r--r--  4.9.9/4465_selinux-avc_audit-log-curr_ip.patch           73
-rw-r--r--  4.9.9/4470_disable-compat_vdso.patch                     58
-rw-r--r--  4.9.9/4475_emutramp_default_on.patch                     34
14 files changed, 230678 insertions, 0 deletions
diff --git a/4.9.9/0000_README b/4.9.9/0000_README
new file mode 100644
index 0000000..e2a9385
--- /dev/null
+++ b/4.9.9/0000_README
@@ -0,0 +1,60 @@
+README
+-----------------------------------------------------------------------------
+Individual Patch Descriptions:
+-----------------------------------------------------------------------------
+Patch: 1007_linux-4.9.8.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.8
+
+Patch: 1008_linux-4.9.9.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.9
+
+Patch: 4420_grsecurity-3.1-4.9.9-201702122044.patch
+From: http://www.grsecurity.net
+Desc: hardened-sources base patch from upstream grsecurity
+
+Patch: 4425_grsec_remove_EI_PAX.patch
+From: Anthony G. Basile <blueness@gentoo.org>
+Desc: Remove EI_PAX option and force off
+
+Patch: 4426_default_XATTR_PAX_FLAGS.patch
+From: Anthony G. Basile <blueness@gentoo.org>
+Desc:  Default PT_PAX_FLAGS off and XATTR_PAX_FLAGS on
+
+Patch: 4427_force_XATTR_PAX_tmpfs.patch
+From: Anthony G. Basile <blueness@gentoo.org>
+Desc: Force XATTR_PAX on tmpfs
+
+Patch: 4430_grsec-remove-localversion-grsec.patch
+From: Kerin Millar <kerframil@gmail.com>
+Desc: Removes grsecurity's localversion-grsec file
+
+Patch: 4435_grsec-mute-warnings.patch
+From: Alexander Gabert <gaberta@fh-trier.de>
+ Gordon Malm <gengor@gentoo.org>
+Desc: Removes verbose compile warning settings from grsecurity, restores
+ mainline Linux kernel behavior
+
+Patch: 4440_grsec-remove-protected-paths.patch
+From: Anthony G. Basile <blueness@gentoo.org>
+Desc: Removes chmod statements from grsecurity/Makefile
+
+Patch: 4450_grsec-kconfig-default-gids.patch
+From: Kerin Millar <kerframil@gmail.com>
+Desc: Sets sane(r) default GIDs on various grsecurity group-dependent
+ features
+
+Patch: 4465_selinux-avc_audit-log-curr_ip.patch
+From: Gordon Malm <gengor@gentoo.org>
+ Anthony G. Basile <blueness@gentoo.org>
+Desc: Configurable option to add src IP address to SELinux log messages
+
+Patch: 4470_disable-compat_vdso.patch
+From: Gordon Malm <gengor@gentoo.org>
+ Kerin Millar <kerframil@gmail.com>
+Desc: Disables VDSO_COMPAT operation completely
+
+Patch: 4475_emutramp_default_on.patch
+From: Anthony G. Basile <blueness@gentoo.org>
+Desc: Set PAX_EMUTRAMP default on for libffi, bugs #329499 and #457194
diff --git a/4.9.9/1007_linux-4.9.8.patch b/4.9.9/1007_linux-4.9.8.patch
new file mode 100644
index 0000000..a93aab4
--- /dev/null
+++ b/4.9.9/1007_linux-4.9.8.patch
@@ -0,0 +1,2048 @@
+diff --git a/Makefile b/Makefile
+index da704d9..1130803 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index 25d1eb4..be7ec5a 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -710,11 +710,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+ unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ struct bcm_sysport_cb *cb;
+- struct netdev_queue *txq;
+ u32 hw_ind;
+
+- txq = netdev_get_tx_queue(ndev, ring->index);
+-
+ /* Compute how many descriptors have been processed since last call */
+ hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
+ c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
+@@ -745,9 +742,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+
+ ring->c_index = c_index;
+
+- if (netif_tx_queue_stopped(txq) && pkts_compl)
+- netif_tx_wake_queue(txq);
+-
+ netif_dbg(priv, tx_done, ndev,
+ "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
+ ring->index, ring->c_index, pkts_compl, bytes_compl);
+@@ -759,16 +753,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+ static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_tx_ring *ring)
+ {
++ struct netdev_queue *txq;
+ unsigned int released;
+ unsigned long flags;
+
++ txq = netdev_get_tx_queue(priv->netdev, ring->index);
++
+ spin_lock_irqsave(&ring->lock, flags);
+ released = __bcm_sysport_tx_reclaim(priv, ring);
++ if (released)
++ netif_tx_wake_queue(txq);
++
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return released;
+ }
+
++/* Locked version of the per-ring TX reclaim, but does not wake the queue */
++static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
++ struct bcm_sysport_tx_ring *ring)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ring->lock, flags);
++ __bcm_sysport_tx_reclaim(priv, ring);
++ spin_unlock_irqrestore(&ring->lock, flags);
++}
++
+ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
+ {
+ struct bcm_sysport_tx_ring *ring =
+@@ -1253,7 +1264,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
+ napi_disable(&ring->napi);
+ netif_napi_del(&ring->napi);
+
+- bcm_sysport_tx_reclaim(priv, ring);
++ bcm_sysport_tx_clean(priv, ring);
+
+ kfree(ring->cbs);
+ ring->cbs = NULL;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index fb8bb02..d223e7c 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -1740,8 +1740,11 @@ int mlx4_en_start_port(struct net_device *dev)
+ /* Process all completions if exist to prevent
+ * the queues freezing if they are full
+ */
+- for (i = 0; i < priv->rx_ring_num; i++)
++ for (i = 0; i < priv->rx_ring_num; i++) {
++ local_bh_disable();
+ napi_schedule(&priv->rx_cq[i]->napi);
++ local_bh_enable();
++ }
+
+ netif_tx_start_all_queues(dev);
+ netif_device_attach(dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 33495d8..e7b2158 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -193,6 +193,9 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
+ return false;
+ }
+
++ if (unlikely(page_is_pfmemalloc(dma_info->page)))
++ return false;
++
+ cache->page_cache[cache->tail] = *dma_info;
+ cache->tail = tail_next;
+ return true;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
+index d942a3e..846fd4d 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/pci.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
+@@ -211,21 +211,21 @@ MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
+ /* pci_eqe_cmd_token
+ * Command completion event - token
+ */
+-MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
++MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16);
+
+ /* pci_eqe_cmd_status
+ * Command completion event - status
+ */
+-MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
++MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8);
+
+ /* pci_eqe_cmd_out_param_h
+ * Command completion event - output parameter - higher part
+ */
+-MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
++MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32);
+
+ /* pci_eqe_cmd_out_param_l
+ * Command completion event - output parameter - lower part
+ */
+-MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
++MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32);
+
+ #endif
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index dda5761..f902c4d 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -684,6 +684,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
+ dev_kfree_skb_any(skb_orig);
+ return NETDEV_TX_OK;
+ }
++ dev_consume_skb_any(skb_orig);
+ }
+
+ if (eth_skb_pad(skb)) {
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+index 92bda87..d548f0a 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+@@ -314,6 +314,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
+ dev_kfree_skb_any(skb_orig);
+ return NETDEV_TX_OK;
+ }
++ dev_consume_skb_any(skb_orig);
+ }
+ mlxsw_sx_txhdr_construct(skb, &tx_info);
+ /* TX header is consumed by HW on the way so we shouldn't count its
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index d6a2178..862f18e 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1508,6 +1508,19 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
+ entry / NUM_TX_DESC * DPTR_ALIGN;
+ len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
++ /* Zero length DMA descriptors are problematic as they seem to
++ * terminate DMA transfers. Avoid them by simply using a length of
++ * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
++ *
++ * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
++ * data by the call to skb_put_padto() above this is safe with
++ * respect to both the length of the first DMA descriptor (len)
++ * overflowing the available data and the length of the second DMA
++ * descriptor (skb->len - len) being negative.
++ */
++ if (len == 0)
++ len = DPTR_ALIGN;
++
+ memcpy(buffer, skb->data, len);
+ dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index c9140c3..ff038e5 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -659,6 +659,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
+ * policy filters on the host). Deliver these via the VF
+ * interface in the guest.
+ */
++ rcu_read_lock();
+ vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
+ if (vf_netdev && (vf_netdev->flags & IFF_UP))
+ net = vf_netdev;
+@@ -667,6 +668,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
+ skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
+ if (unlikely(!skb)) {
+ ++net->stats.rx_dropped;
++ rcu_read_unlock();
+ return NVSP_STAT_FAIL;
+ }
+
+@@ -696,6 +698,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
+ * TODO - use NAPI?
+ */
+ netif_rx(skb);
++ rcu_read_unlock();
+
+ return 0;
+ }
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 7869b06..6f38daf 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -827,7 +827,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
+ return -EINVAL;
+
+ ret = virtio_net_hdr_from_skb(skb, &vnet_hdr,
+- macvtap_is_little_endian(q));
++ macvtap_is_little_endian(q), true);
+ if (ret)
+ BUG();
+
+diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
+index e741bf6..b0492ef 100644
+--- a/drivers/net/phy/bcm63xx.c
++++ b/drivers/net/phy/bcm63xx.c
+@@ -21,6 +21,23 @@ MODULE_DESCRIPTION("Broadcom 63xx internal PHY driver");
+ MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+ MODULE_LICENSE("GPL");
+
++static int bcm63xx_config_intr(struct phy_device *phydev)
++{
++ int reg, err;
++
++ reg = phy_read(phydev, MII_BCM63XX_IR);
++ if (reg < 0)
++ return reg;
++
++ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
++ reg &= ~MII_BCM63XX_IR_GMASK;
++ else
++ reg |= MII_BCM63XX_IR_GMASK;
++
++ err = phy_write(phydev, MII_BCM63XX_IR, reg);
++ return err;
++}
++
+ static int bcm63xx_config_init(struct phy_device *phydev)
+ {
+ int reg, err;
+@@ -55,7 +72,7 @@ static struct phy_driver bcm63xx_driver[] = {
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = bcm_phy_ack_intr,
+- .config_intr = bcm_phy_config_intr,
++ .config_intr = bcm63xx_config_intr,
+ }, {
+ /* same phy as above, with just a different OUI */
+ .phy_id = 0x002bdc00,
+@@ -67,7 +84,7 @@ static struct phy_driver bcm63xx_driver[] = {
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = bcm_phy_ack_intr,
+- .config_intr = bcm_phy_config_intr,
++ .config_intr = bcm63xx_config_intr,
+ } };
+
+ module_phy_driver(bcm63xx_driver);
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index db6acec..18402d7 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1374,7 +1374,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ return -EINVAL;
+
+ ret = virtio_net_hdr_from_skb(skb, &gso,
+- tun_is_little_endian(tun));
++ tun_is_little_endian(tun), true);
+ if (ret) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+ pr_err("unexpected GSO type: "
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index dd623f6..b82be81 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -531,6 +531,7 @@ static const struct driver_info wwan_info = {
+ #define SAMSUNG_VENDOR_ID 0x04e8
+ #define LENOVO_VENDOR_ID 0x17ef
+ #define NVIDIA_VENDOR_ID 0x0955
++#define HP_VENDOR_ID 0x03f0
+
+ static const struct usb_device_id products[] = {
+ /* BLACKLIST !!
+@@ -677,6 +678,13 @@ static const struct usb_device_id products[] = {
+ .driver_info = 0,
+ },
+
++/* HP lt2523 (Novatel E371) - handled by qmi_wwan */
++{
++ USB_DEVICE_AND_INTERFACE_INFO(HP_VENDOR_ID, 0x421d, USB_CLASS_COMM,
++ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
++ .driver_info = 0,
++},
++
+ /* AnyDATA ADU960S - handled by qmi_wwan */
+ {
+ USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 6fe1cdb..24d5272 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -654,6 +654,13 @@ static const struct usb_device_id products[] = {
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
++ { /* HP lt2523 (Novatel E371) */
++ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d,
++ USB_CLASS_COMM,
++ USB_CDC_SUBCLASS_ETHERNET,
++ USB_CDC_PROTO_NONE),
++ .driver_info = (unsigned long)&qmi_wwan_info,
++ },
+ { /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
+ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 4b5cb16..90b426c 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -32,7 +32,7 @@
+ #define NETNEXT_VERSION "08"
+
+ /* Information for net */
+-#define NET_VERSION "6"
++#define NET_VERSION "7"
+
+ #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
+ #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
+@@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
+ u8 checksum = CHECKSUM_NONE;
+ u32 opts2, opts3;
+
+- if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02)
++ if (!(tp->netdev->features & NETIF_F_RXCSUM))
+ goto return_result;
+
+ opts2 = le32_to_cpu(rx_desc->opts2);
+@@ -3572,6 +3572,8 @@ static bool delay_autosuspend(struct r8152 *tp)
+ */
+ if (!sw_linking && tp->rtl_ops.in_nway(tp))
+ return true;
++ else if (!skb_queue_empty(&tp->tx_queue))
++ return true;
+ else
+ return false;
+ }
+@@ -4358,6 +4360,11 @@ static int rtl8152_probe(struct usb_interface *intf,
+ NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
+ NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+
++ if (tp->version == RTL_VER_01) {
++ netdev->features &= ~NETIF_F_RXCSUM;
++ netdev->hw_features &= ~NETIF_F_RXCSUM;
++ }
++
+ netdev->ethtool_ops = &ops;
+ netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
+
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index cbf1c61..51fc0c3 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -840,7 +840,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
+ hdr = skb_vnet_hdr(skb);
+
+ if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
+- virtio_is_little_endian(vi->vdev)))
++ virtio_is_little_endian(vi->vdev), false))
+ BUG();
+
+ if (vi->mergeable_rx_bufs)
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 2ba01ca..0fafaa9 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -2887,7 +2887,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
+ memcpy(&vxlan->cfg, conf, sizeof(*conf));
+ if (!vxlan->cfg.dst_port) {
+ if (conf->flags & VXLAN_F_GPE)
+- vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */
++ vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */
+ else
+ vxlan->cfg.dst_port = default_port;
+ }
+diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
+index 5050056..9f06a21 100644
+--- a/fs/xfs/libxfs/xfs_alloc.c
++++ b/fs/xfs/libxfs/xfs_alloc.c
+@@ -95,10 +95,7 @@ unsigned int
+ xfs_alloc_set_aside(
+ struct xfs_mount *mp)
+ {
+- unsigned int blocks;
+-
+- blocks = 4 + (mp->m_sb.sb_agcount * XFS_ALLOC_AGFL_RESERVE);
+- return blocks;
++ return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
+ }
+
+ /*
+@@ -365,36 +362,12 @@ xfs_alloc_fix_len(
+ return;
+ ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
+ ASSERT(rlen % args->prod == args->mod);
++ ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
++ rlen + args->minleft);
+ args->len = rlen;
+ }
+
+ /*
+- * Fix up length if there is too little space left in the a.g.
+- * Return 1 if ok, 0 if too little, should give up.
+- */
+-STATIC int
+-xfs_alloc_fix_minleft(
+- xfs_alloc_arg_t *args) /* allocation argument structure */
+-{
+- xfs_agf_t *agf; /* a.g. freelist header */
+- int diff; /* free space difference */
+-
+- if (args->minleft == 0)
+- return 1;
+- agf = XFS_BUF_TO_AGF(args->agbp);
+- diff = be32_to_cpu(agf->agf_freeblks)
+- - args->len - args->minleft;
+- if (diff >= 0)
+- return 1;
+- args->len += diff; /* shrink the allocated space */
+- /* casts to (int) catch length underflows */
+- if ((int)args->len >= (int)args->minlen)
+- return 1;
+- args->agbno = NULLAGBLOCK;
+- return 0;
+-}
+-
+-/*
+ * Update the two btrees, logically removing from freespace the extent
+ * starting at rbno, rlen blocks. The extent is contained within the
+ * actual (current) free extent fbno for flen blocks.
+@@ -689,8 +662,6 @@ xfs_alloc_ag_vextent(
+ xfs_alloc_arg_t *args) /* argument structure for allocation */
+ {
+ int error=0;
+- xfs_extlen_t reservation;
+- xfs_extlen_t oldmax;
+
+ ASSERT(args->minlen > 0);
+ ASSERT(args->maxlen > 0);
+@@ -699,20 +670,6 @@ xfs_alloc_ag_vextent(
+ ASSERT(args->alignment > 0);
+
+ /*
+- * Clamp maxlen to the amount of free space minus any reservations
+- * that have been made.
+- */
+- oldmax = args->maxlen;
+- reservation = xfs_ag_resv_needed(args->pag, args->resv);
+- if (args->maxlen > args->pag->pagf_freeblks - reservation)
+- args->maxlen = args->pag->pagf_freeblks - reservation;
+- if (args->maxlen == 0) {
+- args->agbno = NULLAGBLOCK;
+- args->maxlen = oldmax;
+- return 0;
+- }
+-
+- /*
+ * Branch to correct routine based on the type.
+ */
+ args->wasfromfl = 0;
+@@ -731,8 +688,6 @@ xfs_alloc_ag_vextent(
+ /* NOTREACHED */
+ }
+
+- args->maxlen = oldmax;
+-
+ if (error || args->agbno == NULLAGBLOCK)
+ return error;
+
+@@ -841,9 +796,6 @@ xfs_alloc_ag_vextent_exact(
+ args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
+ - args->agbno;
+ xfs_alloc_fix_len(args);
+- if (!xfs_alloc_fix_minleft(args))
+- goto not_found;
+-
+ ASSERT(args->agbno + args->len <= tend);
+
+ /*
+@@ -1149,12 +1101,7 @@ xfs_alloc_ag_vextent_near(
+ XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
+ ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
+ args->len = blen;
+- if (!xfs_alloc_fix_minleft(args)) {
+- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+- trace_xfs_alloc_near_nominleft(args);
+- return 0;
+- }
+- blen = args->len;
++
+ /*
+ * We are allocating starting at bnew for blen blocks.
+ */
+@@ -1346,12 +1293,6 @@ xfs_alloc_ag_vextent_near(
+ */
+ args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
+ xfs_alloc_fix_len(args);
+- if (!xfs_alloc_fix_minleft(args)) {
+- trace_xfs_alloc_near_nominleft(args);
+- xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
+- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+- return 0;
+- }
+ rlen = args->len;
+ (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
+ args->datatype, ltbnoa, ltlena, &ltnew);
+@@ -1553,8 +1494,6 @@ xfs_alloc_ag_vextent_size(
+ }
+ xfs_alloc_fix_len(args);
+
+- if (!xfs_alloc_fix_minleft(args))
+- goto out_nominleft;
+ rlen = args->len;
+ XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
+ /*
+@@ -2056,7 +1995,7 @@ xfs_alloc_space_available(
+ int flags)
+ {
+ struct xfs_perag *pag = args->pag;
+- xfs_extlen_t longest;
++ xfs_extlen_t alloc_len, longest;
+ xfs_extlen_t reservation; /* blocks that are still reserved */
+ int available;
+
+@@ -2066,17 +2005,28 @@ xfs_alloc_space_available(
+ reservation = xfs_ag_resv_needed(pag, args->resv);
+
+ /* do we have enough contiguous free space for the allocation? */
++ alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
+ longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free,
+ reservation);
+- if ((args->minlen + args->alignment + args->minalignslop - 1) > longest)
++ if (longest < alloc_len)
+ return false;
+
+ /* do we have enough free space remaining for the allocation? */
+ available = (int)(pag->pagf_freeblks + pag->pagf_flcount -
+- reservation - min_free - args->total);
+- if (available < (int)args->minleft || available <= 0)
++ reservation - min_free - args->minleft);
++ if (available < (int)max(args->total, alloc_len))
+ return false;
+
++ /*
++ * Clamp maxlen to the amount of free space available for the actual
++ * extent allocation.
++ */
++ if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
++ args->maxlen = available;
++ ASSERT(args->maxlen > 0);
++ ASSERT(args->maxlen >= args->minlen);
++ }
++
+ return true;
+ }
+
+@@ -2122,7 +2072,8 @@ xfs_alloc_fix_freelist(
+ }
+
+ need = xfs_alloc_min_freelist(mp, pag);
+- if (!xfs_alloc_space_available(args, need, flags))
++ if (!xfs_alloc_space_available(args, need, flags |
++ XFS_ALLOC_FLAG_CHECK))
+ goto out_agbp_relse;
+
+ /*
+@@ -2638,12 +2589,10 @@ xfs_alloc_vextent(
+ xfs_agblock_t agsize; /* allocation group size */
+ int error;
+ int flags; /* XFS_ALLOC_FLAG_... locking flags */
+- xfs_extlen_t minleft;/* minimum left value, temp copy */
+ xfs_mount_t *mp; /* mount structure pointer */
+ xfs_agnumber_t sagno; /* starting allocation group number */
+ xfs_alloctype_t type; /* input allocation type */
+ int bump_rotor = 0;
+- int no_min = 0;
+ xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
+
+ mp = args->mp;
+@@ -2672,7 +2621,6 @@ xfs_alloc_vextent(
+ trace_xfs_alloc_vextent_badargs(args);
+ return 0;
+ }
+- minleft = args->minleft;
+
+ switch (type) {
+ case XFS_ALLOCTYPE_THIS_AG:
+@@ -2683,9 +2631,7 @@ xfs_alloc_vextent(
+ */
+ args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
+ args->pag = xfs_perag_get(mp, args->agno);
+- args->minleft = 0;
+ error = xfs_alloc_fix_freelist(args, 0);
+- args->minleft = minleft;
+ if (error) {
+ trace_xfs_alloc_vextent_nofix(args);
+ goto error0;
+@@ -2750,9 +2696,7 @@ xfs_alloc_vextent(
+ */
+ for (;;) {
+ args->pag = xfs_perag_get(mp, args->agno);
+- if (no_min) args->minleft = 0;
+ error = xfs_alloc_fix_freelist(args, flags);
+- args->minleft = minleft;
+ if (error) {
+ trace_xfs_alloc_vextent_nofix(args);
+ goto error0;
+@@ -2792,20 +2736,17 @@ xfs_alloc_vextent(
+ * or switch to non-trylock mode.
+ */
+ if (args->agno == sagno) {
+- if (no_min == 1) {
++ if (flags == 0) {
+ args->agbno = NULLAGBLOCK;
+ trace_xfs_alloc_vextent_allfailed(args);
+ break;
+ }
+- if (flags == 0) {
+- no_min = 1;
+- } else {
+- flags = 0;
+- if (type == XFS_ALLOCTYPE_START_BNO) {
+- args->agbno = XFS_FSB_TO_AGBNO(mp,
+- args->fsbno);
+- args->type = XFS_ALLOCTYPE_NEAR_BNO;
+- }
++
++ flags = 0;
++ if (type == XFS_ALLOCTYPE_START_BNO) {
++ args->agbno = XFS_FSB_TO_AGBNO(mp,
++ args->fsbno);
++ args->type = XFS_ALLOCTYPE_NEAR_BNO;
+ }
+ }
+ xfs_perag_put(args->pag);
+diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
+index 7c404a6..1d0f48a 100644
+--- a/fs/xfs/libxfs/xfs_alloc.h
++++ b/fs/xfs/libxfs/xfs_alloc.h
+@@ -56,7 +56,7 @@ typedef unsigned int xfs_alloctype_t;
+ #define XFS_ALLOC_FLAG_FREEING 0x00000002 /* indicate caller is freeing extents*/
+ #define XFS_ALLOC_FLAG_NORMAP 0x00000004 /* don't modify the rmapbt */
+ #define XFS_ALLOC_FLAG_NOSHRINK 0x00000008 /* don't shrink the freelist */
+-
++#define XFS_ALLOC_FLAG_CHECK 0x00000010 /* test only, don't modify args */
+
+ /*
+ * Argument structure for xfs_alloc routines.
+diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
+index af1ecb1..6622d46 100644
+--- a/fs/xfs/libxfs/xfs_attr.c
++++ b/fs/xfs/libxfs/xfs_attr.c
+@@ -131,9 +131,6 @@ xfs_attr_get(
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ return -EIO;
+
+- if (!xfs_inode_hasattr(ip))
+- return -ENOATTR;
+-
+ error = xfs_attr_args_init(&args, ip, name, flags);
+ if (error)
+ return error;
+@@ -392,9 +389,6 @@ xfs_attr_remove(
+ if (XFS_FORCED_SHUTDOWN(dp->i_mount))
+ return -EIO;
+
+- if (!xfs_inode_hasattr(dp))
+- return -ENOATTR;
+-
+ error = xfs_attr_args_init(&args, dp, name, flags);
+ if (error)
+ return error;
+diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
+index 89d727b..f52fd63 100644
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -3720,7 +3720,7 @@ xfs_bmap_btalloc(
+ align = xfs_get_cowextsz_hint(ap->ip);
+ else if (xfs_alloc_is_userdata(ap->datatype))
+ align = xfs_get_extsz_hint(ap->ip);
+- if (unlikely(align)) {
++ if (align) {
+ error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
+ align, 0, ap->eof, 0, ap->conv,
+ &ap->offset, &ap->length);
+@@ -3792,7 +3792,7 @@ xfs_bmap_btalloc(
+ args.minlen = ap->minlen;
+ }
+ /* apply extent size hints if obtained earlier */
+- if (unlikely(align)) {
++ if (align) {
+ args.prod = align;
+ if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
+ args.mod = (xfs_extlen_t)(args.prod - args.mod);
+@@ -3903,7 +3903,6 @@ xfs_bmap_btalloc(
+ args.fsbno = 0;
+ args.type = XFS_ALLOCTYPE_FIRST_AG;
+ args.total = ap->minlen;
+- args.minleft = 0;
+ if ((error = xfs_alloc_vextent(&args)))
+ return error;
+ ap->dfops->dop_low = true;
+@@ -4437,8 +4436,6 @@ xfs_bmapi_allocate(
+ if (error)
+ return error;
+
+- if (bma->dfops->dop_low)
+- bma->minleft = 0;
+ if (bma->cur)
+ bma->cur->bc_private.b.firstblock = *bma->firstblock;
+ if (bma->blkno == NULLFSBLOCK)
+@@ -4610,8 +4607,6 @@ xfs_bmapi_write(
+ int n; /* current extent index */
+ xfs_fileoff_t obno; /* old block number (offset) */
+ int whichfork; /* data or attr fork */
+- char inhole; /* current location is hole in file */
+- char wasdelay; /* old extent was delayed */
+
+ #ifdef DEBUG
+ xfs_fileoff_t orig_bno; /* original block number value */
+@@ -4697,22 +4692,44 @@ xfs_bmapi_write(
+ bma.firstblock = firstblock;
+
+ while (bno < end && n < *nmap) {
+- inhole = eof || bma.got.br_startoff > bno;
+- wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
++ bool need_alloc = false, wasdelay = false;
+
+- /*
+- * Make sure we only reflink into a hole.
+- */
+- if (flags & XFS_BMAPI_REMAP)
+- ASSERT(inhole);
+- if (flags & XFS_BMAPI_COWFORK)
+- ASSERT(!inhole);
++		/* in hole or beyond EOF? */
++ if (eof || bma.got.br_startoff > bno) {
++ if (flags & XFS_BMAPI_DELALLOC) {
++ /*
++ * For the COW fork we can reasonably get a
++ * request for converting an extent that races
++ * with other threads already having converted
++ * part of it, as there converting COW to
++ * regular blocks is not protected using the
++ * IOLOCK.
++ */
++ ASSERT(flags & XFS_BMAPI_COWFORK);
++ if (!(flags & XFS_BMAPI_COWFORK)) {
++ error = -EIO;
++ goto error0;
++ }
++
++ if (eof || bno >= end)
++ break;
++ } else {
++ need_alloc = true;
++ }
++ } else {
++ /*
++ * Make sure we only reflink into a hole.
++ */
++ ASSERT(!(flags & XFS_BMAPI_REMAP));
++ if (isnullstartblock(bma.got.br_startblock))
++ wasdelay = true;
++ }
+
+ /*
+ * First, deal with the hole before the allocated space
+ * that we found, if any.
+ */
+- if (inhole || wasdelay) {
++ if (need_alloc || wasdelay) {
+ bma.eof = eof;
+ bma.conv = !!(flags & XFS_BMAPI_CONVERT);
+ bma.wasdel = wasdelay;
+diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
+index d6d175a..e7d40b3 100644
+--- a/fs/xfs/libxfs/xfs_bmap.h
++++ b/fs/xfs/libxfs/xfs_bmap.h
+@@ -110,6 +110,9 @@ struct xfs_extent_free_item
+ /* Map something in the CoW fork. */
+ #define XFS_BMAPI_COWFORK 0x200
+
++/* Only convert delalloc space, don't allocate entirely new extents */
++#define XFS_BMAPI_DELALLOC 0x400
++
+ #define XFS_BMAPI_FLAGS \
+ { XFS_BMAPI_ENTIRE, "ENTIRE" }, \
+ { XFS_BMAPI_METADATA, "METADATA" }, \
+@@ -120,7 +123,8 @@ struct xfs_extent_free_item
+ { XFS_BMAPI_CONVERT, "CONVERT" }, \
+ { XFS_BMAPI_ZERO, "ZERO" }, \
+ { XFS_BMAPI_REMAP, "REMAP" }, \
+- { XFS_BMAPI_COWFORK, "COWFORK" }
++ { XFS_BMAPI_COWFORK, "COWFORK" }, \
++ { XFS_BMAPI_DELALLOC, "DELALLOC" }
+
+
+ static inline int xfs_bmapi_aflag(int w)
+diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
+index 049fa59..f76c169 100644
+--- a/fs/xfs/libxfs/xfs_bmap_btree.c
++++ b/fs/xfs/libxfs/xfs_bmap_btree.c
+@@ -502,12 +502,11 @@ xfs_bmbt_alloc_block(
+ if (args.fsbno == NULLFSBLOCK && args.minleft) {
+ /*
+ * Could not find an AG with enough free space to satisfy
+- * a full btree split. Try again without minleft and if
++ * a full btree split. Try again and if
+ * successful activate the lowspace algorithm.
+ */
+ args.fsbno = 0;
+ args.type = XFS_ALLOCTYPE_FIRST_AG;
+- args.minleft = 0;
+ error = xfs_alloc_vextent(&args);
+ if (error)
+ goto error0;
+diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
+index 20a96dd..7825d78 100644
+--- a/fs/xfs/libxfs/xfs_dir2.c
++++ b/fs/xfs/libxfs/xfs_dir2.c
+@@ -36,21 +36,29 @@
+ struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR };
+
+ /*
+- * @mode, if set, indicates that the type field needs to be set up.
+- * This uses the transformation from file mode to DT_* as defined in linux/fs.h
+- * for file type specification. This will be propagated into the directory
+- * structure if appropriate for the given operation and filesystem config.
++ * Convert inode mode to directory entry filetype
+ */
+-const unsigned char xfs_mode_to_ftype[S_IFMT >> S_SHIFT] = {
+- [0] = XFS_DIR3_FT_UNKNOWN,
+- [S_IFREG >> S_SHIFT] = XFS_DIR3_FT_REG_FILE,
+- [S_IFDIR >> S_SHIFT] = XFS_DIR3_FT_DIR,
+- [S_IFCHR >> S_SHIFT] = XFS_DIR3_FT_CHRDEV,
+- [S_IFBLK >> S_SHIFT] = XFS_DIR3_FT_BLKDEV,
+- [S_IFIFO >> S_SHIFT] = XFS_DIR3_FT_FIFO,
+- [S_IFSOCK >> S_SHIFT] = XFS_DIR3_FT_SOCK,
+- [S_IFLNK >> S_SHIFT] = XFS_DIR3_FT_SYMLINK,
+-};
++unsigned char xfs_mode_to_ftype(int mode)
++{
++ switch (mode & S_IFMT) {
++ case S_IFREG:
++ return XFS_DIR3_FT_REG_FILE;
++ case S_IFDIR:
++ return XFS_DIR3_FT_DIR;
++ case S_IFCHR:
++ return XFS_DIR3_FT_CHRDEV;
++ case S_IFBLK:
++ return XFS_DIR3_FT_BLKDEV;
++ case S_IFIFO:
++ return XFS_DIR3_FT_FIFO;
++ case S_IFSOCK:
++ return XFS_DIR3_FT_SOCK;
++ case S_IFLNK:
++ return XFS_DIR3_FT_SYMLINK;
++ default:
++ return XFS_DIR3_FT_UNKNOWN;
++ }
++}
+
+ /*
+ * ASCII case-insensitive (ie. A-Z) support for directories that was
+@@ -631,7 +639,8 @@ xfs_dir2_isblock(
+ if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK)))
+ return rval;
+ rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize;
+- ASSERT(rval == 0 || args->dp->i_d.di_size == args->geo->blksize);
++ if (rval != 0 && args->dp->i_d.di_size != args->geo->blksize)
++ return -EFSCORRUPTED;
+ *vp = rval;
+ return 0;
+ }
+diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
+index becc926..ae0d55b 100644
+--- a/fs/xfs/libxfs/xfs_dir2.h
++++ b/fs/xfs/libxfs/xfs_dir2.h
+@@ -18,6 +18,9 @@
+ #ifndef __XFS_DIR2_H__
+ #define __XFS_DIR2_H__
+
++#include "xfs_da_format.h"
++#include "xfs_da_btree.h"
++
+ struct xfs_defer_ops;
+ struct xfs_da_args;
+ struct xfs_inode;
+@@ -32,10 +35,9 @@ struct xfs_dir2_data_unused;
+ extern struct xfs_name xfs_name_dotdot;
+
+ /*
+- * directory filetype conversion tables.
++ * Convert inode mode to directory entry filetype
+ */
+-#define S_SHIFT 12
+-extern const unsigned char xfs_mode_to_ftype[];
++extern unsigned char xfs_mode_to_ftype(int mode);
+
+ /*
+ * directory operations vector for encode/decode routines
+diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
+index c906e50..37ee7f0 100644
+--- a/fs/xfs/libxfs/xfs_inode_buf.c
++++ b/fs/xfs/libxfs/xfs_inode_buf.c
+@@ -29,6 +29,7 @@
+ #include "xfs_icache.h"
+ #include "xfs_trans.h"
+ #include "xfs_ialloc.h"
++#include "xfs_dir2.h"
+
+ /*
+ * Check that none of the inode's in the buffer have a next
+@@ -386,6 +387,7 @@ xfs_dinode_verify(
+ struct xfs_inode *ip,
+ struct xfs_dinode *dip)
+ {
++ uint16_t mode;
+ uint16_t flags;
+ uint64_t flags2;
+
+@@ -396,8 +398,12 @@ xfs_dinode_verify(
+ if (be64_to_cpu(dip->di_size) & (1ULL << 63))
+ return false;
+
+- /* No zero-length symlinks. */
+- if (S_ISLNK(be16_to_cpu(dip->di_mode)) && dip->di_size == 0)
++ mode = be16_to_cpu(dip->di_mode);
++ if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
++ return false;
++
++ /* No zero-length symlinks/dirs. */
++ if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0)
+ return false;
+
+ /* only version 3 or greater inodes are extensively verified here */
+diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
+index 2580262..584ec89 100644
+--- a/fs/xfs/libxfs/xfs_sb.c
++++ b/fs/xfs/libxfs/xfs_sb.c
+@@ -242,7 +242,7 @@ xfs_mount_validate_sb(
+ sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG ||
+ sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
+ sbp->sb_blocksize != (1 << sbp->sb_blocklog) ||
+- sbp->sb_dirblklog > XFS_MAX_BLOCKSIZE_LOG ||
++ sbp->sb_dirblklog + sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
+ sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
+ sbp->sb_inodesize > XFS_DINODE_MAX_SIZE ||
+ sbp->sb_inodelog < XFS_DINODE_MIN_LOG ||
+diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
+index 0670a8b..efb8ccd 100644
+--- a/fs/xfs/xfs_bmap_util.c
++++ b/fs/xfs/xfs_bmap_util.c
+@@ -528,7 +528,6 @@ xfs_getbmap(
+ xfs_bmbt_irec_t *map; /* buffer for user's data */
+ xfs_mount_t *mp; /* file system mount point */
+ int nex; /* # of user extents can do */
+- int nexleft; /* # of user extents left */
+ int subnex; /* # of bmapi's can do */
+ int nmap; /* number of map entries */
+ struct getbmapx *out; /* output structure */
+@@ -686,10 +685,8 @@ xfs_getbmap(
+ goto out_free_map;
+ }
+
+- nexleft = nex;
+-
+ do {
+- nmap = (nexleft > subnex) ? subnex : nexleft;
++ nmap = (nex> subnex) ? subnex : nex;
+ error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
+ XFS_BB_TO_FSB(mp, bmv->bmv_length),
+ map, &nmap, bmapi_flags);
+@@ -697,8 +694,8 @@ xfs_getbmap(
+ goto out_free_map;
+ ASSERT(nmap <= subnex);
+
+- for (i = 0; i < nmap && nexleft && bmv->bmv_length &&
+- cur_ext < bmv->bmv_count; i++) {
++ for (i = 0; i < nmap && bmv->bmv_length &&
++ cur_ext < bmv->bmv_count - 1; i++) {
+ out[cur_ext].bmv_oflags = 0;
+ if (map[i].br_state == XFS_EXT_UNWRITTEN)
+ out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
+@@ -760,16 +757,27 @@ xfs_getbmap(
+ continue;
+ }
+
++ /*
++ * In order to report shared extents accurately,
++ * we report each distinct shared/unshared part
++ * of a single bmbt record using multiple bmap
++ * extents. To make that happen, we iterate the
++ * same map array item multiple times, each
++ * time trimming out the subextent that we just
++ * reported.
++ *
++ * Because of this, we must check the out array
++ * index (cur_ext) directly against bmv_count-1
++ * to avoid overflows.
++ */
+ if (inject_map.br_startblock != NULLFSBLOCK) {
+ map[i] = inject_map;
+ i--;
+- } else
+- nexleft--;
++ }
+ bmv->bmv_entries++;
+ cur_ext++;
+ }
+- } while (nmap && nexleft && bmv->bmv_length &&
+- cur_ext < bmv->bmv_count);
++ } while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);
+
+ out_free_map:
+ kmem_free(map);
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
+index b5b9bff..d7a67d7 100644
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -423,6 +423,7 @@ xfs_buf_allocate_memory(
+ out_free_pages:
+ for (i = 0; i < bp->b_page_count; i++)
+ __free_page(bp->b_pages[i]);
++ bp->b_flags &= ~_XBF_PAGES;
+ return error;
+ }
+
+diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
+index 7a30b8f..9d06cc3 100644
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -710,6 +710,10 @@ xfs_dq_get_next_id(
+ /* Simple advance */
+ next_id = *id + 1;
+
++ /* If we'd wrap past the max ID, stop */
++ if (next_id < *id)
++ return -ENOENT;
++
+ /* If new ID is within the current chunk, advancing it sufficed */
+ if (next_id % mp->m_quotainfo->qi_dqperchunk) {
+ *id = next_id;
+diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
+index 15a83813..cdc6bdd 100644
+--- a/fs/xfs/xfs_iomap.c
++++ b/fs/xfs/xfs_iomap.c
+@@ -681,7 +681,7 @@ xfs_iomap_write_allocate(
+ xfs_trans_t *tp;
+ int nimaps;
+ int error = 0;
+- int flags = 0;
++ int flags = XFS_BMAPI_DELALLOC;
+ int nres;
+
+ if (whichfork == XFS_COW_FORK)
+diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
+index 405a65c..f5e0f60 100644
+--- a/fs/xfs/xfs_iops.c
++++ b/fs/xfs/xfs_iops.c
+@@ -98,12 +98,27 @@ xfs_init_security(
+ static void
+ xfs_dentry_to_name(
+ struct xfs_name *namep,
++ struct dentry *dentry)
++{
++ namep->name = dentry->d_name.name;
++ namep->len = dentry->d_name.len;
++ namep->type = XFS_DIR3_FT_UNKNOWN;
++}
++
++static int
++xfs_dentry_mode_to_name(
++ struct xfs_name *namep,
+ struct dentry *dentry,
+ int mode)
+ {
+ namep->name = dentry->d_name.name;
+ namep->len = dentry->d_name.len;
+- namep->type = xfs_mode_to_ftype[(mode & S_IFMT) >> S_SHIFT];
++ namep->type = xfs_mode_to_ftype(mode);
++
++ if (unlikely(namep->type == XFS_DIR3_FT_UNKNOWN))
++ return -EFSCORRUPTED;
++
++ return 0;
+ }
+
+ STATIC void
+@@ -119,7 +134,7 @@ xfs_cleanup_inode(
+ * xfs_init_security we must back out.
+ * ENOSPC can hit here, among other things.
+ */
+- xfs_dentry_to_name(&teardown, dentry, 0);
++ xfs_dentry_to_name(&teardown, dentry);
+
+ xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
+ }
+@@ -154,8 +169,12 @@ xfs_generic_create(
+ if (error)
+ return error;
+
++ /* Verify mode is valid also for tmpfile case */
++ error = xfs_dentry_mode_to_name(&name, dentry, mode);
++ if (unlikely(error))
++ goto out_free_acl;
++
+ if (!tmpfile) {
+- xfs_dentry_to_name(&name, dentry, mode);
+ error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
+ } else {
+ error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
+@@ -248,7 +267,7 @@ xfs_vn_lookup(
+ if (dentry->d_name.len >= MAXNAMELEN)
+ return ERR_PTR(-ENAMETOOLONG);
+
+- xfs_dentry_to_name(&name, dentry, 0);
++ xfs_dentry_to_name(&name, dentry);
+ error = xfs_lookup(XFS_I(dir), &name, &cip, NULL);
+ if (unlikely(error)) {
+ if (unlikely(error != -ENOENT))
+@@ -275,7 +294,7 @@ xfs_vn_ci_lookup(
+ if (dentry->d_name.len >= MAXNAMELEN)
+ return ERR_PTR(-ENAMETOOLONG);
+
+- xfs_dentry_to_name(&xname, dentry, 0);
++ xfs_dentry_to_name(&xname, dentry);
+ error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name);
+ if (unlikely(error)) {
+ if (unlikely(error != -ENOENT))
+@@ -310,7 +329,9 @@ xfs_vn_link(
+ struct xfs_name name;
+ int error;
+
+- xfs_dentry_to_name(&name, dentry, inode->i_mode);
++ error = xfs_dentry_mode_to_name(&name, dentry, inode->i_mode);
++ if (unlikely(error))
++ return error;
+
+ error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
+ if (unlikely(error))
+@@ -329,7 +350,7 @@ xfs_vn_unlink(
+ struct xfs_name name;
+ int error;
+
+- xfs_dentry_to_name(&name, dentry, 0);
++ xfs_dentry_to_name(&name, dentry);
+
+ error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry)));
+ if (error)
+@@ -359,7 +380,9 @@ xfs_vn_symlink(
+
+ mode = S_IFLNK |
+ (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
+- xfs_dentry_to_name(&name, dentry, mode);
++ error = xfs_dentry_mode_to_name(&name, dentry, mode);
++ if (unlikely(error))
++ goto out;
+
+ error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
+ if (unlikely(error))
+@@ -395,6 +418,7 @@ xfs_vn_rename(
+ {
+ struct inode *new_inode = d_inode(ndentry);
+ int omode = 0;
++ int error;
+ struct xfs_name oname;
+ struct xfs_name nname;
+
+@@ -405,8 +429,14 @@ xfs_vn_rename(
+ if (flags & RENAME_EXCHANGE)
+ omode = d_inode(ndentry)->i_mode;
+
+- xfs_dentry_to_name(&oname, odentry, omode);
+- xfs_dentry_to_name(&nname, ndentry, d_inode(odentry)->i_mode);
++ error = xfs_dentry_mode_to_name(&oname, odentry, omode);
++ if (omode && unlikely(error))
++ return error;
++
++ error = xfs_dentry_mode_to_name(&nname, ndentry,
++ d_inode(odentry)->i_mode);
++ if (unlikely(error))
++ return error;
+
+ return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)),
+ XFS_I(ndir), &nname,
+diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
+index 68640fb..1455b2520 100644
+--- a/fs/xfs/xfs_linux.h
++++ b/fs/xfs/xfs_linux.h
+@@ -330,11 +330,11 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
+ }
+
+ #define ASSERT_ALWAYS(expr) \
+- (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
++ (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+
+ #ifdef DEBUG
+ #define ASSERT(expr) \
+- (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
++ (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+
+ #ifndef STATIC
+ # define STATIC noinline
+@@ -345,7 +345,7 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
+ #ifdef XFS_WARN
+
+ #define ASSERT(expr) \
+- (unlikely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
++ (likely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
+
+ #ifndef STATIC
+ # define STATIC static noinline
+diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
+index 3b74fa0..4017aa9 100644
+--- a/fs/xfs/xfs_log.c
++++ b/fs/xfs/xfs_log.c
+@@ -3324,12 +3324,8 @@ xfs_log_force(
+ xfs_mount_t *mp,
+ uint flags)
+ {
+- int error;
+-
+ trace_xfs_log_force(mp, 0, _RET_IP_);
+- error = _xfs_log_force(mp, flags, NULL);
+- if (error)
+- xfs_warn(mp, "%s: error %d returned.", __func__, error);
++ _xfs_log_force(mp, flags, NULL);
+ }
+
+ /*
+@@ -3473,12 +3469,8 @@ xfs_log_force_lsn(
+ xfs_lsn_t lsn,
+ uint flags)
+ {
+- int error;
+-
+ trace_xfs_log_force(mp, lsn, _RET_IP_);
+- error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
+- if (error)
+- xfs_warn(mp, "%s: error %d returned.", __func__, error);
++ _xfs_log_force_lsn(mp, lsn, flags, NULL);
+ }
+
+ /*
+diff --git a/include/linux/tcp.h b/include/linux/tcp.h
+index a17ae7b..647532b 100644
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -62,8 +62,13 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
+
+ /* TCP Fast Open Cookie as stored in memory */
+ struct tcp_fastopen_cookie {
++ union {
++ u8 val[TCP_FASTOPEN_COOKIE_MAX];
++#if IS_ENABLED(CONFIG_IPV6)
++ struct in6_addr addr;
++#endif
++ };
+ s8 len;
+- u8 val[TCP_FASTOPEN_COOKIE_MAX];
+ bool exp; /* In RFC6994 experimental option format */
+ };
+
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index 1c912f8..f211c34 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -56,7 +56,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+
+ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
+ struct virtio_net_hdr *hdr,
+- bool little_endian)
++ bool little_endian,
++ bool has_data_valid)
+ {
+ memset(hdr, 0, sizeof(*hdr));
+
+@@ -91,7 +92,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
+ skb_checksum_start_offset(skb));
+ hdr->csum_offset = __cpu_to_virtio16(little_endian,
+ skb->csum_offset);
+- } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
++ } else if (has_data_valid &&
++ skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
+ } /* else everything is zero */
+
+diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
+index ea3f80f..fc7c0db 100644
+--- a/include/net/lwtunnel.h
++++ b/include/net/lwtunnel.h
+@@ -43,13 +43,12 @@ struct lwtunnel_encap_ops {
+ int (*get_encap_size)(struct lwtunnel_state *lwtstate);
+ int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
+ int (*xmit)(struct sk_buff *skb);
++
++ struct module *owner;
+ };
+
+ #ifdef CONFIG_LWTUNNEL
+-static inline void lwtstate_free(struct lwtunnel_state *lws)
+-{
+- kfree(lws);
+-}
++void lwtstate_free(struct lwtunnel_state *lws);
+
+ static inline struct lwtunnel_state *
+ lwtstate_get(struct lwtunnel_state *lws)
+@@ -106,6 +105,8 @@ int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num);
+ int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num);
++int lwtunnel_valid_encap_type(u16 encap_type);
++int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len);
+ int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+ struct nlattr *encap,
+ unsigned int family, const void *cfg,
+@@ -169,6 +170,15 @@ static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+ return -EOPNOTSUPP;
+ }
+
++static inline int lwtunnel_valid_encap_type(u16 encap_type)
++{
++ return -EOPNOTSUPP;
++}
++static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len)
++{
++ return -EOPNOTSUPP;
++}
++
+ static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+ struct nlattr *encap,
+ unsigned int family, const void *cfg,
+diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
+index 655a7d4..983f0b5 100644
+--- a/net/ax25/ax25_subr.c
++++ b/net/ax25/ax25_subr.c
+@@ -264,7 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
+ {
+ ax25_clear_queues(ax25);
+
+- if (!sock_flag(ax25->sk, SOCK_DESTROY))
++ if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
+ ax25_stop_heartbeat(ax25);
+ ax25_stop_t1timer(ax25);
+ ax25_stop_t2timer(ax25);
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
+index e99037c..0474106 100644
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -781,20 +781,6 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
+ return 0;
+ }
+
+-static int br_dev_newlink(struct net *src_net, struct net_device *dev,
+- struct nlattr *tb[], struct nlattr *data[])
+-{
+- struct net_bridge *br = netdev_priv(dev);
+-
+- if (tb[IFLA_ADDRESS]) {
+- spin_lock_bh(&br->lock);
+- br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
+- spin_unlock_bh(&br->lock);
+- }
+-
+- return register_netdevice(dev);
+-}
+-
+ static int br_port_slave_changelink(struct net_device *brdev,
+ struct net_device *dev,
+ struct nlattr *tb[],
+@@ -1093,6 +1079,25 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
+ return 0;
+ }
+
++static int br_dev_newlink(struct net *src_net, struct net_device *dev,
++ struct nlattr *tb[], struct nlattr *data[])
++{
++ struct net_bridge *br = netdev_priv(dev);
++ int err;
++
++ if (tb[IFLA_ADDRESS]) {
++ spin_lock_bh(&br->lock);
++ br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
++ spin_unlock_bh(&br->lock);
++ }
++
++ err = br_changelink(dev, tb, data);
++ if (err)
++ return err;
++
++ return register_netdevice(dev);
++}
++
+ static size_t br_get_size(const struct net_device *brdev)
+ {
+ return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */
+diff --git a/net/core/dev.c b/net/core/dev.c
+index e1d731f..df51c50 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2815,9 +2815,9 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
+ if (skb->ip_summed != CHECKSUM_NONE &&
+ !can_checksum_protocol(features, type)) {
+ features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+- } else if (illegal_highdma(skb->dev, skb)) {
+- features &= ~NETIF_F_SG;
+ }
++ if (illegal_highdma(skb->dev, skb))
++ features &= ~NETIF_F_SG;
+
+ return features;
+ }
+diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
+index e5f84c2..afa64f0 100644
+--- a/net/core/lwtunnel.c
++++ b/net/core/lwtunnel.c
+@@ -26,6 +26,7 @@
+ #include <net/lwtunnel.h>
+ #include <net/rtnetlink.h>
+ #include <net/ip6_fib.h>
++#include <net/nexthop.h>
+
+ #ifdef CONFIG_MODULES
+
+@@ -65,6 +66,15 @@ EXPORT_SYMBOL(lwtunnel_state_alloc);
+ static const struct lwtunnel_encap_ops __rcu *
+ lwtun_encaps[LWTUNNEL_ENCAP_MAX + 1] __read_mostly;
+
++void lwtstate_free(struct lwtunnel_state *lws)
++{
++ const struct lwtunnel_encap_ops *ops = lwtun_encaps[lws->type];
++
++ kfree(lws);
++ module_put(ops->owner);
++}
++EXPORT_SYMBOL(lwtstate_free);
++
+ int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *ops,
+ unsigned int num)
+ {
+@@ -110,25 +120,77 @@ int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+ ret = -EOPNOTSUPP;
+ rcu_read_lock();
+ ops = rcu_dereference(lwtun_encaps[encap_type]);
++ if (likely(ops && ops->build_state && try_module_get(ops->owner))) {
++ ret = ops->build_state(dev, encap, family, cfg, lws);
++ if (ret)
++ module_put(ops->owner);
++ }
++ rcu_read_unlock();
++
++ return ret;
++}
++EXPORT_SYMBOL(lwtunnel_build_state);
++
++int lwtunnel_valid_encap_type(u16 encap_type)
++{
++ const struct lwtunnel_encap_ops *ops;
++ int ret = -EINVAL;
++
++ if (encap_type == LWTUNNEL_ENCAP_NONE ||
++ encap_type > LWTUNNEL_ENCAP_MAX)
++ return ret;
++
++ rcu_read_lock();
++ ops = rcu_dereference(lwtun_encaps[encap_type]);
++ rcu_read_unlock();
+ #ifdef CONFIG_MODULES
+ if (!ops) {
+ const char *encap_type_str = lwtunnel_encap_str(encap_type);
+
+ if (encap_type_str) {
+- rcu_read_unlock();
++ __rtnl_unlock();
+ request_module("rtnl-lwt-%s", encap_type_str);
++ rtnl_lock();
++
+ rcu_read_lock();
+ ops = rcu_dereference(lwtun_encaps[encap_type]);
++ rcu_read_unlock();
+ }
+ }
+ #endif
+- if (likely(ops && ops->build_state))
+- ret = ops->build_state(dev, encap, family, cfg, lws);
+- rcu_read_unlock();
++ return ops ? 0 : -EOPNOTSUPP;
++}
++EXPORT_SYMBOL(lwtunnel_valid_encap_type);
+
+- return ret;
++int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
++{
++ struct rtnexthop *rtnh = (struct rtnexthop *)attr;
++ struct nlattr *nla_entype;
++ struct nlattr *attrs;
++ struct nlattr *nla;
++ u16 encap_type;
++ int attrlen;
++
++ while (rtnh_ok(rtnh, remaining)) {
++ attrlen = rtnh_attrlen(rtnh);
++ if (attrlen > 0) {
++ attrs = rtnh_attrs(rtnh);
++ nla = nla_find(attrs, attrlen, RTA_ENCAP);
++ nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
++
++ if (nla_entype) {
++ encap_type = nla_get_u16(nla_entype);
++
++ if (lwtunnel_valid_encap_type(encap_type) != 0)
++ return -EOPNOTSUPP;
++ }
++ }
++ rtnh = rtnh_next(rtnh, &remaining);
++ }
++
++ return 0;
+ }
+-EXPORT_SYMBOL(lwtunnel_build_state);
++EXPORT_SYMBOL(lwtunnel_valid_encap_type_attr);
+
+ int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate)
+ {
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index 30e2e21..3ff9d97 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -1201,6 +1201,8 @@ int dsa_slave_suspend(struct net_device *slave_dev)
+ {
+ struct dsa_slave_priv *p = netdev_priv(slave_dev);
+
++ netif_device_detach(slave_dev);
++
+ if (p->phy) {
+ phy_stop(p->phy);
+ p->old_pause = -1;
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 3e4f183..5b03d7f 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -46,6 +46,7 @@
+ #include <net/rtnetlink.h>
+ #include <net/xfrm.h>
+ #include <net/l3mdev.h>
++#include <net/lwtunnel.h>
+ #include <trace/events/fib.h>
+
+ #ifndef CONFIG_IP_MULTIPLE_TABLES
+@@ -676,6 +677,10 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
+ cfg->fc_mx_len = nla_len(attr);
+ break;
+ case RTA_MULTIPATH:
++ err = lwtunnel_valid_encap_type_attr(nla_data(attr),
++ nla_len(attr));
++ if (err < 0)
++ goto errout;
+ cfg->fc_mp = nla_data(attr);
+ cfg->fc_mp_len = nla_len(attr);
+ break;
+@@ -690,6 +695,9 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
+ break;
+ case RTA_ENCAP_TYPE:
+ cfg->fc_encap_type = nla_get_u16(attr);
++ err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
++ if (err < 0)
++ goto errout;
+ break;
+ }
+ }
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index a8508b7..6a40680 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -1278,8 +1278,9 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
+ nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
+ goto nla_put_failure;
+ #endif
+- if (fi->fib_nh->nh_lwtstate)
+- lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate);
++ if (fi->fib_nh->nh_lwtstate &&
++ lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate) < 0)
++ goto nla_put_failure;
+ }
+ #ifdef CONFIG_IP_ROUTE_MULTIPATH
+ if (fi->fib_nhs > 1) {
+@@ -1315,8 +1316,10 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
+ nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
+ goto nla_put_failure;
+ #endif
+- if (nh->nh_lwtstate)
+- lwtunnel_fill_encap(skb, nh->nh_lwtstate);
++ if (nh->nh_lwtstate &&
++ lwtunnel_fill_encap(skb, nh->nh_lwtstate) < 0)
++ goto nla_put_failure;
++
+ /* length of rtnetlink header + attributes */
+ rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
+ } endfor_nexthops(fi);
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index fed3d29..0fd1976 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -313,6 +313,7 @@ static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
+ .fill_encap = ip_tun_fill_encap_info,
+ .get_encap_size = ip_tun_encap_nlsize,
+ .cmp_encap = ip_tun_cmp_encap,
++ .owner = THIS_MODULE,
+ };
+
+ static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
+@@ -403,6 +404,7 @@ static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
+ .fill_encap = ip6_tun_fill_encap_info,
+ .get_encap_size = ip6_tun_encap_nlsize,
+ .cmp_encap = ip_tun_cmp_encap,
++ .owner = THIS_MODULE,
+ };
+
+ void __init ip_tunnel_core_init(void)
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 8197b06..d851cae 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2440,7 +2440,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
+ r->rtm_dst_len = 32;
+ r->rtm_src_len = 0;
+ r->rtm_tos = fl4->flowi4_tos;
+- r->rtm_table = table_id;
++ r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
+ if (nla_put_u32(skb, RTA_TABLE, table_id))
+ goto nla_put_failure;
+ r->rtm_type = rt->rt_type;
+diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
+index 4e777a3..dd2560c 100644
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -113,7 +113,7 @@ static bool tcp_fastopen_cookie_gen(struct request_sock *req,
+ struct tcp_fastopen_cookie tmp;
+
+ if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
+- struct in6_addr *buf = (struct in6_addr *) tmp.val;
++ struct in6_addr *buf = &tmp.addr;
+ int i;
+
+ for (i = 0; i < 4; i++)
+@@ -205,6 +205,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
+ * scaled. So correct it appropriately.
+ */
+ tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
++ tp->max_window = tp->snd_wnd;
+
+ /* Activate the retrans timer so that SYNACK can be retransmitted.
+ * The request socket is not added to the ehash
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 4bc5ba3..95dfcba 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -5515,8 +5515,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
+ struct net_device *dev;
+ struct inet6_dev *idev;
+
+- rcu_read_lock();
+- for_each_netdev_rcu(net, dev) {
++ for_each_netdev(net, dev) {
+ idev = __in6_dev_get(dev);
+ if (idev) {
+ int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
+@@ -5525,7 +5524,6 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
+ dev_disable_change(idev);
+ }
+ }
+- rcu_read_unlock();
+ }
+
+ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
+diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
+index e50c27a..f3db364 100644
+--- a/net/ipv6/ila/ila_lwt.c
++++ b/net/ipv6/ila/ila_lwt.c
+@@ -164,6 +164,7 @@ static const struct lwtunnel_encap_ops ila_encap_ops = {
+ .fill_encap = ila_fill_encap_info,
+ .get_encap_size = ila_encap_nlsize,
+ .cmp_encap = ila_encap_cmp,
++ .owner = THIS_MODULE,
+ };
+
+ int ila_lwt_init(void)
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index d76674e..f95437f 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1108,7 +1108,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+ t->parms.name);
+ goto tx_err_dst_release;
+ }
+- mtu = dst_mtu(dst) - psh_hlen;
++ mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen;
+ if (encap_limit >= 0) {
+ max_headroom += 8;
+ mtu -= 8;
+@@ -1117,7 +1117,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+ mtu = IPV6_MIN_MTU;
+ if (skb_dst(skb) && !t->parms.collect_md)
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+- if (skb->len > mtu && !skb_is_gso(skb)) {
++ if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) {
+ *pmtu = mtu;
+ err = -EMSGSIZE;
+ goto tx_err_dst_release;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 1b57e11..bff4460 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2885,6 +2885,11 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
+ if (tb[RTA_MULTIPATH]) {
+ cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
+ cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
++
++ err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
++ cfg->fc_mp_len);
++ if (err < 0)
++ goto errout;
+ }
+
+ if (tb[RTA_PREF]) {
+@@ -2898,9 +2903,14 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
+ if (tb[RTA_ENCAP])
+ cfg->fc_encap = tb[RTA_ENCAP];
+
+- if (tb[RTA_ENCAP_TYPE])
++ if (tb[RTA_ENCAP_TYPE]) {
+ cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
+
++ err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
++ if (err < 0)
++ goto errout;
++ }
++
+ if (tb[RTA_EXPIRES]) {
+ unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
+
+@@ -3306,7 +3316,8 @@ static int rt6_fill_node(struct net *net,
+ if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
+ goto nla_put_failure;
+
+- lwtunnel_fill_encap(skb, rt->dst.lwtstate);
++ if (lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
++ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+ return 0;
+diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
+index 15fe976..5b77377 100644
+--- a/net/mpls/af_mpls.c
++++ b/net/mpls/af_mpls.c
+@@ -98,18 +98,19 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
+ }
+ EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
+
+-static u32 mpls_multipath_hash(struct mpls_route *rt,
+- struct sk_buff *skb, bool bos)
++static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
+ {
+ struct mpls_entry_decoded dec;
++ unsigned int mpls_hdr_len = 0;
+ struct mpls_shim_hdr *hdr;
+ bool eli_seen = false;
+ int label_index;
+ u32 hash = 0;
+
+- for (label_index = 0; label_index < MAX_MP_SELECT_LABELS && !bos;
++ for (label_index = 0; label_index < MAX_MP_SELECT_LABELS;
+ label_index++) {
+- if (!pskb_may_pull(skb, sizeof(*hdr) * label_index))
++ mpls_hdr_len += sizeof(*hdr);
++ if (!pskb_may_pull(skb, mpls_hdr_len))
+ break;
+
+ /* Read and decode the current label */
+@@ -134,37 +135,38 @@ static u32 mpls_multipath_hash(struct mpls_route *rt,
+ eli_seen = true;
+ }
+
+- bos = dec.bos;
+- if (bos && pskb_may_pull(skb, sizeof(*hdr) * label_index +
+- sizeof(struct iphdr))) {
++ if (!dec.bos)
++ continue;
++
++ /* found bottom label; does skb have room for a header? */
++ if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
+ const struct iphdr *v4hdr;
+
+- v4hdr = (const struct iphdr *)(mpls_hdr(skb) +
+- label_index);
++ v4hdr = (const struct iphdr *)(hdr + 1);
+ if (v4hdr->version == 4) {
+ hash = jhash_3words(ntohl(v4hdr->saddr),
+ ntohl(v4hdr->daddr),
+ v4hdr->protocol, hash);
+ } else if (v4hdr->version == 6 &&
+- pskb_may_pull(skb, sizeof(*hdr) * label_index +
+- sizeof(struct ipv6hdr))) {
++ pskb_may_pull(skb, mpls_hdr_len +
++ sizeof(struct ipv6hdr))) {
+ const struct ipv6hdr *v6hdr;
+
+- v6hdr = (const struct ipv6hdr *)(mpls_hdr(skb) +
+- label_index);
+-
++ v6hdr = (const struct ipv6hdr *)(hdr + 1);
+ hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
+ hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
+ hash = jhash_1word(v6hdr->nexthdr, hash);
+ }
+ }
++
++ break;
+ }
+
+ return hash;
+ }
+
+ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
+- struct sk_buff *skb, bool bos)
++ struct sk_buff *skb)
+ {
+ int alive = ACCESS_ONCE(rt->rt_nhn_alive);
+ u32 hash = 0;
+@@ -180,7 +182,7 @@ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
+ if (alive <= 0)
+ return NULL;
+
+- hash = mpls_multipath_hash(rt, skb, bos);
++ hash = mpls_multipath_hash(rt, skb);
+ nh_index = hash % alive;
+ if (alive == rt->rt_nhn)
+ goto out;
+@@ -278,17 +280,11 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
+ hdr = mpls_hdr(skb);
+ dec = mpls_entry_decode(hdr);
+
+- /* Pop the label */
+- skb_pull(skb, sizeof(*hdr));
+- skb_reset_network_header(skb);
+-
+- skb_orphan(skb);
+-
+ rt = mpls_route_input_rcu(net, dec.label);
+ if (!rt)
+ goto drop;
+
+- nh = mpls_select_multipath(rt, skb, dec.bos);
++ nh = mpls_select_multipath(rt, skb);
+ if (!nh)
+ goto drop;
+
+@@ -297,6 +293,12 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
+ if (!mpls_output_possible(out_dev))
+ goto drop;
+
++ /* Pop the label */
++ skb_pull(skb, sizeof(*hdr));
++ skb_reset_network_header(skb);
++
++ skb_orphan(skb);
++
+ if (skb_warn_if_lro(skb))
+ goto drop;
+
+diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
+index cf52cf3..bc9aaf5 100644
+--- a/net/mpls/mpls_iptunnel.c
++++ b/net/mpls/mpls_iptunnel.c
+@@ -218,6 +218,7 @@ static const struct lwtunnel_encap_ops mpls_iptun_ops = {
+ .fill_encap = mpls_fill_encap_info,
+ .get_encap_size = mpls_encap_nlsize,
+ .cmp_encap = mpls_encap_cmp,
++ .owner = THIS_MODULE,
+ };
+
+ static int __init mpls_iptunnel_init(void)
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index fecefa2..eab210b 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -514,7 +514,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
+ int hooknum, nh_off, err = NF_ACCEPT;
+
+ nh_off = skb_network_offset(skb);
+- skb_pull(skb, nh_off);
++ skb_pull_rcsum(skb, nh_off);
+
+ /* See HOOK2MANIP(). */
+ if (maniptype == NF_NAT_MANIP_SRC)
+@@ -579,6 +579,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
+ err = nf_nat_packet(ct, ctinfo, hooknum, skb);
+ push:
+ skb_push(skb, nh_off);
++ skb_postpush_rcsum(skb, skb->data, nh_off);
+
+ return err;
+ }
+@@ -890,7 +891,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
+
+ /* The conntrack module expects to be working at L3. */
+ nh_ofs = skb_network_offset(skb);
+- skb_pull(skb, nh_ofs);
++ skb_pull_rcsum(skb, nh_ofs);
+
+ if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
+ err = handle_fragments(net, key, info->zone.id, skb);
+@@ -904,6 +905,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
+ err = ovs_ct_lookup(net, key, info, skb);
+
+ skb_push(skb, nh_ofs);
++ skb_postpush_rcsum(skb, skb->data, nh_ofs);
+ if (err)
+ kfree_skb(skb);
+ return err;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index dd23323..94e4a59 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1972,7 +1972,7 @@ static int __packet_rcv_vnet(const struct sk_buff *skb,
+ {
+ *vnet_hdr = (const struct virtio_net_hdr) { 0 };
+
+- if (virtio_net_hdr_from_skb(skb, vnet_hdr, vio_le()))
++ if (virtio_net_hdr_from_skb(skb, vnet_hdr, vio_le(), true))
+ BUG();
+
+ return 0;
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index f893d18..c6c2a93 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -903,8 +903,6 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
+ goto err;
+ }
+ act->order = i;
+- if (event == RTM_GETACTION)
+- act->tcfa_refcnt++;
+ list_add_tail(&act->list, &actions);
+ }
+
+@@ -917,7 +915,8 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
+ return ret;
+ }
+ err:
+- tcf_action_destroy(&actions, 0);
++ if (event != RTM_GETACTION)
++ tcf_action_destroy(&actions, 0);
+ return ret;
+ }
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 2358f26..2d03d5b 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -995,6 +995,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ unsigned int hash;
+ struct unix_address *addr;
+ struct hlist_head *list;
++ struct path path = { NULL, NULL };
+
+ err = -EINVAL;
+ if (sunaddr->sun_family != AF_UNIX)
+@@ -1010,9 +1011,20 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ goto out;
+ addr_len = err;
+
++ if (sun_path[0]) {
++ umode_t mode = S_IFSOCK |
++ (SOCK_INODE(sock)->i_mode & ~current_umask());
++ err = unix_mknod(sun_path, mode, &path);
++ if (err) {
++ if (err == -EEXIST)
++ err = -EADDRINUSE;
++ goto out;
++ }
++ }
++
+ err = mutex_lock_interruptible(&u->bindlock);
+ if (err)
+- goto out;
++ goto out_put;
+
+ err = -EINVAL;
+ if (u->addr)
+@@ -1029,16 +1041,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ atomic_set(&addr->refcnt, 1);
+
+ if (sun_path[0]) {
+- struct path path;
+- umode_t mode = S_IFSOCK |
+- (SOCK_INODE(sock)->i_mode & ~current_umask());
+- err = unix_mknod(sun_path, mode, &path);
+- if (err) {
+- if (err == -EEXIST)
+- err = -EADDRINUSE;
+- unix_release_addr(addr);
+- goto out_up;
+- }
+ addr->hash = UNIX_HASH_SIZE;
+ hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+ spin_lock(&unix_table_lock);
+@@ -1065,6 +1067,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ spin_unlock(&unix_table_lock);
+ out_up:
+ mutex_unlock(&u->bindlock);
++out_put:
++ if (err)
++ path_put(&path);
+ out:
+ return err;
+ }
diff --git a/4.9.9/1008_linux-4.9.9.patch b/4.9.9/1008_linux-4.9.9.patch
new file mode 100644
index 0000000..411ce9b
--- /dev/null
+++ b/4.9.9/1008_linux-4.9.9.patch
@@ -0,0 +1,2333 @@
+diff --git a/Makefile b/Makefile
+index 1130803..c0c41c9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
+index c53dbea..838dad5 100644
+--- a/arch/arm64/crypto/aes-modes.S
++++ b/arch/arm64/crypto/aes-modes.S
+@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
+ cbz w6, .Lcbcencloop
+
+ ld1 {v0.16b}, [x5] /* get iv */
+- enc_prepare w3, x2, x5
++ enc_prepare w3, x2, x6
+
+ .Lcbcencloop:
+ ld1 {v1.16b}, [x1], #16 /* get next pt block */
+ eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */
+- encrypt_block v0, w3, x2, x5, w6
++ encrypt_block v0, w3, x2, x6, w7
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
+ bne .Lcbcencloop
++ st1 {v0.16b}, [x5] /* return iv */
+ ret
+ AES_ENDPROC(aes_cbc_encrypt)
+
+@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
+ cbz w6, .LcbcdecloopNx
+
+ ld1 {v7.16b}, [x5] /* get iv */
+- dec_prepare w3, x2, x5
++ dec_prepare w3, x2, x6
+
+ .LcbcdecloopNx:
+ #if INTERLEAVE >= 2
+@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
+ .Lcbcdecloop:
+ ld1 {v1.16b}, [x1], #16 /* get next ct block */
+ mov v0.16b, v1.16b /* ...and copy to v0 */
+- decrypt_block v0, w3, x2, x5, w6
++ decrypt_block v0, w3, x2, x6, w7
+ eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
+ mov v7.16b, v1.16b /* ct is next iv */
+ st1 {v0.16b}, [x0], #16
+@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
+ bne .Lcbcdecloop
+ .Lcbcdecout:
+ FRAME_POP
++ st1 {v7.16b}, [x5] /* return iv */
+ ret
+ AES_ENDPROC(aes_cbc_decrypt)
+
+@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)
+
+ AES_ENTRY(aes_ctr_encrypt)
+ FRAME_PUSH
+- cbnz w6, .Lctrfirst /* 1st time around? */
+- umov x5, v4.d[1] /* keep swabbed ctr in reg */
+- rev x5, x5
+-#if INTERLEAVE >= 2
+- cmn w5, w4 /* 32 bit overflow? */
+- bcs .Lctrinc
+- add x5, x5, #1 /* increment BE ctr */
+- b .LctrincNx
+-#else
+- b .Lctrinc
+-#endif
+-.Lctrfirst:
++ cbz w6, .Lctrnotfirst /* 1st time around? */
+ enc_prepare w3, x2, x6
+ ld1 {v4.16b}, [x5]
+- umov x5, v4.d[1] /* keep swabbed ctr in reg */
+- rev x5, x5
++
++.Lctrnotfirst:
++ umov x8, v4.d[1] /* keep swabbed ctr in reg */
++ rev x8, x8
+ #if INTERLEAVE >= 2
+- cmn w5, w4 /* 32 bit overflow? */
++ cmn w8, w4 /* 32 bit overflow? */
+ bcs .Lctrloop
+ .LctrloopNx:
+ subs w4, w4, #INTERLEAVE
+@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
+ #if INTERLEAVE == 2
+ mov v0.8b, v4.8b
+ mov v1.8b, v4.8b
+- rev x7, x5
+- add x5, x5, #1
++ rev x7, x8
++ add x8, x8, #1
+ ins v0.d[1], x7
+- rev x7, x5
+- add x5, x5, #1
++ rev x7, x8
++ add x8, x8, #1
+ ins v1.d[1], x7
+ ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */
+ do_encrypt_block2x
+@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
+ st1 {v0.16b-v1.16b}, [x0], #32
+ #else
+ ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
+- dup v7.4s, w5
++ dup v7.4s, w8
+ mov v0.16b, v4.16b
+ add v7.4s, v7.4s, v8.4s
+ mov v1.16b, v4.16b
+@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
+ eor v2.16b, v7.16b, v2.16b
+ eor v3.16b, v5.16b, v3.16b
+ st1 {v0.16b-v3.16b}, [x0], #64
+- add x5, x5, #INTERLEAVE
++ add x8, x8, #INTERLEAVE
+ #endif
+- cbz w4, .LctroutNx
+-.LctrincNx:
+- rev x7, x5
++ rev x7, x8
+ ins v4.d[1], x7
++ cbz w4, .Lctrout
+ b .LctrloopNx
+-.LctroutNx:
+- sub x5, x5, #1
+- rev x7, x5
+- ins v4.d[1], x7
+- b .Lctrout
+ .Lctr1x:
+ adds w4, w4, #INTERLEAVE
+ beq .Lctrout
+@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
+ .Lctrloop:
+ mov v0.16b, v4.16b
+ encrypt_block v0, w3, x2, x6, w7
++
++ adds x8, x8, #1 /* increment BE ctr */
++ rev x7, x8
++ ins v4.d[1], x7
++ bcs .Lctrcarry /* overflow? */
++
++.Lctrcarrydone:
+ subs w4, w4, #1
+ bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */
+ ld1 {v3.16b}, [x1], #16
+ eor v3.16b, v0.16b, v3.16b
+ st1 {v3.16b}, [x0], #16
+- beq .Lctrout
+-.Lctrinc:
+- adds x5, x5, #1 /* increment BE ctr */
+- rev x7, x5
+- ins v4.d[1], x7
+- bcc .Lctrloop /* no overflow? */
+- umov x7, v4.d[0] /* load upper word of ctr */
+- rev x7, x7 /* ... to handle the carry */
+- add x7, x7, #1
+- rev x7, x7
+- ins v4.d[0], x7
+- b .Lctrloop
++ bne .Lctrloop
++
++.Lctrout:
++ st1 {v4.16b}, [x5] /* return next CTR value */
++ FRAME_POP
++ ret
++
+ .Lctrhalfblock:
+ ld1 {v3.8b}, [x1]
+ eor v3.8b, v0.8b, v3.8b
+ st1 {v3.8b}, [x0]
+-.Lctrout:
+ FRAME_POP
+ ret
++
++.Lctrcarry:
++ umov x7, v4.d[0] /* load upper word of ctr */
++ rev x7, x7 /* ... to handle the carry */
++ add x7, x7, #1
++ rev x7, x7
++ ins v4.d[0], x7
++ b .Lctrcarrydone
+ AES_ENDPROC(aes_ctr_encrypt)
+ .ltorg
+
+diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
+index b312b15..6e834ca 100644
+--- a/arch/powerpc/include/asm/cpu_has_feature.h
++++ b/arch/powerpc/include/asm/cpu_has_feature.h
+@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
+ {
+ int i;
+
++#ifndef __clang__ /* clang can't cope with this */
+ BUILD_BUG_ON(!__builtin_constant_p(feature));
++#endif
+
+ #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
+ if (!static_key_initialized) {
+diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
+index e311c25..a244e09 100644
+--- a/arch/powerpc/include/asm/mmu.h
++++ b/arch/powerpc/include/asm/mmu.h
+@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
+ {
+ int i;
+
++#ifndef __clang__ /* clang can't cope with this */
+ BUILD_BUG_ON(!__builtin_constant_p(feature));
++#endif
+
+ #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
+ if (!static_key_initialized) {
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index 5c31369..a5dd493 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
+ static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
+ {
+ struct eeh_pe *pe = (struct eeh_pe *)data;
+- bool *clear_sw_state = flag;
++ bool clear_sw_state = *(bool *)flag;
+ int i, rc = 1;
+
+ for (i = 0; rc && i < 3; i++)
+diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
+index 88ac964..1e8c572 100644
+--- a/arch/powerpc/kernel/prom_init.c
++++ b/arch/powerpc/kernel/prom_init.c
+@@ -2747,6 +2747,9 @@ static void __init prom_find_boot_cpu(void)
+
+ cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
+
++ if (!PHANDLE_VALID(cpu_pkg))
++ return;
++
+ prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
+ prom.cpu = be32_to_cpu(rval);
+
+diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
+index ebb7f46..9a25dce 100644
+--- a/arch/powerpc/mm/pgtable-radix.c
++++ b/arch/powerpc/mm/pgtable-radix.c
+@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
+ if (!pmdp)
+ return -ENOMEM;
+ if (map_page_size == PMD_SIZE) {
+- ptep = (pte_t *)pudp;
++ ptep = pmdp_ptep(pmdp);
+ goto set_the_pte;
+ }
+ ptep = pte_alloc_kernel(pmdp, ea);
+@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
+ }
+ pmdp = pmd_offset(pudp, ea);
+ if (map_page_size == PMD_SIZE) {
+- ptep = (pte_t *)pudp;
++ ptep = pmdp_ptep(pmdp);
+ goto set_the_pte;
+ }
+ if (!pmd_present(*pmdp)) {
+diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
+index dbaaf7dc..19d646a 100644
+--- a/arch/x86/events/intel/uncore.c
++++ b/arch/x86/events/intel/uncore.c
+@@ -763,30 +763,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
+ pmu->registered = false;
+ }
+
+-static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
+-{
+- struct intel_uncore_pmu *pmu = type->pmus;
+- struct intel_uncore_box *box;
+- int i, pkg;
+-
+- if (pmu) {
+- pkg = topology_physical_package_id(cpu);
+- for (i = 0; i < type->num_boxes; i++, pmu++) {
+- box = pmu->boxes[pkg];
+- if (box)
+- uncore_box_exit(box);
+- }
+- }
+-}
+-
+-static void uncore_exit_boxes(void *dummy)
+-{
+- struct intel_uncore_type **types;
+-
+- for (types = uncore_msr_uncores; *types; types++)
+- __uncore_exit_boxes(*types++, smp_processor_id());
+-}
+-
+ static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
+ {
+ int pkg;
+@@ -1077,22 +1053,12 @@ static int uncore_cpu_dying(unsigned int cpu)
+ return 0;
+ }
+
+-static int first_init;
+-
+ static int uncore_cpu_starting(unsigned int cpu)
+ {
+ struct intel_uncore_type *type, **types = uncore_msr_uncores;
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box;
+- int i, pkg, ncpus = 1;
+-
+- if (first_init) {
+- /*
+- * On init we get the number of online cpus in the package
+- * and set refcount for all of them.
+- */
+- ncpus = cpumask_weight(topology_core_cpumask(cpu));
+- }
++ int i, pkg;
+
+ pkg = topology_logical_package_id(cpu);
+ for (; *types; types++) {
+@@ -1103,7 +1069,7 @@ static int uncore_cpu_starting(unsigned int cpu)
+ if (!box)
+ continue;
+ /* The first cpu on a package activates the box */
+- if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
++ if (atomic_inc_return(&box->refcnt) == 1)
+ uncore_box_init(box);
+ }
+ }
+@@ -1407,19 +1373,17 @@ static int __init intel_uncore_init(void)
+ "PERF_X86_UNCORE_PREP",
+ uncore_cpu_prepare, NULL);
+ }
+- first_init = 1;
++
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
+ "AP_PERF_X86_UNCORE_STARTING",
+ uncore_cpu_starting, uncore_cpu_dying);
+- first_init = 0;
++
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+ "AP_PERF_X86_UNCORE_ONLINE",
+ uncore_event_cpu_online, uncore_event_cpu_offline);
+ return 0;
+
+ err:
+- /* Undo box->init_box() */
+- on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
+ uncore_types_exit(uncore_msr_uncores);
+ uncore_pci_exit();
+ return ret;
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 3d8ff40..7249f15 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -2118,6 +2118,7 @@ static inline void __init check_timer(void)
+ if (idx != -1 && irq_trigger(idx))
+ unmask_ioapic_irq(irq_get_chip_data(0));
+ }
++ irq_domain_deactivate_irq(irq_data);
+ irq_domain_activate_irq(irq_data);
+ if (timer_irq_works()) {
+ if (disable_timer_pin_1 > 0)
+@@ -2139,6 +2140,7 @@ static inline void __init check_timer(void)
+ * legacy devices should be connected to IO APIC #0
+ */
+ replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
++ irq_domain_deactivate_irq(irq_data);
+ irq_domain_activate_irq(irq_data);
+ legacy_pic->unmask(0);
+ if (timer_irq_works()) {
+diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
+index 274fab9..932348fb 100644
+--- a/arch/x86/kernel/hpet.c
++++ b/arch/x86/kernel/hpet.c
+@@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
+ } else {
+ struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+
++ irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
+ irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
+ disable_irq(hdev->irq);
+ irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 487b957..731044e 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3148,6 +3148,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
+ memcpy(dest, xsave, XSAVE_HDR_OFFSET);
+
+ /* Set XSTATE_BV */
++ xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
+ *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
+
+ /*
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index 319148b..2f25a36 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -269,6 +269,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+ efi_scratch.use_pgd = true;
+
+ /*
++ * Certain firmware versions are way too sentimential and still believe
++ * they are exclusive and unquestionable owners of the first physical page,
++ * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
++ * (but then write-access it later during SetVirtualAddressMap()).
++ *
++ * Create a 1:1 mapping for this page, to avoid triple faults during early
++ * boot with such firmware. We are free to hand this page to the BIOS,
++ * as trim_bios_range() will reserve the first page and isolate it away
++ * from memory allocators anyway.
++ */
++ if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
++ pr_err("Failed to create 1:1 mapping for the first page!\n");
++ return 1;
++ }
++
++ /*
+ * When making calls to the firmware everything needs to be 1:1
+ * mapped and addressable with 32-bit pointers. Map the kernel
+ * text and allocate a new stack because we can't rely on the
+diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
+index 88a044a..32cdc2c 100644
+--- a/arch/xtensa/kernel/setup.c
++++ b/arch/xtensa/kernel/setup.c
+@@ -540,7 +540,7 @@ subsys_initcall(topology_init);
+
+ void cpu_reset(void)
+ {
+-#if XCHAL_HAVE_PTP_MMU
++#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
+ local_irq_disable();
+ /*
+ * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index df939b5..1fad2a6 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_alg *alg)
+ struct crypto_larval *larval;
+ int err;
+
++ alg->cra_flags &= ~CRYPTO_ALG_DEAD;
+ err = crypto_check_alg(alg);
+ if (err)
+ return err;
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 223a770..33e363d 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -1695,6 +1695,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
+
+ if (qc->err_mask & ~AC_ERR_OTHER)
+ qc->err_mask &= ~AC_ERR_OTHER;
++ } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
++ qc->result_tf.command |= ATA_SENSE;
+ }
+
+ /* finish up */
+@@ -4316,10 +4318,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
+
+ /*
+- * Device times out with higher max sects.
++ * These devices time out with higher max sects.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=121671
+ */
+- { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
++ { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
+
+ /* Devices we expect to fail diagnostics */
+
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index 823e938..2f32782 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -4132,6 +4132,9 @@ static int mv_platform_probe(struct platform_device *pdev)
+ host->iomap = NULL;
+ hpriv->base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
++ if (!hpriv->base)
++ return -ENOMEM;
++
+ hpriv->base -= SATAHC0_REG_BASE;
+
+ hpriv->clk = clk_get(&pdev->dev, NULL);
+diff --git a/drivers/base/memory.c b/drivers/base/memory.c
+index e7f86a8..c5cdd19 100644
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -391,33 +391,33 @@ static ssize_t show_valid_zones(struct device *dev,
+ {
+ struct memory_block *mem = to_memory_block(dev);
+ unsigned long start_pfn, end_pfn;
++ unsigned long valid_start, valid_end, valid_pages;
+ unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+- struct page *first_page;
+ struct zone *zone;
+ int zone_shift = 0;
+
+ start_pfn = section_nr_to_pfn(mem->start_section_nr);
+ end_pfn = start_pfn + nr_pages;
+- first_page = pfn_to_page(start_pfn);
+
+ /* The block contains more than one zone can not be offlined. */
+- if (!test_pages_in_a_zone(start_pfn, end_pfn))
++ if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
+ return sprintf(buf, "none\n");
+
+- zone = page_zone(first_page);
++ zone = page_zone(pfn_to_page(valid_start));
++ valid_pages = valid_end - valid_start;
+
+ /* MMOP_ONLINE_KEEP */
+ sprintf(buf, "%s", zone->name);
+
+ /* MMOP_ONLINE_KERNEL */
+- zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
++ zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
+ if (zone_shift) {
+ strcat(buf, " ");
+ strcat(buf, (zone + zone_shift)->name);
+ }
+
+ /* MMOP_ONLINE_MOVABLE */
+- zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
++ zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
+ if (zone_shift) {
+ strcat(buf, " ");
+ strcat(buf, (zone + zone_shift)->name);
+diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
+index f642c42..168fa17 100644
+--- a/drivers/bcma/bcma_private.h
++++ b/drivers/bcma/bcma_private.h
+@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus);
+ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
+ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
+ void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
++#ifdef CONFIG_BCMA_DRIVER_MIPS
++void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
++#endif /* CONFIG_BCMA_DRIVER_MIPS */
+
+ /* driver_chipcommon_b.c */
+ int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
+diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
+index b4f6520..62f5bfa 100644
+--- a/drivers/bcma/driver_chipcommon.c
++++ b/drivers/bcma/driver_chipcommon.c
+@@ -15,8 +15,6 @@
+ #include <linux/platform_device.h>
+ #include <linux/bcma/bcma.h>
+
+-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+-
+ static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
+ u32 mask, u32 value)
+ {
+@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
+ if (cc->capabilities & BCMA_CC_CAP_PMU)
+ bcma_pmu_early_init(cc);
+
+- if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
+- bcma_chipco_serial_init(cc);
+-
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC)
+ bcma_core_chipcommon_flash_detect(cc);
+
+@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
+ return res;
+ }
+
+-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
++#ifdef CONFIG_BCMA_DRIVER_MIPS
++void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+ {
+-#if IS_BUILTIN(CONFIG_BCM47XX)
+ unsigned int irq;
+ u32 baud_base;
+ u32 i;
+@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+ ports[i].baud_base = baud_base;
+ ports[i].reg_shift = 0;
+ }
+-#endif /* CONFIG_BCM47XX */
+ }
++#endif /* CONFIG_BCMA_DRIVER_MIPS */
+diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
+index 96f1713..89af807 100644
+--- a/drivers/bcma/driver_mips.c
++++ b/drivers/bcma/driver_mips.c
+@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
+
+ void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
+ {
++ struct bcma_bus *bus = mcore->core->bus;
++
+ if (mcore->early_setup_done)
+ return;
+
++ bcma_chipco_serial_init(&bus->drv_cc);
+ bcma_core_mips_nvram_init(mcore);
+
+ mcore->early_setup_done = true;
+diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
+index d5ba43a..55c1782 100644
+--- a/drivers/dma/cppi41.c
++++ b/drivers/dma/cppi41.c
+@@ -153,6 +153,8 @@ struct cppi41_dd {
+
+ /* context for suspend/resume */
+ unsigned int dma_tdfdq;
++
++ bool is_suspended;
+ };
+
+ #define FIST_COMPLETION_QUEUE 93
+@@ -257,6 +259,10 @@ static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
+ BUG_ON(desc_num >= ALLOC_DECS_NUM);
+ c = cdd->chan_busy[desc_num];
+ cdd->chan_busy[desc_num] = NULL;
++
++ /* Usecount for chan_busy[], paired with push_desc_queue() */
++ pm_runtime_put(cdd->ddev.dev);
++
+ return c;
+ }
+
+@@ -447,6 +453,15 @@ static void push_desc_queue(struct cppi41_channel *c)
+ */
+ __iowmb();
+
++ /*
++ * DMA transfers can take at least 200ms to complete with USB mass
++ * storage connected. To prevent autosuspend timeouts, we must use
++ * pm_runtime_get/put() when chan_busy[] is modified. This will get
++ * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
++ * outcome of the transfer.
++ */
++ pm_runtime_get(cdd->ddev.dev);
++
+ desc_phys = lower_32_bits(c->desc_phys);
+ desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
+ WARN_ON(cdd->chan_busy[desc_num]);
+@@ -457,20 +472,26 @@ static void push_desc_queue(struct cppi41_channel *c)
+ cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
+ }
+
+-static void pending_desc(struct cppi41_channel *c)
++/*
++ * Caller must hold cdd->lock to prevent push_desc_queue()
++ * getting called out of order. We have both cppi41_dma_issue_pending()
++ * and cppi41_runtime_resume() call this function.
++ */
++static void cppi41_run_queue(struct cppi41_dd *cdd)
+ {
+- struct cppi41_dd *cdd = c->cdd;
+- unsigned long flags;
++ struct cppi41_channel *c, *_c;
+
+- spin_lock_irqsave(&cdd->lock, flags);
+- list_add_tail(&c->node, &cdd->pending);
+- spin_unlock_irqrestore(&cdd->lock, flags);
++ list_for_each_entry_safe(c, _c, &cdd->pending, node) {
++ push_desc_queue(c);
++ list_del(&c->node);
++ }
+ }
+
+ static void cppi41_dma_issue_pending(struct dma_chan *chan)
+ {
+ struct cppi41_channel *c = to_cpp41_chan(chan);
+ struct cppi41_dd *cdd = c->cdd;
++ unsigned long flags;
+ int error;
+
+ error = pm_runtime_get(cdd->ddev.dev);
+@@ -482,10 +503,11 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
+ return;
+ }
+
+- if (likely(pm_runtime_active(cdd->ddev.dev)))
+- push_desc_queue(c);
+- else
+- pending_desc(c);
++ spin_lock_irqsave(&cdd->lock, flags);
++ list_add_tail(&c->node, &cdd->pending);
++ if (!cdd->is_suspended)
++ cppi41_run_queue(cdd);
++ spin_unlock_irqrestore(&cdd->lock, flags);
+
+ pm_runtime_mark_last_busy(cdd->ddev.dev);
+ pm_runtime_put_autosuspend(cdd->ddev.dev);
+@@ -705,6 +727,9 @@ static int cppi41_stop_chan(struct dma_chan *chan)
+ WARN_ON(!cdd->chan_busy[desc_num]);
+ cdd->chan_busy[desc_num] = NULL;
+
++ /* Usecount for chan_busy[], paired with push_desc_queue() */
++ pm_runtime_put(cdd->ddev.dev);
++
+ return 0;
+ }
+
+@@ -1150,8 +1175,12 @@ static int __maybe_unused cppi41_resume(struct device *dev)
+ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
+ {
+ struct cppi41_dd *cdd = dev_get_drvdata(dev);
++ unsigned long flags;
+
++ spin_lock_irqsave(&cdd->lock, flags);
++ cdd->is_suspended = true;
+ WARN_ON(!list_empty(&cdd->pending));
++ spin_unlock_irqrestore(&cdd->lock, flags);
+
+ return 0;
+ }
+@@ -1159,14 +1188,11 @@ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
+ static int __maybe_unused cppi41_runtime_resume(struct device *dev)
+ {
+ struct cppi41_dd *cdd = dev_get_drvdata(dev);
+- struct cppi41_channel *c, *_c;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cdd->lock, flags);
+- list_for_each_entry_safe(c, _c, &cdd->pending, node) {
+- push_desc_queue(c);
+- list_del(&c->node);
+- }
++ cdd->is_suspended = false;
++ cppi41_run_queue(cdd);
+ spin_unlock_irqrestore(&cdd->lock, flags);
+
+ return 0;
+diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
+index 921dfa0..260c4b4 100644
+--- a/drivers/firmware/efi/libstub/fdt.c
++++ b/drivers/firmware/efi/libstub/fdt.c
+@@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
+ struct exit_boot_struct {
+ efi_memory_desc_t *runtime_map;
+ int *runtime_entry_count;
++ void *new_fdt_addr;
+ };
+
+ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
+@@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
+ efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
+ p->runtime_map, p->runtime_entry_count);
+
+- return EFI_SUCCESS;
++ return update_fdt_memmap(p->new_fdt_addr, map);
+ }
+
+ /*
+@@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
+
+ priv.runtime_map = runtime_map;
+ priv.runtime_entry_count = &runtime_entry_count;
++ priv.new_fdt_addr = (void *)*new_fdt_addr;
+ status = efi_exit_boot_services(sys_table, handle, &map, &priv,
+ exit_boot_func);
+
+ if (status == EFI_SUCCESS) {
+ efi_set_virtual_address_map_t *svam;
+
+- status = update_fdt_memmap((void *)*new_fdt_addr, &map);
+- if (status != EFI_SUCCESS) {
+- /*
+- * The kernel won't get far without the memory map, but
+- * may still be able to print something meaningful so
+- * return success here.
+- */
+- return EFI_SUCCESS;
+- }
+-
+ /* Install the new virtual address map */
+ svam = sys_table->runtime->set_virtual_address_map;
+ status = svam(runtime_entry_count * desc_size, desc_size,
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+index b13c8aa..6df924f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+@@ -227,6 +227,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
+ }
+ WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
++ if (adev->mode_info.num_crtc)
++ amdgpu_display_set_vga_render_state(adev, false);
++
+ gmc_v6_0_mc_stop(adev, &save);
+
+ if (gmc_v6_0_wait_for_idle((void *)adev)) {
+@@ -256,7 +259,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
+ dev_warn(adev->dev, "Wait for MC idle timedout !\n");
+ }
+ gmc_v6_0_mc_resume(adev, &save);
+- amdgpu_display_set_vga_render_state(adev, false);
+ }
+
+ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
+index 67db157..4147e51 100644
+--- a/drivers/gpu/drm/i915/intel_lrc.c
++++ b/drivers/gpu/drm/i915/intel_lrc.c
+@@ -2152,30 +2152,42 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
+
+ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
+ {
+- struct i915_gem_context *ctx = dev_priv->kernel_context;
+ struct intel_engine_cs *engine;
++ struct i915_gem_context *ctx;
++
++ /* Because we emit WA_TAIL_DWORDS there may be a disparity
++ * between our bookkeeping in ce->ring->head and ce->ring->tail and
++ * that stored in context. As we only write new commands from
++ * ce->ring->tail onwards, everything before that is junk. If the GPU
++ * starts reading from its RING_HEAD from the context, it may try to
++ * execute that junk and die.
++ *
++ * So to avoid that we reset the context images upon resume. For
++ * simplicity, we just zero everything out.
++ */
++ list_for_each_entry(ctx, &dev_priv->context_list, link) {
++ for_each_engine(engine, dev_priv) {
++ struct intel_context *ce = &ctx->engine[engine->id];
++ u32 *reg;
+
+- for_each_engine(engine, dev_priv) {
+- struct intel_context *ce = &ctx->engine[engine->id];
+- void *vaddr;
+- uint32_t *reg_state;
+-
+- if (!ce->state)
+- continue;
+-
+- vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
+- if (WARN_ON(IS_ERR(vaddr)))
+- continue;
++ if (!ce->state)
++ continue;
+
+- reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
++ reg = i915_gem_object_pin_map(ce->state->obj,
++ I915_MAP_WB);
++ if (WARN_ON(IS_ERR(reg)))
++ continue;
+
+- reg_state[CTX_RING_HEAD+1] = 0;
+- reg_state[CTX_RING_TAIL+1] = 0;
++ reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
++ reg[CTX_RING_HEAD+1] = 0;
++ reg[CTX_RING_TAIL+1] = 0;
+
+- ce->state->obj->dirty = true;
+- i915_gem_object_unpin_map(ce->state->obj);
++ ce->state->obj->dirty = true;
++ i915_gem_object_unpin_map(ce->state->obj);
+
+- ce->ring->head = 0;
+- ce->ring->tail = 0;
++ ce->ring->head = ce->ring->tail = 0;
++ ce->ring->last_retired_head = -1;
++ intel_ring_update_space(ce->ring);
++ }
+ }
+ }
+diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
+index 74856a8..e64f524 100644
+--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
++++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
+@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
+ uint32_t mpllP;
+
+ pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
++ mpllP = (mpllP >> 8) & 0xf;
+ if (!mpllP)
+ mpllP = 4;
+
+@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
+ uint32_t clock;
+
+ pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
+- return clock;
++ return clock / 1000;
+ }
+
+ ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
+index 6f0436d..f8f2f16 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
+@@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
+ );
+ }
+ for (i = 0; i < size; i++)
+- nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
++ nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
+ for (; i < 0x60; i++)
+ nvkm_wr32(device, 0x61c440 + soff, (i << 8));
+ nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
+diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
+index 60d3020..e06c134 100644
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -167,7 +167,7 @@ struct cp2112_device {
+ atomic_t xfer_avail;
+ struct gpio_chip gc;
+ u8 *in_out_buffer;
+- spinlock_t lock;
++ struct mutex lock;
+ };
+
+ static int gpio_push_pull = 0xFF;
+@@ -179,10 +179,9 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+ struct cp2112_device *dev = gpiochip_get_data(chip);
+ struct hid_device *hdev = dev->hdev;
+ u8 *buf = dev->in_out_buffer;
+- unsigned long flags;
+ int ret;
+
+- spin_lock_irqsave(&dev->lock, flags);
++ mutex_lock(&dev->lock);
+
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+@@ -206,8 +205,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+ ret = 0;
+
+ exit:
+- spin_unlock_irqrestore(&dev->lock, flags);
+- return ret <= 0 ? ret : -EIO;
++ mutex_unlock(&dev->lock);
++ return ret < 0 ? ret : -EIO;
+ }
+
+ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+@@ -215,10 +214,9 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+ struct cp2112_device *dev = gpiochip_get_data(chip);
+ struct hid_device *hdev = dev->hdev;
+ u8 *buf = dev->in_out_buffer;
+- unsigned long flags;
+ int ret;
+
+- spin_lock_irqsave(&dev->lock, flags);
++ mutex_lock(&dev->lock);
+
+ buf[0] = CP2112_GPIO_SET;
+ buf[1] = value ? 0xff : 0;
+@@ -230,7 +228,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+ if (ret < 0)
+ hid_err(hdev, "error setting GPIO values: %d\n", ret);
+
+- spin_unlock_irqrestore(&dev->lock, flags);
++ mutex_unlock(&dev->lock);
+ }
+
+ static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
+@@ -238,10 +236,9 @@ static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
+ struct cp2112_device *dev = gpiochip_get_data(chip);
+ struct hid_device *hdev = dev->hdev;
+ u8 *buf = dev->in_out_buffer;
+- unsigned long flags;
+ int ret;
+
+- spin_lock_irqsave(&dev->lock, flags);
++ mutex_lock(&dev->lock);
+
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
+ CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
+@@ -255,7 +252,7 @@ static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
+ ret = (buf[1] >> offset) & 1;
+
+ exit:
+- spin_unlock_irqrestore(&dev->lock, flags);
++ mutex_unlock(&dev->lock);
+
+ return ret;
+ }
+@@ -266,10 +263,9 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
+ struct cp2112_device *dev = gpiochip_get_data(chip);
+ struct hid_device *hdev = dev->hdev;
+ u8 *buf = dev->in_out_buffer;
+- unsigned long flags;
+ int ret;
+
+- spin_lock_irqsave(&dev->lock, flags);
++ mutex_lock(&dev->lock);
+
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+@@ -290,7 +286,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
+ goto fail;
+ }
+
+- spin_unlock_irqrestore(&dev->lock, flags);
++ mutex_unlock(&dev->lock);
+
+ /*
+ * Set gpio value when output direction is already set,
+@@ -301,7 +297,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
+ return 0;
+
+ fail:
+- spin_unlock_irqrestore(&dev->lock, flags);
++ mutex_unlock(&dev->lock);
+ return ret < 0 ? ret : -EIO;
+ }
+
+@@ -1057,7 +1053,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ if (!dev->in_out_buffer)
+ return -ENOMEM;
+
+- spin_lock_init(&dev->lock);
++ mutex_init(&dev->lock);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 575aa65..9845189 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -76,6 +76,9 @@
+ #define USB_VENDOR_ID_ALPS_JP 0x044E
+ #define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B
+
++#define USB_VENDOR_ID_AMI 0x046b
++#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10
++
+ #define USB_VENDOR_ID_ANTON 0x1130
+ #define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
+
+diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
+index c5c5fbe..52026dc 100644
+--- a/drivers/hid/hid-lg.c
++++ b/drivers/hid/hid-lg.c
+@@ -872,7 +872,7 @@ static const struct hid_device_id lg_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
+ .driver_data = LG_NOGET | LG_FF4 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
+- .driver_data = LG_FF2 },
++ .driver_data = LG_NOGET | LG_FF2 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
+ .driver_data = LG_FF3 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index e6cfd32..cde060f 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -57,6 +57,7 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 1cb7992..623be90 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -164,19 +164,21 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
+ wacom->id[0] = STYLUS_DEVICE_ID;
+ }
+
+- pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
+- if (features->pressure_max > 255)
+- pressure = (pressure << 1) | ((data[4] >> 6) & 1);
+- pressure += (features->pressure_max + 1) / 2;
+-
+- input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
+- input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
+- input_report_abs(input, ABS_PRESSURE, pressure);
+-
+- input_report_key(input, BTN_TOUCH, data[4] & 0x08);
+- input_report_key(input, BTN_STYLUS, data[4] & 0x10);
+- /* Only allow the stylus2 button to be reported for the pen tool. */
+- input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
++ if (prox) {
++ pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
++ if (features->pressure_max > 255)
++ pressure = (pressure << 1) | ((data[4] >> 6) & 1);
++ pressure += (features->pressure_max + 1) / 2;
++
++ input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
++ input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
++ input_report_abs(input, ABS_PRESSURE, pressure);
++
++ input_report_key(input, BTN_TOUCH, data[4] & 0x08);
++ input_report_key(input, BTN_STYLUS, data[4] & 0x10);
++ /* Only allow the stylus2 button to be reported for the pen tool. */
++ input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
++ }
+
+ if (!prox)
+ wacom->id[0] = 0;
+diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
+index 2bbf0c5..7d61b56 100644
+--- a/drivers/iio/adc/palmas_gpadc.c
++++ b/drivers/iio/adc/palmas_gpadc.c
+@@ -775,7 +775,7 @@ static int palmas_adc_wakeup_reset(struct palmas_gpadc *adc)
+
+ static int palmas_gpadc_suspend(struct device *dev)
+ {
+- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
++ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct palmas_gpadc *adc = iio_priv(indio_dev);
+ int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
+ int ret;
+@@ -798,7 +798,7 @@ static int palmas_gpadc_suspend(struct device *dev)
+
+ static int palmas_gpadc_resume(struct device *dev)
+ {
+- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
++ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct palmas_gpadc *adc = iio_priv(indio_dev);
+ int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
+ int ret;
+diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
+index 9a08146..6bb23a4 100644
+--- a/drivers/iio/health/afe4403.c
++++ b/drivers/iio/health/afe4403.c
+@@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, afe4403_of_match);
+
+ static int __maybe_unused afe4403_suspend(struct device *dev)
+ {
+- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
++ struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ int ret;
+
+@@ -443,7 +443,7 @@ static int __maybe_unused afe4403_suspend(struct device *dev)
+
+ static int __maybe_unused afe4403_resume(struct device *dev)
+ {
+- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
++ struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ int ret;
+
+diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
+index 4526640..964f523 100644
+--- a/drivers/iio/health/afe4404.c
++++ b/drivers/iio/health/afe4404.c
+@@ -428,7 +428,7 @@ MODULE_DEVICE_TABLE(of, afe4404_of_match);
+
+ static int __maybe_unused afe4404_suspend(struct device *dev)
+ {
+- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
++ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ int ret;
+
+@@ -449,7 +449,7 @@ static int __maybe_unused afe4404_suspend(struct device *dev)
+
+ static int __maybe_unused afe4404_resume(struct device *dev)
+ {
+- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
++ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ int ret;
+
+diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
+index 90ab8a2d..183c143 100644
+--- a/drivers/iio/health/max30100.c
++++ b/drivers/iio/health/max30100.c
+@@ -238,7 +238,7 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
+
+ mutex_lock(&data->lock);
+
+- while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
++ while (cnt || (cnt = max30100_fifo_count(data)) > 0) {
+ ret = max30100_read_measurement(data);
+ if (ret)
+ break;
+diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
+index 9c47bc9..2a22ad9 100644
+--- a/drivers/iio/humidity/dht11.c
++++ b/drivers/iio/humidity/dht11.c
+@@ -71,7 +71,8 @@
+ * a) select an implementation using busy loop polling on those systems
+ * b) use the checksum to do some probabilistic decoding
+ */
+-#define DHT11_START_TRANSMISSION 18 /* ms */
++#define DHT11_START_TRANSMISSION_MIN 18000 /* us */
++#define DHT11_START_TRANSMISSION_MAX 20000 /* us */
+ #define DHT11_MIN_TIMERES 34000 /* ns */
+ #define DHT11_THRESHOLD 49000 /* ns */
+ #define DHT11_AMBIG_LOW 23000 /* ns */
+@@ -228,7 +229,8 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
+ ret = gpio_direction_output(dht11->gpio, 0);
+ if (ret)
+ goto err;
+- msleep(DHT11_START_TRANSMISSION);
++ usleep_range(DHT11_START_TRANSMISSION_MIN,
++ DHT11_START_TRANSMISSION_MAX);
+ ret = gpio_direction_input(dht11->gpio);
+ if (ret)
+ goto err;
+diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
+index bb0fde6..cc2243f 100644
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -321,7 +321,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
+ FW_RI_RES_WR_DCAEN_V(0) |
+ FW_RI_RES_WR_DCACPU_V(0) |
+ FW_RI_RES_WR_FBMIN_V(2) |
+- FW_RI_RES_WR_FBMAX_V(2) |
++ (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
++ FW_RI_RES_WR_FBMAX_V(3)) |
+ FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
+ FW_RI_RES_WR_CIDXFTHRESH_V(0) |
+ FW_RI_RES_WR_EQSIZE_V(eqsize));
+@@ -345,7 +346,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
+ FW_RI_RES_WR_DCAEN_V(0) |
+ FW_RI_RES_WR_DCACPU_V(0) |
+ FW_RI_RES_WR_FBMIN_V(2) |
+- FW_RI_RES_WR_FBMAX_V(2) |
++ FW_RI_RES_WR_FBMAX_V(3) |
+ FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
+ FW_RI_RES_WR_CIDXFTHRESH_V(0) |
+ FW_RI_RES_WR_EQSIZE_V(eqsize));
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index e1e274a..ba637ff 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2719,7 +2719,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
+ if (intmask & SDHCI_INT_RETUNE)
+ mmc_retune_needed(host->mmc);
+
+- if (intmask & SDHCI_INT_CARD_INT) {
++ if ((intmask & SDHCI_INT_CARD_INT) &&
++ (host->ier & SDHCI_INT_CARD_INT)) {
+ sdhci_enable_sdio_irq_nolock(host, false);
+ host->thread_isr |= SDHCI_INT_CARD_INT;
+ result = IRQ_WAKE_THREAD;
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+index d02ca14..8d3e53f 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+@@ -91,7 +91,7 @@
+
+ #define IWL8000_FW_PRE "iwlwifi-8000C-"
+ #define IWL8000_MODULE_FIRMWARE(api) \
+- IWL8000_FW_PRE "-" __stringify(api) ".ucode"
++ IWL8000_FW_PRE __stringify(api) ".ucode"
+
+ #define IWL8265_FW_PRE "iwlwifi-8265-"
+ #define IWL8265_MODULE_FIRMWARE(api) \
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+index fc77188..52de3c6 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+@@ -1144,9 +1144,10 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+
+- /* Make sure reserved queue is still marked as such (or allocated) */
+- mvm->queue_info[mvm_sta->reserved_queue].status =
+- IWL_MVM_QUEUE_RESERVED;
++ /* Make sure reserved queue is still marked as such (if allocated) */
++ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
++ mvm->queue_info[mvm_sta->reserved_queue].status =
++ IWL_MVM_QUEUE_RESERVED;
+
+ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+ struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 0ec649d..b0916b1 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -518,25 +518,32 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return NULL;
++
+ INIT_LIST_HEAD(&link->sibling);
+ INIT_LIST_HEAD(&link->children);
+ INIT_LIST_HEAD(&link->link);
+ link->pdev = pdev;
+- if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) {
++
++ /*
++ * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
++ * hierarchies.
++ */
++ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
++ pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
++ link->root = link;
++ } else {
+ struct pcie_link_state *parent;
++
+ parent = pdev->bus->parent->self->link_state;
+ if (!parent) {
+ kfree(link);
+ return NULL;
+ }
++
+ link->parent = parent;
++ link->root = link->parent->root;
+ list_add(&link->link, &parent->children);
+ }
+- /* Setup a pointer to the root port link */
+- if (!link->parent)
+- link->root = link;
+- else
+- link->root = link->parent->root;
+
+ list_add(&link->sibling, &link_list);
+ pdev->link_state = link;
+diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
+index 0790153..583ae3f 100644
+--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
+@@ -731,16 +731,23 @@ static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
+ int reg)
+ {
+ struct byt_community *comm = byt_get_community(vg, offset);
+- u32 reg_offset = 0;
++ u32 reg_offset;
+
+ if (!comm)
+ return NULL;
+
+ offset -= comm->pin_base;
+- if (reg == BYT_INT_STAT_REG)
++ switch (reg) {
++ case BYT_INT_STAT_REG:
+ reg_offset = (offset / 32) * 4;
+- else
++ break;
++ case BYT_DEBOUNCE_REG:
++ reg_offset = 0;
++ break;
++ default:
+ reg_offset = comm->pad_map[offset] * 16;
++ break;
++ }
+
+ return comm->reg_base + reg_offset + reg;
+ }
+@@ -1612,7 +1619,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
+ continue;
+ }
+
++ raw_spin_lock(&vg->lock);
+ pending = readl(reg);
++ raw_spin_unlock(&vg->lock);
+ for_each_set_bit(pin, &pending, 32) {
+ virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
+ generic_handle_irq(virq);
+diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
+index 7826c7f..9931be6 100644
+--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
++++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
+@@ -794,6 +794,9 @@ static int mrfld_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned int i;
+ int ret;
+
++ if (!mrfld_buf_available(mp, pin))
++ return -ENOTSUPP;
++
+ for (i = 0; i < nconfigs; i++) {
+ switch (pinconf_to_config_param(configs[i])) {
+ case PIN_CONFIG_BIAS_DISABLE:
+diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
+index e6a512e..a3ade9e 100644
+--- a/drivers/regulator/axp20x-regulator.c
++++ b/drivers/regulator/axp20x-regulator.c
+@@ -272,7 +272,7 @@ static const struct regulator_desc axp806_regulators[] = {
+ 64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1,
+ BIT(3)),
+ AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100,
+- AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
++ AXP806_DCDCE_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
+ AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
+ AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)),
+ AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100,
+diff --git a/drivers/staging/greybus/timesync_platform.c b/drivers/staging/greybus/timesync_platform.c
+index 113f3d6..27f75b1 100644
+--- a/drivers/staging/greybus/timesync_platform.c
++++ b/drivers/staging/greybus/timesync_platform.c
+@@ -45,12 +45,18 @@ u32 gb_timesync_platform_get_clock_rate(void)
+
+ int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata)
+ {
++ if (!arche_platform_change_state_cb)
++ return 0;
++
+ return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC,
+ pdata);
+ }
+
+ void gb_timesync_platform_unlock_bus(void)
+ {
++ if (!arche_platform_change_state_cb)
++ return;
++
+ arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL);
+ }
+
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index d2e50a2..24f9f98 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -37,6 +37,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* CBM - Flash disk */
+ { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* WORLDE easy key (easykey.25) MIDI controller */
++ { USB_DEVICE(0x0218, 0x0401), .driver_info =
++ USB_QUIRK_CONFIG_INTF_STRINGS },
++
+ /* HP 5300/5370C scanner */
+ { USB_DEVICE(0x03f0, 0x0701), .driver_info =
+ USB_QUIRK_STRING_FETCH_255 },
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 17989b7..8d412d8 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -2269,6 +2269,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
+ if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
+ return -EINVAL;
+ length = le32_to_cpu(d->dwSize);
++ if (len < length)
++ return -EINVAL;
+ type = le32_to_cpu(d->dwPropertyDataType);
+ if (type < USB_EXT_PROP_UNICODE ||
+ type > USB_EXT_PROP_UNICODE_MULTI) {
+@@ -2277,6 +2279,11 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
+ return -EINVAL;
+ }
+ pnl = le16_to_cpu(d->wPropertyNameLength);
++ if (length < 14 + pnl) {
++ pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
++ length, pnl, type);
++ return -EINVAL;
++ }
+ pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
+ if (length != 14 + pnl + pdl) {
+ pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
+@@ -2363,6 +2370,9 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
+ }
+ }
+ if (flags & (1 << i)) {
++ if (len < 4) {
++ goto error;
++ }
+ os_descs_count = get_unaligned_le32(data);
+ data += 4;
+ len -= 4;
+@@ -2435,7 +2445,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
+
+ ENTER();
+
+- if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
++ if (unlikely(len < 16 ||
++ get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
+ get_unaligned_le32(data + 4) != len))
+ goto error;
+ str_count = get_unaligned_le32(data + 8);
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index c3e172e..338575f 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -578,11 +578,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ | MUSB_PORT_STAT_RESUME;
+ musb->rh_timer = jiffies
+ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
+- musb->need_finish_resume = 1;
+-
+ musb->xceiv->otg->state = OTG_STATE_A_HOST;
+ musb->is_active = 1;
+ musb_host_resume_root_hub(musb);
++ schedule_delayed_work(&musb->finish_resume_work,
++ msecs_to_jiffies(USB_RESUME_TIMEOUT));
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
+@@ -2691,11 +2691,6 @@ static int musb_resume(struct device *dev)
+ mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
+ if ((devctl & mask) != (musb->context.devctl & mask))
+ musb->port1_status = 0;
+- if (musb->need_finish_resume) {
+- musb->need_finish_resume = 0;
+- schedule_delayed_work(&musb->finish_resume_work,
+- msecs_to_jiffies(USB_RESUME_TIMEOUT));
+- }
+
+ /*
+ * The USB HUB code expects the device to be in RPM_ACTIVE once it came
+@@ -2747,12 +2742,6 @@ static int musb_runtime_resume(struct device *dev)
+
+ musb_restore_context(musb);
+
+- if (musb->need_finish_resume) {
+- musb->need_finish_resume = 0;
+- schedule_delayed_work(&musb->finish_resume_work,
+- msecs_to_jiffies(USB_RESUME_TIMEOUT));
+- }
+-
+ spin_lock_irqsave(&musb->lock, flags);
+ error = musb_run_resume_work(musb);
+ if (error)
+diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
+index 47331db..854fbf7 100644
+--- a/drivers/usb/musb/musb_core.h
++++ b/drivers/usb/musb/musb_core.h
+@@ -410,7 +410,6 @@ struct musb {
+
+ /* is_suspended means USB B_PERIPHERAL suspend */
+ unsigned is_suspended:1;
+- unsigned need_finish_resume :1;
+
+ /* may_wakeup means remote wakeup is enabled */
+ unsigned may_wakeup:1;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 7ce31a4..42cc72e 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2007,6 +2007,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 46fca6b..1db4b61 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
+ { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
+ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
++ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
+ { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
+ { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
+ { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index e3b7af8..09d9be8 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -27,6 +27,7 @@
+ #define ATEN_VENDOR_ID 0x0557
+ #define ATEN_VENDOR_ID2 0x0547
+ #define ATEN_PRODUCT_ID 0x2008
++#define ATEN_PRODUCT_ID2 0x2118
+
+ #define IODATA_VENDOR_ID 0x04bb
+ #define IODATA_PRODUCT_ID 0x0a03
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 1bc6089..696458d 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
+ {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
+ {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
+ {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
++ {USB_DEVICE(0x413c, 0x81a6)}, /* Dell DW5570 QDL (MC8805) */
+ {USB_DEVICE(0x1199, 0x68a4)}, /* Sierra Wireless QDL */
+ {USB_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */
+ {USB_DEVICE(0x1199, 0x68a8)}, /* Sierra Wireless QDL */
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index c6f2d89..64613fb 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -130,14 +130,14 @@ static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
+
+ static void vhost_init_is_le(struct vhost_virtqueue *vq)
+ {
+- if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
+- vq->is_le = true;
++ vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
++ || virtio_legacy_is_little_endian();
+ }
+ #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
+
+ static void vhost_reset_is_le(struct vhost_virtqueue *vq)
+ {
+- vq->is_le = virtio_legacy_is_little_endian();
++ vhost_init_is_le(vq);
+ }
+
+ struct vhost_flush_struct {
+@@ -1713,10 +1713,8 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
+ int r;
+ bool is_le = vq->is_le;
+
+- if (!vq->private_data) {
+- vhost_reset_is_le(vq);
++ if (!vq->private_data)
+ return 0;
+- }
+
+ vhost_init_is_le(vq);
+
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index f136048..489bfc6 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -159,13 +159,6 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
+ if (xen_domain())
+ return true;
+
+- /*
+- * On ARM-based machines, the DMA ops will do the right thing,
+- * so always use them with legacy devices.
+- */
+- if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
+- return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
+-
+ return false;
+ }
+
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index 8f6a2a5..a27fc87 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -285,6 +285,7 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
+ rc = -ENOMEM;
+ goto error_exit;
+ }
++ spin_lock_init(&cifsFile->file_info_lock);
+ file->private_data = cifsFile;
+ cifsFile->tlink = cifs_get_tlink(tlink);
+ tcon = tlink_tcon(tlink);
+diff --git a/fs/dax.c b/fs/dax.c
+index 014defd..bf6218d 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -1270,6 +1270,11 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+ struct blk_dax_ctl dax = { 0 };
+ ssize_t map_len;
+
++ if (fatal_signal_pending(current)) {
++ ret = -EINTR;
++ break;
++ }
++
+ dax.sector = iomap->blkno +
+ (((pos & PAGE_MASK) - iomap->offset) >> 9);
+ dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 478630a..bbc316d 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3827,6 +3827,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
+ db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
+ EXT4_DESC_PER_BLOCK(sb);
++ if (ext4_has_feature_meta_bg(sb)) {
++ if (le32_to_cpu(es->s_first_meta_bg) >= db_count) {
++ ext4_msg(sb, KERN_WARNING,
++ "first meta block group too large: %u "
++ "(group descriptor block count %u)",
++ le32_to_cpu(es->s_first_meta_bg), db_count);
++ goto failed_mount;
++ }
++ }
+ sbi->s_group_desc = ext4_kvmalloc(db_count *
+ sizeof(struct buffer_head *),
+ GFP_KERNEL);
+diff --git a/fs/iomap.c b/fs/iomap.c
+index a8ee8c3..814ae8f 100644
+--- a/fs/iomap.c
++++ b/fs/iomap.c
+@@ -113,6 +113,9 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
+
+ BUG_ON(pos + len > iomap->offset + iomap->length);
+
++ if (fatal_signal_pending(current))
++ return -EINTR;
++
+ page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
+ if (!page)
+ return -ENOMEM;
+diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
+index 42aace4..6481369 100644
+--- a/fs/nfsd/nfs4layouts.c
++++ b/fs/nfsd/nfs4layouts.c
+@@ -223,10 +223,11 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
+ struct nfs4_layout_stateid *ls;
+ struct nfs4_stid *stp;
+
+- stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
++ stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
++ nfsd4_free_layout_stateid);
+ if (!stp)
+ return NULL;
+- stp->sc_free = nfsd4_free_layout_stateid;
++
+ get_nfs4_file(fp);
+ stp->sc_file = fp;
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 4b4beaa..a0dee8a 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -633,8 +633,8 @@ find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
+ return co;
+ }
+
+-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
+- struct kmem_cache *slab)
++struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
++ void (*sc_free)(struct nfs4_stid *))
+ {
+ struct nfs4_stid *stid;
+ int new_id;
+@@ -650,6 +650,8 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
+ idr_preload_end();
+ if (new_id < 0)
+ goto out_free;
++
++ stid->sc_free = sc_free;
+ stid->sc_client = cl;
+ stid->sc_stateid.si_opaque.so_id = new_id;
+ stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
+@@ -675,15 +677,12 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
+ static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
+ {
+ struct nfs4_stid *stid;
+- struct nfs4_ol_stateid *stp;
+
+- stid = nfs4_alloc_stid(clp, stateid_slab);
++ stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
+ if (!stid)
+ return NULL;
+
+- stp = openlockstateid(stid);
+- stp->st_stid.sc_free = nfs4_free_ol_stateid;
+- return stp;
++ return openlockstateid(stid);
+ }
+
+ static void nfs4_free_deleg(struct nfs4_stid *stid)
+@@ -781,11 +780,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
+ goto out_dec;
+ if (delegation_blocked(&current_fh->fh_handle))
+ goto out_dec;
+- dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
++ dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
+ if (dp == NULL)
+ goto out_dec;
+
+- dp->dl_stid.sc_free = nfs4_free_deleg;
+ /*
+ * delegation seqid's are never incremented. The 4.1 special
+ * meaning of seqid 0 isn't meaningful, really, but let's avoid
+@@ -5580,7 +5578,6 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
+ stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
+ get_nfs4_file(fp);
+ stp->st_stid.sc_file = fp;
+- stp->st_stid.sc_free = nfs4_free_lock_stateid;
+ stp->st_access_bmap = 0;
+ stp->st_deny_bmap = open_stp->st_deny_bmap;
+ stp->st_openstp = open_stp;
+@@ -5623,7 +5620,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
+ lst = find_lock_stateid(lo, fi);
+ if (lst == NULL) {
+ spin_unlock(&clp->cl_lock);
+- ns = nfs4_alloc_stid(clp, stateid_slab);
++ ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
+ if (ns == NULL)
+ return NULL;
+
+diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
+index c939936..4516e8b 100644
+--- a/fs/nfsd/state.h
++++ b/fs/nfsd/state.h
+@@ -603,8 +603,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
+ __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
+ stateid_t *stateid, unsigned char typemask,
+ struct nfs4_stid **s, struct nfsd_net *nn);
+-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
+- struct kmem_cache *slab);
++struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
++ void (*sc_free)(struct nfs4_stid *));
+ void nfs4_unhash_stid(struct nfs4_stid *s);
+ void nfs4_put_stid(struct nfs4_stid *s);
+ void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid);
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index e798755..39e3254 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -184,6 +184,7 @@ struct irq_data {
+ *
+ * IRQD_TRIGGER_MASK - Mask for the trigger type bits
+ * IRQD_SETAFFINITY_PENDING - Affinity setting is pending
++ * IRQD_ACTIVATED - Interrupt has already been activated
+ * IRQD_NO_BALANCING - Balancing disabled for this IRQ
+ * IRQD_PER_CPU - Interrupt is per cpu
+ * IRQD_AFFINITY_SET - Interrupt affinity was set
+@@ -202,6 +203,7 @@ struct irq_data {
+ enum {
+ IRQD_TRIGGER_MASK = 0xf,
+ IRQD_SETAFFINITY_PENDING = (1 << 8),
++ IRQD_ACTIVATED = (1 << 9),
+ IRQD_NO_BALANCING = (1 << 10),
+ IRQD_PER_CPU = (1 << 11),
+ IRQD_AFFINITY_SET = (1 << 12),
+@@ -312,6 +314,21 @@ static inline bool irqd_affinity_is_managed(struct irq_data *d)
+ return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
+ }
+
++static inline bool irqd_is_activated(struct irq_data *d)
++{
++ return __irqd_to_state(d) & IRQD_ACTIVATED;
++}
++
++static inline void irqd_set_activated(struct irq_data *d)
++{
++ __irqd_to_state(d) |= IRQD_ACTIVATED;
++}
++
++static inline void irqd_clr_activated(struct irq_data *d)
++{
++ __irqd_to_state(d) &= ~IRQD_ACTIVATED;
++}
++
+ #undef __irqd_to_state
+
+ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
+diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
+index c1784c0..134a2f6 100644
+--- a/include/linux/memory_hotplug.h
++++ b/include/linux/memory_hotplug.h
+@@ -85,7 +85,8 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
+ extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
+ /* VM interface that may be used by firmware interface */
+ extern int online_pages(unsigned long, unsigned long, int);
+-extern int test_pages_in_a_zone(unsigned long, unsigned long);
++extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
++ unsigned long *valid_start, unsigned long *valid_end);
+ extern void __offline_isolated_pages(unsigned long, unsigned long);
+
+ typedef void (*online_page_callback_t)(struct page *page);
+diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
+index 1c7eec0..3a481a4 100644
+--- a/include/linux/percpu-refcount.h
++++ b/include/linux/percpu-refcount.h
+@@ -204,7 +204,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
+ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+ {
+ unsigned long __percpu *percpu_count;
+- int ret;
++ bool ret;
+
+ rcu_read_lock_sched();
+
+@@ -238,7 +238,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
+ {
+ unsigned long __percpu *percpu_count;
+- int ret = false;
++ bool ret = false;
+
+ rcu_read_lock_sched();
+
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 85bc9be..4e2f3de 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -5219,6 +5219,11 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
+ return ERR_PTR(err);
+ }
+
++/*
++ * The returned cgroup is fully initialized including its control mask, but
++ * it isn't associated with its kernfs_node and doesn't have the control
++ * mask applied.
++ */
+ static struct cgroup *cgroup_create(struct cgroup *parent)
+ {
+ struct cgroup_root *root = parent->root;
+@@ -5283,11 +5288,6 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
+
+ cgroup_propagate_control(cgrp);
+
+- /* @cgrp doesn't have dir yet so the following will only create csses */
+- ret = cgroup_apply_control_enable(cgrp);
+- if (ret)
+- goto out_destroy;
+-
+ return cgrp;
+
+ out_cancel_ref:
+@@ -5295,9 +5295,6 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
+ out_free_cgrp:
+ kfree(cgrp);
+ return ERR_PTR(ret);
+-out_destroy:
+- cgroup_destroy_locked(cgrp);
+- return ERR_PTR(ret);
+ }
+
+ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index e5a8839..b1cfd74 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1469,7 +1469,6 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
+ static void
+ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
+ {
+-
+ lockdep_assert_held(&ctx->lock);
+
+ WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
+@@ -1624,6 +1623,8 @@ static void perf_group_attach(struct perf_event *event)
+ {
+ struct perf_event *group_leader = event->group_leader, *pos;
+
++ lockdep_assert_held(&event->ctx->lock);
++
+ /*
+ * We can have double attach due to group movement in perf_event_open.
+ */
+@@ -1697,6 +1698,8 @@ static void perf_group_detach(struct perf_event *event)
+ struct perf_event *sibling, *tmp;
+ struct list_head *list = NULL;
+
++ lockdep_assert_held(&event->ctx->lock);
++
+ /*
+ * We can have double detach due to exit/hot-unplug + close.
+ */
+@@ -1895,9 +1898,29 @@ __perf_remove_from_context(struct perf_event *event,
+ */
+ static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
+ {
+- lockdep_assert_held(&event->ctx->mutex);
++ struct perf_event_context *ctx = event->ctx;
++
++ lockdep_assert_held(&ctx->mutex);
+
+ event_function_call(event, __perf_remove_from_context, (void *)flags);
++
++ /*
++ * The above event_function_call() can NO-OP when it hits
++ * TASK_TOMBSTONE. In that case we must already have been detached
++ * from the context (by perf_event_exit_event()) but the grouping
++ * might still be in-tact.
++ */
++ WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
++ if ((flags & DETACH_GROUP) &&
++ (event->attach_state & PERF_ATTACH_GROUP)) {
++ /*
++ * Since in that case we cannot possibly be scheduled, simply
++ * detach now.
++ */
++ raw_spin_lock_irq(&ctx->lock);
++ perf_group_detach(event);
++ raw_spin_unlock_irq(&ctx->lock);
++ }
+ }
+
+ /*
+@@ -6583,6 +6606,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
+ char *buf = NULL;
+ char *name;
+
++ if (vma->vm_flags & VM_READ)
++ prot |= PROT_READ;
++ if (vma->vm_flags & VM_WRITE)
++ prot |= PROT_WRITE;
++ if (vma->vm_flags & VM_EXEC)
++ prot |= PROT_EXEC;
++
++ if (vma->vm_flags & VM_MAYSHARE)
++ flags = MAP_SHARED;
++ else
++ flags = MAP_PRIVATE;
++
++ if (vma->vm_flags & VM_DENYWRITE)
++ flags |= MAP_DENYWRITE;
++ if (vma->vm_flags & VM_MAYEXEC)
++ flags |= MAP_EXECUTABLE;
++ if (vma->vm_flags & VM_LOCKED)
++ flags |= MAP_LOCKED;
++ if (vma->vm_flags & VM_HUGETLB)
++ flags |= MAP_HUGETLB;
++
+ if (file) {
+ struct inode *inode;
+ dev_t dev;
+@@ -6609,27 +6653,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
+ maj = MAJOR(dev);
+ min = MINOR(dev);
+
+- if (vma->vm_flags & VM_READ)
+- prot |= PROT_READ;
+- if (vma->vm_flags & VM_WRITE)
+- prot |= PROT_WRITE;
+- if (vma->vm_flags & VM_EXEC)
+- prot |= PROT_EXEC;
+-
+- if (vma->vm_flags & VM_MAYSHARE)
+- flags = MAP_SHARED;
+- else
+- flags = MAP_PRIVATE;
+-
+- if (vma->vm_flags & VM_DENYWRITE)
+- flags |= MAP_DENYWRITE;
+- if (vma->vm_flags & VM_MAYEXEC)
+- flags |= MAP_EXECUTABLE;
+- if (vma->vm_flags & VM_LOCKED)
+- flags |= MAP_LOCKED;
+- if (vma->vm_flags & VM_HUGETLB)
+- flags |= MAP_HUGETLB;
+-
+ goto got_name;
+ } else {
+ if (vma->vm_ops && vma->vm_ops->name) {
+diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
+index 8c0a0ae..b59e676 100644
+--- a/kernel/irq/irqdomain.c
++++ b/kernel/irq/irqdomain.c
+@@ -1346,6 +1346,30 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain,
+ }
+ EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
+
++static void __irq_domain_activate_irq(struct irq_data *irq_data)
++{
++ if (irq_data && irq_data->domain) {
++ struct irq_domain *domain = irq_data->domain;
++
++ if (irq_data->parent_data)
++ __irq_domain_activate_irq(irq_data->parent_data);
++ if (domain->ops->activate)
++ domain->ops->activate(domain, irq_data);
++ }
++}
++
++static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
++{
++ if (irq_data && irq_data->domain) {
++ struct irq_domain *domain = irq_data->domain;
++
++ if (domain->ops->deactivate)
++ domain->ops->deactivate(domain, irq_data);
++ if (irq_data->parent_data)
++ __irq_domain_deactivate_irq(irq_data->parent_data);
++ }
++}
++
+ /**
+ * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
+ * interrupt
+@@ -1356,13 +1380,9 @@ EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
+ */
+ void irq_domain_activate_irq(struct irq_data *irq_data)
+ {
+- if (irq_data && irq_data->domain) {
+- struct irq_domain *domain = irq_data->domain;
+-
+- if (irq_data->parent_data)
+- irq_domain_activate_irq(irq_data->parent_data);
+- if (domain->ops->activate)
+- domain->ops->activate(domain, irq_data);
++ if (!irqd_is_activated(irq_data)) {
++ __irq_domain_activate_irq(irq_data);
++ irqd_set_activated(irq_data);
+ }
+ }
+
+@@ -1376,13 +1396,9 @@ void irq_domain_activate_irq(struct irq_data *irq_data)
+ */
+ void irq_domain_deactivate_irq(struct irq_data *irq_data)
+ {
+- if (irq_data && irq_data->domain) {
+- struct irq_domain *domain = irq_data->domain;
+-
+- if (domain->ops->deactivate)
+- domain->ops->deactivate(domain, irq_data);
+- if (irq_data->parent_data)
+- irq_domain_deactivate_irq(irq_data->parent_data);
++ if (irqd_is_activated(irq_data)) {
++ __irq_domain_deactivate_irq(irq_data);
++ irqd_clr_activated(irq_data);
+ }
+ }
+
+diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
+index b97286c..f00b013 100644
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -266,7 +266,7 @@ static int get_sample(void)
+ static struct cpumask save_cpumask;
+ static bool disable_migrate;
+
+-static void move_to_next_cpu(void)
++static void move_to_next_cpu(bool initmask)
+ {
+ static struct cpumask *current_mask;
+ int next_cpu;
+@@ -275,7 +275,7 @@ static void move_to_next_cpu(void)
+ return;
+
+ /* Just pick the first CPU on first iteration */
+- if (!current_mask) {
++ if (initmask) {
+ current_mask = &save_cpumask;
+ get_online_cpus();
+ cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
+@@ -330,10 +330,12 @@ static void move_to_next_cpu(void)
+ static int kthread_fn(void *data)
+ {
+ u64 interval;
++ bool initmask = true;
+
+ while (!kthread_should_stop()) {
+
+- move_to_next_cpu();
++ move_to_next_cpu(initmask);
++ initmask = false;
+
+ local_irq_disable();
+ get_sample();
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 7798010..d8d7df8 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1703,6 +1703,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
+
+ cond_resched();
+ find_page:
++ if (fatal_signal_pending(current)) {
++ error = -EINTR;
++ goto out;
++ }
++
+ page = find_get_page(mapping, index);
+ if (!page) {
+ page_cache_sync_readahead(mapping,
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index c3a8141..ede13734 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1483,17 +1483,20 @@ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
+ }
+
+ /*
+- * Confirm all pages in a range [start, end) is belongs to the same zone.
++ * Confirm all pages in a range [start, end) belong to the same zone.
++ * When true, return its valid [start, end).
+ */
+-int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
++int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
++ unsigned long *valid_start, unsigned long *valid_end)
+ {
+ unsigned long pfn, sec_end_pfn;
++ unsigned long start, end;
+ struct zone *zone = NULL;
+ struct page *page;
+ int i;
+- for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
++ for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
+ pfn < end_pfn;
+- pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
++ pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
+ /* Make sure the memory section is present first */
+ if (!present_section_nr(pfn_to_section_nr(pfn)))
+ continue;
+@@ -1509,10 +1512,20 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
+ page = pfn_to_page(pfn + i);
+ if (zone && page_zone(page) != zone)
+ return 0;
++ if (!zone)
++ start = pfn + i;
+ zone = page_zone(page);
++ end = pfn + MAX_ORDER_NR_PAGES;
+ }
+ }
+- return 1;
++
++ if (zone) {
++ *valid_start = start;
++ *valid_end = end;
++ return 1;
++ } else {
++ return 0;
++ }
+ }
+
+ /*
+@@ -1859,6 +1872,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
+ long offlined_pages;
+ int ret, drain, retry_max, node;
+ unsigned long flags;
++ unsigned long valid_start, valid_end;
+ struct zone *zone;
+ struct memory_notify arg;
+
+@@ -1869,10 +1883,10 @@ static int __ref __offline_pages(unsigned long start_pfn,
+ return -EINVAL;
+ /* This makes hotplug much easier...and readable.
+ we assume this for now. .*/
+- if (!test_pages_in_a_zone(start_pfn, end_pfn))
++ if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
+ return -EINVAL;
+
+- zone = page_zone(pfn_to_page(start_pfn));
++ zone = page_zone(pfn_to_page(valid_start));
+ node = zone_to_nid(zone);
+ nr_pages = end_pfn - start_pfn;
+
+diff --git a/mm/zswap.c b/mm/zswap.c
+index 275b22c..dbef278 100644
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -78,7 +78,13 @@ static u64 zswap_duplicate_entry;
+
+ /* Enable/disable zswap (disabled by default) */
+ static bool zswap_enabled;
+-module_param_named(enabled, zswap_enabled, bool, 0644);
++static int zswap_enabled_param_set(const char *,
++ const struct kernel_param *);
++static struct kernel_param_ops zswap_enabled_param_ops = {
++ .set = zswap_enabled_param_set,
++ .get = param_get_bool,
++};
++module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
+
+ /* Crypto compressor to use */
+ #define ZSWAP_COMPRESSOR_DEFAULT "lzo"
+@@ -176,6 +182,9 @@ static atomic_t zswap_pools_count = ATOMIC_INIT(0);
+ /* used by param callback function */
+ static bool zswap_init_started;
+
++/* fatal error during init */
++static bool zswap_init_failed;
++
+ /*********************************
+ * helpers and fwd declarations
+ **********************************/
+@@ -706,6 +715,11 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
+ char *s = strstrip((char *)val);
+ int ret;
+
++ if (zswap_init_failed) {
++ pr_err("can't set param, initialization failed\n");
++ return -ENODEV;
++ }
++
+ /* no change required */
+ if (!strcmp(s, *(char **)kp->arg))
+ return 0;
+@@ -785,6 +799,17 @@ static int zswap_zpool_param_set(const char *val,
+ return __zswap_param_set(val, kp, NULL, zswap_compressor);
+ }
+
++static int zswap_enabled_param_set(const char *val,
++ const struct kernel_param *kp)
++{
++ if (zswap_init_failed) {
++ pr_err("can't enable, initialization failed\n");
++ return -ENODEV;
++ }
++
++ return param_set_bool(val, kp);
++}
++
+ /*********************************
+ * writeback code
+ **********************************/
+@@ -1271,6 +1296,9 @@ static int __init init_zswap(void)
+ dstmem_fail:
+ zswap_entry_cache_destroy();
+ cache_fail:
++ /* if built-in, we aren't unloaded on failure; don't allow use */
++ zswap_init_failed = true;
++ zswap_enabled = false;
+ return -ENOMEM;
+ }
+ /* must be late so crypto has time to come up */
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 436a753..5e9ed5e 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -734,14 +734,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops,
+
+ static void bcm_remove_op(struct bcm_op *op)
+ {
+- hrtimer_cancel(&op->timer);
+- hrtimer_cancel(&op->thrtimer);
+-
+- if (op->tsklet.func)
+- tasklet_kill(&op->tsklet);
++ if (op->tsklet.func) {
++ while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
++ test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
++ hrtimer_active(&op->timer)) {
++ hrtimer_cancel(&op->timer);
++ tasklet_kill(&op->tsklet);
++ }
++ }
+
+- if (op->thrtsklet.func)
+- tasklet_kill(&op->thrtsklet);
++ if (op->thrtsklet.func) {
++ while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
++ test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
++ hrtimer_active(&op->thrtimer)) {
++ hrtimer_cancel(&op->thrtimer);
++ tasklet_kill(&op->thrtsklet);
++ }
++ }
+
+ if ((op->frames) && (op->frames != &op->sframe))
+ kfree(op->frames);
+diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
+index dc6fb79..25d9a9c 100644
+--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
++++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
+@@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
+ if (!oa->data)
+ return -ENOMEM;
+
+- creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
++ creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
+ if (!creds) {
+ kfree(oa->data);
+ return -ENOMEM;
diff --git a/4.9.9/4420_grsecurity-3.1-4.9.9-201702122044.patch b/4.9.9/4420_grsecurity-3.1-4.9.9-201702122044.patch
new file mode 100644
index 0000000..32e3834
--- /dev/null
+++ b/4.9.9/4420_grsecurity-3.1-4.9.9-201702122044.patch
@@ -0,0 +1,225786 @@
+diff --git a/Documentation/dontdiff b/Documentation/dontdiff
+index 5385cba..607c6a0 100644
+--- a/Documentation/dontdiff
++++ b/Documentation/dontdiff
+@@ -7,6 +7,7 @@
+ *.cis
+ *.cpio
+ *.csp
++*.dbg
+ *.dsp
+ *.dvi
+ *.elf
+@@ -16,6 +17,7 @@
+ *.gcov
+ *.gen.S
+ *.gif
++*.gmo
+ *.grep
+ *.grp
+ *.gz
+@@ -52,14 +54,17 @@
+ *.tab.h
+ *.tex
+ *.ver
++*.vim
+ *.xml
+ *.xz
+ *_MODULES
++*_reg_safe.h
+ *_vga16.c
+ *~
+ \#*#
+ *.9
+-.*
++.[^g]*
++.gen*
+ .*.d
+ .mm
+ 53c700_d.h
+@@ -73,9 +78,11 @@ Image
+ Module.markers
+ Module.symvers
+ PENDING
++PERF*
+ SCCS
+ System.map*
+ TAGS
++TRACEEVENT-CFLAGS
+ aconf
+ af_names.h
+ aic7*reg.h*
+@@ -84,6 +91,7 @@ aic7*seq.h*
+ aicasm
+ aicdb.h*
+ altivec*.c
++ashldi3.S
+ asm-offsets.h
+ asm_offsets.h
+ autoconf.h*
+@@ -96,11 +104,14 @@ bounds.h
+ bsetup
+ btfixupprep
+ build
++builtin-policy.h
+ bvmlinux
+ bzImage*
+ capability_names.h
+ capflags.c
+ classlist.h*
++clut_vga16.c
++common-cmds.h
+ comp*.log
+ compile.h*
+ conf
+@@ -109,19 +120,23 @@ config-*
+ config_data.h*
+ config.mak
+ config.mak.autogen
++config.tmp
+ conmakehash
+ consolemap_deftbl.c*
+ cpustr.h
+ crc32table.h*
+ cscope.*
+ defkeymap.c
++devicetable-offsets.h
+ devlist.h*
+ dnotify_test
+ docproc
+ dslm
++dtc-lexer.lex.c
+ elf2ecoff
+ elfconfig.h*
+ evergreen_reg_safe.h
++exception_policy.conf
+ fixdep
+ flask.h
+ fore200e_mkfirm
+@@ -129,12 +144,15 @@ fore200e_pca_fw.c*
+ gconf
+ gconf.glade.h
+ gen-devlist
++gen-kdb_cmds.c
+ gen_crc32table
+ gen_init_cpio
+ generated
+ genheaders
+ genksyms
+ *_gray256.c
++hash
++hid-example
+ hpet_example
+ hugepage-mmap
+ hugepage-shm
+@@ -149,14 +167,14 @@ int32.c
+ int4.c
+ int8.c
+ kallsyms
+-kconfig
++kern_constants.h
+ keywords.c
+ ksym.c*
+ ksym.h*
+ kxgettext
+ lex.c
+ lex.*.c
+-linux
++lib1funcs.S
+ logo_*.c
+ logo_*_clut224.c
+ logo_*_mono.c
+@@ -167,12 +185,14 @@ machtypes.h
+ map
+ map_hugetlb
+ mconf
++mdp
+ miboot*
+ mk_elfconfig
+ mkboot
+ mkbugboot
+ mkcpustr
+ mkdep
++mkpiggy
+ mkprep
+ mkregtable
+ mktables
+@@ -188,6 +208,8 @@ oui.c*
+ page-types
+ parse.c
+ parse.h
++parse-events*
++pasyms.h
+ patches*
+ pca200e.bin
+ pca200e_ecd.bin2
+@@ -197,6 +219,7 @@ perf-archive
+ piggyback
+ piggy.gzip
+ piggy.S
++pmu-*
+ pnmtologo
+ ppc_defs.h*
+ pss_boot.h
+@@ -206,7 +229,12 @@ r200_reg_safe.h
+ r300_reg_safe.h
+ r420_reg_safe.h
+ r600_reg_safe.h
++randomize_layout_hash.h
++randomize_layout_seed.h
++realmode.lds
++realmode.relocs
+ recordmcount
++regdb.c
+ relocs
+ rlim_names.h
+ rn50_reg_safe.h
+@@ -216,8 +244,17 @@ series
+ setup
+ setup.bin
+ setup.elf
++signing_key*
++aux.h
++disable.h
++e_fields.h
++e_fns.h
++e_fptrs.h
++e_vars.h
+ sImage
++slabinfo
+ sm_tbl*
++sortextable
+ split-include
+ syscalltab.h
+ tables.c
+@@ -227,6 +264,7 @@ tftpboot.img
+ timeconst.h
+ times.h*
+ trix_boot.h
++user_constants.h
+ utsrelease.h*
+ vdso-syms.lds
+ vdso.lds
+@@ -238,13 +276,17 @@ vdso32.lds
+ vdso32.so.dbg
+ vdso64.lds
+ vdso64.so.dbg
++vdsox32.lds
++vdsox32-syms.lds
+ version.h*
+ vmImage
+ vmlinux
+ vmlinux-*
+ vmlinux.aout
+ vmlinux.bin.all
++vmlinux.bin.bz2
+ vmlinux.lds
++vmlinux.relocs
+ vmlinuz
+ voffset.h
+ vsyscall.lds
+@@ -252,9 +294,12 @@ vsyscall_32.lds
+ wanxlfw.inc
+ uImage
+ unifdef
++utsrelease.h
+ wakeup.bin
+ wakeup.elf
+ wakeup.lds
++x509*
+ zImage*
+ zconf.hash.c
++zconf.lex.c
+ zoffset.h
+diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
+index 9b9c479..5a635ff 100644
+--- a/Documentation/kbuild/makefiles.txt
++++ b/Documentation/kbuild/makefiles.txt
+@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
+ === 4 Host Program support
+ --- 4.1 Simple Host Program
+ --- 4.2 Composite Host Programs
+- --- 4.3 Using C++ for host programs
+- --- 4.4 Controlling compiler options for host programs
+- --- 4.5 When host programs are actually built
+- --- 4.6 Using hostprogs-$(CONFIG_FOO)
++ --- 4.3 Defining shared libraries
++ --- 4.4 Using C++ for host programs
++ --- 4.5 Controlling compiler options for host programs
++ --- 4.6 When host programs are actually built
++ --- 4.7 Using hostprogs-$(CONFIG_FOO)
+
+ === 5 Kbuild clean infrastructure
+
+@@ -645,7 +646,29 @@ Both possibilities are described in the following.
+ Finally, the two .o files are linked to the executable, lxdialog.
+ Note: The syntax <executable>-y is not permitted for host-programs.
+
+---- 4.3 Using C++ for host programs
++--- 4.3 Defining shared libraries
++
++ Objects with extension .so are considered shared libraries, and
++ will be compiled as position independent objects.
++ Kbuild provides support for shared libraries, but the usage
++ shall be restricted.
++ In the following example the libkconfig.so shared library is used
++ to link the executable conf.
++
++ Example:
++ #scripts/kconfig/Makefile
++ hostprogs-y := conf
++ conf-objs := conf.o libkconfig.so
++ libkconfig-objs := expr.o type.o
++
++ Shared libraries always require a corresponding -objs line, and
++ in the example above the shared library libkconfig is composed by
++ the two objects expr.o and type.o.
++ expr.o and type.o will be built as position independent code and
++ linked as a shared library libkconfig.so. C++ is not supported for
++ shared libraries.
++
++--- 4.4 Using C++ for host programs
+
+ kbuild offers support for host programs written in C++. This was
+ introduced solely to support kconfig, and is not recommended
+@@ -668,7 +691,7 @@ Both possibilities are described in the following.
+ qconf-cxxobjs := qconf.o
+ qconf-objs := check.o
+
+---- 4.4 Controlling compiler options for host programs
++--- 4.5 Controlling compiler options for host programs
+
+ When compiling host programs, it is possible to set specific flags.
+ The programs will always be compiled utilising $(HOSTCC) passed
+@@ -696,7 +719,7 @@ Both possibilities are described in the following.
+ When linking qconf, it will be passed the extra option
+ "-L$(QTDIR)/lib".
+
+---- 4.5 When host programs are actually built
++--- 4.6 When host programs are actually built
+
+ Kbuild will only build host-programs when they are referenced
+ as a prerequisite.
+@@ -727,7 +750,7 @@ Both possibilities are described in the following.
+ This will tell kbuild to build lxdialog even if not referenced in
+ any rule.
+
+---- 4.6 Using hostprogs-$(CONFIG_FOO)
++--- 4.7 Using hostprogs-$(CONFIG_FOO)
+
+ A typical pattern in a Kbuild file looks like this:
+
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 922dec8..a45d4a2 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1422,6 +1422,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ [KNL] Should the hard-lockup detector generate
+ backtraces on all cpus.
+ Format: <integer>
++ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
++ ignore grsecurity's /proc restrictions
++
++ grsec_sysfs_restrict= Format: 0 | 1
++ Default: 1
++ Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
+
+ hashdist= [KNL,NUMA] Large hashes allocated during boot
+ are distributed across NUMA nodes. Defaults on
+@@ -2651,6 +2657,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ noexec=on: enable non-executable mappings (default)
+ noexec=off: disable non-executable mappings
+
++ nopcid [X86-64]
++ Disable PCID (Process-Context IDentifier) even if it
++ is supported by the processor.
++
+ nosmap [X86]
+ Disable SMAP (Supervisor Mode Access Prevention)
+ even if it is supported by processor.
+@@ -2959,6 +2969,35 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ the specified number of seconds. This is to be used if
+ your oopses keep scrolling off the screen.
+
++ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
++ virtualization environments that don't cope well with the
++ expand down segment used by UDEREF on X86-32 or the frequent
++ page table updates on X86-64.
++
++ pax_sanitize_slab=
++ Format: { 0 | 1 | off | fast | full }
++ Options '0' and '1' are only provided for backward
++ compatibility, 'off' or 'fast' should be used instead.
++ 0|off : disable slab object sanitization
++ 1|fast: enable slab object sanitization excluding
++ whitelisted slabs (default)
++ full : sanitize all slabs, even the whitelisted ones
++
++ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
++
++ pax_extra_latent_entropy
++ Enable a very simple form of latent entropy extraction
++ from the first 4GB of memory as the bootmem allocator
++ passes the memory pages to the buddy allocator.
++
++ pax_size_overflow_report_only
++ Enables rate-limited logging of size_overflow plugin
++ violations while disabling killing of the violating
++ task.
++
++ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
++ when the processor supports PCID.
++
+ pcbit= [HW,ISDN]
+
+ pcd. [PARIDE]
+diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
+index ffab8b5..b8fcd61 100644
+--- a/Documentation/sysctl/kernel.txt
++++ b/Documentation/sysctl/kernel.txt
+@@ -42,6 +42,7 @@ show up in /proc/sys/kernel:
+ - kptr_restrict
+ - kstack_depth_to_print [ X86 only ]
+ - l2cr [ PPC only ]
++- modify_ldt [ X86 only ]
+ - modprobe ==> Documentation/debugging-modules.txt
+ - modules_disabled
+ - msg_next_id [ sysv ipc ]
+@@ -409,6 +410,20 @@ This flag controls the L2 cache of G3 processor boards. If
+
+ ==============================================================
+
++modify_ldt: (X86 only)
++
++Enables (1) or disables (0) the modify_ldt syscall. Modifying the LDT
++(Local Descriptor Table) may be needed to run a 16-bit or segmented code
++such as Dosemu or Wine. This is done via a system call which is not needed
++to run portable applications, and which can sometimes be abused to exploit
++some weaknesses of the architecture, opening new vulnerabilities.
++
++This sysctl allows one to increase the system's security by disabling the
++system call, or to restore compatibility with specific applications when it
++was already disabled.
++
++==============================================================
++
+ modules_disabled:
+
+ A toggle value indicating if modules are allowed to be loaded
+diff --git a/Kbuild b/Kbuild
+index 3d0ae15..84e5412 100644
+--- a/Kbuild
++++ b/Kbuild
+@@ -91,6 +91,7 @@ $(obj)/$(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s FORCE
+ always += missing-syscalls
+ targets += missing-syscalls
+
++GCC_PLUGINS_missing-syscalls := n
+ quiet_cmd_syscalls = CALL $<
+ cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) $(missing_syscalls_flags)
+
+diff --git a/Makefile b/Makefile
+index c0c41c9..630adc4 100644
+--- a/Makefile
++++ b/Makefile
+@@ -302,7 +302,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
+ HOSTCC = gcc
+ HOSTCXX = g++
+ HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
+-HOSTCXXFLAGS = -O2
++HOSTCFLAGS = -W -Wno-unused-parameter -Wno-missing-field-initializers -fno-delete-null-pointer-checks
++HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
++HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
+
+ ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
+ HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
+@@ -731,7 +733,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
+ else
+ KBUILD_CFLAGS += -g
+ endif
+-KBUILD_AFLAGS += -Wa,-gdwarf-2
++KBUILD_AFLAGS += -Wa,--gdwarf-2
+ endif
+ ifdef CONFIG_DEBUG_INFO_DWARF4
+ KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
+@@ -910,7 +912,7 @@ export mod_sign_cmd
+
+
+ ifeq ($(KBUILD_EXTMOD),)
+-core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
++core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+
+ vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
+@@ -1274,7 +1276,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
+ Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
+ signing_key.pem signing_key.priv signing_key.x509 \
+ x509.genkey extra_certificates signing_key.x509.keyid \
+- signing_key.x509.signer vmlinux-gdb.py
++ signing_key.x509.signer vmlinux-gdb.py \
++ scripts/gcc-plugins/size_overflow_plugin/e_*.h \
++ scripts/gcc-plugins/size_overflow_plugin/disable.h \
++ scripts/gcc-plugins/randomize_layout_seed.h
+
+ # clean - Delete most, but leave enough to build external modules
+ #
+@@ -1314,7 +1319,7 @@ distclean: mrproper
+ @find $(srctree) $(RCS_FIND_IGNORE) \
+ \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
+ -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
+- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
++ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
+ -type f -print | xargs rm -f
+
+
+diff --git a/arch/Kconfig b/arch/Kconfig
+index 659bdd0..4179181 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -164,6 +164,7 @@ config ARCH_USE_BUILTIN_BSWAP
+ config KRETPROBES
+ def_bool y
+ depends on KPROBES && HAVE_KRETPROBES
++ depends on !PAX_RAP
+
+ config USER_RETURN_NOTIFIER
+ bool
+@@ -355,7 +356,7 @@ config HAVE_GCC_PLUGINS
+ menuconfig GCC_PLUGINS
+ bool "GCC plugins"
+ depends on HAVE_GCC_PLUGINS
+- depends on !COMPILE_TEST
++ default y
+ help
+ GCC plugins are loadable modules that provide extra features to the
+ compiler. They are useful for runtime instrumentation and static analysis.
+@@ -759,6 +760,7 @@ config VMAP_STACK
+ default y
+ bool "Use a virtually-mapped stack"
+ depends on HAVE_ARCH_VMAP_STACK && !KASAN
++ depends on !GRKERNSEC_KSTACKOVERFLOW
+ ---help---
+ Enable this if you want the use virtually-mapped kernel stacks
+ with guard pages. This causes kernel stack overflows to be
+diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
+index 498933a..78d2b22 100644
+--- a/arch/alpha/include/asm/atomic.h
++++ b/arch/alpha/include/asm/atomic.h
+@@ -308,4 +308,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
+ #define atomic_dec(v) atomic_sub(1,(v))
+ #define atomic64_dec(v) atomic64_sub(1,(v))
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* _ALPHA_ATOMIC_H */
+diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
+index ad368a9..fbe0f25 100644
+--- a/arch/alpha/include/asm/cache.h
++++ b/arch/alpha/include/asm/cache.h
+@@ -4,19 +4,19 @@
+ #ifndef __ARCH_ALPHA_CACHE_H
+ #define __ARCH_ALPHA_CACHE_H
+
++#include <linux/const.h>
+
+ /* Bytes per L1 (data) cache line. */
+ #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
+-# define L1_CACHE_BYTES 64
+ # define L1_CACHE_SHIFT 6
+ #else
+ /* Both EV4 and EV5 are write-through, read-allocate,
+ direct-mapped, physical.
+ */
+-# define L1_CACHE_BYTES 32
+ # define L1_CACHE_SHIFT 5
+ #endif
+
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+ #endif
+diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
+index 968d999..d36b2df 100644
+--- a/arch/alpha/include/asm/elf.h
++++ b/arch/alpha/include/asm/elf.h
+@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
++#endif
++
+ /* $0 is set by ld.so to a pointer to a function which might be
+ registered using atexit. This provides a mean for the dynamic
+ linker to call DT_FINI functions for shared libraries that have
+diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
+index c2ebb6f..93a0613 100644
+--- a/arch/alpha/include/asm/pgalloc.h
++++ b/arch/alpha/include/asm/pgalloc.h
+@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+ pgd_set(pgd, pmd);
+ }
+
++static inline void
++pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
++{
++ pgd_populate(mm, pgd, pmd);
++}
++
+ extern pgd_t *pgd_alloc(struct mm_struct *mm);
+
+ static inline void
+diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
+index a9a1195..e9b8417 100644
+--- a/arch/alpha/include/asm/pgtable.h
++++ b/arch/alpha/include/asm/pgtable.h
+@@ -101,6 +101,17 @@ struct vm_area_struct;
+ #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
+ #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+ #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+
+ #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
+diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
+index 936bc8f..bb1859f 100644
+--- a/arch/alpha/kernel/module.c
++++ b/arch/alpha/kernel/module.c
+@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
+
+ /* The small sections were sorted to the end of the segment.
+ The following should definitely cover them. */
+- gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000;
++ gp = (u64)me->core_layout.base_rw + me->core_layout.size_rw - 0x8000;
+ got = sechdrs[me->arch.gotsecindex].sh_addr;
+
+ for (i = 0; i < n; i++) {
+diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
+index ffb93f49..ced8233 100644
+--- a/arch/alpha/kernel/osf_sys.c
++++ b/arch/alpha/kernel/osf_sys.c
+@@ -1300,10 +1300,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
+ generic version except that we know how to honor ADDR_LIMIT_32BIT. */
+
+ static unsigned long
+-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+- unsigned long limit)
++arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
++ unsigned long limit, unsigned long flags)
+ {
+ struct vm_unmapped_area_info info;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ info.flags = 0;
+ info.length = len;
+@@ -1311,6 +1312,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+ info.high_limit = limit;
+ info.align_mask = 0;
+ info.align_offset = 0;
++ info.threadstack_offset = offset;
+ return vm_unmapped_area(&info);
+ }
+
+@@ -1343,20 +1345,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ merely specific addresses, but regions of memory -- perhaps
+ this feature should be incorporated into all ports? */
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
++ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+ }
+
+ /* Next, try allocating at TASK_UNMAPPED_BASE. */
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+- len, limit);
++ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
++
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+
+ /* Finally, try allocating in low memory. */
+- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
++ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
+
+ return addr;
+ }
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index 83e9eee..db02682 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -52,6 +52,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
+ __reload_thread(pcb);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int ldah, ldq, jmp;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
++ jmp == 0x6BFB0000U)
++ {
++ unsigned long r27, addr;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
++
++ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ err = get_user(r27, (unsigned long *)addr);
++ if (err)
++ break;
++
++ regs->r27 = r27;
++ regs->pc = r27;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ldah, lda, br;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(lda, (unsigned int *)(regs->pc+4));
++ err |= get_user(br, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (lda & 0xFFFF0000U) == 0xA77B0000U &&
++ (br & 0xFFE00000U) == 0xC3E00000U)
++ {
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
++
++ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int br;
++
++ err = get_user(br, (unsigned int *)regs->pc);
++
++ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
++ unsigned int br2, ldq, nop, jmp;
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
++
++ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ err = get_user(br2, (unsigned int *)addr);
++ err |= get_user(ldq, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ err |= get_user(jmp, (unsigned int *)(addr+12));
++ err |= get_user(resolver, (unsigned long *)(addr+16));
++
++ if (err)
++ break;
++
++ if (br2 == 0xC3600000U &&
++ ldq == 0xA77B000CU &&
++ nop == 0x47FF041FU &&
++ jmp == 0x6B7B0000U)
++ {
++ regs->r28 = regs->pc+4;
++ regs->r27 = addr+16;
++ regs->pc = resolver;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
+
+ /*
+ * This routine handles page faults. It determines the address,
+@@ -132,8 +250,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
+ good_area:
+ si_code = SEGV_ACCERR;
+ if (cause < 0) {
+- if (!(vma->vm_flags & VM_EXEC))
++ if (!(vma->vm_flags & VM_EXEC)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
++ do_group_exit(SIGKILL);
++#else
+ goto bad_area;
++#endif
++
++ }
+ } else if (!cause) {
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
+index 42b0504..6013221 100644
+--- a/arch/arc/kernel/kprobes.c
++++ b/arch/arc/kernel/kprobes.c
+@@ -424,6 +424,7 @@ static void __used kretprobe_trampoline_holder(void)
+ "kretprobe_trampoline:\n" "nop\n");
+ }
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -433,6 +434,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr */
+ regs->blink = (unsigned long)&kretprobe_trampoline;
+ }
++#endif
+
+ static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ struct pt_regs *regs)
+@@ -509,6 +511,7 @@ int __init arch_init_kprobes(void)
+ return register_kprobe(&trampoline_p);
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
+@@ -516,6 +519,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+
+ return 0;
+ }
++#endif
+
+ void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
+ {
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index b5d529f..0bb4d4f 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1622,6 +1622,7 @@ config AEABI
+ config OABI_COMPAT
+ bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
+ depends on AEABI && !THUMB2_KERNEL
++ depends on !GRKERNSEC
+ help
+ This option preserves the old syscall interface along with the
+ new (ARM EABI) one. It also provides a compatibility layer to
+@@ -1690,6 +1691,7 @@ config HIGHPTE
+ config CPU_SW_DOMAIN_PAN
+ bool "Enable use of CPU domains to implement privileged no-access"
+ depends on MMU && !ARM_LPAE
++ depends on !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+ default y
+ help
+ Increase kernel security by ensuring that normal kernel accesses
+@@ -1766,7 +1768,7 @@ config ALIGNMENT_TRAP
+
+ config UACCESS_WITH_MEMCPY
+ bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
+- depends on MMU
++ depends on MMU && !PAX_MEMORY_UDEREF
+ default y if CPU_FEROCEON
+ help
+ Implement faster copy_to_user and clear_user methods for CPU
+@@ -2021,6 +2023,7 @@ config KEXEC
+ depends on (!SMP || PM_SLEEP_SMP)
+ depends on !CPU_V7M
+ select KEXEC_CORE
++ depends on !GRKERNSEC_KMEM
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+@@ -2065,7 +2068,7 @@ config EFI_STUB
+
+ config EFI
+ bool "UEFI runtime support"
+- depends on OF && !CPU_BIG_ENDIAN && MMU && AUTO_ZRELADDR && !XIP_KERNEL
++ depends on OF && !CPU_BIG_ENDIAN && MMU && AUTO_ZRELADDR && !XIP_KERNEL && !PAX_KERNEXEC
+ select UCS2_STRING
+ select EFI_PARAMS_FROM_FDT
+ select EFI_STUB
+diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
+index d83f7c3..a6aba4c 100644
+--- a/arch/arm/Kconfig.debug
++++ b/arch/arm/Kconfig.debug
+@@ -7,6 +7,7 @@ config ARM_PTDUMP
+ depends on DEBUG_KERNEL
+ depends on MMU
+ select DEBUG_FS
++ depends on !GRKERNSEC_KMEM
+ ---help---
+ Say Y here if you want to show the kernel pagetable layout in a
+ debugfs file. This information is only useful for kernel developers
+diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
+index d50430c..39509a6 100644
+--- a/arch/arm/boot/compressed/Makefile
++++ b/arch/arm/boot/compressed/Makefile
+@@ -24,6 +24,8 @@ endif
+
+ GCOV_PROFILE := n
+
++GCC_PLUGINS := n
++
+ #
+ # Architecture dependencies
+ #
+diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
+index 6fc73bf..d0af3c7b 100644
+--- a/arch/arm/crypto/sha1_glue.c
++++ b/arch/arm/crypto/sha1_glue.c
+@@ -27,8 +27,8 @@
+
+ #include "sha1.h"
+
+-asmlinkage void sha1_block_data_order(u32 *digest,
+- const unsigned char *data, unsigned int rounds);
++asmlinkage void sha1_block_data_order(struct sha1_state *digest,
++ const u8 *data, int rounds);
+
+ int sha1_update_arm(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+@@ -36,22 +36,20 @@ int sha1_update_arm(struct shash_desc *desc, const u8 *data,
+ /* make sure casting to sha1_block_fn() is safe */
+ BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0);
+
+- return sha1_base_do_update(desc, data, len,
+- (sha1_block_fn *)sha1_block_data_order);
++ return sha1_base_do_update(desc, data, len, sha1_block_data_order);
+ }
+ EXPORT_SYMBOL_GPL(sha1_update_arm);
+
+ static int sha1_final(struct shash_desc *desc, u8 *out)
+ {
+- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_block_data_order);
++ sha1_base_do_finalize(desc, sha1_block_data_order);
+ return sha1_base_finish(desc, out);
+ }
+
+ int sha1_finup_arm(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+ {
+- sha1_base_do_update(desc, data, len,
+- (sha1_block_fn *)sha1_block_data_order);
++ sha1_base_do_update(desc, data, len, sha1_block_data_order);
+ return sha1_final(desc, out);
+ }
+ EXPORT_SYMBOL_GPL(sha1_finup_arm);
+diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
+index 4e22f12..49902aa 100644
+--- a/arch/arm/crypto/sha1_neon_glue.c
++++ b/arch/arm/crypto/sha1_neon_glue.c
+@@ -31,8 +31,8 @@
+
+ #include "sha1.h"
+
+-asmlinkage void sha1_transform_neon(void *state_h, const char *data,
+- unsigned int rounds);
++asmlinkage void sha1_transform_neon(struct sha1_state *state_h, const u8 *data,
++ int rounds);
+
+ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+@@ -45,7 +45,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
+
+ kernel_neon_begin();
+ sha1_base_do_update(desc, data, len,
+- (sha1_block_fn *)sha1_transform_neon);
++ sha1_transform_neon);
+ kernel_neon_end();
+
+ return 0;
+@@ -60,8 +60,8 @@ static int sha1_neon_finup(struct shash_desc *desc, const u8 *data,
+ kernel_neon_begin();
+ if (len)
+ sha1_base_do_update(desc, data, len,
+- (sha1_block_fn *)sha1_transform_neon);
+- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_transform_neon);
++ sha1_transform_neon);
++ sha1_base_do_finalize(desc, sha1_transform_neon);
+ kernel_neon_end();
+
+ return sha1_base_finish(desc, out);
+diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
+index a84e869..53a0c61 100644
+--- a/arch/arm/crypto/sha256_glue.c
++++ b/arch/arm/crypto/sha256_glue.c
+@@ -30,8 +30,8 @@
+
+ #include "sha256_glue.h"
+
+-asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
+- unsigned int num_blks);
++asmlinkage void sha256_block_data_order(struct sha256_state *digest, const u8 *data,
++ int num_blks);
+
+ int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+@@ -39,23 +39,20 @@ int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
+ /* make sure casting to sha256_block_fn() is safe */
+ BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
+
+- return sha256_base_do_update(desc, data, len,
+- (sha256_block_fn *)sha256_block_data_order);
++ return sha256_base_do_update(desc, data, len, sha256_block_data_order);
+ }
+ EXPORT_SYMBOL(crypto_sha256_arm_update);
+
+ static int sha256_final(struct shash_desc *desc, u8 *out)
+ {
+- sha256_base_do_finalize(desc,
+- (sha256_block_fn *)sha256_block_data_order);
++ sha256_base_do_finalize(desc, sha256_block_data_order);
+ return sha256_base_finish(desc, out);
+ }
+
+ int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+ {
+- sha256_base_do_update(desc, data, len,
+- (sha256_block_fn *)sha256_block_data_order);
++ sha256_base_do_update(desc, data, len, sha256_block_data_order);
+ return sha256_final(desc, out);
+ }
+ EXPORT_SYMBOL(crypto_sha256_arm_finup);
+diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
+index 39ccd65..f9511cb 100644
+--- a/arch/arm/crypto/sha256_neon_glue.c
++++ b/arch/arm/crypto/sha256_neon_glue.c
+@@ -26,8 +26,8 @@
+
+ #include "sha256_glue.h"
+
+-asmlinkage void sha256_block_data_order_neon(u32 *digest, const void *data,
+- unsigned int num_blks);
++asmlinkage void sha256_block_data_order_neon(struct sha256_state *digest, const u8 *data,
++ int num_blks);
+
+ static int sha256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+@@ -39,8 +39,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
+ return crypto_sha256_arm_update(desc, data, len);
+
+ kernel_neon_begin();
+- sha256_base_do_update(desc, data, len,
+- (sha256_block_fn *)sha256_block_data_order_neon);
++ sha256_base_do_update(desc, data, len, sha256_block_data_order_neon);
+ kernel_neon_end();
+
+ return 0;
+@@ -54,10 +53,8 @@ static int sha256_finup(struct shash_desc *desc, const u8 *data,
+
+ kernel_neon_begin();
+ if (len)
+- sha256_base_do_update(desc, data, len,
+- (sha256_block_fn *)sha256_block_data_order_neon);
+- sha256_base_do_finalize(desc,
+- (sha256_block_fn *)sha256_block_data_order_neon);
++ sha256_base_do_update(desc, data, len, sha256_block_data_order_neon);
++ sha256_base_do_finalize(desc, sha256_block_data_order_neon);
+ kernel_neon_end();
+
+ return sha256_base_finish(desc, out);
+diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c
+index 269a394..c7a91f1 100644
+--- a/arch/arm/crypto/sha512-glue.c
++++ b/arch/arm/crypto/sha512-glue.c
+@@ -28,27 +28,24 @@ MODULE_ALIAS_CRYPTO("sha512");
+ MODULE_ALIAS_CRYPTO("sha384-arm");
+ MODULE_ALIAS_CRYPTO("sha512-arm");
+
+-asmlinkage void sha512_block_data_order(u64 *state, u8 const *src, int blocks);
++asmlinkage void sha512_block_data_order(struct sha512_state *state, u8 const *src, int blocks);
+
+ int sha512_arm_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+ {
+- return sha512_base_do_update(desc, data, len,
+- (sha512_block_fn *)sha512_block_data_order);
++ return sha512_base_do_update(desc, data, len, sha512_block_data_order);
+ }
+
+ int sha512_arm_final(struct shash_desc *desc, u8 *out)
+ {
+- sha512_base_do_finalize(desc,
+- (sha512_block_fn *)sha512_block_data_order);
++ sha512_base_do_finalize(desc, sha512_block_data_order);
+ return sha512_base_finish(desc, out);
+ }
+
+ int sha512_arm_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+ {
+- sha512_base_do_update(desc, data, len,
+- (sha512_block_fn *)sha512_block_data_order);
++ sha512_base_do_update(desc, data, len, sha512_block_data_order);
+ return sha512_arm_final(desc, out);
+ }
+
+diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c
+index 3269368..9fcbc00 100644
+--- a/arch/arm/crypto/sha512-neon-glue.c
++++ b/arch/arm/crypto/sha512-neon-glue.c
+@@ -22,7 +22,7 @@
+ MODULE_ALIAS_CRYPTO("sha384-neon");
+ MODULE_ALIAS_CRYPTO("sha512-neon");
+
+-asmlinkage void sha512_block_data_order_neon(u64 *state, u8 const *src,
++asmlinkage void sha512_block_data_order_neon(struct sha512_state *state, u8 const *src,
+ int blocks);
+
+ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
+@@ -35,8 +35,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
+ return sha512_arm_update(desc, data, len);
+
+ kernel_neon_begin();
+- sha512_base_do_update(desc, data, len,
+- (sha512_block_fn *)sha512_block_data_order_neon);
++ sha512_base_do_update(desc, data, len, sha512_block_data_order_neon);
+ kernel_neon_end();
+
+ return 0;
+@@ -50,10 +49,8 @@ static int sha512_neon_finup(struct shash_desc *desc, const u8 *data,
+
+ kernel_neon_begin();
+ if (len)
+- sha512_base_do_update(desc, data, len,
+- (sha512_block_fn *)sha512_block_data_order_neon);
+- sha512_base_do_finalize(desc,
+- (sha512_block_fn *)sha512_block_data_order_neon);
++ sha512_base_do_update(desc, data, len, sha512_block_data_order_neon);
++ sha512_base_do_finalize(desc, sha512_block_data_order_neon);
+ kernel_neon_end();
+
+ return sha512_base_finish(desc, out);
+diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
+index 66d0e21..8fa3237 100644
+--- a/arch/arm/include/asm/atomic.h
++++ b/arch/arm/include/asm/atomic.h
+@@ -18,17 +18,41 @@
+ #include <asm/barrier.h>
+ #include <asm/cmpxchg.h>
+
++#ifdef CONFIG_GENERIC_ATOMIC64
++#include <asm-generic/atomic64.h>
++#endif
++
+ #define ATOMIC_INIT(i) { (i) }
+
+ #ifdef __KERNEL__
+
++#ifdef CONFIG_THUMB2_KERNEL
++#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
++#else
++#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
++#endif
++
++#define _ASM_EXTABLE(from, to) \
++" .pushsection __ex_table,\"a\"\n"\
++" .align 3\n" \
++" .long " #from ", " #to"\n" \
++" .popsection"
++
+ /*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
+ #define atomic_read(v) READ_ONCE((v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return READ_ONCE(v->counter);
++}
+ #define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ WRITE_ONCE(v->counter, i);
++}
+
+ #if __LINUX_ARM_ARCH__ >= 6
+
+@@ -38,45 +62,74 @@
+ * to ensure that the update happens.
+ */
+
+-#define ATOMIC_OP(op, c_op, asm_op) \
+-static inline void atomic_##op(int i, atomic_t *v) \
++#ifdef CONFIG_PAX_REFCOUNT
++#define __OVERFLOW_POST \
++ " bvc 3f\n" \
++ "2: " REFCOUNT_TRAP_INSN "\n"\
++ "3:\n"
++#define __OVERFLOW_POST_RETURN \
++ " bvc 3f\n" \
++ " mov %1, %0\n" \
++ "2: " REFCOUNT_TRAP_INSN "\n"\
++ "3:\n"
++#define __OVERFLOW_EXTABLE \
++ "4:\n" \
++ _ASM_EXTABLE(2b, 4b)
++#else
++#define __OVERFLOW_POST
++#define __OVERFLOW_POST_RETURN
++#define __OVERFLOW_EXTABLE
++#endif
++
++#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
++static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
+ { \
+ unsigned long tmp; \
+ int result; \
+ \
+ prefetchw(&v->counter); \
+- __asm__ __volatile__("@ atomic_" #op "\n" \
++ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
+ "1: ldrex %0, [%3]\n" \
+ " " #asm_op " %0, %0, %4\n" \
++ __OVERFLOW_POST \
+ " strex %1, %0, [%3]\n" \
+ " teq %1, #0\n" \
+-" bne 1b" \
++" bne 1b\n" \
++ __OVERFLOW_EXTABLE \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "Ir" (i) \
+ : "cc"); \
+ } \
+
+-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
++#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op)\
++ __ATOMIC_OP(op, , c_op, asm_op##s)
++
++#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
++static inline int atomic_##op##_return##suffix##_relaxed(int i, atomic##suffix##_t *v)\
+ { \
+- unsigned long tmp; \
++ int tmp; \
+ int result; \
+ \
+ prefetchw(&v->counter); \
+ \
+- __asm__ __volatile__("@ atomic_" #op "_return\n" \
++ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
+ "1: ldrex %0, [%3]\n" \
+-" " #asm_op " %0, %0, %4\n" \
+-" strex %1, %0, [%3]\n" \
+-" teq %1, #0\n" \
+-" bne 1b" \
+- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
++" " #asm_op " %1, %0, %4\n" \
++ __OVERFLOW_POST_RETURN \
++" strex %0, %1, [%3]\n" \
++" teq %0, #0\n" \
++" bne 1b\n" \
++ __OVERFLOW_EXTABLE \
++ : "=&r" (tmp), "=&r" (result), "+Qo" (v->counter) \
+ : "r" (&v->counter), "Ir" (i) \
+ : "cc"); \
+ \
+ return result; \
+ }
+
++#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)\
++ __ATOMIC_OP_RETURN(op, , c_op, asm_op##s)
++
+ #define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+ { \
+@@ -99,6 +152,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+ }
+
+ #define atomic_add_return_relaxed atomic_add_return_relaxed
++#define atomic_add_return_unchecked_relaxed atomic_add_return_unchecked_relaxed
+ #define atomic_sub_return_relaxed atomic_sub_return_relaxed
+ #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+ #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+@@ -141,12 +195,17 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ __asm__ __volatile__ ("@ atomic_add_unless\n"
+ "1: ldrex %0, [%4]\n"
+ " teq %0, %5\n"
+-" beq 2f\n"
+-" add %1, %0, %6\n"
++" beq 4f\n"
++" adds %1, %0, %6\n"
++
++ __OVERFLOW_POST
++
+ " strex %2, %1, [%4]\n"
+ " teq %2, #0\n"
+ " bne 1b\n"
+-"2:"
++
++ __OVERFLOW_EXTABLE
++
+ : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (u), "r" (a)
+ : "cc");
+@@ -157,14 +216,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ return oldval;
+ }
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
++{
++ unsigned long oldval, res;
++
++ smp_mb();
++
++ do {
++ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
++ "ldrex %1, [%3]\n"
++ "mov %0, #0\n"
++ "teq %1, %4\n"
++ "strexeq %0, %5, [%3]\n"
++ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
++ : "r" (&ptr->counter), "Ir" (old), "r" (new)
++ : "cc");
++ } while (res);
++
++ smp_mb();
++
++ return oldval;
++}
++
+ #else /* ARM_ARCH_6 */
+
+ #ifdef CONFIG_SMP
+ #error SMP not supported on pre-ARMv6 CPUs
+ #endif
+
+-#define ATOMIC_OP(op, c_op, asm_op) \
+-static inline void atomic_##op(int i, atomic_t *v) \
++#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
++static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
+ { \
+ unsigned long flags; \
+ \
+@@ -173,8 +254,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
+ raw_local_irq_restore(flags); \
+ } \
+
+-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+-static inline int atomic_##op##_return(int i, atomic_t *v) \
++#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
++ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
++
++#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
++static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
+ { \
+ unsigned long flags; \
+ int val; \
+@@ -201,6 +285,9 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
+ return val; \
+ }
+
++#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
++ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
++
+ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+ {
+ int ret;
+@@ -215,6 +302,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+ return ret;
+ }
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return atomic_cmpxchg((atomic_t *)v, old, new);
++}
++
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+ int c, old;
+@@ -250,16 +342,29 @@ ATOMIC_OPS(xor, ^=, eor)
+ #undef ATOMIC_OPS
+ #undef ATOMIC_FETCH_OP
+ #undef ATOMIC_OP_RETURN
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++#define atomic_xchg_unchecked(v, new) (xchg_unchecked(&((v)->counter), new))
+
+ #define atomic_inc(v) atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+ #define atomic_dec(v) atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+
+ #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
++#define atomic_inc_and_test_unchecked(v) (atomic_add_return_unchecked(1, v) == 0)
+ #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+ #define atomic_inc_return_relaxed(v) (atomic_add_return_relaxed(1, v))
++#define atomic_inc_return_unchecked_relaxed(v) (atomic_add_return_unchecked_relaxed(1, v))
+ #define atomic_dec_return_relaxed(v) (atomic_sub_return_relaxed(1, v))
+ #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+@@ -270,6 +375,14 @@ typedef struct {
+ long long counter;
+ } atomic64_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ long long counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
++
+ #define ATOMIC64_INIT(i) { (i) }
+
+ #ifdef CONFIG_ARM_LPAE
+@@ -286,6 +399,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+ return result;
+ }
+
++static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ long long result;
++
++ __asm__ __volatile__("@ atomic64_read_unchecked\n"
++" ldrd %0, %H0, [%1]"
++ : "=&r" (result)
++ : "r" (&v->counter), "Qo" (v->counter)
++ );
++
++ return result;
++}
++
+ static inline void atomic64_set(atomic64_t *v, long long i)
+ {
+ __asm__ __volatile__("@ atomic64_set\n"
+@@ -294,6 +420,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+ : "r" (&v->counter), "r" (i)
+ );
+ }
++
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
++{
++ __asm__ __volatile__("@ atomic64_set_unchecked\n"
++" strd %2, %H2, [%1]"
++ : "=Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ );
++}
+ #else
+ static inline long long atomic64_read(const atomic64_t *v)
+ {
+@@ -308,6 +443,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+ return result;
+ }
+
++static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ long long result;
++
++ __asm__ __volatile__("@ atomic64_read_unchecked\n"
++" ldrexd %0, %H0, [%1]"
++ : "=&r" (result)
++ : "r" (&v->counter), "Qo" (v->counter)
++ );
++
++ return result;
++}
++
+ static inline void atomic64_set(atomic64_t *v, long long i)
+ {
+ long long tmp;
+@@ -322,50 +470,82 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+ }
++
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
++{
++ long long tmp;
++
++ prefetchw(&v->counter);
++ __asm__ __volatile__("@ atomic64_set_unchecked\n"
++"1: ldrexd %0, %H0, [%2]\n"
++" strexd %0, %3, %H3, [%2]\n"
++" teq %0, #0\n"
++" bne 1b"
++ : "=&r" (tmp), "=Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ : "cc");
++}
+ #endif
+
+-#define ATOMIC64_OP(op, op1, op2) \
+-static inline void atomic64_##op(long long i, atomic64_t *v) \
++#define __OVERFLOW_POST_RETURN64 \
++ " bvc 3f\n" \
++" mov %Q1, %Q0\n" \
++" mov %R1, %R0\n" \
++ "2: " REFCOUNT_TRAP_INSN "\n"\
++ "3:\n"
++
++#define __ATOMIC64_OP(op, suffix, op1, op2) \
++static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
+ { \
+ long long result; \
+ unsigned long tmp; \
+ \
+ prefetchw(&v->counter); \
+- __asm__ __volatile__("@ atomic64_" #op "\n" \
++ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
+ "1: ldrexd %0, %H0, [%3]\n" \
+ " " #op1 " %Q0, %Q0, %Q4\n" \
+ " " #op2 " %R0, %R0, %R4\n" \
++ __OVERFLOW_POST \
+ " strexd %1, %0, %H0, [%3]\n" \
+ " teq %1, #0\n" \
+-" bne 1b" \
++" bne 1b\n" \
++ __OVERFLOW_EXTABLE \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "r" (i) \
+ : "cc"); \
+ } \
+
+-#define ATOMIC64_OP_RETURN(op, op1, op2) \
++#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2) \
++ __ATOMIC64_OP(op, , op1, op2##s)
++
++#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2) \
+ static inline long long \
+-atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
++atomic64_##op##_return##suffix##_relaxed(long long i, atomic64##suffix##_t *v) \
+ { \
+ long long result; \
+- unsigned long tmp; \
++ long long tmp; \
+ \
+ prefetchw(&v->counter); \
+ \
+- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
++ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
+ "1: ldrexd %0, %H0, [%3]\n" \
+-" " #op1 " %Q0, %Q0, %Q4\n" \
+-" " #op2 " %R0, %R0, %R4\n" \
+-" strexd %1, %0, %H0, [%3]\n" \
+-" teq %1, #0\n" \
+-" bne 1b" \
+- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
++" " #op1 " %Q1, %Q0, %Q4\n" \
++" " #op2 " %R1, %R0, %R4\n" \
++ __OVERFLOW_POST_RETURN64 \
++" strexd %0, %1, %H1, [%3]\n" \
++" teq %0, #0\n" \
++" bne 1b\n" \
++ __OVERFLOW_EXTABLE \
++ : "=&r" (tmp), "=&r" (result), "+Qo" (v->counter) \
+ : "r" (&v->counter), "r" (i) \
+ : "cc"); \
+ \
+ return result; \
+ }
+
++#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2) \
++ __ATOMIC64_OP_RETURN(op, , op1, op2##s)
++
+ #define ATOMIC64_FETCH_OP(op, op1, op2) \
+ static inline long long \
+ atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v) \
+@@ -398,6 +578,7 @@ ATOMIC64_OPS(add, adds, adc)
+ ATOMIC64_OPS(sub, subs, sbc)
+
+ #define atomic64_add_return_relaxed atomic64_add_return_relaxed
++#define atomic64_add_return_unchecked_relaxed atomic64_add_return_unchecked_relaxed
+ #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+ #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+ #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+@@ -422,7 +603,10 @@ ATOMIC64_OPS(xor, eor, eor)
+ #undef ATOMIC64_OPS
+ #undef ATOMIC64_FETCH_OP
+ #undef ATOMIC64_OP_RETURN
++#undef __ATOMIC64_OP_RETURN
+ #undef ATOMIC64_OP
++#undef __ATOMIC64_OP
++#undef __OVERFLOW_POST_RETURN
+
+ static inline long long
+ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
+@@ -448,6 +632,13 @@ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
+ }
+ #define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
+
++static inline long long
++atomic64_cmpxchg_unchecked_relaxed(atomic64_unchecked_t *ptr, long long old, long long new)
++{
++ return atomic64_cmpxchg_relaxed((atomic64_t *)ptr, old, new);
++}
++#define atomic64_cmpxchg_unchecked_relaxed atomic64_cmpxchg_unchecked_relaxed
++
+ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
+ {
+ long long result;
+@@ -468,25 +659,36 @@ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
+ }
+ #define atomic64_xchg_relaxed atomic64_xchg_relaxed
+
++static inline long long atomic64_xchg_unchecked_relaxed(atomic64_unchecked_t *ptr, long long new)
++{
++ return atomic64_xchg_relaxed((atomic64_t *)ptr, new);
++}
++#define atomic64_xchg_unchecked_relaxed atomic64_xchg_unchecked_relaxed
++
+ static inline long long atomic64_dec_if_positive(atomic64_t *v)
+ {
+ long long result;
+- unsigned long tmp;
++ u64 tmp;
+
+ smp_mb();
+ prefetchw(&v->counter);
+
+ __asm__ __volatile__("@ atomic64_dec_if_positive\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+-" subs %Q0, %Q0, #1\n"
+-" sbc %R0, %R0, #0\n"
+-" teq %R0, #0\n"
+-" bmi 2f\n"
+-" strexd %1, %0, %H0, [%3]\n"
+-" teq %1, #0\n"
++" subs %Q1, %Q0, #1\n"
++" sbcs %R1, %R0, #0\n"
++
++ __OVERFLOW_POST_RETURN64
++
++" teq %R1, #0\n"
++" bmi 4f\n"
++" strexd %0, %1, %H1, [%3]\n"
++" teq %0, #0\n"
+ " bne 1b\n"
+-"2:"
+- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++
++ __OVERFLOW_EXTABLE
++
++ : "=&r" (tmp), "=&r" (result), "+Qo" (v->counter)
+ : "r" (&v->counter)
+ : "cc");
+
+@@ -509,13 +711,18 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+ " teq %0, %5\n"
+ " teqeq %H0, %H5\n"
+ " moveq %1, #0\n"
+-" beq 2f\n"
++" beq 4f\n"
+ " adds %Q0, %Q0, %Q6\n"
+-" adc %R0, %R0, %R6\n"
++" adcs %R0, %R0, %R6\n"
++
++ __OVERFLOW_POST
++
+ " strexd %2, %0, %H0, [%4]\n"
+ " teq %2, #0\n"
+ " bne 1b\n"
+-"2:"
++
++ __OVERFLOW_EXTABLE
++
+ : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (u), "r" (a)
+ : "cc");
+@@ -526,12 +733,19 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+ return ret;
+ }
+
++#undef __OVERFLOW_EXTABLE
++#undef __OVERFLOW_POST_RETURN64
++#undef __OVERFLOW_POST
++
+ #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+ #define atomic64_inc(v) atomic64_add(1LL, (v))
++#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
+ #define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1LL, (v))
++#define atomic64_inc_return_unchecked_relaxed(v) atomic64_add_return_unchecked_relaxed(1LL, (v))
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+ #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
+ #define atomic64_dec(v) atomic64_sub(1LL, (v))
++#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
+ #define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1LL, (v))
+ #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
+index 75fe66b..2255c86 100644
+--- a/arch/arm/include/asm/cache.h
++++ b/arch/arm/include/asm/cache.h
+@@ -4,8 +4,10 @@
+ #ifndef __ASMARM_CACHE_H
+ #define __ASMARM_CACHE_H
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
+index bdd283b..e66fb83 100644
+--- a/arch/arm/include/asm/cacheflush.h
++++ b/arch/arm/include/asm/cacheflush.h
+@@ -116,7 +116,7 @@ struct cpu_cache_fns {
+ void (*dma_unmap_area)(const void *, size_t, int);
+
+ void (*dma_flush_range)(const void *, const void *);
+-};
++} __no_const __no_randomize_layout;
+
+ /*
+ * Select the calling method
+diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
+index 524692f..a8871ec 100644
+--- a/arch/arm/include/asm/checksum.h
++++ b/arch/arm/include/asm/checksum.h
+@@ -37,7 +37,19 @@ __wsum
+ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
+
+ __wsum
+-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
++__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
++
++static inline __wsum
++csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
++{
++ __wsum ret;
++ pax_open_userland();
++ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
++ pax_close_userland();
++ return ret;
++}
++
++
+
+ /*
+ * Fold a partial checksum without adding pseudo headers
+diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
+index 97882f9..ff9d6ac 100644
+--- a/arch/arm/include/asm/cmpxchg.h
++++ b/arch/arm/include/asm/cmpxchg.h
+@@ -117,6 +117,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
+ (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
+ sizeof(*(ptr))); \
+ })
++#define xchg_unchecked_relaxed(ptr, x) ({ \
++ (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
++ sizeof(*(ptr))); \
++})
+
+ #include <asm-generic/cmpxchg-local.h>
+
+@@ -128,6 +132,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
+ #endif
+
+ #define xchg xchg_relaxed
++#define xchg_unchecked xchg_unchecked_relaxed
+
+ /*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
+index baefe1d..29cb35a 100644
+--- a/arch/arm/include/asm/cpuidle.h
++++ b/arch/arm/include/asm/cpuidle.h
+@@ -32,7 +32,7 @@ struct device_node;
+ struct cpuidle_ops {
+ int (*suspend)(unsigned long arg);
+ int (*init)(struct device_node *, int cpu);
+-};
++} __no_const;
+
+ struct of_cpuidle_method {
+ const char *method;
+diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
+index 99d9f63..ec44cb5 100644
+--- a/arch/arm/include/asm/domain.h
++++ b/arch/arm/include/asm/domain.h
+@@ -42,7 +42,6 @@
+ #define DOMAIN_USER 1
+ #define DOMAIN_IO 0
+ #endif
+-#define DOMAIN_VECTORS 3
+
+ /*
+ * Domain types
+@@ -51,9 +50,28 @@
+ #define DOMAIN_CLIENT 1
+ #ifdef CONFIG_CPU_USE_DOMAINS
+ #define DOMAIN_MANAGER 3
++#define DOMAIN_VECTORS 3
++#define DOMAIN_USERCLIENT DOMAIN_CLIENT
+ #else
++
++#ifdef CONFIG_PAX_KERNEXEC
+ #define DOMAIN_MANAGER 1
++#define DOMAIN_KERNEXEC 3
++#else
++#define DOMAIN_MANAGER 1
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#define DOMAIN_USERCLIENT 0
++#define DOMAIN_UDEREF 1
++#define DOMAIN_VECTORS DOMAIN_KERNEL
++#else
++#define DOMAIN_USERCLIENT 1
++#define DOMAIN_VECTORS DOMAIN_USER
++#endif
++
+ #endif
++#define DOMAIN_KERNELCLIENT 1
+
+ #define domain_mask(dom) ((3) << (2 * (dom)))
+ #define domain_val(dom,type) ((type) << (2 * (dom)))
+@@ -62,13 +80,19 @@
+ #define DACR_INIT \
+ (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+- domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
++ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++ /* DOMAIN_VECTORS is defined to DOMAIN_KERNEL */
++#define DACR_INIT \
++ (domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
++ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
++ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
+ #else
+ #define DACR_INIT \
+- (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
++ (domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+- domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
++ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+ #endif
+
+@@ -124,6 +148,17 @@ static inline void set_domain(unsigned val)
+ set_domain(domain); \
+ } while (0)
+
++#elif defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++#define modify_domain(dom,type) \
++ do { \
++ struct thread_info *thread = current_thread_info(); \
++ unsigned int domain = get_domain(); \
++ domain &= ~domain_mask(dom); \
++ domain = domain | domain_val(dom, type); \
++ thread->cpu_domain = domain; \
++ set_domain(domain); \
++ } while (0)
++
+ #else
+ static inline void modify_domain(unsigned dom, unsigned type) { }
+ #endif
+diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
+index d2315ff..f60b47b 100644
+--- a/arch/arm/include/asm/elf.h
++++ b/arch/arm/include/asm/elf.h
+@@ -117,7 +117,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00008000UL
++
++#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#endif
+
+ /* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
+index de53547..52b9a28 100644
+--- a/arch/arm/include/asm/fncpy.h
++++ b/arch/arm/include/asm/fncpy.h
+@@ -81,7 +81,9 @@
+ BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
+ (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
+ \
++ pax_open_kernel(); \
+ memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
++ pax_close_kernel(); \
+ flush_icache_range((unsigned long)(dest_buf), \
+ (unsigned long)(dest_buf) + (size)); \
+ \
+diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
+index 6795368..6c4d749 100644
+--- a/arch/arm/include/asm/futex.h
++++ b/arch/arm/include/asm/futex.h
+@@ -107,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ return -EFAULT;
+
+ preempt_disable();
++
+ __ua_flags = uaccess_save_and_enable();
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ "1: " TUSER(ldr) " %1, [%4]\n"
+diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
+index 83eb2f7..ed77159 100644
+--- a/arch/arm/include/asm/kmap_types.h
++++ b/arch/arm/include/asm/kmap_types.h
+@@ -4,6 +4,6 @@
+ /*
+ * This is the "bare minimum". AIO seems to require this.
+ */
+-#define KM_TYPE_NR 16
++#define KM_TYPE_NR 17
+
+ #endif
+diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
+index 9e614a1..3302cca 100644
+--- a/arch/arm/include/asm/mach/dma.h
++++ b/arch/arm/include/asm/mach/dma.h
+@@ -22,7 +22,7 @@ struct dma_ops {
+ int (*residue)(unsigned int, dma_t *); /* optional */
+ int (*setspeed)(unsigned int, dma_t *, int); /* optional */
+ const char *type;
+-};
++} __do_const;
+
+ struct dma_struct {
+ void *addr; /* single DMA address */
+diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
+index 9b7c328..2dfe68b 100644
+--- a/arch/arm/include/asm/mach/map.h
++++ b/arch/arm/include/asm/mach/map.h
+@@ -23,17 +23,19 @@ struct map_desc {
+
+ /* types 0-3 are defined in asm/io.h */
+ enum {
+- MT_UNCACHED = 4,
+- MT_CACHECLEAN,
+- MT_MINICLEAN,
++ MT_UNCACHED_RW = 4,
++ MT_CACHECLEAN_RO,
++ MT_MINICLEAN_RO,
+ MT_LOW_VECTORS,
+ MT_HIGH_VECTORS,
+- MT_MEMORY_RWX,
++ __MT_MEMORY_RWX,
+ MT_MEMORY_RW,
+- MT_ROM,
+- MT_MEMORY_RWX_NONCACHED,
++ MT_MEMORY_RX,
++ MT_ROM_RX,
++ MT_MEMORY_RW_NONCACHED,
++ MT_MEMORY_RX_NONCACHED,
+ MT_MEMORY_RW_DTCM,
+- MT_MEMORY_RWX_ITCM,
++ MT_MEMORY_RX_ITCM,
+ MT_MEMORY_RW_SO,
+ MT_MEMORY_DMA_READY,
+ };
+diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
+index c2bf24f..69e437c 100644
+--- a/arch/arm/include/asm/outercache.h
++++ b/arch/arm/include/asm/outercache.h
+@@ -39,7 +39,7 @@ struct outer_cache_fns {
+ /* This is an ARM L2C thing */
+ void (*write_sec)(unsigned long, unsigned);
+ void (*configure)(const struct l2x0_regs *);
+-};
++} __no_const;
+
+ extern struct outer_cache_fns outer_cache;
+
+diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
+index 4355f0e..cd9168e 100644
+--- a/arch/arm/include/asm/page.h
++++ b/arch/arm/include/asm/page.h
+@@ -23,6 +23,7 @@
+
+ #else
+
++#include <linux/compiler.h>
+ #include <asm/glue.h>
+
+ /*
+@@ -114,7 +115,7 @@ struct cpu_user_fns {
+ void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
+ void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+-};
++} __no_const;
+
+ #ifdef MULTI_USER
+ extern struct cpu_user_fns cpu_user;
+diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
+index b2902a5..da11e4d 100644
+--- a/arch/arm/include/asm/pgalloc.h
++++ b/arch/arm/include/asm/pgalloc.h
+@@ -17,6 +17,7 @@
+ #include <asm/processor.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
++#include <asm/system_info.h>
+
+ #define check_pgt_cache() do { } while (0)
+
+@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
+ }
+
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
++
+ #else /* !CONFIG_ARM_LPAE */
+
+ /*
+@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
+ #define pmd_free(mm, pmd) do { } while (0)
+ #define pud_populate(mm,pmd,pte) BUG()
++#define pud_populate_kernel(mm,pmd,pte) BUG()
+
+ #endif /* CONFIG_ARM_LPAE */
+
+@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+ __free_page(pte);
+ }
+
++static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
++{
++#ifdef CONFIG_ARM_LPAE
++ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
++#else
++ if (addr & SECTION_SIZE)
++ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
++ else
++ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
++#endif
++ flush_pmd_entry(pmdp);
++}
++
+ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
+ pmdval_t prot)
+ {
+diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
+index 3f82e9d..2a85e8b 100644
+--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
++++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
+@@ -28,7 +28,7 @@
+ /*
+ * - section
+ */
+-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
++#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
+ #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
+ #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
+ #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
+@@ -40,6 +40,7 @@
+ #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
+ #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
+ #define PMD_SECT_AF (_AT(pmdval_t, 0))
++#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
+
+ #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
+ #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
+@@ -70,6 +71,7 @@
+ * - extended small page/tiny page
+ */
+ #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
++#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
+ #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
+ #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
+ #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
+diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
+index 92fd2c8..061dae1 100644
+--- a/arch/arm/include/asm/pgtable-2level.h
++++ b/arch/arm/include/asm/pgtable-2level.h
+@@ -127,6 +127,9 @@
+ #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
+ #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
+
++/* Two-level page tables only have PXN in the PGD, not in the PTE. */
++#define L_PTE_PXN (_AT(pteval_t, 0))
++
+ /*
+ * These are the memory types, defined to be compatible with
+ * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
+diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
+index 2a029bc..a0524c7 100644
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -80,6 +80,7 @@
+ #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
+ #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
+ #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
++#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
+ #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
+ #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
+ #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
+@@ -90,10 +91,12 @@
+ #define L_PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
+ #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
+ #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
++#define PMD_SECT_RDONLY PMD_SECT_AP2
+
+ /*
+ * To be used in assembly code with the upper page attributes.
+ */
++#define L_PTE_PXN_HIGH (1 << (53 - 32))
+ #define L_PTE_XN_HIGH (1 << (54 - 32))
+ #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
+
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index a8d656d..2febb8a 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -33,6 +33,9 @@
+ #include <asm/pgtable-2level.h>
+ #endif
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ /*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be a 8MB "hole" after the
+@@ -48,6 +51,9 @@
+ #define LIBRARY_TEXT_START 0x0c000000
+
+ #ifndef __ASSEMBLY__
++extern pteval_t __supported_pte_mask;
++extern pmdval_t __supported_pmd_mask;
++
+ extern void __pte_error(const char *file, int line, pte_t);
+ extern void __pmd_error(const char *file, int line, pmd_t);
+ extern void __pgd_error(const char *file, int line, pgd_t);
+@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
+ #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
+ #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
+
++#define __HAVE_ARCH_PAX_OPEN_KERNEL
++#define __HAVE_ARCH_PAX_CLOSE_KERNEL
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++#include <asm/domain.h>
++#include <linux/thread_info.h>
++#include <linux/preempt.h>
++
++static inline int test_domain(int domain, int domaintype)
++{
++ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
++}
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long pax_open_kernel(void) {
++#ifdef CONFIG_ARM_LPAE
++ /* TODO */
++#else
++ preempt_disable();
++ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
++ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
++#endif
++ return 0;
++}
++
++static inline unsigned long pax_close_kernel(void) {
++#ifdef CONFIG_ARM_LPAE
++ /* TODO */
++#else
++ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
++ /* DOMAIN_MANAGER = "client" under KERNEXEC */
++ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
++ preempt_enable_no_resched();
++#endif
++ return 0;
++}
++#else
++static inline unsigned long pax_open_kernel(void) { return 0; }
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++
+ /*
+ * This is the lowest virtual address we can permit any user space
+ * mapping to be mapped at. This is particularly important for
+@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
+ /*
+ * The pgprot_* and protection_map entries will be fixed up in runtime
+ * to include the cachable and bufferable bits based on memory policy,
+- * as well as any architecture dependent bits like global/ASID and SMP
+- * shared mapping bits.
++ * as well as any architecture dependent bits like global/ASID, PXN,
++ * and SMP shared mapping bits.
+ */
+ #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
+
+@@ -308,7 +356,7 @@ static inline pte_t pte_mknexec(pte_t pte)
+ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ {
+ const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
+- L_PTE_NONE | L_PTE_VALID;
++ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
+ pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+ return pte;
+ }
+diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
+index 3d6dc8b..1262ad3 100644
+--- a/arch/arm/include/asm/smp.h
++++ b/arch/arm/include/asm/smp.h
+@@ -108,7 +108,7 @@ struct smp_operations {
+ int (*cpu_disable)(unsigned int cpu);
+ #endif
+ #endif
+-};
++} __no_const;
+
+ struct of_cpu_method {
+ const char *method;
+diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
+index cf4f3aa..8f2f2d9 100644
+--- a/arch/arm/include/asm/string.h
++++ b/arch/arm/include/asm/string.h
+@@ -7,19 +7,19 @@
+ */
+
+ #define __HAVE_ARCH_STRRCHR
+-extern char * strrchr(const char * s, int c);
++extern char * strrchr(const char * s, int c) __nocapture(-1);
+
+ #define __HAVE_ARCH_STRCHR
+-extern char * strchr(const char * s, int c);
++extern char * strchr(const char * s, int c) __nocapture(-1);
+
+ #define __HAVE_ARCH_MEMCPY
+-extern void * memcpy(void *, const void *, __kernel_size_t);
++extern void * memcpy(void *, const void *, __kernel_size_t) __nocapture(2);
+
+ #define __HAVE_ARCH_MEMMOVE
+-extern void * memmove(void *, const void *, __kernel_size_t);
++extern void * memmove(void *, const void *, __kernel_size_t) __nocapture(2);
+
+ #define __HAVE_ARCH_MEMCHR
+-extern void * memchr(const void *, int, __kernel_size_t);
++extern void * memchr(const void *, int, __kernel_size_t) __nocapture(-1);
+
+ #define __HAVE_ARCH_MEMSET
+ extern void * memset(void *, int, __kernel_size_t);
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index 776757d..a552c1d 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -73,6 +73,9 @@ struct thread_info {
+ .flags = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
++ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
++ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
++ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
+ }
+
+ #define init_thread_info (init_thread_union.thread_info)
+@@ -143,6 +146,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
+ #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
+ #define TIF_SECCOMP 7 /* seccomp syscall filtering active */
++/* within 8 bits of TIF_SYSCALL_TRACE
++ * to meet flexible second operand requirements
++ */
++#define TIF_GRSEC_SETXID 8
+
+ #define TIF_NOHZ 12 /* in adaptive nohz mode */
+ #define TIF_USING_IWMMXT 17
+@@ -158,10 +165,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_SECCOMP (1 << TIF_SECCOMP)
+ #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
++#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
+
+ /* Checks for any syscall work in entry-common.S */
+ #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
++ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
+
+ /*
+ * Change these and you break ASM code in entry-common.S
+diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
+index f6fcc67..5895d62 100644
+--- a/arch/arm/include/asm/timex.h
++++ b/arch/arm/include/asm/timex.h
+@@ -13,6 +13,7 @@
+ #define _ASMARM_TIMEX_H
+
+ typedef unsigned long cycles_t;
++extern int read_current_timer(unsigned long *timer_val);
+ #define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; })
+
+ #endif
+diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
+index 5f833f7..76e6644 100644
+--- a/arch/arm/include/asm/tls.h
++++ b/arch/arm/include/asm/tls.h
+@@ -3,6 +3,7 @@
+
+ #include <linux/compiler.h>
+ #include <asm/thread_info.h>
++#include <asm/pgtable.h>
+
+ #ifdef __ASSEMBLY__
+ #include <asm/asm-offsets.h>
+@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
+ * at 0xffff0fe0 must be used instead. (see
+ * entry-armv.S for details)
+ */
++ pax_open_kernel();
+ *((unsigned int *)0xffff0ff0) = val;
++ pax_close_kernel();
+ #endif
+ }
+
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index 1f59ea05..81245f0 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -18,6 +18,7 @@
+ #include <asm/domain.h>
+ #include <asm/unified.h>
+ #include <asm/compiler.h>
++#include <asm/pgtable.h>
+
+ #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ #include <asm-generic/uaccess-unaligned.h>
+@@ -50,6 +51,59 @@ struct exception_table_entry
+ extern int fixup_exception(struct pt_regs *regs);
+
+ /*
++ * These two are intentionally not defined anywhere - if the kernel
++ * code generates any references to them, that's a bug.
++ */
++extern int __get_user_bad(void);
++extern int __put_user_bad(void);
++
++/*
++ * Note that this is actually 0x1,0000,0000
++ */
++#define KERNEL_DS 0x00000000
++#define get_ds() (KERNEL_DS)
++
++#ifdef CONFIG_MMU
++
++#define USER_DS TASK_SIZE
++#define get_fs() (current_thread_info()->addr_limit)
++
++static inline void set_fs(mm_segment_t fs)
++{
++ current_thread_info()->addr_limit = fs;
++ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
++}
++
++#define segment_eq(a, b) ((a) == (b))
++
++#define __HAVE_ARCH_PAX_OPEN_USERLAND
++#define __HAVE_ARCH_PAX_CLOSE_USERLAND
++
++static inline void pax_open_userland(void)
++{
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (segment_eq(get_fs(), USER_DS)) {
++ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
++ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
++ }
++#endif
++
++}
++
++static inline void pax_close_userland(void)
++{
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (segment_eq(get_fs(), USER_DS)) {
++ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
++ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
++ }
++#endif
++
++}
++
++/*
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel can not inadvertantly
+ * perform such accesses (eg, via list poison values) which could then
+@@ -66,6 +120,7 @@ static inline unsigned int uaccess_save_and_enable(void)
+
+ return old_domain;
+ #else
++ pax_open_userland();
+ return 0;
+ #endif
+ }
+@@ -75,35 +130,11 @@ static inline void uaccess_restore(unsigned int flags)
+ #ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /* Restore the user access mask */
+ set_domain(flags);
++#else
++ pax_close_userland();
+ #endif
+ }
+
+-/*
+- * These two are intentionally not defined anywhere - if the kernel
+- * code generates any references to them, that's a bug.
+- */
+-extern int __get_user_bad(void);
+-extern int __put_user_bad(void);
+-
+-/*
+- * Note that this is actually 0x1,0000,0000
+- */
+-#define KERNEL_DS 0x00000000
+-#define get_ds() (KERNEL_DS)
+-
+-#ifdef CONFIG_MMU
+-
+-#define USER_DS TASK_SIZE
+-#define get_fs() (current_thread_info()->addr_limit)
+-
+-static inline void set_fs(mm_segment_t fs)
+-{
+- current_thread_info()->addr_limit = fs;
+- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
+-}
+-
+-#define segment_eq(a, b) ((a) == (b))
+-
+ /* We use 33-bit arithmetic here... */
+ #define __range_ok(addr, size) ({ \
+ unsigned long flag, roksum; \
+@@ -268,6 +299,7 @@ static inline void set_fs(mm_segment_t fs)
+
+ #endif /* CONFIG_MMU */
+
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
+
+ #define user_addr_max() \
+@@ -474,10 +506,10 @@ do { \
+
+
+ #ifdef CONFIG_MMU
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(3)
+ arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+-static inline unsigned long __must_check
++static inline unsigned long __must_check __size_overflow(3)
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ unsigned int __ua_flags;
+@@ -489,9 +521,9 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ return n;
+ }
+
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(3)
+ arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(3)
+ __copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+ static inline unsigned long __must_check
+@@ -511,9 +543,9 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
+ #endif
+ }
+
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(2)
+ arm_clear_user(void __user *addr, unsigned long n);
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(2)
+ __clear_user_std(void __user *addr, unsigned long n);
+
+ static inline unsigned long __must_check
+@@ -534,6 +566,10 @@ __clear_user(void __user *addr, unsigned long n)
+ static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ unsigned long res = n;
++
++ if ((long)n < 0)
++ return n;
++
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = __copy_from_user(to, from, n);
+ if (unlikely(res))
+@@ -543,6 +579,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
+
+ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
+diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
+index 5af0ed1..cea83883 100644
+--- a/arch/arm/include/uapi/asm/ptrace.h
++++ b/arch/arm/include/uapi/asm/ptrace.h
+@@ -92,7 +92,7 @@
+ * ARMv7 groups of PSR bits
+ */
+ #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
+-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
++#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
+ #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
+ #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
+
+diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
+index 7e45f69..2c047db 100644
+--- a/arch/arm/kernel/armksyms.c
++++ b/arch/arm/kernel/armksyms.c
+@@ -59,7 +59,7 @@ EXPORT_SYMBOL(arm_delay_ops);
+
+ /* networking */
+ EXPORT_SYMBOL(csum_partial);
+-EXPORT_SYMBOL(csum_partial_copy_from_user);
++EXPORT_SYMBOL(__csum_partial_copy_from_user);
+ EXPORT_SYMBOL(csum_partial_copy_nocheck);
+ EXPORT_SYMBOL(__csum_ipv6_magic);
+
+diff --git a/arch/arm/kernel/efi.c b/arch/arm/kernel/efi.c
+index 9f43ba0..1cee475 100644
+--- a/arch/arm/kernel/efi.c
++++ b/arch/arm/kernel/efi.c
+@@ -60,9 +60,9 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
+ * preference.
+ */
+ if (md->attribute & EFI_MEMORY_WB)
+- desc.type = MT_MEMORY_RWX;
++ desc.type = __MT_MEMORY_RWX;
+ else if (md->attribute & EFI_MEMORY_WT)
+- desc.type = MT_MEMORY_RWX_NONCACHED;
++ desc.type = MT_MEMORY_RW_NONCACHED;
+ else if (md->attribute & EFI_MEMORY_WC)
+ desc.type = MT_DEVICE_WC;
+ else
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index 9f157e7..8e3f857 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -50,6 +50,87 @@
+ 9997:
+ .endm
+
++ .macro pax_enter_kernel
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ @ make aligned space for saved DACR
++ sub sp, sp, #8
++ @ save regs
++ stmdb sp!, {r1, r2}
++ @ read DACR from cpu_domain into r1
++ mov r2, sp
++ @ assume 8K pages, since we have to split the immediate in two
++ bic r2, r2, #(0x1fc0)
++ bic r2, r2, #(0x3f)
++ ldr r1, [r2, #TI_CPU_DOMAIN]
++ @ store old DACR on stack
++ str r1, [sp, #8]
++#ifdef CONFIG_PAX_KERNEXEC
++ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
++ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
++ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
++#endif
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ @ set current DOMAIN_USER to DOMAIN_NOACCESS
++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
++#endif
++ @ write r1 to current_thread_info()->cpu_domain
++ str r1, [r2, #TI_CPU_DOMAIN]
++ @ write r1 to DACR
++ mcr p15, 0, r1, c3, c0, 0
++ @ instruction sync
++ instr_sync
++ @ restore regs
++ ldmia sp!, {r1, r2}
++#endif
++ .endm
++
++ .macro pax_open_userland
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ @ save regs
++ stmdb sp!, {r0, r1}
++ @ read DACR from cpu_domain into r1
++ mov r0, sp
++ @ assume 8K pages, since we have to split the immediate in two
++ bic r0, r0, #(0x1fc0)
++ bic r0, r0, #(0x3f)
++ ldr r1, [r0, #TI_CPU_DOMAIN]
++ @ set current DOMAIN_USER to DOMAIN_CLIENT
++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
++ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
++ @ write r1 to current_thread_info()->cpu_domain
++ str r1, [r0, #TI_CPU_DOMAIN]
++ @ write r1 to DACR
++ mcr p15, 0, r1, c3, c0, 0
++ @ instruction sync
++ instr_sync
++ @ restore regs
++ ldmia sp!, {r0, r1}
++#endif
++ .endm
++
++ .macro pax_close_userland
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ @ save regs
++ stmdb sp!, {r0, r1}
++ @ read DACR from cpu_domain into r1
++ mov r0, sp
++ @ assume 8K pages, since we have to split the immediate in two
++ bic r0, r0, #(0x1fc0)
++ bic r0, r0, #(0x3f)
++ ldr r1, [r0, #TI_CPU_DOMAIN]
++ @ set current DOMAIN_USER to DOMAIN_NOACCESS
++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
++ @ write r1 to current_thread_info()->cpu_domain
++ str r1, [r0, #TI_CPU_DOMAIN]
++ @ write r1 to DACR
++ mcr p15, 0, r1, c3, c0, 0
++ @ instruction sync
++ instr_sync
++ @ restore regs
++ ldmia sp!, {r0, r1}
++#endif
++ .endm
++
+ .macro pabt_helper
+ @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
+ #ifdef MULTI_PABORT
+@@ -92,11 +173,15 @@
+ * Invalid mode handlers
+ */
+ .macro inv_entry, reason
++
++ pax_enter_kernel
++
+ sub sp, sp, #PT_REGS_SIZE
+ ARM( stmib sp, {r1 - lr} )
+ THUMB( stmia sp, {r0 - r12} )
+ THUMB( str sp, [sp, #S_SP] )
+ THUMB( str lr, [sp, #S_LR] )
++
+ mov r1, #\reason
+ .endm
+
+@@ -152,6 +237,9 @@ ENDPROC(__und_invalid)
+ .macro svc_entry, stack_hole=0, trace=1, uaccess=1
+ UNWIND(.fnstart )
+ UNWIND(.save {r0 - pc} )
++
++ pax_enter_kernel
++
+ sub sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
+ #ifdef CONFIG_THUMB2_KERNEL
+ SPFIX( str r0, [sp] ) @ temporarily saved
+@@ -167,7 +255,12 @@ ENDPROC(__und_invalid)
+ ldmia r0, {r3 - r5}
+ add r7, sp, #S_SP - 4 @ here for interlock avoidance
+ mov r6, #-1 @ "" "" "" ""
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ @ offset sp by 8 as done in pax_enter_kernel
++ add r2, sp, #(SVC_REGS_SIZE + \stack_hole + 4)
++#else
+ add r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
++#endif
+ SPFIX( addeq r2, r2, #4 )
+ str r3, [sp, #-4]! @ save the "real" r0 copied
+ @ from the exception stack
+@@ -382,6 +475,9 @@ ENDPROC(__fiq_abt)
+ .macro usr_entry, trace=1, uaccess=1
+ UNWIND(.fnstart )
+ UNWIND(.cantunwind ) @ don't unwind the user space
++
++ pax_enter_kernel_user
++
+ sub sp, sp, #PT_REGS_SIZE
+ ARM( stmib sp, {r1 - r12} )
+ THUMB( stmia sp, {r0 - r12} )
+@@ -495,7 +591,9 @@ __und_usr:
+ tst r3, #PSR_T_BIT @ Thumb mode?
+ bne __und_usr_thumb
+ sub r4, r2, #4 @ ARM instr at LR - 4
++ pax_open_userland
+ 1: ldrt r0, [r4]
++ pax_close_userland
+ ARM_BE8(rev r0, r0) @ little endian instruction
+
+ uaccess_disable ip
+@@ -531,11 +629,15 @@ __und_usr_thumb:
+ */
+ .arch armv6t2
+ #endif
++ pax_open_userland
+ 2: ldrht r5, [r4]
++ pax_close_userland
+ ARM_BE8(rev16 r5, r5) @ little endian instruction
+ cmp r5, #0xe800 @ 32bit instruction if xx != 0
+ blo __und_usr_fault_16_pan @ 16bit undefined instruction
++ pax_open_userland
+ 3: ldrht r0, [r2]
++ pax_close_userland
+ ARM_BE8(rev16 r0, r0) @ little endian instruction
+ uaccess_disable ip
+ add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
+@@ -566,7 +668,8 @@ ENDPROC(__und_usr)
+ */
+ .pushsection .text.fixup, "ax"
+ .align 2
+-4: str r4, [sp, #S_PC] @ retry current instruction
++4: pax_close_userland
++ str r4, [sp, #S_PC] @ retry current instruction
+ ret r9
+ .popsection
+ .pushsection __ex_table,"a"
+@@ -788,7 +891,7 @@ ENTRY(__switch_to)
+ THUMB( str lr, [ip], #4 )
+ ldr r4, [r2, #TI_TP_VALUE]
+ ldr r5, [r2, #TI_TP_VALUE + 4]
+-#ifdef CONFIG_CPU_USE_DOMAINS
++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+ mrc p15, 0, r6, c3, c0, 0 @ Get domain register
+ str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register
+ ldr r6, [r2, #TI_CPU_DOMAIN]
+@@ -799,7 +902,7 @@ ENTRY(__switch_to)
+ ldr r8, =__stack_chk_guard
+ ldr r7, [r7, #TSK_STACK_CANARY]
+ #endif
+-#ifdef CONFIG_CPU_USE_DOMAINS
++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+ mcr p15, 0, r6, c3, c0, 0 @ Set domain register
+ #endif
+ mov r5, r0
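
The pax_enter_kernel / pax_open_userland / pax_close_userland macros added above all locate current_thread_info() by rounding the stack pointer down to the start of the kernel stack. Because a 13-bit mask cannot be encoded as a single ARM data-processing immediate, the macros clear 0x1fc0 and 0x3f in two bic instructions, which together equal the usual sp & ~(THREAD_SIZE - 1) for 8 KiB stacks (hence the "assume 8K pages" comment). A small C sketch of that arithmetic, with a made-up stack pointer value:

    #include <stdio.h>

    #define THREAD_SIZE 8192UL   /* assumption: 8 KiB kernel stacks, as the macros note */

    int main(void)
    {
        unsigned long sp  = 0xc7a13e54UL;                 /* hypothetical kernel sp */
        unsigned long ti1 = sp & ~(THREAD_SIZE - 1);      /* the usual thread_info trick */
        unsigned long ti2 = (sp & ~0x1fc0UL) & ~0x3fUL;   /* split-immediate form used above */

        printf("%#lx %#lx -> %s\n", ti1, ti2, ti1 == ti2 ? "same" : "different");
        return 0;
    }
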
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index 10c3283..c47cdf5 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -11,18 +11,46 @@
+ #include <asm/assembler.h>
+ #include <asm/unistd.h>
+ #include <asm/ftrace.h>
++#include <asm/domain.h>
+ #include <asm/unwind.h>
+
++#include "entry-header.S"
++
+ #ifdef CONFIG_NEED_RET_TO_USER
+ #include <mach/entry-macro.S>
+ #else
+ .macro arch_ret_to_user, tmp1, tmp2
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ @ save regs
++ stmdb sp!, {r1, r2}
++ @ read DACR from cpu_domain into r1
++ mov r2, sp
++ @ assume 8K pages, since we have to split the immediate in two
++ bic r2, r2, #(0x1fc0)
++ bic r2, r2, #(0x3f)
++ ldr r1, [r2, #TI_CPU_DOMAIN]
++#ifdef CONFIG_PAX_KERNEXEC
++ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
++ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
++ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
++#endif
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ @ set current DOMAIN_USER to DOMAIN_UDEREF
++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
++ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
++#endif
++ @ write r1 to current_thread_info()->cpu_domain
++ str r1, [r2, #TI_CPU_DOMAIN]
++ @ write r1 to DACR
++ mcr p15, 0, r1, c3, c0, 0
++ @ instruction sync
++ instr_sync
++ @ restore regs
++ ldmia sp!, {r1, r2}
++#endif
+ .endm
+ #endif
+
+-#include "entry-header.S"
+-
+-
+ .align 5
+ #if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
+ /*
+@@ -36,7 +64,9 @@ ret_fast_syscall:
+ UNWIND(.cantunwind )
+ disable_irq_notrace @ disable interrupts
+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
+- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++ tst r1, #_TIF_SYSCALL_WORK
++ bne fast_work_pending
++ tst r1, #_TIF_WORK_MASK
+ bne fast_work_pending
+
+ /* perform architecture specific actions before user return */
+@@ -62,7 +92,9 @@ ret_fast_syscall:
+ str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+ disable_irq_notrace @ disable interrupts
+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
+- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++ tst r1, #_TIF_SYSCALL_WORK
++ bne __sys_trace_return_nosave
++ tst r1, #_TIF_WORK_MASK
+ beq no_work_pending
+ UNWIND(.fnend )
+ ENDPROC(ret_fast_syscall)
+@@ -199,6 +231,12 @@ ENTRY(vector_swi)
+
+ uaccess_disable tbl
+
++ /*
++ * do this here to avoid a performance hit of wrapping the code above
++ * that directly dereferences userland to parse the SWI instruction
++ */
++ pax_enter_kernel_user
++
+ adr tbl, sys_call_table @ load syscall table pointer
+
+ #if defined(CONFIG_OABI_COMPAT)
+diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
+index 6391728..6bf90b8 100644
+--- a/arch/arm/kernel/entry-header.S
++++ b/arch/arm/kernel/entry-header.S
+@@ -196,6 +196,59 @@
+ msr cpsr_c, \rtemp @ switch back to the SVC mode
+ .endm
+
++ .macro pax_enter_kernel_user
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ @ save regs
++ stmdb sp!, {r0, r1}
++ @ read DACR from cpu_domain into r1
++ mov r0, sp
++ @ assume 8K pages, since we have to split the immediate in two
++ bic r0, r0, #(0x1fc0)
++ bic r0, r0, #(0x3f)
++ ldr r1, [r0, #TI_CPU_DOMAIN]
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ @ set current DOMAIN_USER to DOMAIN_NOACCESS
++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
++#endif
++#ifdef CONFIG_PAX_KERNEXEC
++ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
++ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
++ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
++#endif
++ @ write r1 to current_thread_info()->cpu_domain
++ str r1, [r0, #TI_CPU_DOMAIN]
++ @ write r1 to DACR
++ mcr p15, 0, r1, c3, c0, 0
++ @ instruction sync
++ instr_sync
++ @ restore regs
++ ldmia sp!, {r0, r1}
++#endif
++ .endm
++
++ .macro pax_exit_kernel
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ @ save regs
++ stmdb sp!, {r0, r1}
++ @ read old DACR from stack into r1
++ ldr r1, [sp, #(8 + S_SP)]
++ sub r1, r1, #8
++ ldr r1, [r1]
++
++ @ write r1 to current_thread_info()->cpu_domain
++ mov r0, sp
++ @ assume 8K pages, since we have to split the immediate in two
++ bic r0, r0, #(0x1fc0)
++ bic r0, r0, #(0x3f)
++ str r1, [r0, #TI_CPU_DOMAIN]
++ @ write r1 to DACR
++ mcr p15, 0, r1, c3, c0, 0
++ @ instruction sync
++ instr_sync
++ @ restore regs
++ ldmia sp!, {r0, r1}
++#endif
++ .endm
+
+ .macro svc_exit, rpsr, irq = 0
+ .if \irq != 0
+@@ -219,6 +272,8 @@
+ uaccess_restore
+ str r1, [tsk, #TI_ADDR_LIMIT]
+
++ pax_exit_kernel
++
+ #ifndef CONFIG_THUMB2_KERNEL
+ @ ARM mode SVC restore
+ msr spsr_cxsf, \rpsr
+diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
+index 059c3da..8e45cfc 100644
+--- a/arch/arm/kernel/fiq.c
++++ b/arch/arm/kernel/fiq.c
+@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
+ void *base = vectors_page;
+ unsigned offset = FIQ_OFFSET;
+
++ pax_open_kernel();
+ memcpy(base + offset, start, length);
++ pax_close_kernel();
++
+ if (!cache_is_vipt_nonaliasing())
+ flush_icache_range((unsigned long)base + offset, offset +
+ length);
+diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
+index 4f14b5c..91ff261 100644
+--- a/arch/arm/kernel/module.c
++++ b/arch/arm/kernel/module.c
+@@ -38,17 +38,47 @@
+ #endif
+
+ #ifdef CONFIG_MMU
+-void *module_alloc(unsigned long size)
++static inline void *__module_alloc(unsigned long size, pgprot_t prot)
+ {
+- void *p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
++ void *p;
++
++ if (!size || (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) && PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR))
++ return NULL;
++
++ p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
++ GFP_KERNEL, prot, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+ if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
+ return p;
+ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
++ GFP_KERNEL, prot, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+ }
++
++void *module_alloc(unsigned long size)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++ return __module_alloc(size, PAGE_KERNEL);
++#else
++ return __module_alloc(size, PAGE_KERNEL_EXEC);
++#endif
++
++}
++
++#ifdef CONFIG_PAX_KERNEXEC
++void module_memfree_exec(void *module_region)
++{
++ module_memfree(module_region);
++}
++EXPORT_SYMBOL(module_memfree_exec);
++
++void *module_alloc_exec(unsigned long size)
++{
++ return __module_alloc(size, PAGE_KERNEL_EXEC);
++}
++EXPORT_SYMBOL(module_alloc_exec);
++#endif
+ #endif
+
+ int
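
Under CONFIG_PAX_KERNEXEC the module.c hunk makes the ordinary module_alloc() hand out non-executable PAGE_KERNEL memory and introduces module_alloc_exec()/module_memfree_exec() for the executable portion, so a module's code and data end up in differently protected mappings. A rough user-space analogue of that split, using mmap() purely for illustration (the helper names below are invented, not kernel APIs):

    #include <stdio.h>
    #include <stddef.h>
    #include <sys/mman.h>

    /* Invented stand-ins for the split allocators. */
    static void *alloc_rw(size_t size)
    {
        return mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }

    static void *alloc_rx(size_t size)
    {
        /* executable region, kept separate from the writable one */
        return mmap(NULL, size, PROT_READ | PROT_EXEC,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }

    int main(void)
    {
        void *data = alloc_rw(4096);
        void *code = alloc_rx(4096);
        printf("data %p (rw), code %p (rx)\n", data, code);
        return 0;
    }
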
+diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
+index 69bda1a..755113a 100644
+--- a/arch/arm/kernel/patch.c
++++ b/arch/arm/kernel/patch.c
+@@ -66,6 +66,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
+ else
+ __acquire(&patch_lock);
+
++ pax_open_kernel();
+ if (thumb2 && __opcode_is_thumb16(insn)) {
+ *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
+ size = sizeof(u16);
+@@ -97,6 +98,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
+ *(u32 *)waddr = insn;
+ size = sizeof(u32);
+ }
++ pax_close_kernel();
+
+ if (waddr != addr) {
+ flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index 91d2d5b..042c26e 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -118,8 +118,8 @@ void __show_regs(struct pt_regs *regs)
+
+ show_regs_print_info(KERN_DEFAULT);
+
+- print_symbol("PC is at %s\n", instruction_pointer(regs));
+- print_symbol("LR is at %s\n", regs->ARM_lr);
++ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
++ printk("LR is at %pA\n", (void *)regs->ARM_lr);
+ printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
+ "sp : %08lx ip : %08lx fp : %08lx\n",
+ regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
+@@ -233,7 +233,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
+
+ memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
+
+-#ifdef CONFIG_CPU_USE_DOMAINS
++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+ /*
+ * Copy the initial value of the domain access control register
+ * from the current thread: thread->addr_limit will have been
+@@ -336,7 +336,7 @@ static struct vm_area_struct gate_vma = {
+
+ static int __init gate_vma_init(void)
+ {
+- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+ return 0;
+ }
+ arch_initcall(gate_vma_init);
+@@ -365,92 +365,14 @@ const char *arch_vma_name(struct vm_area_struct *vma)
+ return is_gate_vma(vma) ? "[vectors]" : NULL;
+ }
+
+-/* If possible, provide a placement hint at a random offset from the
+- * stack for the sigpage and vdso pages.
+- */
+-static unsigned long sigpage_addr(const struct mm_struct *mm,
+- unsigned int npages)
+-{
+- unsigned long offset;
+- unsigned long first;
+- unsigned long last;
+- unsigned long addr;
+- unsigned int slots;
+-
+- first = PAGE_ALIGN(mm->start_stack);
+-
+- last = TASK_SIZE - (npages << PAGE_SHIFT);
+-
+- /* No room after stack? */
+- if (first > last)
+- return 0;
+-
+- /* Just enough room? */
+- if (first == last)
+- return first;
+-
+- slots = ((last - first) >> PAGE_SHIFT) + 1;
+-
+- offset = get_random_int() % slots;
+-
+- addr = first + (offset << PAGE_SHIFT);
+-
+- return addr;
+-}
+-
+-static struct page *signal_page;
+-extern struct page *get_signal_page(void);
+-
+-static const struct vm_special_mapping sigpage_mapping = {
+- .name = "[sigpage]",
+- .pages = &signal_page,
+-};
+-
+ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ {
+ struct mm_struct *mm = current->mm;
+- struct vm_area_struct *vma;
+- unsigned long npages;
+- unsigned long addr;
+- unsigned long hint;
+- int ret = 0;
+-
+- if (!signal_page)
+- signal_page = get_signal_page();
+- if (!signal_page)
+- return -ENOMEM;
+-
+- npages = 1; /* for sigpage */
+- npages += vdso_total_pages;
+
+ if (down_write_killable(&mm->mmap_sem))
+ return -EINTR;
+- hint = sigpage_addr(mm, npages);
+- addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
+- if (IS_ERR_VALUE(addr)) {
+- ret = addr;
+- goto up_fail;
+- }
+-
+- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
+- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+- &sigpage_mapping);
+-
+- if (IS_ERR(vma)) {
+- ret = PTR_ERR(vma);
+- goto up_fail;
+- }
+-
+- mm->context.sigpage = addr;
+-
+- /* Unlike the sigpage, failure to install the vdso is unlikely
+- * to be fatal to the process, so no error check needed
+- * here.
+- */
+- arm_install_vdso(mm, addr + PAGE_SIZE);
+-
+- up_fail:
++ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
+ up_write(&mm->mmap_sem);
+- return ret;
++ return 0;
+ }
+ #endif
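
The rewritten arch_setup_additional_pages() above no longer installs a [sigpage] mapping at all: it stores a random, word-aligned address in mm->context.sigpage and relies on the prefetch-abort handling added to arch/arm/mm/fault.c later in this patch, which recognises faulting PCs in sigpage .. sigpage + 7*4 and performs sigreturn/rt_sigreturn directly. A toy evaluation of the address expression, assuming the conventional 3G/1G split (PAGE_OFFSET = 0xC0000000) and using rand() as a stand-in for get_random_int():

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_OFFSET 0xC0000000UL   /* assumption: 3G/1G split */

    int main(void)
    {
        unsigned long r = (unsigned long)rand();   /* stand-in for get_random_int() */
        unsigned long sigpage = (PAGE_OFFSET + (r % 0x3FFEFFE0UL)) & 0xFFFFFFFCUL;

        /* Always word aligned and always in the kernel half of the address
         * space, so user code can never actually fetch from it. */
        printf("sigpage = %#lx (word aligned: %s)\n",
               sigpage, (sigpage & 3) ? "no" : "yes");
        return 0;
    }
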
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index ce131ed..26f9765 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
+ regs->ARM_ip = ip;
+ }
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
+ {
+ current_thread_info()->syscall = scno;
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+
+diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
+index 3fa867a..d610607 100644
+--- a/arch/arm/kernel/reboot.c
++++ b/arch/arm/kernel/reboot.c
+@@ -120,6 +120,7 @@ void machine_power_off(void)
+
+ if (pm_power_off)
+ pm_power_off();
++ while (1);
+ }
+
+ /*
+diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
+index 34e3f3c..3d2dada 100644
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -112,6 +112,8 @@ EXPORT_SYMBOL(elf_hwcap);
+ unsigned int elf_hwcap2 __read_mostly;
+ EXPORT_SYMBOL(elf_hwcap2);
+
++pteval_t __supported_pte_mask __read_only;
++pmdval_t __supported_pmd_mask __read_only;
+
+ #ifdef MULTI_CPU
+ struct processor processor __ro_after_init;
+@@ -257,9 +259,13 @@ static int __get_cpu_architecture(void)
+ * Register 0 and check for VMSAv7 or PMSAv7 */
+ unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
+ if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
+- (mmfr0 & 0x000000f0) >= 0x00000030)
++ (mmfr0 & 0x000000f0) >= 0x00000030) {
+ cpu_arch = CPU_ARCH_ARMv7;
+- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
++ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
++ __supported_pte_mask |= L_PTE_PXN;
++ __supported_pmd_mask |= PMD_PXNTABLE;
++ }
++ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
+ (mmfr0 & 0x000000f0) == 0x00000020)
+ cpu_arch = CPU_ARCH_ARMv6;
+ else
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index 7b8f214..ece8e28 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -24,8 +24,6 @@
+
+ extern const unsigned long sigreturn_codes[7];
+
+-static unsigned long signal_return_offset;
+-
+ #ifdef CONFIG_CRUNCH
+ static int preserve_crunch_context(struct crunch_sigframe __user *frame)
+ {
+@@ -388,8 +386,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
+ * except when the MPU has protected the vectors
+ * page from PL0
+ */
+- retcode = mm->context.sigpage + signal_return_offset +
+- (idx << 2) + thumb;
++ retcode = mm->context.sigpage + (idx << 2) + thumb;
+ } else
+ #endif
+ {
+@@ -601,33 +598,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+ } while (thread_flags & _TIF_WORK_MASK);
+ return 0;
+ }
+-
+-struct page *get_signal_page(void)
+-{
+- unsigned long ptr;
+- unsigned offset;
+- struct page *page;
+- void *addr;
+-
+- page = alloc_pages(GFP_KERNEL, 0);
+-
+- if (!page)
+- return NULL;
+-
+- addr = page_address(page);
+-
+- /* Give the signal return code some randomness */
+- offset = 0x200 + (get_random_int() & 0x7fc);
+- signal_return_offset = offset;
+-
+- /*
+- * Copy signal return handlers into the vector page, and
+- * set sigreturn to be a pointer to these.
+- */
+- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
+-
+- ptr = (unsigned long)addr + offset;
+- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+-
+- return page;
+-}
+diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
+index b10e136..cb5edf9 100644
+--- a/arch/arm/kernel/tcm.c
++++ b/arch/arm/kernel/tcm.c
+@@ -64,7 +64,7 @@ static struct map_desc itcm_iomap[] __initdata = {
+ .virtual = ITCM_OFFSET,
+ .pfn = __phys_to_pfn(ITCM_OFFSET),
+ .length = 0,
+- .type = MT_MEMORY_RWX_ITCM,
++ .type = MT_MEMORY_RX_ITCM,
+ }
+ };
+
+@@ -362,7 +362,9 @@ void __init tcm_init(void)
+ start = &__sitcm_text;
+ end = &__eitcm_text;
+ ram = &__itcm_start;
++ pax_open_kernel();
+ memcpy(start, ram, itcm_code_sz);
++ pax_close_kernel();
+ pr_debug("CPU ITCM: copied code from %p - %p\n",
+ start, end);
+ itcm_present = true;
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 9688ec0..dd072c0 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
+ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
+ {
+ #ifdef CONFIG_KALLSYMS
+- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
++ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
+ #else
+ printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
+ #endif
+@@ -287,6 +287,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+ static int die_owner = -1;
+ static unsigned int die_nest_count;
+
++extern void gr_handle_kernel_exploit(void);
++
+ static unsigned long oops_begin(void)
+ {
+ int cpu;
+@@ -329,6 +331,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
++
++ gr_handle_kernel_exploit();
++
+ if (signr)
+ do_exit(signr);
+ }
+diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
+index f7f55df..49c9f9e 100644
+--- a/arch/arm/kernel/vmlinux.lds.S
++++ b/arch/arm/kernel/vmlinux.lds.S
+@@ -44,7 +44,8 @@
+ #endif
+
+ #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
+- defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
++ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL) || \
++ defined(CONFIG_PAX_REFCOUNT)
+ #define ARM_EXIT_KEEP(x) x
+ #define ARM_EXIT_DISCARD(x)
+ #else
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index 19b5f5c..9aa8e58 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -59,7 +59,7 @@ static unsigned long hyp_default_vectors;
+ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
+
+ /* The VMID used in the VTTBR */
+-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
++static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
+ static u32 kvm_next_vmid;
+ static unsigned int kvm_vmid_bits __read_mostly;
+ static DEFINE_SPINLOCK(kvm_vmid_lock);
+@@ -423,7 +423,7 @@ void force_vm_exit(const cpumask_t *mask)
+ */
+ static bool need_new_vmid_gen(struct kvm *kvm)
+ {
+- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
++ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
+ }
+
+ /**
+@@ -456,7 +456,7 @@ static void update_vttbr(struct kvm *kvm)
+
+ /* First user of a new VMID generation? */
+ if (unlikely(kvm_next_vmid == 0)) {
+- atomic64_inc(&kvm_vmid_gen);
++ atomic64_inc_unchecked(&kvm_vmid_gen);
+ kvm_next_vmid = 1;
+
+ /*
+@@ -473,7 +473,7 @@ static void update_vttbr(struct kvm *kvm)
+ kvm_call_hyp(__kvm_flush_vm_context);
+ }
+
+- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
++ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
+ kvm->arch.vmid = kvm_next_vmid;
+ kvm_next_vmid++;
+ kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
+diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
+index 6ee2f67..d1cce76 100644
+--- a/arch/arm/lib/copy_page.S
++++ b/arch/arm/lib/copy_page.S
+@@ -10,6 +10,7 @@
+ * ASM optimised string functions
+ */
+ #include <linux/linkage.h>
++#include <linux/const.h>
+ #include <asm/assembler.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/cache.h>
+diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
+index 1712f13..a3165dc 100644
+--- a/arch/arm/lib/csumpartialcopyuser.S
++++ b/arch/arm/lib/csumpartialcopyuser.S
+@@ -71,8 +71,8 @@
+ * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
+ */
+
+-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
+-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
++#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
++#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
+
+ #include "csumpartialcopygeneric.S"
+
+diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
+index 6bd1089..e999400 100644
+--- a/arch/arm/lib/uaccess_with_memcpy.c
++++ b/arch/arm/lib/uaccess_with_memcpy.c
+@@ -84,7 +84,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
+ return 1;
+ }
+
+-static unsigned long noinline
++static unsigned long noinline __size_overflow(3)
+ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
+ {
+ unsigned long ua_flags;
+@@ -157,7 +157,7 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
+ return n;
+ }
+
+-static unsigned long noinline
++static unsigned long noinline __size_overflow(2)
+ __clear_user_memset(void __user *addr, unsigned long n)
+ {
+ unsigned long ua_flags;
+diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
+index 06332f6..1fa0c71 100644
+--- a/arch/arm/mach-exynos/suspend.c
++++ b/arch/arm/mach-exynos/suspend.c
+@@ -724,8 +724,10 @@ void __init exynos_pm_init(void)
+ tmp |= pm_data->wake_disable_mask;
+ pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
+
+- exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
+- exynos_pm_syscore_ops.resume = pm_data->pm_resume;
++ pax_open_kernel();
++ const_cast(exynos_pm_syscore_ops.suspend) = pm_data->pm_suspend;
++ const_cast(exynos_pm_syscore_ops.resume) = pm_data->pm_resume;
++ pax_close_kernel();
+
+ register_syscore_ops(&exynos_pm_syscore_ops);
+ suspend_set_ops(&exynos_suspend_ops);
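
This hunk is the first of several in the patch that follow the same pattern: an ops structure grsecurity marks read-only after init can no longer be assigned to directly, so late writes are bracketed by pax_open_kernel()/pax_close_kernel() and routed through const_cast(). A rough user-space analogue of the idea, toggling a page's protection with mprotect() around a single pointer update (all names below are invented for illustration, and error handling is omitted):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct ops { void (*resume)(void); };

    static void real_resume(void) { puts("resume"); }

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        /* Give the ops structure its own page so it can be protected. */
        struct ops *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        memset(ops, 0, sizeof(*ops));
        mprotect(ops, pagesz, PROT_READ);              /* "read-only after init" */

        mprotect(ops, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() analogue */
        ops->resume = real_resume;                     /* const_cast()-style assignment */
        mprotect(ops, pagesz, PROT_READ);              /* pax_close_kernel() analogue */

        ops->resume();
        return 0;
    }
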
+diff --git a/arch/arm/mach-mmp/mmp2.c b/arch/arm/mach-mmp/mmp2.c
+index afba546..9e5403d 100644
+--- a/arch/arm/mach-mmp/mmp2.c
++++ b/arch/arm/mach-mmp/mmp2.c
+@@ -98,7 +98,9 @@ void __init mmp2_init_irq(void)
+ {
+ mmp2_init_icu();
+ #ifdef CONFIG_PM
+- icu_irq_chip.irq_set_wake = mmp2_set_wake;
++ pax_open_kernel();
++ const_cast(icu_irq_chip.irq_set_wake) = mmp2_set_wake;
++ pax_close_kernel();
+ #endif
+ }
+
+diff --git a/arch/arm/mach-mmp/pxa910.c b/arch/arm/mach-mmp/pxa910.c
+index 1ccbba9..7a95c29 100644
+--- a/arch/arm/mach-mmp/pxa910.c
++++ b/arch/arm/mach-mmp/pxa910.c
+@@ -84,7 +84,9 @@ void __init pxa910_init_irq(void)
+ {
+ icu_init_irq();
+ #ifdef CONFIG_PM
+- icu_irq_chip.irq_set_wake = pxa910_set_wake;
++ pax_open_kernel();
++ const_cast(icu_irq_chip.irq_set_wake) = pxa910_set_wake;
++ pax_close_kernel();
+ #endif
+ }
+
+diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
+index ae2a018..297ad08 100644
+--- a/arch/arm/mach-mvebu/coherency.c
++++ b/arch/arm/mach-mvebu/coherency.c
+@@ -156,7 +156,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
+
+ /*
+ * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
+- * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
++ * areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This is
+ * needed for the HW I/O coherency mechanism to work properly without
+ * deadlock.
+ */
+@@ -164,7 +164,7 @@ static void __iomem *
+ armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
+ unsigned int mtype, void *caller)
+ {
+- mtype = MT_UNCACHED;
++ mtype = MT_UNCACHED_RW;
+ return __arm_ioremap_caller(phys_addr, size, mtype, caller);
+ }
+
+@@ -174,7 +174,7 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
+
+ coherency_cpu_base = of_iomap(np, 0);
+ arch_ioremap_caller = armada_wa_ioremap_caller;
+- pci_ioremap_set_mem_type(MT_UNCACHED);
++ pci_ioremap_set_mem_type(MT_UNCACHED_RW);
+
+ /*
+ * We should switch the PL310 to I/O coherency mode only if
+diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
+index f39bd51..866c780 100644
+--- a/arch/arm/mach-mvebu/pmsu.c
++++ b/arch/arm/mach-mvebu/pmsu.c
+@@ -93,7 +93,7 @@
+ #define ARMADA_370_CRYPT0_ENG_ATTR 0x1
+
+ extern void ll_disable_coherency(void);
+-extern void ll_enable_coherency(void);
++extern int ll_enable_coherency(void);
+
+ extern void armada_370_xp_cpu_resume(void);
+ extern void armada_38x_cpu_resume(void);
+diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
+index 6b6fda6..232df1e 100644
+--- a/arch/arm/mach-omap2/board-n8x0.c
++++ b/arch/arm/mach-omap2/board-n8x0.c
+@@ -568,7 +568,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
+ }
+ #endif
+
+-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
++struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
+ .late_init = n8x0_menelaus_late_init,
+ };
+
+diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+index 7d62ad4..97774b1 100644
+--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
++++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+@@ -89,7 +89,7 @@ struct cpu_pm_ops {
+ void (*resume)(void);
+ void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
+ void (*hotplug_restart)(void);
+-};
++} __no_const;
+
+ static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
+ static struct powerdomain *mpuss_pd;
+@@ -107,7 +107,7 @@ static void dummy_cpu_resume(void)
+ static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
+ {}
+
+-static struct cpu_pm_ops omap_pm_ops = {
++static struct cpu_pm_ops omap_pm_ops __read_only = {
+ .finish_suspend = default_finish_suspend,
+ .resume = dummy_cpu_resume,
+ .scu_prepare = dummy_scu_prepare,
+diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
+index b4de3da..e027393 100644
+--- a/arch/arm/mach-omap2/omap-smp.c
++++ b/arch/arm/mach-omap2/omap-smp.c
+@@ -19,6 +19,7 @@
+ #include <linux/device.h>
+ #include <linux/smp.h>
+ #include <linux/io.h>
++#include <linux/irq.h>
+ #include <linux/irqchip/arm-gic.h>
+
+ #include <asm/smp_scu.h>
+diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
+index e920dd8..ef999171 100644
+--- a/arch/arm/mach-omap2/omap_device.c
++++ b/arch/arm/mach-omap2/omap_device.c
+@@ -530,7 +530,7 @@ void omap_device_delete(struct omap_device *od)
+ struct platform_device __init *omap_device_build(const char *pdev_name,
+ int pdev_id,
+ struct omap_hwmod *oh,
+- void *pdata, int pdata_len)
++ const void *pdata, int pdata_len)
+ {
+ struct omap_hwmod *ohs[] = { oh };
+
+@@ -558,7 +558,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
+ struct platform_device __init *omap_device_build_ss(const char *pdev_name,
+ int pdev_id,
+ struct omap_hwmod **ohs,
+- int oh_cnt, void *pdata,
++ int oh_cnt, const void *pdata,
+ int pdata_len)
+ {
+ int ret = -ENOMEM;
+diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
+index 78c02b3..c94109a 100644
+--- a/arch/arm/mach-omap2/omap_device.h
++++ b/arch/arm/mach-omap2/omap_device.h
+@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
+ /* Core code interface */
+
+ struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
+- struct omap_hwmod *oh, void *pdata,
++ struct omap_hwmod *oh, const void *pdata,
+ int pdata_len);
+
+ struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
+ struct omap_hwmod **oh, int oh_cnt,
+- void *pdata, int pdata_len);
++ const void *pdata, int pdata_len);
+
+ struct omap_device *omap_device_alloc(struct platform_device *pdev,
+ struct omap_hwmod **ohs, int oh_cnt);
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index 1052b29..54669b0 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -206,10 +206,10 @@ struct omap_hwmod_soc_ops {
+ void (*update_context_lost)(struct omap_hwmod *oh);
+ int (*get_context_lost)(struct omap_hwmod *oh);
+ int (*disable_direct_prcm)(struct omap_hwmod *oh);
+-};
++} __no_const;
+
+ /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
+-static struct omap_hwmod_soc_ops soc_ops;
++static struct omap_hwmod_soc_ops soc_ops __read_only;
+
+ /* omap_hwmod_list contains all registered struct omap_hwmods */
+ static LIST_HEAD(omap_hwmod_list);
+diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
+index 95fee54..b5dd79d 100644
+--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
++++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
+@@ -10,6 +10,7 @@
+
+ #include <linux/kernel.h>
+ #include <linux/init.h>
++#include <asm/pgtable.h>
+
+ #include "powerdomain.h"
+
+@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
+
+ void __init am43xx_powerdomains_init(void)
+ {
+- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
++ pax_open_kernel();
++ const_cast(omap4_pwrdm_operations.pwrdm_has_voltdm) = am43xx_check_vcvp;
++ pax_close_kernel();
+ pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
+ pwrdm_register_pwrdms(powerdomains_am43xx);
+ pwrdm_complete_init();
+diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
+index ff0a68c..b312aa0 100644
+--- a/arch/arm/mach-omap2/wd_timer.c
++++ b/arch/arm/mach-omap2/wd_timer.c
+@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
+ struct omap_hwmod *oh;
+ char *oh_name = "wd_timer2";
+ char *dev_name = "omap_wdt";
+- struct omap_wd_timer_platform_data pdata;
++ static struct omap_wd_timer_platform_data pdata = {
++ .read_reset_sources = prm_read_reset_sources
++ };
+
+ if (!cpu_class_is_omap2() || of_have_populated_dt())
+ return 0;
+@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
+ return -EINVAL;
+ }
+
+- pdata.read_reset_sources = prm_read_reset_sources;
+-
+ pdev = omap_device_build(dev_name, id, oh, &pdata,
+ sizeof(struct omap_wd_timer_platform_data));
+ WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
+diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
+index 92ec8c3..3b09472 100644
+--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
++++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
+@@ -240,7 +240,7 @@ static struct platform_device smdk6410_b_pwr_5v = {
+ };
+ #endif
+
+-static struct s3c_ide_platdata smdk6410_ide_pdata __initdata = {
++static const struct s3c_ide_platdata smdk6410_ide_pdata __initconst = {
+ .setup_gpio = s3c64xx_ide_setup_gpio,
+ };
+
+diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c
+index 0c6bb45..0f18d70 100644
+--- a/arch/arm/mach-shmobile/platsmp-apmu.c
++++ b/arch/arm/mach-shmobile/platsmp-apmu.c
+@@ -22,6 +22,7 @@
+ #include <asm/proc-fns.h>
+ #include <asm/smp_plat.h>
+ #include <asm/suspend.h>
++#include <asm/pgtable.h>
+ #include "common.h"
+ #include "platsmp-apmu.h"
+ #include "rcar-gen2.h"
+@@ -316,6 +317,8 @@ static int shmobile_smp_apmu_enter_suspend(suspend_state_t state)
+
+ void __init shmobile_smp_apmu_suspend_init(void)
+ {
+- shmobile_suspend_ops.enter = shmobile_smp_apmu_enter_suspend;
++ pax_open_kernel();
++ const_cast(shmobile_suspend_ops.enter) = shmobile_smp_apmu_enter_suspend;
++ pax_close_kernel();
+ }
+ #endif
+diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
+index afcee04..63e52ac 100644
+--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
++++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
+@@ -178,7 +178,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
+ bool entered_lp2 = false;
+
+ if (tegra_pending_sgi())
+- ACCESS_ONCE(abort_flag) = true;
++ ACCESS_ONCE_RW(abort_flag) = true;
+
+ cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
+
+diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
+index a69b22d..8523a03 100644
+--- a/arch/arm/mach-tegra/irq.c
++++ b/arch/arm/mach-tegra/irq.c
+@@ -20,6 +20,7 @@
+ #include <linux/cpu_pm.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
++#include <linux/irq.h>
+ #include <linux/irqchip/arm-gic.h>
+ #include <linux/irq.h>
+ #include <linux/kernel.h>
+diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
+index a970e7f..6f2bf9a 100644
+--- a/arch/arm/mach-ux500/pm.c
++++ b/arch/arm/mach-ux500/pm.c
+@@ -10,6 +10,7 @@
+ */
+
+ #include <linux/kernel.h>
++#include <linux/irq.h>
+ #include <linux/irqchip/arm-gic.h>
+ #include <linux/delay.h>
+ #include <linux/io.h>
+diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
+index 7cd9865..a00b6ab 100644
+--- a/arch/arm/mach-zynq/platsmp.c
++++ b/arch/arm/mach-zynq/platsmp.c
+@@ -24,6 +24,7 @@
+ #include <linux/io.h>
+ #include <asm/cacheflush.h>
+ #include <asm/smp_scu.h>
++#include <linux/irq.h>
+ #include <linux/irqchip/arm-gic.h>
+ #include "common.h"
+
+diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
+index c1799dd..9111dcc 100644
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -446,6 +446,7 @@ config CPU_32v5
+
+ config CPU_32v6
+ bool
++ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+ select TLS_REG_EMUL if !CPU_32v6K && !MMU
+
+ config CPU_32v6K
+@@ -603,6 +604,7 @@ config CPU_CP15_MPU
+
+ config CPU_USE_DOMAINS
+ bool
++ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+ help
+ This option enables or disables the use of domain switching
+ via the set_fs() function.
+@@ -813,7 +815,7 @@ config NEED_KUSER_HELPERS
+
+ config KUSER_HELPERS
+ bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+- depends on MMU
++ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
+ default y
+ help
+ Warning: disabling this option may break user programs.
+@@ -827,7 +829,7 @@ config KUSER_HELPERS
+ See Documentation/arm/kernel_user_helpers.txt for details.
+
+ However, the fixed address nature of these helpers can be used
+- by ROP (return orientated programming) authors when creating
++ by ROP (Return Oriented Programming) authors when creating
+ exploits.
+
+ If all of the binaries and libraries which run on your platform
+@@ -842,7 +844,7 @@ config KUSER_HELPERS
+
+ config VDSO
+ bool "Enable VDSO for acceleration of some system calls"
+- depends on AEABI && MMU && CPU_V7
++ depends on AEABI && MMU && CPU_V7 && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+ default y if ARM_ARCH_TIMER
+ select GENERIC_TIME_VSYSCALL
+ help
+diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
+index 7d5f4c7..c6a0816 100644
+--- a/arch/arm/mm/alignment.c
++++ b/arch/arm/mm/alignment.c
+@@ -778,6 +778,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ u16 tinstr = 0;
+ int isize = 4;
+ int thumb2_32b = 0;
++ bool is_user_mode = user_mode(regs);
+
+ if (interrupts_enabled(regs))
+ local_irq_enable();
+@@ -786,14 +787,24 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+
+ if (thumb_mode(regs)) {
+ u16 *ptr = (u16 *)(instrptr & ~1);
+- fault = probe_kernel_address(ptr, tinstr);
++ if (is_user_mode) {
++ pax_open_userland();
++ fault = probe_kernel_address(ptr, tinstr);
++ pax_close_userland();
++ } else
++ fault = probe_kernel_address(ptr, tinstr);
+ tinstr = __mem_to_opcode_thumb16(tinstr);
+ if (!fault) {
+ if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
+ IS_T32(tinstr)) {
+ /* Thumb-2 32-bit */
+ u16 tinst2 = 0;
+- fault = probe_kernel_address(ptr + 1, tinst2);
++ if (is_user_mode) {
++ pax_open_userland();
++ fault = probe_kernel_address(ptr + 1, tinst2);
++ pax_close_userland();
++ } else
++ fault = probe_kernel_address(ptr + 1, tinst2);
+ tinst2 = __mem_to_opcode_thumb16(tinst2);
+ instr = __opcode_thumb32_compose(tinstr, tinst2);
+ thumb2_32b = 1;
+@@ -803,7 +814,12 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ }
+ }
+ } else {
+- fault = probe_kernel_address((void *)instrptr, instr);
++ if (is_user_mode) {
++ pax_open_userland();
++ fault = probe_kernel_address((void *)instrptr, instr);
++ pax_close_userland();
++ } else
++ fault = probe_kernel_address((void *)instrptr, instr);
+ instr = __mem_to_opcode_arm(instr);
+ }
+
+@@ -812,7 +828,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ goto bad_or_fault;
+ }
+
+- if (user_mode(regs))
++ if (is_user_mode)
+ goto user;
+
+ ai_sys += 1;
+diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
+index d1870c7..36d500f 100644
+--- a/arch/arm/mm/cache-l2x0.c
++++ b/arch/arm/mm/cache-l2x0.c
+@@ -44,7 +44,7 @@ struct l2c_init_data {
+ void (*configure)(void __iomem *);
+ void (*unlock)(void __iomem *, unsigned);
+ struct outer_cache_fns outer_cache;
+-};
++} __do_const;
+
+ #define CACHE_LINE_SIZE 32
+
+diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
+index c8c8b9e..c55cc79 100644
+--- a/arch/arm/mm/context.c
++++ b/arch/arm/mm/context.c
+@@ -43,7 +43,7 @@
+ #define NUM_USER_ASIDS ASID_FIRST_VERSION
+
+ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
+-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
++static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
+ static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
+
+ static DEFINE_PER_CPU(atomic64_t, active_asids);
+@@ -193,7 +193,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
+ {
+ static u32 cur_idx = 1;
+ u64 asid = atomic64_read(&mm->context.id);
+- u64 generation = atomic64_read(&asid_generation);
++ u64 generation = atomic64_read_unchecked(&asid_generation);
+
+ if (asid != 0) {
+ u64 newasid = generation | (asid & ~ASID_MASK);
+@@ -225,7 +225,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
+ */
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
+ if (asid == NUM_USER_ASIDS) {
+- generation = atomic64_add_return(ASID_FIRST_VERSION,
++ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
+ &asid_generation);
+ flush_context(cpu);
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+@@ -254,14 +254,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
+ cpu_set_reserved_ttbr0();
+
+ asid = atomic64_read(&mm->context.id);
+- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
++ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
+ && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
+ goto switch_mm_fastpath;
+
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+ /* Check that our ASID belongs to the current generation. */
+ asid = atomic64_read(&mm->context.id);
+- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
++ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
+ asid = new_context(mm, cpu);
+ atomic64_set(&mm->context.id, asid);
+ }
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index 3a2e678..ebdbf80 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -25,6 +25,7 @@
+ #include <asm/system_misc.h>
+ #include <asm/system_info.h>
+ #include <asm/tlbflush.h>
++#include <asm/sections.h>
+
+ #include "fault.h"
+
+@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
+ if (fixup_exception(regs))
+ return;
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (addr < TASK_SIZE) {
++ if (current->signal->curr_ip)
++ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
++ else
++ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
++ }
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ if ((fsr & FSR_WRITE) &&
++ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
++ (MODULES_VADDR <= addr && addr < MODULES_END)))
++ {
++ if (current->signal->curr_ip)
++ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
++ else
++ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
++ }
++#endif
++
+ /*
+ * No handler, we'll have to terminate things with extreme prejudice.
+ */
+@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((tsk->mm->pax_flags & MF_PAX_PAGEEXEC) && (fsr & FSR_LNX_PF)) {
++ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ tsk->thread.address = addr;
+ tsk->thread.error_code = fsr;
+ tsk->thread.trap_no = 14;
+@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ }
+ #endif /* CONFIG_MMU */
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (__force unsigned char __user *)pc+i))
++ printk(KERN_CONT "?? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++
++ printk(KERN_ERR "PAX: bytes at SP-4: ");
++ for (i = -1; i < 20; i++) {
++ unsigned long c;
++ if (get_user(c, (__force unsigned long __user *)sp+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08lx ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * First Level Translation Fault Handler
+ *
+@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
+ struct siginfo info;
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
++ if (current->signal->curr_ip)
++ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
++ else
++ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
++ goto die;
++ }
++#endif
++
+ if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
+ return;
+
++die:
+ pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
+ inf->name, fsr, addr);
+ show_pte(current->mm, addr);
+@@ -574,15 +647,118 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
+ ifsr_info[nr].name = name;
+ }
+
++asmlinkage int sys_sigreturn(struct pt_regs *regs);
++asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
++
+ asmlinkage void __exception
+ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
+ {
+ const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
+ struct siginfo info;
++ unsigned long pc = instruction_pointer(regs);
++
++ if (user_mode(regs)) {
++ unsigned long sigpage = current->mm->context.sigpage;
++
++ if (sigpage <= pc && pc < sigpage + 7*4) {
++ if (pc < sigpage + 3*4)
++ sys_sigreturn(regs);
++ else
++ sys_rt_sigreturn(regs);
++ return;
++ }
++ if (pc == 0xffff0f60UL) {
++ /*
++ * PaX: __kuser_cmpxchg64 emulation
++ */
++ // TODO
++ //regs->ARM_pc = regs->ARM_lr;
++ //return;
++ }
++ if (pc == 0xffff0fa0UL) {
++ /*
++ * PaX: __kuser_memory_barrier emulation
++ */
++ // dmb(); implied by the exception
++ regs->ARM_pc = regs->ARM_lr;
++#ifdef CONFIG_ARM_THUMB
++ if (regs->ARM_lr & 1) {
++ regs->ARM_cpsr |= PSR_T_BIT;
++ regs->ARM_pc &= ~0x1U;
++ } else
++ regs->ARM_cpsr &= ~PSR_T_BIT;
++#endif
++ return;
++ }
++ if (pc == 0xffff0fc0UL) {
++ /*
++ * PaX: __kuser_cmpxchg emulation
++ */
++ // TODO
++ //long new;
++ //int op;
++
++ //op = FUTEX_OP_SET << 28;
++ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
++ //regs->ARM_r0 = old != new;
++ //regs->ARM_pc = regs->ARM_lr;
++ //return;
++ }
++ if (pc == 0xffff0fe0UL) {
++ /*
++ * PaX: __kuser_get_tls emulation
++ */
++ regs->ARM_r0 = current_thread_info()->tp_value[0];
++ regs->ARM_pc = regs->ARM_lr;
++#ifdef CONFIG_ARM_THUMB
++ if (regs->ARM_lr & 1) {
++ regs->ARM_cpsr |= PSR_T_BIT;
++ regs->ARM_pc &= ~0x1U;
++ } else
++ regs->ARM_cpsr &= ~PSR_T_BIT;
++#endif
++ return;
++ }
++ }
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
++ if (current->signal->curr_ip)
++ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
++ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
++ else
++ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
++ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
++ goto die;
++ }
++#endif
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
++#ifdef CONFIG_THUMB2_KERNEL
++ unsigned short bkpt;
++
++ if (!probe_kernel_address((const unsigned short *)pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
++#else
++ unsigned int bkpt;
++
++ if (!probe_kernel_address((const unsigned int *)pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
++#endif
++ current->thread.error_code = ifsr;
++ current->thread.trap_no = 0;
++ pax_report_refcount_error(regs, NULL);
++ fixup_exception(regs);
++ return;
++ }
++ }
++#endif
+
+ if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
+ return;
+
++die:
+ pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
+ inf->name, ifsr, addr);
+
+diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
+index 67532f2..10b646e 100644
+--- a/arch/arm/mm/fault.h
++++ b/arch/arm/mm/fault.h
+@@ -3,6 +3,7 @@
+
+ /*
+ * Fault status register encodings. We steal bit 31 for our own purposes.
++ * Set when the FSR value is from an instruction fault.
+ */
+ #define FSR_LNX_PF (1 << 31)
+ #define FSR_WRITE (1 << 11)
+@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
+ }
+ #endif
+
++/* valid for LPAE and !LPAE */
++static inline int is_xn_fault(unsigned int fsr)
++{
++ return ((fsr_fs(fsr) & 0x3c) == 0xc);
++}
++
++static inline int is_domain_fault(unsigned int fsr)
++{
++ return ((fsr_fs(fsr) & 0xD) == 0x9);
++}
++
+ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
+ void early_abt_enable(void);
+
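
The two helpers added to fault.h classify an already-decoded fault status: is_domain_fault() matches both the section (0b01001) and page (0b01011) domain-fault codes of the short-descriptor format with one mask, and is_xn_fault() matches the permission-fault codes (0b011x1 short-descriptor, 0b0011xx LPAE), which is what an instruction fetch from non-executable memory reports. A quick check of the masks; unlike the kernel helpers, the copies below take the FS value after fsr_fs() extraction:

    #include <stdio.h>

    static int is_xn_fault(unsigned int fs)     { return (fs & 0x3c) == 0xc; }
    static int is_domain_fault(unsigned int fs) { return (fs & 0x0d) == 0x9; }

    int main(void)
    {
        /* Short-descriptor FS codes, already extracted (post-fsr_fs()). */
        unsigned int dom_section  = 0x9, dom_page  = 0xB;   /* domain faults */
        unsigned int perm_section = 0xD, perm_page = 0xF;   /* permission faults */

        printf("domain:     %d %d\n", is_domain_fault(dom_section), is_domain_fault(dom_page));
        printf("permission: %d %d\n", is_xn_fault(perm_section), is_xn_fault(perm_page));
        return 0;
    }
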
+diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
+index 370581a..b985cc1 100644
+--- a/arch/arm/mm/init.c
++++ b/arch/arm/mm/init.c
+@@ -747,7 +747,46 @@ void free_tcmmem(void)
+ {
+ #ifdef CONFIG_HAVE_TCM
+ extern char __tcm_start, __tcm_end;
++#endif
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long addr;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ int cpu_arch = cpu_architecture();
++ unsigned int cr = get_cr();
++
++ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
++ /* make pages tables, etc before .text NX */
++ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ __section_update(pmd, addr, PMD_SECT_XN);
++ }
++ /* make init NX */
++ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ __section_update(pmd, addr, PMD_SECT_XN);
++ }
++ /* make kernel code/rodata RX */
++ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++#ifdef CONFIG_ARM_LPAE
++ __section_update(pmd, addr, PMD_SECT_RDONLY);
++#else
++ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
++#endif
++ }
++ }
++#endif
++
++#ifdef CONFIG_HAVE_TCM
+ poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
+ free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
+ #endif
+diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
+index ff0eed2..f17f1c9 100644
+--- a/arch/arm/mm/ioremap.c
++++ b/arch/arm/mm/ioremap.c
+@@ -411,9 +411,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
+ unsigned int mtype;
+
+ if (cached)
+- mtype = MT_MEMORY_RWX;
++ mtype = MT_MEMORY_RX;
+ else
+- mtype = MT_MEMORY_RWX_NONCACHED;
++ mtype = MT_MEMORY_RX_NONCACHED;
+
+ return __arm_ioremap_caller(phys_addr, size, mtype,
+ __builtin_return_address(0));
+diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
+index 66353ca..8aad9f8 100644
+--- a/arch/arm/mm/mmap.c
++++ b/arch/arm/mm/mmap.c
+@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ struct vm_area_struct *vma;
+ int do_align = 0;
+ int aliasing = cache_is_vipt_aliasing();
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ /*
+@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+@@ -99,19 +103,21 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ info.high_limit = TASK_SIZE;
+ info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
++ info.threadstack_offset = offset;
+ return vm_unmapped_area(&info);
+ }
+
+ unsigned long
+-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+- const unsigned long len, const unsigned long pgoff,
+- const unsigned long flags)
++arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0,
++ unsigned long len, unsigned long pgoff,
++ unsigned long flags)
+ {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
+ int do_align = 0;
+ int aliasing = cache_is_vipt_aliasing();
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ /*
+@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ return addr;
+ }
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* requesting a specific address */
+ if (addr) {
+ if (do_align)
+@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ else
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ info.high_limit = mm->mmap_base;
+ info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+
+ /*
+@@ -182,14 +192,30 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ unsigned long random_factor = 0UL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ } else {
+ mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ }
+ }
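
In the arch_pick_mmap_layout() hunk above, PAX_RANDMMAP adds per-process deltas on top of the stock random_factor: the bottom-up (legacy) layout moves mmap_base up by mm->delta_mmap, while the top-down layout moves it down by mm->delta_mmap + mm->delta_stack. A toy calculation of both bases; the delta values below are invented, and mmap_base() is simplified to TASK_SIZE minus the random factor:

    #include <stdio.h>

    #define TASK_SIZE          0xC0000000UL
    #define TASK_UNMAPPED_BASE (TASK_SIZE / 3)   /* common ARM default */

    int main(void)
    {
        unsigned long random_factor = 0x00200000UL; /* stand-in for arch_mmap_rnd() */
        unsigned long delta_mmap    = 0x01000000UL; /* stand-in for mm->delta_mmap */
        unsigned long delta_stack   = 0x00100000UL; /* stand-in for mm->delta_stack */

        /* bottom-up ("legacy") layout: base moves up */
        unsigned long legacy_base  = TASK_UNMAPPED_BASE + random_factor + delta_mmap;

        /* top-down layout: base moves down, widening the gap below the stack
         * (stock mmap_base() is more involved; simplified here) */
        unsigned long topdown_base = (TASK_SIZE - random_factor) - (delta_mmap + delta_stack);

        printf("legacy  mmap_base = %#lx\n", legacy_base);
        printf("topdown mmap_base = %#lx\n", topdown_base);
        return 0;
    }
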
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index 4001dd1..c6dce7b 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -243,6 +243,14 @@ __setup("noalign", noalign_setup);
+ #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
+ #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
+
++#ifdef CONFIG_PAX_KERNEXEC
++#define L_PTE_KERNEXEC L_PTE_RDONLY
++#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
++#else
++#define L_PTE_KERNEXEC L_PTE_DIRTY
++#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
++#endif
++
+ static struct mem_type mem_types[] __ro_after_init = {
+ [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+@@ -272,19 +280,19 @@ static struct mem_type mem_types[] __ro_after_init = {
+ .prot_sect = PROT_SECT_DEVICE,
+ .domain = DOMAIN_IO,
+ },
+- [MT_UNCACHED] = {
++ [MT_UNCACHED_RW] = {
+ .prot_pte = PROT_PTE_DEVICE,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+ .domain = DOMAIN_IO,
+ },
+- [MT_CACHECLEAN] = {
+- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
++ [MT_CACHECLEAN_RO] = {
++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
+ .domain = DOMAIN_KERNEL,
+ },
+ #ifndef CONFIG_ARM_LPAE
+- [MT_MINICLEAN] = {
+- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
++ [MT_MINICLEAN_RO] = {
++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
+ .domain = DOMAIN_KERNEL,
+ },
+ #endif
+@@ -300,7 +308,7 @@ static struct mem_type mem_types[] __ro_after_init = {
+ .prot_l1 = PMD_TYPE_TABLE,
+ .domain = DOMAIN_VECTORS,
+ },
+- [MT_MEMORY_RWX] = {
++ [__MT_MEMORY_RWX] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+@@ -313,17 +321,30 @@ static struct mem_type mem_types[] __ro_after_init = {
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+ .domain = DOMAIN_KERNEL,
+ },
+- [MT_ROM] = {
+- .prot_sect = PMD_TYPE_SECT,
++ [MT_MEMORY_RX] = {
++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
++ .prot_l1 = PMD_TYPE_TABLE,
++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
++ .domain = DOMAIN_KERNEL,
++ },
++ [MT_ROM_RX] = {
++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
+ .domain = DOMAIN_KERNEL,
+ },
+- [MT_MEMORY_RWX_NONCACHED] = {
++ [MT_MEMORY_RW_NONCACHED] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_MT_BUFFERABLE,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+ .domain = DOMAIN_KERNEL,
+ },
++ [MT_MEMORY_RX_NONCACHED] = {
++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
++ L_PTE_MT_BUFFERABLE,
++ .prot_l1 = PMD_TYPE_TABLE,
++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
++ .domain = DOMAIN_KERNEL,
++ },
+ [MT_MEMORY_RW_DTCM] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_XN,
+@@ -331,9 +352,10 @@ static struct mem_type mem_types[] __ro_after_init = {
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+ .domain = DOMAIN_KERNEL,
+ },
+- [MT_MEMORY_RWX_ITCM] = {
+- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
++ [MT_MEMORY_RX_ITCM] = {
++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
+ .prot_l1 = PMD_TYPE_TABLE,
++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
+ .domain = DOMAIN_KERNEL,
+ },
+ [MT_MEMORY_RW_SO] = {
+@@ -586,9 +608,14 @@ static void __init build_mem_type_table(void)
+ * Mark cache clean areas and XIP ROM read only
+ * from SVC mode and no access from userspace.
+ */
+- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++#ifdef CONFIG_PAX_KERNEXEC
++ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++#endif
++ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+ #endif
+
+ /*
+@@ -605,13 +632,17 @@ static void __init build_mem_type_table(void)
+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
+- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
+- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
++ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
++ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
++ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
++ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
+- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
+- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
++ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
+ }
+ }
+
+@@ -622,15 +653,20 @@ static void __init build_mem_type_table(void)
+ if (cpu_arch >= CPU_ARCH_ARMv6) {
+ if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+ /* Non-cacheable Normal is XCB = 001 */
+- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
++ PMD_SECT_BUFFERED;
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
+ PMD_SECT_BUFFERED;
+ } else {
+ /* For both ARMv6 and non-TEX-remapping ARMv7 */
+- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
++ PMD_SECT_TEX(1);
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
+ PMD_SECT_TEX(1);
+ }
+ } else {
+- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+ }
+
+ #ifdef CONFIG_ARM_LPAE
+@@ -651,6 +687,8 @@ static void __init build_mem_type_table(void)
+ user_pgprot |= PTE_EXT_PXN;
+ #endif
+
++ user_pgprot |= __supported_pte_mask;
++
+ for (i = 0; i < 16; i++) {
+ pteval_t v = pgprot_val(protection_map[i]);
+ protection_map[i] = __pgprot(v | user_pgprot);
+@@ -668,21 +706,24 @@ static void __init build_mem_type_table(void)
+
+ mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
+ mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
+- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
+- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
++ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
++ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
++ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
++ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
+- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
+- mem_types[MT_ROM].prot_sect |= cp->pmd;
++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
++ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
+
+ switch (cp->pmd) {
+ case PMD_SECT_WT:
+- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
++ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
+ break;
+ case PMD_SECT_WB:
+ case PMD_SECT_WBWA:
+- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
++ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
+ break;
+ }
+ pr_info("Memory policy: %sData cache %s\n",
+@@ -959,7 +1000,7 @@ static void __init create_mapping(struct map_desc *md)
+ return;
+ }
+
+- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
++ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
+ md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
+ (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
+ pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
+@@ -1320,18 +1361,15 @@ void __init arm_mm_memblock_reserve(void)
+ * Any other function or debugging method which may touch any device _will_
+ * crash the kernel.
+ */
++
++static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
++
+ static void __init devicemaps_init(const struct machine_desc *mdesc)
+ {
+ struct map_desc map;
+ unsigned long addr;
+- void *vectors;
+
+- /*
+- * Allocate the vector page early.
+- */
+- vectors = early_alloc(PAGE_SIZE * 2);
+-
+- early_trap_init(vectors);
++ early_trap_init(&vectors);
+
+ /*
+ * Clear page table except top pmd used by early fixmaps
+@@ -1347,7 +1385,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
+ map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
+ map.virtual = MODULES_VADDR;
+ map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+- map.type = MT_ROM;
++ map.type = MT_ROM_RX;
+ create_mapping(&map);
+ #endif
+
+@@ -1358,14 +1396,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
+ map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
+ map.virtual = FLUSH_BASE;
+ map.length = SZ_1M;
+- map.type = MT_CACHECLEAN;
++ map.type = MT_CACHECLEAN_RO;
+ create_mapping(&map);
+ #endif
+ #ifdef FLUSH_BASE_MINICACHE
+ map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
+ map.virtual = FLUSH_BASE_MINICACHE;
+ map.length = SZ_1M;
+- map.type = MT_MINICLEAN;
++ map.type = MT_MINICLEAN_RO;
+ create_mapping(&map);
+ #endif
+
+@@ -1374,7 +1412,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
+ * location (0xffff0000). If we aren't using high-vectors, also
+ * create a mapping at the low-vectors virtual address.
+ */
+- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
++ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
+ map.virtual = 0xffff0000;
+ map.length = PAGE_SIZE;
+ #ifdef CONFIG_KUSER_HELPERS
+@@ -1437,12 +1475,14 @@ static void __init kmap_init(void)
+ static void __init map_lowmem(void)
+ {
+ struct memblock_region *reg;
++#ifndef CONFIG_PAX_KERNEXEC
+ #ifdef CONFIG_XIP_KERNEL
+ phys_addr_t kernel_x_start = round_down(__pa(_sdata), SECTION_SIZE);
+ #else
+ phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+ #endif
+ phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
++#endif
+
+ /* Map all the lowmem memory banks. */
+ for_each_memblock(memory, reg) {
+@@ -1458,11 +1498,48 @@ static void __init map_lowmem(void)
+ if (start >= end)
+ break;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ map.pfn = __phys_to_pfn(start);
++ map.virtual = __phys_to_virt(start);
++ map.length = end - start;
++
++ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
++ struct map_desc kernel;
++ struct map_desc initmap;
++
++ /* when freeing initmem we will make this RW */
++ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
++ initmap.virtual = (unsigned long)__init_begin;
++ initmap.length = _sdata - __init_begin;
++ initmap.type = __MT_MEMORY_RWX;
++ create_mapping(&initmap);
++
++ /* when freeing initmem we will make this RX */
++ kernel.pfn = __phys_to_pfn(__pa(_stext));
++ kernel.virtual = (unsigned long)_stext;
++ kernel.length = __init_begin - _stext;
++ kernel.type = __MT_MEMORY_RWX;
++ create_mapping(&kernel);
++
++ if (map.virtual < (unsigned long)_stext) {
++ map.length = (unsigned long)_stext - map.virtual;
++ map.type = __MT_MEMORY_RWX;
++ create_mapping(&map);
++ }
++
++ map.pfn = __phys_to_pfn(__pa(_sdata));
++ map.virtual = (unsigned long)_sdata;
++ map.length = end - __pa(_sdata);
++ }
++
++ map.type = MT_MEMORY_RW;
++ create_mapping(&map);
++#else
+ if (end < kernel_x_start) {
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+- map.type = MT_MEMORY_RWX;
++ map.type = __MT_MEMORY_RWX;
+
+ create_mapping(&map);
+ } else if (start >= kernel_x_end) {
+@@ -1486,7 +1563,7 @@ static void __init map_lowmem(void)
+ map.pfn = __phys_to_pfn(kernel_x_start);
+ map.virtual = __phys_to_virt(kernel_x_start);
+ map.length = kernel_x_end - kernel_x_start;
+- map.type = MT_MEMORY_RWX;
++ map.type = __MT_MEMORY_RWX;
+
+ create_mapping(&map);
+
+@@ -1499,6 +1576,7 @@ static void __init map_lowmem(void)
+ create_mapping(&map);
+ }
+ }
++#endif
+ }
+ }
+
+diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
+index 93d0b6d..2db6d99 100644
+--- a/arch/arm/net/bpf_jit_32.c
++++ b/arch/arm/net/bpf_jit_32.c
+@@ -20,6 +20,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/hwcap.h>
+ #include <asm/opcodes.h>
++#include <asm/pgtable.h>
+
+ #include "bpf_jit_32.h"
+
+@@ -72,54 +73,38 @@ struct jit_ctx {
+ #endif
+ };
+
++#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
++int bpf_jit_enable __read_only;
++#else
+ int bpf_jit_enable __read_mostly;
++#endif
+
+-static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
+- unsigned int size)
+-{
+- void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
+-
+- if (!ptr)
+- return -EFAULT;
+- memcpy(ret, ptr, size);
+- return 0;
+-}
+-
+-static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
++static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
+ {
+ u8 ret;
+ int err;
+
+- if (offset < 0)
+- err = call_neg_helper(skb, offset, &ret, 1);
+- else
+- err = skb_copy_bits(skb, offset, &ret, 1);
++ err = skb_copy_bits(skb, offset, &ret, 1);
+
+ return (u64)err << 32 | ret;
+ }
+
+-static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
++static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
+ {
+ u16 ret;
+ int err;
+
+- if (offset < 0)
+- err = call_neg_helper(skb, offset, &ret, 2);
+- else
+- err = skb_copy_bits(skb, offset, &ret, 2);
++ err = skb_copy_bits(skb, offset, &ret, 2);
+
+ return (u64)err << 32 | ntohs(ret);
+ }
+
+-static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
++static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
+ {
+ u32 ret;
+ int err;
+
+- if (offset < 0)
+- err = call_neg_helper(skb, offset, &ret, 4);
+- else
+- err = skb_copy_bits(skb, offset, &ret, 4);
++ err = skb_copy_bits(skb, offset, &ret, 4);
+
+ return (u64)err << 32 | ntohl(ret);
+ }
+@@ -191,8 +176,10 @@ static void jit_fill_hole(void *area, unsigned int size)
+ {
+ u32 *ptr;
+ /* We are guaranteed to have aligned memory. */
++ pax_open_kernel();
+ for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
+ *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
++ pax_close_kernel();
+ }
+
+ static void build_prologue(struct jit_ctx *ctx)
+@@ -554,6 +541,9 @@ static int build_body(struct jit_ctx *ctx)
+ case BPF_LD | BPF_B | BPF_ABS:
+ load_order = 0;
+ load:
++ /* the interpreter will deal with the negative K */
++ if ((int)k < 0)
++ return -ENOTSUPP;
+ emit_mov_i(r_off, k, ctx);
+ load_common:
+ ctx->seen |= SEEN_DATA | SEEN_CALL;
+@@ -568,18 +558,6 @@ static int build_body(struct jit_ctx *ctx)
+ condt = ARM_COND_HI;
+ }
+
+- /*
+- * test for negative offset, only if we are
+- * currently scheduled to take the fast
+- * path. this will update the flags so that
+- * the slowpath instruction are ignored if the
+- * offset is negative.
+- *
+- * for loard_order == 0 the HI condition will
+- * make loads at offset 0 take the slow path too.
+- */
+- _emit(condt, ARM_CMP_I(r_off, 0), ctx);
+-
+ _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
+ ctx);
+
+diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
+index 8151bde..9be301f 100644
+--- a/arch/arm/plat-iop/setup.c
++++ b/arch/arm/plat-iop/setup.c
+@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
+ .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
+ .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
+ .length = IOP3XX_PERIPHERAL_SIZE,
+- .type = MT_UNCACHED,
++ .type = MT_UNCACHED_RW,
+ },
+ };
+
+diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
+index a5bc92d..0bb4730 100644
+--- a/arch/arm/plat-omap/sram.c
++++ b/arch/arm/plat-omap/sram.c
+@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
+ * Looks like we need to preserve some bootloader code at the
+ * beginning of SRAM for jumping to flash for reboot to work...
+ */
++ pax_open_kernel();
+ memset_io(omap_sram_base + omap_sram_skip, 0,
+ omap_sram_size - omap_sram_skip);
++ pax_close_kernel();
+ }
+diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
+index a4ec240..96faf9b 100644
+--- a/arch/arm/probes/kprobes/core.c
++++ b/arch/arm/probes/kprobes/core.c
+@@ -485,6 +485,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
+ return (void *)orig_ret_address;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -493,6 +494,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr. */
+ regs->ARM_lr = (unsigned long)&kretprobe_trampoline;
+ }
++#endif
+
+ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+ {
+@@ -605,10 +607,12 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+ return 0;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ return 0;
+ }
++#endif
+
+ #ifdef CONFIG_THUMB2_KERNEL
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 969ef88..305b856 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -896,6 +896,7 @@ config RELOCATABLE
+
+ config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel image"
++ depends on BROKEN_SECURITY
+ select ARM64_MODULE_PLTS if MODULES
+ select RELOCATABLE
+ help
+diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
+index b661fe7..6d124fc 100644
+--- a/arch/arm64/Kconfig.debug
++++ b/arch/arm64/Kconfig.debug
+@@ -6,6 +6,7 @@ config ARM64_PTDUMP
+ bool "Export kernel pagetable layout to userspace via debugfs"
+ depends on DEBUG_KERNEL
+ select DEBUG_FS
++ depends on !GRKERNSEC_KMEM
+ help
+ Say Y here if you want to show the kernel pagetable layout in a
+ debugfs file. This information is only useful for kernel developers
+diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
+index aefda98..2937874 100644
+--- a/arch/arm64/crypto/sha1-ce-glue.c
++++ b/arch/arm64/crypto/sha1-ce-glue.c
+@@ -29,7 +29,7 @@ struct sha1_ce_state {
+ u32 finalize;
+ };
+
+-asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
++asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
+ int blocks);
+
+ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
+@@ -39,8 +39,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
+
+ sctx->finalize = 0;
+ kernel_neon_begin_partial(16);
+- sha1_base_do_update(desc, data, len,
+- (sha1_block_fn *)sha1_ce_transform);
++ sha1_base_do_update(desc, data, len, sha1_ce_transform);
+ kernel_neon_end();
+
+ return 0;
+@@ -64,10 +63,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
+ sctx->finalize = finalize;
+
+ kernel_neon_begin_partial(16);
+- sha1_base_do_update(desc, data, len,
+- (sha1_block_fn *)sha1_ce_transform);
++ sha1_base_do_update(desc, data, len, sha1_ce_transform);
+ if (!finalize)
+- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
++ sha1_base_do_finalize(desc, sha1_ce_transform);
+ kernel_neon_end();
+ return sha1_base_finish(desc, out);
+ }
+@@ -78,7 +76,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
+
+ sctx->finalize = 0;
+ kernel_neon_begin_partial(16);
+- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
++ sha1_base_do_finalize(desc, sha1_ce_transform);
+ kernel_neon_end();
+ return sha1_base_finish(desc, out);
+ }
+diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
+index c0235e0..86eb684 100644
+--- a/arch/arm64/include/asm/atomic.h
++++ b/arch/arm64/include/asm/atomic.h
+@@ -57,11 +57,13 @@
+ #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+
+ #define atomic_add_return_relaxed atomic_add_return_relaxed
++#define atomic_add_return_unchecked_relaxed atomic_add_return_relaxed
+ #define atomic_add_return_acquire atomic_add_return_acquire
+ #define atomic_add_return_release atomic_add_return_release
+ #define atomic_add_return atomic_add_return
+
+ #define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v))
++#define atomic_inc_return_unchecked_relaxed(v) atomic_add_return_relaxed(1, (v))
+ #define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v))
+ #define atomic_inc_return_release(v) atomic_add_return_release(1, (v))
+ #define atomic_inc_return(v) atomic_add_return(1, (v))
+@@ -128,6 +130,8 @@
+ #define __atomic_add_unless(v, a, u) ___atomic_add_unless(v, a, u,)
+ #define atomic_andnot atomic_andnot
+
++#define atomic_inc_return_unchecked_relaxed(v) atomic_add_return_relaxed(1, (v))
++
+ /*
+ * 64-bit atomic operations.
+ */
+@@ -206,5 +210,16 @@
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++#define atomic64_xchg_unchecked(v, n) atomic64_xchg((v), (n))
++
+ #endif
+ #endif
+diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
+index 5082b30..9ef38c2 100644
+--- a/arch/arm64/include/asm/cache.h
++++ b/arch/arm64/include/asm/cache.h
+@@ -16,10 +16,14 @@
+ #ifndef __ASM_CACHE_H
+ #define __ASM_CACHE_H
+
++#include <linux/const.h>
++
+ #include <asm/cachetype.h>
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 7
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
+index 5394c84..05e5a95 100644
+--- a/arch/arm64/include/asm/percpu.h
++++ b/arch/arm64/include/asm/percpu.h
+@@ -123,16 +123,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
+ {
+ switch (size) {
+ case 1:
+- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
++ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
+ break;
+ case 2:
+- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
++ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
+ break;
+ case 4:
+- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
++ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
+ break;
+ case 8:
+- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
++ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
+ break;
+ default:
+ BUILD_BUG();
+diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
+index d25f4f1..61d52da 100644
+--- a/arch/arm64/include/asm/pgalloc.h
++++ b/arch/arm64/include/asm/pgalloc.h
+@@ -51,6 +51,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ {
+ __pud_populate(pud, __pa(pmd), PMD_TYPE_TABLE);
+ }
++
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
+ #else
+ static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
+ {
+@@ -80,6 +85,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+ {
+ __pgd_populate(pgd, __pa(pud), PUD_TYPE_TABLE);
+ }
++
++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
++{
++ pgd_populate(mm, pgd, pud);
++}
+ #else
+ static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
+ {
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index ffbb9a5..d8b49ff 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -23,6 +23,9 @@
+ #include <asm/pgtable-hwdef.h>
+ #include <asm/pgtable-prot.h>
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ /*
+ * VMALLOC range.
+ *
+@@ -728,6 +731,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
+ #define kc_vaddr_to_offset(v) ((v) & ~VA_START)
+ #define kc_offset_to_vaddr(o) ((o) | VA_START)
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif /* __ASM_PGTABLE_H */
+diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
+index 2eb714c..3a10471 100644
+--- a/arch/arm64/include/asm/string.h
++++ b/arch/arm64/include/asm/string.h
+@@ -17,40 +17,40 @@
+ #define __ASM_STRING_H
+
+ #define __HAVE_ARCH_STRRCHR
+-extern char *strrchr(const char *, int c);
++extern char *strrchr(const char *, int c) __nocapture(-1);
+
+ #define __HAVE_ARCH_STRCHR
+-extern char *strchr(const char *, int c);
++extern char *strchr(const char *, int c) __nocapture(-1);
+
+ #define __HAVE_ARCH_STRCMP
+-extern int strcmp(const char *, const char *);
++extern int strcmp(const char *, const char *) __nocapture();
+
+ #define __HAVE_ARCH_STRNCMP
+-extern int strncmp(const char *, const char *, __kernel_size_t);
++extern int strncmp(const char *, const char *, __kernel_size_t) __nocapture(1, 2);
+
+ #define __HAVE_ARCH_STRLEN
+-extern __kernel_size_t strlen(const char *);
++extern __kernel_size_t strlen(const char *) __nocapture(1);
+
+ #define __HAVE_ARCH_STRNLEN
+-extern __kernel_size_t strnlen(const char *, __kernel_size_t);
++extern __kernel_size_t strnlen(const char *, __kernel_size_t) __nocapture(1);
+
+ #define __HAVE_ARCH_MEMCPY
+-extern void *memcpy(void *, const void *, __kernel_size_t);
+-extern void *__memcpy(void *, const void *, __kernel_size_t);
++extern void *memcpy(void *, const void *, __kernel_size_t) __nocapture(2);
++extern void *__memcpy(void *, const void *, __kernel_size_t) __nocapture(2);
+
+ #define __HAVE_ARCH_MEMMOVE
+-extern void *memmove(void *, const void *, __kernel_size_t);
+-extern void *__memmove(void *, const void *, __kernel_size_t);
++extern void *memmove(void *, const void *, __kernel_size_t) __nocapture(2);
++extern void *__memmove(void *, const void *, __kernel_size_t) __nocapture(2);
+
+ #define __HAVE_ARCH_MEMCHR
+-extern void *memchr(const void *, int, __kernel_size_t);
++extern void *memchr(const void *, int, __kernel_size_t) __nocapture(-1);
+
+ #define __HAVE_ARCH_MEMSET
+ extern void *memset(void *, int, __kernel_size_t);
+ extern void *__memset(void *, int, __kernel_size_t);
+
+ #define __HAVE_ARCH_MEMCMP
+-extern int memcmp(const void *, const void *, size_t);
++extern int memcmp(const void *, const void *, size_t) __nocapture(1, 2);
+
+
+ #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
+index 55d0adb..b986918 100644
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -110,6 +110,7 @@ static inline void set_fs(mm_segment_t fs)
+ */
+ #define untagged_addr(addr) sign_extend64(addr, 55)
+
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) __range_ok(addr, size)
+ #define user_addr_max get_fs
+
+@@ -279,6 +280,9 @@ static inline unsigned long __must_check __copy_from_user(void *to, const void _
+
+ static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ kasan_check_read(from, n);
+ check_object_size(from, n, true);
+ return __arch_copy_to_user(to, from, n);
+@@ -287,6 +291,10 @@ static inline unsigned long __must_check __copy_to_user(void __user *to, const v
+ static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ unsigned long res = n;
++
++ if ((long)n < 0)
++ return n;
++
+ kasan_check_write(to, n);
+
+ if (access_ok(VERIFY_READ, from, n)) {
+@@ -300,6 +308,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
+
+ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ kasan_check_read(from, n);
+
+ if (access_ok(VERIFY_WRITE, to, n)) {
+diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
+index d55a7b0..d8dbd8a 100644
+--- a/arch/arm64/kernel/hibernate.c
++++ b/arch/arm64/kernel/hibernate.c
+@@ -198,7 +198,7 @@ EXPORT_SYMBOL(arch_hibernation_header_restore);
+ static int create_safe_exec_page(void *src_start, size_t length,
+ unsigned long dst_addr,
+ phys_addr_t *phys_dst_addr,
+- void *(*allocator)(gfp_t mask),
++ unsigned long (*allocator)(gfp_t mask),
+ gfp_t mask)
+ {
+ int rc = 0;
+@@ -206,7 +206,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+- unsigned long dst = (unsigned long)allocator(mask);
++ unsigned long dst = allocator(mask);
+
+ if (!dst) {
+ rc = -ENOMEM;
+@@ -216,9 +216,9 @@ static int create_safe_exec_page(void *src_start, size_t length,
+ memcpy((void *)dst, src_start, length);
+ flush_icache_range(dst, dst + length);
+
+- pgd = pgd_offset_raw(allocator(mask), dst_addr);
++ pgd = pgd_offset_raw((pgd_t *)allocator(mask), dst_addr);
+ if (pgd_none(*pgd)) {
+- pud = allocator(mask);
++ pud = (pud_t *)allocator(mask);
+ if (!pud) {
+ rc = -ENOMEM;
+ goto out;
+@@ -228,7 +228,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
+
+ pud = pud_offset(pgd, dst_addr);
+ if (pud_none(*pud)) {
+- pmd = allocator(mask);
++ pmd = (pmd_t *)allocator(mask);
+ if (!pmd) {
+ rc = -ENOMEM;
+ goto out;
+@@ -238,7 +238,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
+
+ pmd = pmd_offset(pud, dst_addr);
+ if (pmd_none(*pmd)) {
+- pte = allocator(mask);
++ pte = (pte_t *)allocator(mask);
+ if (!pte) {
+ rc = -ENOMEM;
+ goto out;
+@@ -510,7 +510,7 @@ int swsusp_arch_resume(void)
+ rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
+ (unsigned long)hibernate_exit,
+ &phys_hibernate_exit,
+- (void *)get_safe_page, GFP_ATOMIC);
++ get_safe_page, GFP_ATOMIC);
+ if (rc) {
+ pr_err("Failed to create safe executable page for hibernate_exit code.");
+ goto out;
+diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
+index f5077ea..46b4664 100644
+--- a/arch/arm64/kernel/probes/kprobes.c
++++ b/arch/arm64/kernel/probes/kprobes.c
+@@ -639,6 +639,7 @@ void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
+ return (void *)orig_ret_address;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -652,6 +653,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ return 0;
+ }
++#endif
+
+ int __init arch_init_kprobes(void)
+ {
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index 01753cd..b65d17a 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -64,7 +64,7 @@ EXPORT_SYMBOL(__stack_chk_guard);
+ /*
+ * Function pointers to optional machine specific functions
+ */
+-void (*pm_power_off)(void);
++void (* pm_power_off)(void);
+ EXPORT_SYMBOL_GPL(pm_power_off);
+
+ void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+@@ -110,7 +110,7 @@ void machine_shutdown(void)
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this.
+ */
+-void machine_halt(void)
++void __noreturn machine_halt(void)
+ {
+ local_irq_disable();
+ smp_send_stop();
+@@ -123,12 +123,13 @@ void machine_halt(void)
+ * achieves this. When the system power is turned off, it will take all CPUs
+ * with it.
+ */
+-void machine_power_off(void)
++void __noreturn machine_power_off(void)
+ {
+ local_irq_disable();
+ smp_send_stop();
+ if (pm_power_off)
+ pm_power_off();
++ while(1);
+ }
+
+ /*
+@@ -140,7 +141,7 @@ void machine_power_off(void)
+ * executing pre-reset code, and using RAM that the primary CPU's code wishes
+ * to use. Implementing such co-ordination would be essentially impossible.
+ */
+-void machine_restart(char *cmd)
++void __noreturn machine_restart(char *cmd)
+ {
+ /* Disable interrupts first */
+ local_irq_disable();
+diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
+index c2efddf..c58e0a2 100644
+--- a/arch/arm64/kernel/stacktrace.c
++++ b/arch/arm64/kernel/stacktrace.c
+@@ -95,8 +95,8 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
+ struct pt_regs *irq_args;
+ unsigned long orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
+
+- if (object_is_on_stack((void *)orig_sp) &&
+- object_is_on_stack((void *)frame->fp)) {
++ if (object_starts_on_stack((void *)orig_sp) &&
++ object_starts_on_stack((void *)frame->fp)) {
+ frame->sp = orig_sp;
+
+ /* orig_sp is the saved pt_regs, find the elr */
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 11e5eae..d8cdfa7 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -547,7 +547,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
+ __show_regs(regs);
+ }
+
+- return sys_ni_syscall();
++ return -ENOSYS;
+ }
+
+ static const char *esr_class_str[] = {
+diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
+index c3a58a1..78fbf54 100644
+--- a/arch/avr32/include/asm/cache.h
++++ b/arch/avr32/include/asm/cache.h
+@@ -1,8 +1,10 @@
+ #ifndef __ASM_AVR32_CACHE_H
+ #define __ASM_AVR32_CACHE_H
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
+index 0388ece..87c8df1 100644
+--- a/arch/avr32/include/asm/elf.h
++++ b/arch/avr32/include/asm/elf.h
+@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00001000UL
++
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
+index 479330b..53717a8 100644
+--- a/arch/avr32/include/asm/kmap_types.h
++++ b/arch/avr32/include/asm/kmap_types.h
+@@ -2,9 +2,9 @@
+ #define __ASM_AVR32_KMAP_TYPES_H
+
+ #ifdef CONFIG_DEBUG_HIGHMEM
+-# define KM_TYPE_NR 29
++# define KM_TYPE_NR 30
+ #else
+-# define KM_TYPE_NR 14
++# define KM_TYPE_NR 15
+ #endif
+
+ #endif /* __ASM_AVR32_KMAP_TYPES_H */
+diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
+index b3977e9..4230c51a 100644
+--- a/arch/avr32/mm/fault.c
++++ b/arch/avr32/mm/fault.c
+@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
+
+ int exception_trace = 1;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (unsigned char *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address and the
+ * problem, and then passes it off to one of the appropriate routines.
+@@ -178,6 +195,16 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
+ up_read(&mm->mmap_sem);
+
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ if (exception_trace && printk_ratelimit())
+ printk("%s%s[%d]: segfault at %08lx pc %08lx "
+ "sp %08lx ecr %lu\n",
+diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
+index f3337ee..15b6f8d 100644
+--- a/arch/blackfin/Kconfig.debug
++++ b/arch/blackfin/Kconfig.debug
+@@ -18,6 +18,7 @@ config DEBUG_VERBOSE
+ config DEBUG_MMRS
+ tristate "Generate Blackfin MMR tree"
+ select DEBUG_FS
++ depends on !GRKERNSEC_KMEM
+ help
+ Create a tree of Blackfin MMRs via the debugfs tree. If
+ you enable this, you will find all MMRs laid out in the
+diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
+index 568885a..f8008df 100644
+--- a/arch/blackfin/include/asm/cache.h
++++ b/arch/blackfin/include/asm/cache.h
+@@ -7,6 +7,7 @@
+ #ifndef __ARCH_BLACKFIN_CACHE_H
+ #define __ARCH_BLACKFIN_CACHE_H
+
++#include <linux/const.h>
+ #include <linux/linkage.h> /* for asmlinkage */
+
+ /*
+@@ -14,7 +15,7 @@
+ * Blackfin loads 32 bytes for cache
+ */
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+ #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
+index aea2718..3639a60 100644
+--- a/arch/cris/include/arch-v10/arch/cache.h
++++ b/arch/cris/include/arch-v10/arch/cache.h
+@@ -1,8 +1,9 @@
+ #ifndef _ASM_ARCH_CACHE_H
+ #define _ASM_ARCH_CACHE_H
+
++#include <linux/const.h>
+ /* Etrax 100LX have 32-byte cache-lines. */
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* _ASM_ARCH_CACHE_H */
+diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
+index 7caf25d..ee65ac5 100644
+--- a/arch/cris/include/arch-v32/arch/cache.h
++++ b/arch/cris/include/arch-v32/arch/cache.h
+@@ -1,11 +1,12 @@
+ #ifndef _ASM_CRIS_ARCH_CACHE_H
+ #define _ASM_CRIS_ARCH_CACHE_H
+
++#include <linux/const.h>
+ #include <arch/hwregs/dma.h>
+
+ /* A cache-line is 32 bytes. */
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
+diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
+index 1c2a5e2..2579e5f 100644
+--- a/arch/frv/include/asm/atomic.h
++++ b/arch/frv/include/asm/atomic.h
+@@ -146,6 +146,16 @@ static inline void atomic64_dec(atomic64_t *v)
+ #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
+ #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+ int c, old;
+diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
+index 2797163..c2a401df9 100644
+--- a/arch/frv/include/asm/cache.h
++++ b/arch/frv/include/asm/cache.h
+@@ -12,10 +12,11 @@
+ #ifndef __ASM_CACHE_H
+ #define __ASM_CACHE_H
+
++#include <linux/const.h>
+
+ /* bytes per L1 cache line */
+ #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+ #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
+index 43901f2..0d8b865 100644
+--- a/arch/frv/include/asm/kmap_types.h
++++ b/arch/frv/include/asm/kmap_types.h
+@@ -2,6 +2,6 @@
+ #ifndef _ASM_KMAP_TYPES_H
+ #define _ASM_KMAP_TYPES_H
+
+-#define KM_TYPE_NR 17
++#define KM_TYPE_NR 18
+
+ #endif
+diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
+index 836f1470..4cf23f5 100644
+--- a/arch/frv/mm/elf-fdpic.c
++++ b/arch/frv/mm/elf-fdpic.c
+@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ {
+ struct vm_area_struct *vma;
+ struct vm_unmapped_area_info info;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(current->mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ goto success;
+ }
+
+@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ info.high_limit = (current->mm->start_stack - 0x00200000);
+ info.align_mask = 0;
+ info.align_offset = 0;
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+ if (!(addr & ~PAGE_MASK))
+ goto success;
+diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
+index 69952c18..4fa2908 100644
+--- a/arch/hexagon/include/asm/cache.h
++++ b/arch/hexagon/include/asm/cache.h
+@@ -21,9 +21,11 @@
+ #ifndef __ASM_CACHE_H
+ #define __ASM_CACHE_H
+
++#include <linux/const.h>
++
+ /* Bytes per L1 cache line */
+-#define L1_CACHE_SHIFT (5)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
+index 18ca6a9..77b0e0d 100644
+--- a/arch/ia64/Kconfig
++++ b/arch/ia64/Kconfig
+@@ -519,6 +519,7 @@ config KEXEC
+ bool "kexec system call"
+ depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
+ select KEXEC_CORE
++ depends on !GRKERNSEC_KMEM
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
+index c100d78..c44d46d 100644
+--- a/arch/ia64/Makefile
++++ b/arch/ia64/Makefile
+@@ -98,5 +98,6 @@ endef
+ archprepare: make_nr_irqs_h
+ PHONY += make_nr_irqs_h
+
++GCC_PLUGINS_make_nr_irqs_h := n
+ make_nr_irqs_h:
+ $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
+diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
+index 65d4bb2..8b2e661 100644
+--- a/arch/ia64/include/asm/atomic.h
++++ b/arch/ia64/include/asm/atomic.h
+@@ -323,4 +323,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
+ #define atomic64_inc(v) atomic64_add(1, (v))
+ #define atomic64_dec(v) atomic64_sub(1, (v))
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* _ASM_IA64_ATOMIC_H */
+diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
+index 988254a..e1ee885 100644
+--- a/arch/ia64/include/asm/cache.h
++++ b/arch/ia64/include/asm/cache.h
+@@ -1,6 +1,7 @@
+ #ifndef _ASM_IA64_CACHE_H
+ #define _ASM_IA64_CACHE_H
+
++#include <linux/const.h>
+
+ /*
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+@@ -9,7 +10,7 @@
+
+ /* Bytes per L1 (data) cache line. */
+ #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #ifdef CONFIG_SMP
+ # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
+diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
+index 5a83c5c..4d7f553 100644
+--- a/arch/ia64/include/asm/elf.h
++++ b/arch/ia64/include/asm/elf.h
+@@ -42,6 +42,13 @@
+ */
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#endif
++
+ #define PT_IA_64_UNWIND 0x70000001
+
+ /* IA-64 relocations: */
+diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
+index f5e70e9..624fad5 100644
+--- a/arch/ia64/include/asm/pgalloc.h
++++ b/arch/ia64/include/asm/pgalloc.h
+@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
+ pgd_val(*pgd_entry) = __pa(pud);
+ }
+
++static inline void
++pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
++{
++ pgd_populate(mm, pgd_entry, pud);
++}
++
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+ return quicklist_alloc(0, GFP_KERNEL, NULL);
+@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
+ pud_val(*pud_entry) = __pa(pmd);
+ }
+
++static inline void
++pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
++{
++ pud_populate(mm, pud_entry, pmd);
++}
++
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+ return quicklist_alloc(0, GFP_KERNEL, NULL);
+diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
+index 9f3ed9e..c99b418 100644
+--- a/arch/ia64/include/asm/pgtable.h
++++ b/arch/ia64/include/asm/pgtable.h
+@@ -12,7 +12,7 @@
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+-
++#include <linux/const.h>
+ #include <asm/mman.h>
+ #include <asm/page.h>
+ #include <asm/processor.h>
+@@ -139,6 +139,17 @@
+ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++# define PAGE_COPY_NOEXEC PAGE_COPY
++#endif
++
+ #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+ #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
+ #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
+index ca9e761..40dffaf 100644
+--- a/arch/ia64/include/asm/spinlock.h
++++ b/arch/ia64/include/asm/spinlock.h
+@@ -73,7 +73,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+ unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
+
+ asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
++ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
+ }
+
+ static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
+diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
+index bfe1319..da0014b 100644
+--- a/arch/ia64/include/asm/uaccess.h
++++ b/arch/ia64/include/asm/uaccess.h
+@@ -70,6 +70,7 @@
+ && ((segment).seg == KERNEL_DS.seg \
+ || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
+ })
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
+
+ /*
+@@ -241,17 +242,23 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
+ static inline unsigned long
+ __copy_to_user (void __user *to, const void *from, unsigned long count)
+ {
++ if (count > INT_MAX)
++ return count;
++
+ check_object_size(from, count, true);
+
+- return __copy_user(to, (__force void __user *) from, count);
++ return __copy_user(to, (void __force_user *) from, count);
+ }
+
+ static inline unsigned long
+ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ {
++ if (count > INT_MAX)
++ return count;
++
+ check_object_size(to, count, false);
+
+- return __copy_user((__force void __user *) to, from, count);
++ return __copy_user((void __force_user *) to, from, count);
+ }
+
+ #define __copy_to_user_inatomic __copy_to_user
+@@ -260,11 +267,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ ({ \
+ void __user *__cu_to = (to); \
+ const void *__cu_from = (from); \
+- long __cu_len = (n); \
++ unsigned long __cu_len = (n); \
+ \
+- if (__access_ok(__cu_to, __cu_len, get_fs())) { \
+- check_object_size(__cu_from, __cu_len, true); \
+- __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
++ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
++ check_object_size(__cu_from, __cu_len, true); \
++ __cu_len = __copy_user(__cu_to, (void __force_user *) __cu_from, __cu_len); \
+ } \
+ __cu_len; \
+ })
+@@ -272,10 +279,10 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ static inline unsigned long
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+- check_object_size(to, n, false);
+- if (likely(__access_ok(from, n, get_fs())))
+- n = __copy_user((__force void __user *) to, from, n);
+- else
++ if (likely(__access_ok(from, n, get_fs()))) {
++ check_object_size(to, n, false);
++ n = __copy_user((void __force_user *) to, from, n);
++ } else if ((long)n > 0)
+ memset(to, 0, n);
+ return n;
+ }
+diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
+index c7c5144..7e31461 100644
+--- a/arch/ia64/kernel/kprobes.c
++++ b/arch/ia64/kernel/kprobes.c
+@@ -499,6 +499,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+ return 1;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -507,6 +508,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr */
+ regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
+ }
++#endif
+
+ /* Check the instruction in the slot is break */
+ static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot)
+@@ -1119,6 +1121,7 @@ int __init arch_init_kprobes(void)
+ return register_kprobe(&trampoline_p);
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ if (p->addr ==
+@@ -1127,3 +1130,4 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+
+ return 0;
+ }
++#endif
+diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
+index 6ab0ae7..88f1b60 100644
+--- a/arch/ia64/kernel/module.c
++++ b/arch/ia64/kernel/module.c
+@@ -486,13 +486,13 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
+ static inline int
+ in_init (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->init_layout.base < mod->init_layout.size;
++ return within_module_init(addr, mod);
+ }
+
+ static inline int
+ in_core (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->core_layout.base < mod->core_layout.size;
++ return within_module_core(addr, mod);
+ }
+
+ static inline int
+@@ -676,6 +676,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
+
+ case RV_BDREL:
+ val -= (uint64_t) (in_init(mod, val) ? mod->init_layout.base : mod->core_layout.base);
++ if (within_module_rx(val, &mod->init_layout))
++ val -= mod->init_layout.base_rx;
++ else if (within_module_rw(val, &mod->init_layout))
++ val -= mod->init_layout.base_rw;
++ else if (within_module_rx(val, &mod->core_layout))
++ val -= mod->core_layout.base_rx;
++ else if (within_module_rw(val, &mod->core_layout))
++ val -= mod->core_layout.base_rw;
+ break;
+
+ case RV_LTV:
+@@ -810,15 +818,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
+ * addresses have been selected...
+ */
+ uint64_t gp;
+- if (mod->core_layout.size > MAX_LTOFF)
++ if (mod->core_layout.size_rx + mod->core_layout.size_rw > MAX_LTOFF)
+ /*
+ * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
+ * at the end of the module.
+ */
+- gp = mod->core_layout.size - MAX_LTOFF / 2;
++ gp = mod->core_layout.size_rx + mod->core_layout.size_rw - MAX_LTOFF / 2;
+ else
+- gp = mod->core_layout.size / 2;
+- gp = (uint64_t) mod->core_layout.base + ((gp + 7) & -8);
++ gp = (mod->core_layout.size_rx + mod->core_layout.size_rw) / 2;
++ gp = (uint64_t) mod->core_layout.base_rx + ((gp + 7) & -8);
+ mod->arch.gp = gp;
+ DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
+ }
+diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
+index c39c3cd..3c77738 100644
+--- a/arch/ia64/kernel/palinfo.c
++++ b/arch/ia64/kernel/palinfo.c
+@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __refdata palinfo_cpu_notifier =
++static struct notifier_block palinfo_cpu_notifier =
+ {
+ .notifier_call = palinfo_cpu_callback,
+ .priority = 0,
+diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
+index 41e33f8..65180b2a 100644
+--- a/arch/ia64/kernel/sys_ia64.c
++++ b/arch/ia64/kernel/sys_ia64.c
+@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ unsigned long align_mask = 0;
+ struct mm_struct *mm = current->mm;
+ struct vm_unmapped_area_info info;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+ if (len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ if (REGION_NUMBER(addr) == RGN_HPAGE)
+ addr = 0;
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ addr = mm->free_area_cache;
++ else
++#endif
++
+ if (!addr)
+ addr = TASK_UNMAPPED_BASE;
+
+@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ info.high_limit = TASK_SIZE;
+ info.align_mask = align_mask;
+ info.align_offset = 0;
++ info.threadstack_offset = offset;
+ return vm_unmapped_area(&info);
+ }
+
+diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
+index f89d20c..410a1b1 100644
+--- a/arch/ia64/kernel/vmlinux.lds.S
++++ b/arch/ia64/kernel/vmlinux.lds.S
+@@ -172,7 +172,7 @@ SECTIONS {
+ /* Per-cpu data: */
+ . = ALIGN(PERCPU_PAGE_SIZE);
+ PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
+- __phys_per_cpu_start = __per_cpu_load;
++ __phys_per_cpu_start = per_cpu_load;
+ /*
+ * ensure percpu data fits
+ * into percpu page size
+diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
+index fa6ad95..b46bd89 100644
+--- a/arch/ia64/mm/fault.c
++++ b/arch/ia64/mm/fault.c
+@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
+ return pte_present(pte);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ # define VM_READ_BIT 0
+ # define VM_WRITE_BIT 1
+ # define VM_EXEC_BIT 2
+@@ -151,8 +168,21 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
+ if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
+ goto bad_area;
+
+- if ((vma->vm_flags & mask) != mask)
++ if ((vma->vm_flags & mask) != mask) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
++ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
+index 85de86d..db7f6b8 100644
+--- a/arch/ia64/mm/hugetlbpage.c
++++ b/arch/ia64/mm/hugetlbpage.c
+@@ -138,6 +138,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+ unsigned long pgoff, unsigned long flags)
+ {
+ struct vm_unmapped_area_info info;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
+
+ if (len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+@@ -161,6 +162,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+ info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
+ info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
+ info.align_offset = 0;
++ info.threadstack_offset = offset;
+ return vm_unmapped_area(&info);
+ }
+
+diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
+index 1841ef6..74d8330 100644
+--- a/arch/ia64/mm/init.c
++++ b/arch/ia64/mm/init.c
+@@ -119,6 +119,19 @@ ia64_init_addr_space (void)
+ vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
+ vma->vm_end = vma->vm_start + PAGE_SIZE;
+ vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
++ vma->vm_flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->mm->pax_flags & MF_PAX_MPROTECT)
++ vma->vm_flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ down_write(&current->mm->mmap_sem);
+ if (insert_vm_struct(current->mm, vma)) {
+@@ -279,7 +292,7 @@ static int __init gate_vma_init(void)
+ gate_vma.vm_start = FIXADDR_USER_START;
+ gate_vma.vm_end = FIXADDR_USER_END;
+ gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+- gate_vma.vm_page_prot = __P101;
++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+
+ return 0;
+ }
+diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
+index 40b3ee98..8c2c112 100644
+--- a/arch/m32r/include/asm/cache.h
++++ b/arch/m32r/include/asm/cache.h
+@@ -1,8 +1,10 @@
+ #ifndef _ASM_M32R_CACHE_H
+ #define _ASM_M32R_CACHE_H
+
++#include <linux/const.h>
++
+ /* L1 cache line size */
+ #define L1_CACHE_SHIFT 4
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* _ASM_M32R_CACHE_H */
+diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
+index 82abd15..d95ae5d 100644
+--- a/arch/m32r/lib/usercopy.c
++++ b/arch/m32r/lib/usercopy.c
+@@ -14,6 +14,9 @@
+ unsigned long
+ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetch(from);
+ if (access_ok(VERIFY_WRITE, to, n))
+ __copy_user(to,from,n);
+@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ unsigned long
+ __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetchw(to);
+ if (access_ok(VERIFY_READ, from, n))
+ __copy_user_zeroing(to,from,n);
+diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
+index 0395c51..5f26031 100644
+--- a/arch/m68k/include/asm/cache.h
++++ b/arch/m68k/include/asm/cache.h
+@@ -4,9 +4,11 @@
+ #ifndef __ARCH_M68K_CACHE_H
+ #define __ARCH_M68K_CACHE_H
+
++#include <linux/const.h>
++
+ /* bytes per L1 cache line */
+ #define L1_CACHE_SHIFT 4
+-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
+index 4e5aa2f..172c469 100644
+--- a/arch/m68k/kernel/time.c
++++ b/arch/m68k/kernel/time.c
+@@ -107,6 +107,7 @@ static int rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+
+ switch (cmd) {
+ case RTC_PLL_GET:
++ memset(&pll, 0, sizeof(pll));
+ if (!mach_get_rtc_pll || mach_get_rtc_pll(&pll))
+ return -EINVAL;
+ return copy_to_user(argp, &pll, sizeof pll) ? -EFAULT : 0;
+diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
+index db1b7da..8e13684 100644
+--- a/arch/metag/mm/hugetlbpage.c
++++ b/arch/metag/mm/hugetlbpage.c
+@@ -189,6 +189,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
+ info.high_limit = TASK_SIZE;
+ info.align_mask = PAGE_MASK & HUGEPT_MASK;
+ info.align_offset = 0;
++ info.threadstack_offset = 0;
+ return vm_unmapped_area(&info);
+ }
+
+diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
+index 4efe96a..60e8699 100644
+--- a/arch/microblaze/include/asm/cache.h
++++ b/arch/microblaze/include/asm/cache.h
+@@ -13,11 +13,12 @@
+ #ifndef _ASM_MICROBLAZE_CACHE_H
+ #define _ASM_MICROBLAZE_CACHE_H
+
++#include <linux/const.h>
+ #include <asm/registers.h>
+
+ #define L1_CACHE_SHIFT 5
+ /* word-granular cache in microblaze */
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild
+index 5c3f688..f8cc1b3 100644
+--- a/arch/mips/Kbuild
++++ b/arch/mips/Kbuild
+@@ -1,7 +1,7 @@
+ # Fail on warnings - also for files referenced in subdirs
+ # -Werror can be disabled for specific files using:
+ # CFLAGS_<file.o> := -Wno-error
+-subdir-ccflags-y := -Werror
++# subdir-ccflags-y := -Werror
+
+ # platform specific definitions
+ include arch/mips/Kbuild.platforms
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index b3c5bde..d6b5104 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -49,6 +49,7 @@ config MIPS
+ select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_NMI
+ select VIRT_TO_BUS
++ select HAVE_GCC_PLUGINS
+ select MODULES_USE_ELF_REL if MODULES
+ select MODULES_USE_ELF_RELA if MODULES && 64BIT
+ select CLONE_BACKWARDS
+@@ -2595,7 +2596,7 @@ config RELOCATION_TABLE_SIZE
+
+ config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel image"
+- depends on RELOCATABLE
++ depends on RELOCATABLE && BROKEN_SECURITY
+ ---help---
+ Randomizes the physical and virtual address at which the
+ kernel image is loaded, as a security feature that
+@@ -2811,6 +2812,7 @@ source "kernel/Kconfig.preempt"
+ config KEXEC
+ bool "Kexec system call"
+ select KEXEC_CORE
++ depends on !GRKERNSEC_KMEM
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
+index 0ab176b..c4469a4 100644
+--- a/arch/mips/include/asm/atomic.h
++++ b/arch/mips/include/asm/atomic.h
+@@ -22,15 +22,39 @@
+ #include <asm/cmpxchg.h>
+ #include <asm/war.h>
+
++#ifdef CONFIG_GENERIC_ATOMIC64
++#include <asm-generic/atomic64.h>
++#endif
++
+ #define ATOMIC_INIT(i) { (i) }
+
++#ifdef CONFIG_64BIT
++#define _ASM_EXTABLE(from, to) \
++" .section __ex_table,\"a\"\n" \
++" .dword " #from ", " #to"\n" \
++" .previous\n"
++#else
++#define _ASM_EXTABLE(from, to) \
++" .section __ex_table,\"a\"\n" \
++" .word " #from ", " #to"\n" \
++" .previous\n"
++#endif
++
+ /*
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+-#define atomic_read(v) READ_ONCE((v)->counter)
++static inline int atomic_read(const atomic_t *v)
++{
++ return READ_ONCE(v->counter);
++}
++
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return READ_ONCE(v->counter);
++}
+
+ /*
+ * atomic_set - set atomic variable
+@@ -39,47 +63,77 @@
+ *
+ * Atomically sets the value of @v to @i.
+ */
+-#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
++static inline void atomic_set(atomic_t *v, int i)
++{
++ WRITE_ONCE(v->counter, i);
++}
+
+-#define ATOMIC_OP(op, c_op, asm_op) \
+-static __inline__ void atomic_##op(int i, atomic_t * v) \
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ WRITE_ONCE(v->counter, i);
++}
++
++#ifdef CONFIG_PAX_REFCOUNT
++#define __OVERFLOW_POST \
++ " b 4f \n" \
++ " .set noreorder \n" \
++ "3: b 5f \n" \
++ " move %0, %1 \n" \
++ " .set reorder \n"
++#define __OVERFLOW_EXTABLE \
++ "3:\n" \
++ _ASM_EXTABLE(2b, 3b)
++#else
++#define __OVERFLOW_POST
++#define __OVERFLOW_EXTABLE
++#endif
++
++#define __ATOMIC_OP(op, suffix, asm_op, extable) \
++static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
+ { \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+- " .set arch=r4000 \n" \
+- "1: ll %0, %1 # atomic_" #op " \n" \
+- " " #asm_op " %0, %2 \n" \
++ " .set mips3 \n" \
++ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
++ "2: " #asm_op " %0, %2 \n" \
+ " sc %0, %1 \n" \
+ " beqzl %0, 1b \n" \
++ extable \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ int temp; \
+ \
+- do { \
+- __asm__ __volatile__( \
+- " .set "MIPS_ISA_LEVEL" \n" \
+- " ll %0, %1 # atomic_" #op "\n" \
+- " " #asm_op " %0, %2 \n" \
+- " sc %0, %1 \n" \
+- " .set mips0 \n" \
+- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
+- } while (unlikely(!temp)); \
++ __asm__ __volatile__( \
++ " .set "MIPS_ISA_LEVEL" \n" \
++ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
++ "2: " #asm_op " %0, %2 \n" \
++ " sc %0, %1 \n" \
++ " beqz %0, 1b \n" \
++ extable \
++ " .set mips0 \n" \
++ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
++ : "Ir" (i)); \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+- v->counter c_op i; \
++ __asm__ __volatile__( \
++ "2: " #asm_op " %0, %1 \n" \
++ extable \
++ : "+r" (v->counter) : "Ir" (i)); \
+ raw_local_irq_restore(flags); \
+ } \
+ }
+
+-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+-static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
++#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, _unchecked, asm_op##u, ) \
++ __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE)
++
++#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
++static inline int atomic_##op##_return##suffix##_relaxed(int i, atomic##suffix##_t * v) \
+ { \
+ int result; \
+ \
+@@ -87,12 +141,15 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+- " .set arch=r4000 \n" \
+- "1: ll %1, %2 # atomic_" #op "_return \n" \
+- " " #asm_op " %0, %1, %3 \n" \
++ " .set mips3 \n" \
++ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
++ "2: " #asm_op " %0, %1, %3 \n" \
+ " sc %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+- " " #asm_op " %0, %1, %3 \n" \
++ post_op \
++ extable \
++ "4: " #asm_op " %0, %1, %3 \n" \
++ "5: \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+@@ -100,32 +157,40 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
+ } else if (kernel_uses_llsc) { \
+ int temp; \
+ \
+- do { \
+- __asm__ __volatile__( \
+- " .set "MIPS_ISA_LEVEL" \n" \
+- " ll %1, %2 # atomic_" #op "_return \n" \
+- " " #asm_op " %0, %1, %3 \n" \
+- " sc %0, %2 \n" \
+- " .set mips0 \n" \
+- : "=&r" (result), "=&r" (temp), \
+- "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
+- } while (unlikely(!result)); \
+- \
+- result = temp; result c_op i; \
++ __asm__ __volatile__( \
++ " .set "MIPS_ISA_LEVEL" \n" \
++ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
++ "2: " #asm_op " %0, %1, %3 \n" \
++ " sc %0, %2 \n" \
++ post_op \
++ extable \
++ "4: " #asm_op " %0, %1, %3 \n" \
++ "5: \n" \
++ " .set mips0 \n" \
++ : "=&r" (result), "=&r" (temp), \
++ "+" GCC_OFF_SMALL_ASM() (v->counter) \
++ : "Ir" (i)); \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+- result = v->counter; \
+- result c_op i; \
+- v->counter = result; \
++ __asm__ __volatile__( \
++ " lw %0, %1 \n" \
++ "2: " #asm_op " %0, %1, %2 \n" \
++ " sw %0, %1 \n" \
++ "3: \n" \
++ extable \
++ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
++ : "Ir" (i)); \
+ raw_local_irq_restore(flags); \
+ } \
+ \
+ return result; \
+ }
+
++#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , ) \
++ __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
+ #define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
+ { \
+@@ -173,13 +238,13 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
+ return result; \
+ }
+
+-#define ATOMIC_OPS(op, c_op, asm_op) \
+- ATOMIC_OP(op, c_op, asm_op) \
+- ATOMIC_OP_RETURN(op, c_op, asm_op) \
+- ATOMIC_FETCH_OP(op, c_op, asm_op)
++#define ATOMIC_OPS(op, asm_op) \
++ ATOMIC_OP(op, asm_op) \
++ ATOMIC_OP_RETURN(op, asm_op) \
++ ATOMIC_FETCH_OP(op, asm_op)
+
+-ATOMIC_OPS(add, +=, addu)
+-ATOMIC_OPS(sub, -=, subu)
++ATOMIC_OPS(add, addu)
++ATOMIC_OPS(sub, subu)
+
+ #define atomic_add_return_relaxed atomic_add_return_relaxed
+ #define atomic_sub_return_relaxed atomic_sub_return_relaxed
+@@ -187,13 +252,13 @@ ATOMIC_OPS(sub, -=, subu)
+ #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+
+ #undef ATOMIC_OPS
+-#define ATOMIC_OPS(op, c_op, asm_op) \
+- ATOMIC_OP(op, c_op, asm_op) \
+- ATOMIC_FETCH_OP(op, c_op, asm_op)
++#define ATOMIC_OPS(op, asm_op) \
++ ATOMIC_OP(op, asm_op) \
++ ATOMIC_FETCH_OP(op, asm_op)
+
+-ATOMIC_OPS(and, &=, and)
+-ATOMIC_OPS(or, |=, or)
+-ATOMIC_OPS(xor, ^=, xor)
++ATOMIC_OPS(and, and)
++ATOMIC_OPS(or, or)
++ATOMIC_OPS(xor, xor)
+
+ #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+ #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+@@ -202,7 +267,9 @@ ATOMIC_OPS(xor, ^=, xor)
+ #undef ATOMIC_OPS
+ #undef ATOMIC_FETCH_OP
+ #undef ATOMIC_OP_RETURN
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+
+ /*
+ * atomic_sub_if_positive - conditionally subtract integer from atomic variable
+@@ -212,7 +279,7 @@ ATOMIC_OPS(xor, ^=, xor)
+ * Atomically test @v and subtract @i if @v is greater or equal than @i.
+ * The function returns the old value of @v minus @i.
+ */
+-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
++static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
+ {
+ int result;
+
+@@ -222,7 +289,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+ int temp;
+
+ __asm__ __volatile__(
+- " .set arch=r4000 \n"
++ " .set "MIPS_ISA_LEVEL" \n"
+ "1: ll %1, %2 # atomic_sub_if_positive\n"
+ " subu %0, %1, %3 \n"
+ " bltz %0, 1f \n"
+@@ -271,8 +338,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+ return result;
+ }
+
+-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
++static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
++ int new)
++{
++ return cmpxchg(&(v->counter), old, new);
++}
++
++static inline int atomic_xchg(atomic_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
++
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&(v->counter), new);
++}
+
+ /**
+ * __atomic_add_unless - add unless the number is a given value
+@@ -300,6 +385,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+
+ #define atomic_dec_return(v) atomic_sub_return(1, (v))
+ #define atomic_inc_return(v) atomic_add_return(1, (v))
++static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+
+ /*
+ * atomic_sub_and_test - subtract value from variable and test result
+@@ -321,6 +410,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * other cases.
+ */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
++static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v) == 0;
++}
+
+ /*
+ * atomic_dec_and_test - decrement by 1 and test
+@@ -345,6 +438,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * Atomically increments @v by 1.
+ */
+ #define atomic_inc(v) atomic_add(1, (v))
++static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+
+ /*
+ * atomic_dec - decrement and test
+@@ -353,6 +450,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * Atomically decrements @v by 1.
+ */
+ #define atomic_dec(v) atomic_sub(1, (v))
++static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+
+ /*
+ * atomic_add_negative - add and test if negative
+@@ -374,54 +475,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * @v: pointer of type atomic64_t
+ *
+ */
+-#define atomic64_read(v) READ_ONCE((v)->counter)
++static inline long atomic64_read(const atomic64_t *v)
++{
++ return READ_ONCE(v->counter);
++}
++
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return READ_ONCE(v->counter);
++}
+
+ /*
+ * atomic64_set - set atomic variable
+ * @v: pointer of type atomic64_t
+ * @i: required value
+ */
+-#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
++static inline void atomic64_set(atomic64_t *v, long i)
++{
++ WRITE_ONCE(v->counter, i);
++}
+
+-#define ATOMIC64_OP(op, c_op, asm_op) \
+-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ WRITE_ONCE(v->counter, i);
++}
++
++#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
++static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
+ { \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ long temp; \
+ \
+ __asm__ __volatile__( \
+- " .set arch=r4000 \n" \
+- "1: lld %0, %1 # atomic64_" #op " \n" \
+- " " #asm_op " %0, %2 \n" \
++ " .set "MIPS_ISA_LEVEL" \n" \
++ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
++ "2: " #asm_op " %0, %2 \n" \
+ " scd %0, %1 \n" \
+ " beqzl %0, 1b \n" \
++ extable \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ long temp; \
+ \
+- do { \
+- __asm__ __volatile__( \
+- " .set "MIPS_ISA_LEVEL" \n" \
+- " lld %0, %1 # atomic64_" #op "\n" \
+- " " #asm_op " %0, %2 \n" \
+- " scd %0, %1 \n" \
+- " .set mips0 \n" \
+- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
+- } while (unlikely(!temp)); \
++ __asm__ __volatile__( \
++ " .set "MIPS_ISA_LEVEL" \n" \
++ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
++ "2: " #asm_op " %0, %2 \n" \
++ " scd %0, %1 \n" \
++ " beqz %0, 1b \n" \
++ extable \
++ " .set mips0 \n" \
++ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
++ : "Ir" (i)); \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+- v->counter c_op i; \
++ __asm__ __volatile__( \
++ "2: " #asm_op " %0, %1 \n" \
++ extable \
++ : "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); \
+ raw_local_irq_restore(flags); \
+ } \
+ }
+
+-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
+-static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
++#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, _unchecked, asm_op##u, ) \
++ __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE)
++
++#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
++static inline long atomic64_##op##_return##suffix##_relaxed(long i, atomic64##suffix##_t * v)\
+ { \
+ long result; \
+ \
+@@ -429,12 +553,15 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
+ long temp; \
+ \
+ __asm__ __volatile__( \
+- " .set arch=r4000 \n" \
++ " .set mips3 \n" \
+ "1: lld %1, %2 # atomic64_" #op "_return\n" \
+- " " #asm_op " %0, %1, %3 \n" \
++ "2: " #asm_op " %0, %1, %3 \n" \
+ " scd %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+- " " #asm_op " %0, %1, %3 \n" \
++ post_op \
++ extable \
++ "4: " #asm_op " %0, %1, %3 \n" \
++ "5: \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+@@ -442,33 +569,42 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
+ } else if (kernel_uses_llsc) { \
+ long temp; \
+ \
+- do { \
+- __asm__ __volatile__( \
+- " .set "MIPS_ISA_LEVEL" \n" \
+- " lld %1, %2 # atomic64_" #op "_return\n" \
+- " " #asm_op " %0, %1, %3 \n" \
+- " scd %0, %2 \n" \
+- " .set mips0 \n" \
+- : "=&r" (result), "=&r" (temp), \
+- "=" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
+- : "memory"); \
+- } while (unlikely(!result)); \
+- \
+- result = temp; result c_op i; \
++ __asm__ __volatile__( \
++ " .set "MIPS_ISA_LEVEL" \n" \
++ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
++ "2: " #asm_op " %0, %1, %3 \n" \
++ " scd %0, %2 \n" \
++ " beqz %0, 1b \n" \
++ post_op \
++ extable \
++ "4: " #asm_op " %0, %1, %3 \n" \
++ "5: \n" \
++ " .set mips0 \n" \
++ : "=&r" (result), "=&r" (temp), \
++ "=" GCC_OFF_SMALL_ASM() (v->counter) \
++ : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
++ : "memory"); \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+- result = v->counter; \
+- result c_op i; \
+- v->counter = result; \
++ __asm__ __volatile__( \
++ " ld %0, %1 \n" \
++ "2: " #asm_op " %0, %1, %2 \n" \
++ " sd %0, %1 \n" \
++ "3: \n" \
++ extable \
++ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
++ : "Ir" (i)); \
+ raw_local_irq_restore(flags); \
+ } \
+ \
+ return result; \
+ }
+
++#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , ) \
++ __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
+ #define ATOMIC64_FETCH_OP(op, c_op, asm_op) \
+ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
+ { \
+@@ -517,13 +653,13 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
+ return result; \
+ }
+
+-#define ATOMIC64_OPS(op, c_op, asm_op) \
+- ATOMIC64_OP(op, c_op, asm_op) \
+- ATOMIC64_OP_RETURN(op, c_op, asm_op) \
+- ATOMIC64_FETCH_OP(op, c_op, asm_op)
++#define ATOMIC64_OPS(op, asm_op) \
++ ATOMIC64_OP(op, asm_op) \
++ ATOMIC64_OP_RETURN(op, asm_op) \
++ ATOMIC64_FETCH_OP(op, asm_op)
+
+-ATOMIC64_OPS(add, +=, daddu)
+-ATOMIC64_OPS(sub, -=, dsubu)
++ATOMIC64_OPS(add, daddu)
++ATOMIC64_OPS(sub, dsubu)
+
+ #define atomic64_add_return_relaxed atomic64_add_return_relaxed
+ #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+@@ -531,13 +667,13 @@ ATOMIC64_OPS(sub, -=, dsubu)
+ #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+
+ #undef ATOMIC64_OPS
+-#define ATOMIC64_OPS(op, c_op, asm_op) \
+- ATOMIC64_OP(op, c_op, asm_op) \
+- ATOMIC64_FETCH_OP(op, c_op, asm_op)
++#define ATOMIC64_OPS(op, asm_op) \
++ ATOMIC64_OP(op, asm_op) \
++ ATOMIC64_FETCH_OP(op, asm_op)
+
+-ATOMIC64_OPS(and, &=, and)
+-ATOMIC64_OPS(or, |=, or)
+-ATOMIC64_OPS(xor, ^=, xor)
++ATOMIC64_OPS(and, and)
++ATOMIC64_OPS(or, or)
++ATOMIC64_OPS(xor, xor)
+
+ #define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+ #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+@@ -546,7 +682,11 @@ ATOMIC64_OPS(xor, ^=, xor)
+ #undef ATOMIC64_OPS
+ #undef ATOMIC64_FETCH_OP
+ #undef ATOMIC64_OP_RETURN
++#undef __ATOMIC64_OP_RETURN
+ #undef ATOMIC64_OP
++#undef __ATOMIC64_OP
++#undef __OVERFLOW_EXTABLE
++#undef __OVERFLOW_POST
+
+ /*
+ * atomic64_sub_if_positive - conditionally subtract integer from atomic
+@@ -557,7 +697,7 @@ ATOMIC64_OPS(xor, ^=, xor)
+ * Atomically test @v and subtract @i if @v is greater or equal than @i.
+ * The function returns the old value of @v minus @i.
+ */
+-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
++static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
+ {
+ long result;
+
+@@ -567,7 +707,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
+ long temp;
+
+ __asm__ __volatile__(
+- " .set arch=r4000 \n"
++ " .set "MIPS_ISA_LEVEL" \n"
+ "1: lld %1, %2 # atomic64_sub_if_positive\n"
+ " dsubu %0, %1, %3 \n"
+ " bltz %0, 1f \n"
+@@ -616,9 +756,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
+ return result;
+ }
+
+-#define atomic64_cmpxchg(v, o, n) \
+- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
++static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
++ long new)
++{
++ return cmpxchg(&(v->counter), old, new);
++}
++
++static inline long atomic64_xchg(atomic64_t *v, long new)
++{
++ return xchg(&v->counter, new);
++}
++
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++ return xchg(&(v->counter), new);
++}
+
+ /**
+ * atomic64_add_unless - add unless the number is a given value
+@@ -648,6 +805,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+
+ #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
+ #define atomic64_inc_return(v) atomic64_add_return(1, (v))
++#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
+
+ /*
+ * atomic64_sub_and_test - subtract value from variable and test result
+@@ -669,6 +827,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ * other cases.
+ */
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
++#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
+
+ /*
+ * atomic64_dec_and_test - decrement by 1 and test
+@@ -693,6 +852,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ * Atomically increments @v by 1.
+ */
+ #define atomic64_inc(v) atomic64_add(1, (v))
++#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
+
+ /*
+ * atomic64_dec - decrement and test
+@@ -701,6 +861,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ * Atomically decrements @v by 1.
+ */
+ #define atomic64_dec(v) atomic64_sub(1, (v))
++#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
+
+ /*
+ * atomic64_add_negative - add and test if negative
+diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
+index b4db69f..8f3b093 100644
+--- a/arch/mips/include/asm/cache.h
++++ b/arch/mips/include/asm/cache.h
+@@ -9,10 +9,11 @@
+ #ifndef _ASM_CACHE_H
+ #define _ASM_CACHE_H
+
++#include <linux/const.h>
+ #include <kmalloc.h>
+
+ #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
+index 2b3dc29..1f7bdc4 100644
+--- a/arch/mips/include/asm/elf.h
++++ b/arch/mips/include/asm/elf.h
+@@ -458,6 +458,13 @@ extern const char *__elf_platform;
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+ #endif
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
+ #define ARCH_DLINFO \
+ do { \
+diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
+index c1f6afa..38cc6e9 100644
+--- a/arch/mips/include/asm/exec.h
++++ b/arch/mips/include/asm/exec.h
+@@ -12,6 +12,6 @@
+ #ifndef _ASM_EXEC_H
+ #define _ASM_EXEC_H
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ #endif /* _ASM_EXEC_H */
+diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
+index 9e8ef59..1139d6b 100644
+--- a/arch/mips/include/asm/hw_irq.h
++++ b/arch/mips/include/asm/hw_irq.h
+@@ -10,7 +10,7 @@
+
+ #include <linux/atomic.h>
+
+-extern atomic_t irq_err_count;
++extern atomic_unchecked_t irq_err_count;
+
+ /*
+ * interrupt-retrigger: NOP for now. This may not be appropriate for all
+diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
+index 6bf10e7..3c0b52f 100644
+--- a/arch/mips/include/asm/irq.h
++++ b/arch/mips/include/asm/irq.h
+@@ -11,7 +11,6 @@
+
+ #include <linux/linkage.h>
+ #include <linux/smp.h>
+-#include <linux/irqdomain.h>
+
+ #include <asm/mipsmtregs.h>
+
+diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
+index 8feaed6..1bd8a64 100644
+--- a/arch/mips/include/asm/local.h
++++ b/arch/mips/include/asm/local.h
+@@ -13,15 +13,25 @@ typedef struct
+ atomic_long_t a;
+ } local_t;
+
++typedef struct {
++ atomic_long_unchecked_t a;
++} local_unchecked_t;
++
+ #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+
+ #define local_read(l) atomic_long_read(&(l)->a)
++#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
+ #define local_set(l, i) atomic_long_set(&(l)->a, (i))
++#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
+
+ #define local_add(i, l) atomic_long_add((i), (&(l)->a))
++#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
+ #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
++#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
+ #define local_inc(l) atomic_long_inc(&(l)->a)
++#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
+ #define local_dec(l) atomic_long_dec(&(l)->a)
++#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
+
+ /*
+ * Same as above, but return the result value
+@@ -71,6 +81,51 @@ static __inline__ long local_add_return(long i, local_t * l)
+ return result;
+ }
+
++static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
++{
++ unsigned long result;
++
++ if (kernel_uses_llsc && R10000_LLSC_WAR) {
++ unsigned long temp;
++
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1:" __LL "%1, %2 # local_add_return \n"
++ " addu %0, %1, %3 \n"
++ __SC "%0, %2 \n"
++ " beqzl %0, 1b \n"
++ " addu %0, %1, %3 \n"
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
++ : "Ir" (i), "m" (l->a.counter)
++ : "memory");
++ } else if (kernel_uses_llsc) {
++ unsigned long temp;
++
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1:" __LL "%1, %2 # local_add_return \n"
++ " addu %0, %1, %3 \n"
++ __SC "%0, %2 \n"
++ " beqz %0, 1b \n"
++ " addu %0, %1, %3 \n"
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
++ : "Ir" (i), "m" (l->a.counter)
++ : "memory");
++ } else {
++ unsigned long flags;
++
++ local_irq_save(flags);
++ result = l->a.counter;
++ result += i;
++ l->a.counter = result;
++ local_irq_restore(flags);
++ }
++
++ return result;
++}
++
+ static __inline__ long local_sub_return(long i, local_t * l)
+ {
+ unsigned long result;
+@@ -118,6 +173,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
+
+ #define local_cmpxchg(l, o, n) \
+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
++#define local_cmpxchg_unchecked(l, o, n) \
++ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
+ #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
+
+ /**
+diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
+index 5f98759..a3a7cb2 100644
+--- a/arch/mips/include/asm/page.h
++++ b/arch/mips/include/asm/page.h
+@@ -118,7 +118,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
+ #ifdef CONFIG_CPU_MIPS32
+ typedef struct { unsigned long pte_low, pte_high; } pte_t;
+ #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
++ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
+ #else
+ typedef struct { unsigned long long pte; } pte_t;
+ #define pte_val(x) ((x).pte)
+diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
+index a03e869..e1928f5 100644
+--- a/arch/mips/include/asm/pgalloc.h
++++ b/arch/mips/include/asm/pgalloc.h
+@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ {
+ set_pud(pud, __pud((unsigned long)pmd));
+ }
++
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
+ #endif
+
+ /*
+diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
+index 9e9e94415..43354f5 100644
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -20,6 +20,9 @@
+ #include <asm/io.h>
+ #include <asm/pgtable-bits.h>
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ struct mm_struct;
+ struct vm_area_struct;
+
+diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
+index e309d8f..20eefec 100644
+--- a/arch/mips/include/asm/thread_info.h
++++ b/arch/mips/include/asm/thread_info.h
+@@ -101,6 +101,9 @@ static inline struct thread_info *current_thread_info(void)
+ #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
+ #define TIF_UPROBE 6 /* breakpointed or singlestepping */
+ #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
++/* li takes a 32bit immediate */
++#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
++
+ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
+ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
+ #define TIF_NOHZ 19 /* in adaptive nohz mode */
+@@ -137,14 +140,16 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_USEDMSA (1<<TIF_USEDMSA)
+ #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
+ #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
+
+ #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
+ _TIF_SYSCALL_AUDIT | \
+- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
++ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
++ _TIF_GRSEC_SETXID)
+
+ /* work to do in syscall_trace_leave() */
+ #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
+- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
++ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
+
+ /* work to do on interrupt/exception return */
+ #define _TIF_WORK_MASK \
+@@ -153,7 +158,7 @@ static inline struct thread_info *current_thread_info(void)
+ /* work to do on any return to u-space */
+ #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
+ _TIF_WORK_SYSCALL_EXIT | \
+- _TIF_SYSCALL_TRACEPOINT)
++ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
+
+ /*
+ * We stash processor id into a COP0 register to retrieve it fast
+diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
+index 89fa5c0b..9409cea 100644
+--- a/arch/mips/include/asm/uaccess.h
++++ b/arch/mips/include/asm/uaccess.h
+@@ -148,6 +148,7 @@ static inline bool eva_kernel_access(void)
+ __ok == 0; \
+ })
+
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) \
+ likely(__access_ok((addr), (size), __access_mask))
+
+diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
+index 9c7f3e1..ed42bda 100644
+--- a/arch/mips/kernel/binfmt_elfn32.c
++++ b/arch/mips/kernel/binfmt_elfn32.c
+@@ -37,6 +37,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/elfcore.h>
+ #include <linux/compat.h>
+diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
+index 1ab3432..0e66879 100644
+--- a/arch/mips/kernel/binfmt_elfo32.c
++++ b/arch/mips/kernel/binfmt_elfo32.c
+@@ -41,6 +41,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+
+ #include <linux/elfcore.h>
+diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
+index 44a1f79..2bd6aa3 100644
+--- a/arch/mips/kernel/irq-gt641xx.c
++++ b/arch/mips/kernel/irq-gt641xx.c
+@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
+ }
+ }
+
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ }
+
+ void __init gt641xx_irq_init(void)
+diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
+index f25f7ea..19e1c62 100644
+--- a/arch/mips/kernel/irq.c
++++ b/arch/mips/kernel/irq.c
+@@ -34,17 +34,17 @@ void ack_bad_irq(unsigned int irq)
+ printk("unexpected IRQ # %d\n", irq);
+ }
+
+-atomic_t irq_err_count;
++atomic_unchecked_t irq_err_count;
+
+ int arch_show_interrupts(struct seq_file *p, int prec)
+ {
+- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
++ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
+ return 0;
+ }
+
+ asmlinkage void spurious_interrupt(void)
+ {
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ }
+
+ void __init init_IRQ(void)
+@@ -61,6 +61,8 @@ void __init init_IRQ(void)
+ }
+
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
++
++extern void gr_handle_kernel_exploit(void);
+ static inline void check_stack_overflow(void)
+ {
+ unsigned long sp;
+@@ -76,6 +78,7 @@ static inline void check_stack_overflow(void)
+ printk("do_IRQ: stack overflow: %ld\n",
+ sp - sizeof(struct thread_info));
+ dump_stack();
++ gr_handle_kernel_exploit();
+ }
+ }
+ #else
+diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
+index f5c8bce..b06a560 100644
+--- a/arch/mips/kernel/kprobes.c
++++ b/arch/mips/kernel/kprobes.c
+@@ -535,6 +535,7 @@ static void __used kretprobe_trampoline_holder(void)
+
+ void kretprobe_trampoline(void);
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -543,6 +544,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr */
+ regs->regs[31] = (unsigned long)kretprobe_trampoline;
+ }
++#endif
+
+ /*
+ * Called when the probe at kretprobe trampoline is hit
+@@ -611,6 +613,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ return 1;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
+@@ -618,6 +621,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+
+ return 0;
+ }
++#endif
+
+ static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *)kretprobe_trampoline,
+diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
+index 7cf653e..7df52f6 100644
+--- a/arch/mips/kernel/pm-cps.c
++++ b/arch/mips/kernel/pm-cps.c
+@@ -168,7 +168,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
+ nc_core_ready_count = nc_addr;
+
+ /* Ensure ready_count is zero-initialised before the assembly runs */
+- ACCESS_ONCE(*nc_core_ready_count) = 0;
++ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
+ coupled_barrier(&per_cpu(pm_barrier, core), online);
+
+ /* Run the generated entry code */
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index 9514e5f..a3fc550 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -545,18 +545,6 @@ unsigned long get_wchan(struct task_struct *task)
+ return pc;
+ }
+
+-/*
+- * Don't forget that the stack pointer must be aligned on a 8 bytes
+- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
+- */
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+-
+- return sp & ALMASK;
+-}
+-
+ static void arch_dump_stack(void *info)
+ {
+ struct pt_regs *regs;
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index a92994d..e389b11 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -882,6 +882,10 @@ long arch_ptrace(struct task_struct *child, long request,
+ return ret;
+ }
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ /*
+ * Notification of system call entry/exit
+ * - triggered by current->work.syscall_trace
+@@ -899,6 +903,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
+ if (secure_computing(NULL) == -1)
+ return -1;
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->regs[2]);
+
+diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
+index 4472a7f..c5905e6 100644
+--- a/arch/mips/kernel/sync-r4k.c
++++ b/arch/mips/kernel/sync-r4k.c
+@@ -18,8 +18,8 @@
+ #include <asm/mipsregs.h>
+
+ static unsigned int initcount = 0;
+-static atomic_t count_count_start = ATOMIC_INIT(0);
+-static atomic_t count_count_stop = ATOMIC_INIT(0);
++static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
++static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
+
+ #define COUNTON 100
+ #define NR_LOOPS 3
+@@ -46,13 +46,13 @@ void synchronise_count_master(int cpu)
+
+ for (i = 0; i < NR_LOOPS; i++) {
+ /* slaves loop on '!= 2' */
+- while (atomic_read(&count_count_start) != 1)
++ while (atomic_read_unchecked(&count_count_start) != 1)
+ mb();
+- atomic_set(&count_count_stop, 0);
++ atomic_set_unchecked(&count_count_stop, 0);
+ smp_wmb();
+
+ /* Let the slave writes its count register */
+- atomic_inc(&count_count_start);
++ atomic_inc_unchecked(&count_count_start);
+
+ /* Count will be initialised to current timer */
+ if (i == 1)
+@@ -67,11 +67,11 @@ void synchronise_count_master(int cpu)
+ /*
+ * Wait for slave to leave the synchronization point:
+ */
+- while (atomic_read(&count_count_stop) != 1)
++ while (atomic_read_unchecked(&count_count_stop) != 1)
+ mb();
+- atomic_set(&count_count_start, 0);
++ atomic_set_unchecked(&count_count_start, 0);
+ smp_wmb();
+- atomic_inc(&count_count_stop);
++ atomic_inc_unchecked(&count_count_stop);
+ }
+ /* Arrange for an interrupt in a short while */
+ write_c0_compare(read_c0_count() + COUNTON);
+@@ -96,8 +96,8 @@ void synchronise_count_slave(int cpu)
+ */
+
+ for (i = 0; i < NR_LOOPS; i++) {
+- atomic_inc(&count_count_start);
+- while (atomic_read(&count_count_start) != 2)
++ atomic_inc_unchecked(&count_count_start);
++ while (atomic_read_unchecked(&count_count_start) != 2)
+ mb();
+
+ /*
+@@ -106,8 +106,8 @@ void synchronise_count_slave(int cpu)
+ if (i == NR_LOOPS-1)
+ write_c0_count(initcount);
+
+- atomic_inc(&count_count_stop);
+- while (atomic_read(&count_count_stop) != 2)
++ atomic_inc_unchecked(&count_count_stop);
++ while (atomic_read_unchecked(&count_count_stop) != 2)
+ mb();
+ }
+ /* Arrange for an interrupt in a short while */
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index 3905003..7c0cc88 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -702,7 +702,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
+ };
+
+ prev_state = exception_enter();
+- die_if_kernel("Integer overflow", regs);
++ if (unlikely(!user_mode(regs))) {
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (fixup_exception(regs)) {
++ pax_report_refcount_error(regs, NULL);
++ exception_exit(prev_state);
++ return;
++ }
++#endif
++
++ die("Integer overflow", regs);
++ }
+
+ force_sig_info(SIGFPE, &info, current);
+ exception_exit(prev_state);
+diff --git a/arch/mips/lib/ashldi3.c b/arch/mips/lib/ashldi3.c
+index c3e2205..b4302f8 100644
+--- a/arch/mips/lib/ashldi3.c
++++ b/arch/mips/lib/ashldi3.c
+@@ -2,7 +2,11 @@
+
+ #include "libgcc.h"
+
+-long long notrace __ashldi3(long long u, word_type b)
++#ifdef CONFIG_64BIT
++DWtype notrace __ashlti3(DWtype u, word_type b)
++#else
++DWtype notrace __ashldi3(DWtype u, word_type b)
++#endif
+ {
+ DWunion uu, w;
+ word_type bm;
+@@ -11,19 +15,22 @@ long long notrace __ashldi3(long long u, word_type b)
+ return u;
+
+ uu.ll = u;
+- bm = 32 - b;
++ bm = BITS_PER_LONG - b;
+
+ if (bm <= 0) {
+ w.s.low = 0;
+- w.s.high = (unsigned int) uu.s.low << -bm;
++ w.s.high = (unsigned long) uu.s.low << -bm;
+ } else {
+- const unsigned int carries = (unsigned int) uu.s.low >> bm;
++ const unsigned long carries = (unsigned long) uu.s.low >> bm;
+
+- w.s.low = (unsigned int) uu.s.low << b;
+- w.s.high = ((unsigned int) uu.s.high << b) | carries;
++ w.s.low = (unsigned long) uu.s.low << b;
++ w.s.high = ((unsigned long) uu.s.high << b) | carries;
+ }
+
+ return w.ll;
+ }
+-
++#ifdef CONFIG_64BIT
++EXPORT_SYMBOL(__ashlti3);
++#else
+ EXPORT_SYMBOL(__ashldi3);
++#endif
+diff --git a/arch/mips/lib/ashrdi3.c b/arch/mips/lib/ashrdi3.c
+index 1745602..d20aabf 100644
+--- a/arch/mips/lib/ashrdi3.c
++++ b/arch/mips/lib/ashrdi3.c
+@@ -2,7 +2,11 @@
+
+ #include "libgcc.h"
+
+-long long notrace __ashrdi3(long long u, word_type b)
++#ifdef CONFIG_64BIT
++DWtype notrace __ashrti3(DWtype u, word_type b)
++#else
++DWtype notrace __ashrdi3(DWtype u, word_type b)
++#endif
+ {
+ DWunion uu, w;
+ word_type bm;
+@@ -11,21 +15,24 @@ long long notrace __ashrdi3(long long u, word_type b)
+ return u;
+
+ uu.ll = u;
+- bm = 32 - b;
++ bm = BITS_PER_LONG - b;
+
+ if (bm <= 0) {
+ /* w.s.high = 1..1 or 0..0 */
+ w.s.high =
+- uu.s.high >> 31;
++ uu.s.high >> (BITS_PER_LONG - 1);
+ w.s.low = uu.s.high >> -bm;
+ } else {
+- const unsigned int carries = (unsigned int) uu.s.high << bm;
++ const unsigned long carries = (unsigned long) uu.s.high << bm;
+
+ w.s.high = uu.s.high >> b;
+- w.s.low = ((unsigned int) uu.s.low >> b) | carries;
++ w.s.low = ((unsigned long) uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+ }
+-
++#ifdef CONFIG_64BIT
++EXPORT_SYMBOL(__ashrti3);
++#else
+ EXPORT_SYMBOL(__ashrdi3);
++#endif
+diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h
+index 05909d58..b03284b 100644
+--- a/arch/mips/lib/libgcc.h
++++ b/arch/mips/lib/libgcc.h
+@@ -5,13 +5,19 @@
+
+ typedef int word_type __attribute__ ((mode (__word__)));
+
++#ifdef CONFIG_64BIT
++typedef int DWtype __attribute__((mode(TI)));
++#else
++typedef long long DWtype;
++#endif
++
+ #ifdef __BIG_ENDIAN
+ struct DWstruct {
+- int high, low;
++ long high, low;
+ };
+ #elif defined(__LITTLE_ENDIAN)
+ struct DWstruct {
+- int low, high;
++ long low, high;
+ };
+ #else
+ #error I feel sick.
+@@ -19,7 +25,7 @@ struct DWstruct {
+
+ typedef union {
+ struct DWstruct s;
+- long long ll;
++ DWtype ll;
+ } DWunion;
+
+ #endif /* __ASM_LIBGCC_H */
+diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
+index 3bef306..fcec133 100644
+--- a/arch/mips/mm/fault.c
++++ b/arch/mips/mm/fault.c
+@@ -30,6 +30,23 @@
+
+ int show_unhandled_signals = 1;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+@@ -204,6 +221,14 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
+ bad_area_nosemaphore:
+ /* User mode accesses just cause a SIGSEGV */
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
++ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ tsk->thread.cp0_badvaddr = address;
+ tsk->thread.error_code = write;
+ if (show_unhandled_signals &&
+diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
+index e86ebcf..7a78a07 100644
+--- a/arch/mips/mm/init.c
++++ b/arch/mips/mm/init.c
+@@ -474,10 +474,10 @@ void __init mem_init(void)
+
+ #ifdef CONFIG_64BIT
+ if ((unsigned long) &_text > (unsigned long) CKSEG0)
+- /* The -4 is a hack so that user tools don't have to handle
++ /* The -0x2000-4 is a hack so that user tools don't have to handle
+ the overflow. */
+ kclist_add(&kcore_kseg0, (void *) CKSEG0,
+- 0x80000000 - 4, KCORE_TEXT);
++ 0x80000000 - 0x2000 - 4, KCORE_TEXT);
+ #endif
+ }
+ #endif /* !CONFIG_NEED_MULTIPLE_NODES */
+diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
+index d08ea3f..66bb13d 100644
+--- a/arch/mips/mm/mmap.c
++++ b/arch/mips/mm/mmap.c
+@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ struct vm_area_struct *vma;
+ unsigned long addr = addr0;
+ int do_color_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ if (unlikely(len > TASK_SIZE))
+@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ do_color_align = 1;
+
+ /* requesting a specific address */
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+ info.length = len;
+ info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
++ info.threadstack_offset = offset;
+
+ if (dir == DOWN) {
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+@@ -160,14 +166,30 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ unsigned long random_factor = 0UL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ } else {
+ mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ }
+ }
+diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
+index cfceaea..65deeb4 100644
+--- a/arch/mips/sgi-ip27/ip27-nmi.c
++++ b/arch/mips/sgi-ip27/ip27-nmi.c
+@@ -187,9 +187,9 @@ void
+ cont_nmi_dump(void)
+ {
+ #ifndef REAL_NMI_SIGNAL
+- static atomic_t nmied_cpus = ATOMIC_INIT(0);
++ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
+
+- atomic_inc(&nmied_cpus);
++ atomic_inc_unchecked(&nmied_cpus);
+ #endif
+ /*
+ * Only allow 1 cpu to proceed
+@@ -233,7 +233,7 @@ cont_nmi_dump(void)
+ udelay(10000);
+ }
+ #else
+- while (atomic_read(&nmied_cpus) != num_online_cpus());
++ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
+ #endif
+
+ /*
+diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
+index 160b880..3b53fdc 100644
+--- a/arch/mips/sni/rm200.c
++++ b/arch/mips/sni/rm200.c
+@@ -270,7 +270,7 @@ void sni_rm200_mask_and_ack_8259A(struct irq_data *d)
+ "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
+ spurious_irq_mask |= irqmask;
+ }
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ /*
+ * Theoretically we do not have to handle this IRQ,
+ * but in Linux this does not cause problems and is
+diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
+index 41e873b..34d33a7 100644
+--- a/arch/mips/vr41xx/common/icu.c
++++ b/arch/mips/vr41xx/common/icu.c
+@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
+
+ printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
+
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+
+ return -1;
+ }
+diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
+index ae0e4ee..e8f0692 100644
+--- a/arch/mips/vr41xx/common/irq.c
++++ b/arch/mips/vr41xx/common/irq.c
+@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
+ irq_cascade_t *cascade;
+
+ if (irq >= NR_IRQS) {
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ return;
+ }
+
+@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
+ ret = cascade->get_irq(irq);
+ irq = ret;
+ if (ret < 0)
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ else
+ irq_dispatch(irq);
+ if (!irqd_irq_disabled(idata) && chip->irq_unmask)
+diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
+index 967d144..db12197 100644
+--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
++++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
+@@ -11,12 +11,14 @@
+ #ifndef _ASM_PROC_CACHE_H
+ #define _ASM_PROC_CACHE_H
+
++#include <linux/const.h>
++
+ /* L1 cache */
+
+ #define L1_CACHE_NWAYS 4 /* number of ways in caches */
+ #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
+-#define L1_CACHE_BYTES 16 /* bytes per entry */
+ #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
+ #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
+
+ #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
+diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
+index bcb5df2..84fabd2 100644
+--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
++++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
+@@ -16,13 +16,15 @@
+ #ifndef _ASM_PROC_CACHE_H
+ #define _ASM_PROC_CACHE_H
+
++#include <linux/const.h>
++
+ /*
+ * L1 cache
+ */
+ #define L1_CACHE_NWAYS 4 /* number of ways in caches */
+ #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
+-#define L1_CACHE_BYTES 32 /* bytes per entry */
+ #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
+ #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
+
+ #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
+diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
+index 5f55da9..7ce9437 100644
+--- a/arch/openrisc/include/asm/cache.h
++++ b/arch/openrisc/include/asm/cache.h
+@@ -19,13 +19,15 @@
+ #ifndef __ASM_OPENRISC_CACHE_H
+ #define __ASM_OPENRISC_CACHE_H
+
++#include <linux/const.h>
++
+ /* FIXME: How can we replace these with values from the CPU...
+ * they shouldn't be hard-coded!
+ */
+
+ #define __ro_after_init __read_mostly
+
+-#define L1_CACHE_BYTES 16
+ #define L1_CACHE_SHIFT 4
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* __ASM_OPENRISC_CACHE_H */
+diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
+index 5394b9c..e77a306 100644
+--- a/arch/parisc/include/asm/atomic.h
++++ b/arch/parisc/include/asm/atomic.h
+@@ -327,6 +327,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
+ return dec;
+ }
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* !CONFIG_64BIT */
+
+
+diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
+index df0f52b..810699b 100644
+--- a/arch/parisc/include/asm/cache.h
++++ b/arch/parisc/include/asm/cache.h
+@@ -5,6 +5,7 @@
+ #ifndef __ARCH_PARISC_CACHE_H
+ #define __ARCH_PARISC_CACHE_H
+
++#include <linux/const.h>
+
+ /*
+ * PA 2.0 processors have 64 and 128-byte L2 cachelines; PA 1.1 processors
+@@ -14,6 +15,8 @@
+ #define L1_CACHE_BYTES 16
+ #define L1_CACHE_SHIFT 4
+
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
++
+ #ifndef __ASSEMBLY__
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
+index 78c9fd3..42fa66a 100644
+--- a/arch/parisc/include/asm/elf.h
++++ b/arch/parisc/include/asm/elf.h
+@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+ but it's not easy, and we've already done it here. */
+diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
+index f08dda3..ea6aa1b 100644
+--- a/arch/parisc/include/asm/pgalloc.h
++++ b/arch/parisc/include/asm/pgalloc.h
+@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+ (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
+ }
+
++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
++{
++ pgd_populate(mm, pgd, pmd);
++}
++
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+ {
+ pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
+@@ -96,6 +101,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+ #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
+ #define pmd_free(mm, x) do { } while (0)
+ #define pgd_populate(mm, pmd, pte) BUG()
++#define pgd_populate_kernel(mm, pmd, pte) BUG()
+
+ #endif
+
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index 3a4ed9f..29b7218 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -236,6 +236,17 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
+ #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+ #define PAGE_COPY PAGE_EXECREAD
+ #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+ #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
+ #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index 9a2aee1..a8e588f 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -223,10 +223,10 @@ static inline void copy_user_overflow(int size, unsigned long count)
+ static __always_inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+- int sz = __compiletime_object_size(to);
++ size_t sz = __compiletime_object_size(to);
+ unsigned long ret = n;
+
+- if (likely(sz < 0 || sz >= n)) {
++ if (likely(sz == (size_t)-1 || sz >= n)) {
+ check_object_size(to, n, false);
+ ret = __copy_from_user(to, from, n);
+ } else if (!__builtin_constant_p(n))
+@@ -234,7 +234,7 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
+ else
+ __bad_copy_user();
+
+- if (unlikely(ret))
++ if (unlikely(ret && (long)ret > 0))
+ memset(to + (n - ret), 0, ret);
+
+ return ret;
+@@ -243,9 +243,9 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
+ static __always_inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+- int sz = __compiletime_object_size(from);
++	size_t sz = __compiletime_object_size(from);
+
+- if (likely(sz < 0 || sz >= n)) {
++ if (likely(sz == (size_t)-1 || sz >= n)) {
+ check_object_size(from, n, true);
+ n = __copy_to_user(to, from, n);
+ } else if (!__builtin_constant_p(n))
+diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
+index a0ecdb4a..71d2069 100644
+--- a/arch/parisc/kernel/module.c
++++ b/arch/parisc/kernel/module.c
+@@ -100,14 +100,12 @@
+ * or init pieces the location is */
+ static inline int in_init(struct module *me, void *loc)
+ {
+- return (loc >= me->init_layout.base &&
+- loc <= (me->init_layout.base + me->init_layout.size));
++	return within_module_init((unsigned long)loc, me);
+ }
+
+ static inline int in_core(struct module *me, void *loc)
+ {
+- return (loc >= me->core_layout.base &&
+- loc <= (me->core_layout.base + me->core_layout.size));
++	return within_module_core((unsigned long)loc, me);
+ }
+
+ static inline int in_local(struct module *me, void *loc)
+@@ -367,13 +365,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
+ }
+
+ /* align things a bit */
+- me->core_layout.size = ALIGN(me->core_layout.size, 16);
+- me->arch.got_offset = me->core_layout.size;
+- me->core_layout.size += gots * sizeof(struct got_entry);
++ me->core_layout.size_rw = ALIGN(me->core_layout.size_rw, 16);
++ me->arch.got_offset = me->core_layout.size_rw;
++ me->core_layout.size_rw += gots * sizeof(struct got_entry);
+
+- me->core_layout.size = ALIGN(me->core_layout.size, 16);
+- me->arch.fdesc_offset = me->core_layout.size;
+- me->core_layout.size += fdescs * sizeof(Elf_Fdesc);
++ me->core_layout.size_rw = ALIGN(me->core_layout.size_rw, 16);
++ me->arch.fdesc_offset = me->core_layout.size_rw;
++ me->core_layout.size_rw += fdescs * sizeof(Elf_Fdesc);
+
+ me->arch.got_max = gots;
+ me->arch.fdesc_max = fdescs;
+@@ -391,7 +389,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+
+ BUG_ON(value == 0);
+
+- got = me->core_layout.base + me->arch.got_offset;
++ got = me->core_layout.base_rw + me->arch.got_offset;
+ for (i = 0; got[i].addr; i++)
+ if (got[i].addr == value)
+ goto out;
+@@ -409,7 +407,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+ #ifdef CONFIG_64BIT
+ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+ {
+- Elf_Fdesc *fdesc = me->core_layout.base + me->arch.fdesc_offset;
++ Elf_Fdesc *fdesc = me->core_layout.base_rw + me->arch.fdesc_offset;
+
+ if (!value) {
+ printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
+@@ -427,7 +425,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+
+ /* Create new one */
+ fdesc->addr = value;
+- fdesc->gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
++ fdesc->gp = (Elf_Addr)me->core_layout.base_rw + me->arch.got_offset;
+ return (Elf_Addr)fdesc;
+ }
+ #endif /* CONFIG_64BIT */
+@@ -847,7 +845,7 @@ register_unwind_table(struct module *me,
+
+ table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
+ end = table + sechdrs[me->arch.unwind_section].sh_size;
+- gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
++ gp = (Elf_Addr)me->core_layout.base_rw + me->arch.got_offset;
+
+ DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
+ me->arch.unwind_section, table, end, gp);
+diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
+index 0a393a0..5b3199e0 100644
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -92,6 +92,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long task_size = TASK_SIZE;
+ int do_color_align, last_mmap;
+ struct vm_unmapped_area_info info;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ if (len > task_size)
+ return -ENOMEM;
+@@ -109,6 +110,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ goto found_addr;
+ }
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align && last_mmap)
+ addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+@@ -127,6 +132,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ info.high_limit = mmap_upper_limit();
+ info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
+ info.align_offset = shared_align_offset(last_mmap, pgoff);
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+
+ found_addr:
+@@ -146,6 +152,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ unsigned long addr = addr0;
+ int do_color_align, last_mmap;
+ struct vm_unmapped_area_info info;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ #ifdef CONFIG_64BIT
+ /* This should only ever run for 32-bit processes. */
+@@ -170,6 +177,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ }
+
+ /* requesting a specific address */
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align && last_mmap)
+ addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+@@ -187,6 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ info.high_limit = mm->mmap_base;
+ info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
+ info.align_offset = shared_align_offset(last_mmap, pgoff);
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+ if (!(addr & ~PAGE_MASK))
+ goto found_addr;
+@@ -252,6 +264,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ mm->mmap_legacy_base = mmap_legacy_base();
+ mm->mmap_base = mmap_upper_limit();
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP) {
++ mm->mmap_legacy_base += mm->delta_mmap;
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++ }
++#endif
++
+ if (mmap_is_legacy()) {
+ mm->mmap_base = mm->mmap_legacy_base;
+ mm->get_unmapped_area = arch_get_unmapped_area;
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index 378df92..9b2ab51 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -719,9 +719,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm,regs->iaoq[0]);
+- if (vma && (regs->iaoq[0] >= vma->vm_start)
+- && (vma->vm_flags & VM_EXEC)) {
+-
++ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
+ fault_address = regs->iaoq[0];
+ fault_space = regs->iasq[0];
+
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index 1a0b4f6..f9d326d 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -16,6 +16,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/extable.h>
+ #include <linux/uaccess.h>
++#include <linux/unistd.h>
+
+ #include <asm/traps.h>
+
+@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
+ static unsigned long
+ parisc_acctyp(unsigned long code, unsigned int inst)
+ {
+- if (code == 6 || code == 16)
++ if (code == 6 || code == 7 || code == 16)
+ return VM_EXEC;
+
+ switch (inst & 0xf0000000) {
+@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when rt_sigreturn trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int bl, depwi;
++
++ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
++
++ if (err)
++ break;
++
++ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
++ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
++
++ err = get_user(ldw, (unsigned int *)addr);
++ err |= get_user(bv, (unsigned int *)(addr+4));
++ err |= get_user(ldw2, (unsigned int *)(addr+8));
++
++ if (err)
++ break;
++
++ if (ldw == 0x0E801096U &&
++ bv == 0xEAC0C000U &&
++ ldw2 == 0x0E881095U)
++ {
++ unsigned int resolver, map;
++
++ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
++ if (err)
++ break;
++
++ regs->gr[20] = instruction_pointer(regs)+8;
++ regs->gr[21] = map;
++ regs->gr[22] = resolver;
++ regs->iaoq[0] = resolver | 3UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++
++#ifndef CONFIG_PAX_EMUSIGRT
++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++ return 1;
++#endif
++
++ do { /* PaX: rt_sigreturn emulation */
++ unsigned int ldi1, ldi2, bel, nop;
++
++ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
++ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
++
++ if (err)
++ break;
++
++ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
++ ldi2 == 0x3414015AU &&
++ bel == 0xE4008200U &&
++ nop == 0x08000240U)
++ {
++ regs->gr[25] = (ldi1 & 2) >> 1;
++ regs->gr[20] = __NR_rt_sigreturn;
++ regs->gr[31] = regs->iaoq[1] + 16;
++ regs->sr[0] = regs->iasq[1];
++ regs->iaoq[0] = 0x100UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ regs->iasq[0] = regs->sr[2];
++ regs->iasq[1] = regs->sr[2];
++ return 2;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ int fixup_exception(struct pt_regs *regs)
+ {
+ const struct exception_table_entry *fix;
+@@ -281,8 +392,33 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
+
+ good_area:
+
+- if ((vma->vm_flags & acc_type) != acc_type)
++ if ((vma->vm_flags & acc_type) != acc_type) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
++ (address & ~3UL) == instruction_pointer(regs))
++ {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
++ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 65fba4c..3cfec12 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -140,6 +140,7 @@ config PPC
+ select ARCH_USE_BUILTIN_BSWAP
+ select OLD_SIGSUSPEND
+ select OLD_SIGACTION if PPC32
++ select HAVE_GCC_PLUGINS
+ select HAVE_DEBUG_STACKOVERFLOW
+ select HAVE_IRQ_EXIT_ON_IRQ_STACK
+ select ARCH_USE_CMPXCHG_LOCKREF if PPC64
+@@ -441,6 +442,7 @@ config KEXEC
+ bool "kexec system call"
+ depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) || PPC_BOOK3E
+ select KEXEC_CORE
++ depends on !GRKERNSEC_KMEM
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
+index 2b90335..5e1a3d6 100644
+--- a/arch/powerpc/include/asm/atomic.h
++++ b/arch/powerpc/include/asm/atomic.h
+@@ -12,6 +12,11 @@
+
+ #define ATOMIC_INIT(i) { (i) }
+
++#define _ASM_EXTABLE(from, to) \
++" .section __ex_table,\"a\"\n" \
++ PPC_LONG" " #from ", " #to"\n" \
++" .previous\n"
++
+ /*
+ * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
+ * a "bne-" instruction at the end, so an isync is enough as a acquire barrier
+@@ -39,38 +44,79 @@ static __inline__ int atomic_read(const atomic_t *v)
+ return t;
+ }
+
++static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ int t;
++
++ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
++
++ return t;
++}
++
+ static __inline__ void atomic_set(atomic_t *v, int i)
+ {
+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+ }
+
+-#define ATOMIC_OP(op, asm_op) \
+-static __inline__ void atomic_##op(int a, atomic_t *v) \
++static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
++}
++
++#ifdef CONFIG_PAX_REFCOUNT
++#define __REFCOUNT_OP(op) op##o.
++#define __OVERFLOW_PRE \
++ " mcrxr cr0\n"
++#define __OVERFLOW_POST \
++ " bf 4*cr0+so, 3f\n" \
++ "2: .long 0x00c00b00\n" \
++ "3:\n"
++#define __OVERFLOW_EXTABLE \
++ "\n4:\n" \
++ _ASM_EXTABLE(2b, 4b)
++#else
++#define __REFCOUNT_OP(op) op
++#define __OVERFLOW_PRE
++#define __OVERFLOW_POST
++#define __OVERFLOW_EXTABLE
++#endif
++
++#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
++static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
+ { \
+ int t; \
+ \
+ __asm__ __volatile__( \
+-"1: lwarx %0,0,%3 # atomic_" #op "\n" \
++"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
++ pre_op \
+ #asm_op " %0,%2,%0\n" \
++ post_op \
+ PPC405_ERR77(0,%3) \
+ " stwcx. %0,0,%3 \n" \
+ " bne- 1b\n" \
++ extable \
+ : "=&r" (t), "+m" (v->counter) \
+ : "r" (a), "r" (&v->counter) \
+ : "cc"); \
+ } \
+
+-#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
+-static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
++#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
++ __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
++#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
++static inline int atomic_##op##_return##suffix##_relaxed(int a, atomic##suffix##_t *v)\
+ { \
+ int t; \
+ \
+ __asm__ __volatile__( \
+-"1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \
++"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "_relaxed\n"\
++ pre_op \
+ #asm_op " %0,%2,%0\n" \
++ post_op \
+ PPC405_ERR77(0, %3) \
+ " stwcx. %0,0,%3\n" \
+ " bne- 1b\n" \
++ extable \
+ : "=&r" (t), "+m" (v->counter) \
+ : "r" (a), "r" (&v->counter) \
+ : "cc"); \
+@@ -78,6 +124,9 @@ static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
+ return t; \
+ }
+
++#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
++ __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
+ #define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \
+ static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
+ { \
+@@ -105,6 +154,7 @@ ATOMIC_OPS(add, add)
+ ATOMIC_OPS(sub, subf)
+
+ #define atomic_add_return_relaxed atomic_add_return_relaxed
++#define atomic_add_return_unchecked_relaxed atomic_add_return_unchecked_relaxed
+ #define atomic_sub_return_relaxed atomic_sub_return_relaxed
+
+ #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+@@ -126,41 +176,22 @@ ATOMIC_OPS(xor, xor)
+ #undef ATOMIC_OPS
+ #undef ATOMIC_FETCH_OP_RELAXED
+ #undef ATOMIC_OP_RETURN_RELAXED
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+
+ #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+
+-static __inline__ void atomic_inc(atomic_t *v)
+-{
+- int t;
+-
+- __asm__ __volatile__(
+-"1: lwarx %0,0,%2 # atomic_inc\n\
+- addic %0,%0,1\n"
+- PPC405_ERR77(0,%2)
+-" stwcx. %0,0,%2 \n\
+- bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
+-}
+-
+-static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
+-{
+- int t;
+-
+- __asm__ __volatile__(
+-"1: lwarx %0,0,%2 # atomic_inc_return_relaxed\n"
+-" addic %0,%0,1\n"
+- PPC405_ERR77(0, %2)
+-" stwcx. %0,0,%2\n"
+-" bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
+-
+- return t;
+-}
++/*
++ * atomic_inc - increment atomic variable
++ * @v: pointer of type atomic_t
++ *
++ * Atomically increments @v by 1
++ */
++#define atomic_inc(v) atomic_add(1, (v))
++#define atomic_inc_unchecked(v) atomic_add_unchecked(1, (v))
++#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v))
++#define atomic_inc_return_unchecked_relaxed(v) atomic_add_return_unchecked_relaxed(1, (v))
+
+ /*
+ * atomic_inc_and_test - increment and test
+@@ -171,37 +202,20 @@ static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
+ * other cases.
+ */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+-
+-static __inline__ void atomic_dec(atomic_t *v)
+-{
+- int t;
+-
+- __asm__ __volatile__(
+-"1: lwarx %0,0,%2 # atomic_dec\n\
+- addic %0,%0,-1\n"
+- PPC405_ERR77(0,%2)\
+-" stwcx. %0,0,%2\n\
+- bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
+-}
+-
+-static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
++#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
++
++/*
++ * atomic_dec - decrement atomic variable
++ * @v: pointer of type atomic_t
++ *
++ * Atomically decrements @v by 1
++ */
++#define atomic_dec(v) atomic_sub(1, (v))
++#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v))
++
++static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
+ {
+- int t;
+-
+- __asm__ __volatile__(
+-"1: lwarx %0,0,%2 # atomic_dec_return_relaxed\n"
+-" addic %0,%0,-1\n"
+- PPC405_ERR77(0, %2)
+-" stwcx. %0,0,%2\n"
+-" bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
+-
+- return t;
++ atomic_sub_unchecked(1, v);
+ }
+
+ #define atomic_inc_return_relaxed atomic_inc_return_relaxed
+@@ -216,6 +230,16 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+ #define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return cmpxchg(&(v->counter), old, new);
++}
++
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&(v->counter), new);
++}
++
+ /**
+ * __atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+@@ -233,14 +257,21 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ PPC_ATOMIC_ENTRY_BARRIER
+ "1: lwarx %0,0,%1 # __atomic_add_unless\n\
+ cmpw 0,%0,%3 \n\
+- beq 2f \n\
+- add %0,%2,%0 \n"
++ beq 5f \n"
++
++ __OVERFLOW_PRE
++ __REFCOUNT_OP(add) " %0,%2,%0 \n"
++ __OVERFLOW_POST
++
+ PPC405_ERR77(0,%2)
+ " stwcx. %0,0,%1 \n\
+ bne- 1b \n"
++
++ __OVERFLOW_EXTABLE
++
+ PPC_ATOMIC_EXIT_BARRIER
+ " subf %0,%2,%0 \n\
+-2:"
++5:"
+ : "=&r" (t)
+ : "r" (&v->counter), "r" (a), "r" (u)
+ : "cc", "memory");
+@@ -323,37 +354,59 @@ static __inline__ long atomic64_read(const atomic64_t *v)
+ return t;
+ }
+
++static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ long t;
++
++ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
++
++ return t;
++}
++
+ static __inline__ void atomic64_set(atomic64_t *v, long i)
+ {
+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+ }
+
+-#define ATOMIC64_OP(op, asm_op) \
+-static __inline__ void atomic64_##op(long a, atomic64_t *v) \
++static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
++}
++
++#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
++static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
+ { \
+ long t; \
+ \
+ __asm__ __volatile__( \
+ "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
++ pre_op \
+ #asm_op " %0,%2,%0\n" \
++ post_op \
+ " stdcx. %0,0,%3 \n" \
+ " bne- 1b\n" \
++ extable \
+ : "=&r" (t), "+m" (v->counter) \
+ : "r" (a), "r" (&v->counter) \
+ : "cc"); \
+ }
+
+-#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
+-static inline long \
+-atomic64_##op##_return_relaxed(long a, atomic64_t *v) \
++#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
++ __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
++#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
++static inline long atomic64_##op##_return##suffix##_relaxed(long a, atomic64##suffix##_t *v)\
+ { \
+ long t; \
+ \
+ __asm__ __volatile__( \
+ "1: ldarx %0,0,%3 # atomic64_" #op "_return_relaxed\n" \
++ pre_op \
+ #asm_op " %0,%2,%0\n" \
++ post_op \
+ " stdcx. %0,0,%3\n" \
+ " bne- 1b\n" \
++ extable \
+ : "=&r" (t), "+m" (v->counter) \
+ : "r" (a), "r" (&v->counter) \
+ : "cc"); \
+@@ -361,6 +414,9 @@ atomic64_##op##_return_relaxed(long a, atomic64_t *v) \
+ return t; \
+ }
+
++#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
++ __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
+ #define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
+ static inline long \
+ atomic64_fetch_##op##_relaxed(long a, atomic64_t *v) \
+@@ -409,38 +465,33 @@ ATOMIC64_OPS(xor, xor)
+ #undef ATOPIC64_OPS
+ #undef ATOMIC64_FETCH_OP_RELAXED
+ #undef ATOMIC64_OP_RETURN_RELAXED
++#undef __ATOMIC64_OP_RETURN
+ #undef ATOMIC64_OP
++#undef __ATOMIC64_OP
++#undef __OVERFLOW_EXTABLE
++#undef __OVERFLOW_POST
++#undef __OVERFLOW_PRE
++#undef __REFCOUNT_OP
+
+ #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+
+-static __inline__ void atomic64_inc(atomic64_t *v)
+-{
+- long t;
++/*
++ * atomic64_inc - increment atomic variable
++ * @v: pointer of type atomic64_t
++ *
++ * Atomically increments @v by 1
++ */
++#define atomic64_inc(v) atomic64_add(1, (v))
++#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v))
+
+- __asm__ __volatile__(
+-"1: ldarx %0,0,%2 # atomic64_inc\n\
+- addic %0,%0,1\n\
+- stdcx. %0,0,%2 \n\
+- bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_add_unchecked(1, v);
+ }
+
+-static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
++static inline long atomic64_inc_return_unchecked_relaxed(atomic64_unchecked_t *v)
+ {
+- long t;
+-
+- __asm__ __volatile__(
+-"1: ldarx %0,0,%2 # atomic64_inc_return_relaxed\n"
+-" addic %0,%0,1\n"
+-" stdcx. %0,0,%2\n"
+-" bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
+-
+- return t;
++ return atomic64_add_return_unchecked_relaxed(1, v);
+ }
+
+ /*
+@@ -453,34 +504,18 @@ static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
+ */
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+-static __inline__ void atomic64_dec(atomic64_t *v)
++/*
++ * atomic64_dec - decrement atomic variable
++ * @v: pointer of type atomic64_t
++ *
++ * Atomically decrements @v by 1
++ */
++#define atomic64_dec(v) atomic64_sub(1, (v))
++#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v))
++
++static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
+ {
+- long t;
+-
+- __asm__ __volatile__(
+-"1: ldarx %0,0,%2 # atomic64_dec\n\
+- addic %0,%0,-1\n\
+- stdcx. %0,0,%2\n\
+- bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
+-}
+-
+-static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
+-{
+- long t;
+-
+- __asm__ __volatile__(
+-"1: ldarx %0,0,%2 # atomic64_dec_return_relaxed\n"
+-" addic %0,%0,-1\n"
+-" stdcx. %0,0,%2\n"
+-" bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
+-
+- return t;
++ atomic64_sub_unchecked(1, v);
+ }
+
+ #define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
+@@ -522,6 +557,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+ #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+ #define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
+
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
++{
++ return cmpxchg(&(v->counter), old, new);
++}
++
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++ return xchg(&(v->counter), new);
++}
++
+ /**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+@@ -537,15 +582,22 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+
+ __asm__ __volatile__ (
+ PPC_ATOMIC_ENTRY_BARRIER
+-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
++"1: ldarx %0,0,%1 # atomic64_add_unless\n\
+ cmpd 0,%0,%3 \n\
+- beq 2f \n\
+- add %0,%2,%0 \n"
++ beq 5f \n"
++
++ __OVERFLOW_PRE
++ __REFCOUNT_OP(add) " %0,%2,%0 \n"
++ __OVERFLOW_POST
++
+ " stdcx. %0,0,%1 \n\
+ bne- 1b \n"
+ PPC_ATOMIC_EXIT_BARRIER
++
++ __OVERFLOW_EXTABLE
++
+ " subf %0,%2,%0 \n\
+-2:"
++5:"
+ : "=&r" (t)
+ : "r" (&v->counter), "r" (a), "r" (u)
+ : "cc", "memory");
+diff --git a/arch/powerpc/include/asm/book3s/32/hash.h b/arch/powerpc/include/asm/book3s/32/hash.h
+index 880db13..bb4ed4a 100644
+--- a/arch/powerpc/include/asm/book3s/32/hash.h
++++ b/arch/powerpc/include/asm/book3s/32/hash.h
+@@ -20,6 +20,7 @@
+ #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
+ #define _PAGE_USER 0x004 /* usermode access allowed */
+ #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
++#define _PAGE_NX _PAGE_GUARDED
+ #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+ #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+ #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
+index 6b8b2d5..cf17a29 100644
+--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
++++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
+@@ -227,7 +227,7 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
+ pte_t *ptep, pte_t entry)
+ {
+ unsigned long set = pte_val(entry) &
+- (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
++ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC | _PAGE_NX);
+ unsigned long clr = ~pte_val(entry) & _PAGE_RO;
+
+ pte_update(ptep, clr, set);
+diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
+index cd5e7aa..7709061 100644
+--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
++++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
+@@ -91,6 +91,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+ pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS);
+ }
+
++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
++{
++ pgd_populate(mm, pgd, pud);
++}
++
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+ return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
+@@ -106,6 +111,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS);
+ }
+
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++	pud_populate(mm, pud, pmd);
++}
++
+ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+ unsigned long address)
+ {
+diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
+index ffbafbf..71d037fb 100644
+--- a/arch/powerpc/include/asm/cache.h
++++ b/arch/powerpc/include/asm/cache.h
+@@ -3,6 +3,8 @@
+
+ #ifdef __KERNEL__
+
++#include <asm/reg.h>
++#include <linux/const.h>
+
+ /* bytes per L1 cache line */
+ #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
+@@ -22,7 +24,7 @@
+ #define L1_CACHE_SHIFT 7
+ #endif
+
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
+index ee46ffe..b36c98c 100644
+--- a/arch/powerpc/include/asm/elf.h
++++ b/arch/powerpc/include/asm/elf.h
+@@ -30,6 +30,18 @@
+
+ #define ELF_ET_DYN_BASE 0x20000000
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
++
++#ifdef __powerpc64__
++#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
++#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
++#else
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
++#endif
++
+ #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
+
+ /*
+diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
+index 8196e9c..d83a9f3 100644
+--- a/arch/powerpc/include/asm/exec.h
++++ b/arch/powerpc/include/asm/exec.h
+@@ -4,6 +4,6 @@
+ #ifndef _ASM_POWERPC_EXEC_H
+ #define _ASM_POWERPC_EXEC_H
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ #endif /* _ASM_POWERPC_EXEC_H */
+diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
+index 5acabbd..7ea14fa 100644
+--- a/arch/powerpc/include/asm/kmap_types.h
++++ b/arch/powerpc/include/asm/kmap_types.h
+@@ -10,7 +10,7 @@
+ * 2 of the License, or (at your option) any later version.
+ */
+
+-#define KM_TYPE_NR 16
++#define KM_TYPE_NR 17
+
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_POWERPC_KMAP_TYPES_H */
+diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
+index b8da913..c02b593 100644
+--- a/arch/powerpc/include/asm/local.h
++++ b/arch/powerpc/include/asm/local.h
+@@ -9,21 +9,65 @@ typedef struct
+ atomic_long_t a;
+ } local_t;
+
++typedef struct
++{
++ atomic_long_unchecked_t a;
++} local_unchecked_t;
++
+ #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+
+ #define local_read(l) atomic_long_read(&(l)->a)
++#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
+ #define local_set(l,i) atomic_long_set(&(l)->a, (i))
++#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
+
+ #define local_add(i,l) atomic_long_add((i),(&(l)->a))
++#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
+ #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
++#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
+ #define local_inc(l) atomic_long_inc(&(l)->a)
++#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
+ #define local_dec(l) atomic_long_dec(&(l)->a)
++#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
+
+ static __inline__ long local_add_return(long a, local_t *l)
+ {
+ long t;
+
+ __asm__ __volatile__(
++"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" addo. %0,%1,%0\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" add %0,%1,%0\n"
++#endif
++
++"3:\n"
++ PPC405_ERR77(0,%2)
++ PPC_STLCX "%0,0,%2 \n\
++ bne- 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (t)
++ : "r" (a), "r" (&(l->a.counter))
++ : "cc", "memory");
++
++ return t;
++}
++
++static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
++{
++ long t;
++
++ __asm__ __volatile__(
+ "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
+ add %0,%1,%0\n"
+ PPC405_ERR77(0,%2)
+@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
+
+ #define local_cmpxchg(l, o, n) \
+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
++#define local_cmpxchg_unchecked(l, o, n) \
++ (cmpxchg_local(&((l)->a.counter), (o), (n)))
+ #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
+
+ /**
+diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
+index 30922f6..0bb237c 100644
+--- a/arch/powerpc/include/asm/mman.h
++++ b/arch/powerpc/include/asm/mman.h
+@@ -26,7 +26,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
+ }
+ #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
+
+-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
++static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
+ {
+ return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
+ }
+diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
+index 897d2e1..399f34f 100644
+--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
++++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
+@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+ #ifndef CONFIG_PPC_64K_PAGES
+
+ #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, (unsigned long)PUD)
++#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
+
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+@@ -70,6 +71,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ pud_set(pud, (unsigned long)pmd);
+ }
+
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
++
+ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+ {
+@@ -139,6 +145,7 @@ extern void __tlb_remove_table(void *_table);
+ #endif
+
+ #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
++#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
+
+ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
+index 56398e7..287a772 100644
+--- a/arch/powerpc/include/asm/page.h
++++ b/arch/powerpc/include/asm/page.h
+@@ -230,8 +230,9 @@ extern long long virt_phys_offset;
+ * and needs to be executable. This means the whole heap ends
+ * up being executable.
+ */
+-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_DATA_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+@@ -259,6 +260,9 @@ extern long long virt_phys_offset;
+ #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
+ #endif
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #ifndef CONFIG_PPC_BOOK3S_64
+ /*
+ * Use the top bit of the higher-level page table entries to indicate whether
+diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
+index dd5f071..0470718 100644
+--- a/arch/powerpc/include/asm/page_64.h
++++ b/arch/powerpc/include/asm/page_64.h
+@@ -169,15 +169,18 @@ do { \
+ * stack by default, so in the absence of a PT_GNU_STACK program header
+ * we turn execute permission off.
+ */
+-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_STACK_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifndef CONFIG_PAX_PAGEEXEC
+ #define VM_STACK_DEFAULT_FLAGS \
+ (is_32bit_task() ? \
+ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
++#endif
+
+ #include <asm-generic/getorder.h>
+
+diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
+index 9bd87f2..f600e6d 100644
+--- a/arch/powerpc/include/asm/pgtable.h
++++ b/arch/powerpc/include/asm/pgtable.h
+@@ -1,6 +1,7 @@
+ #ifndef _ASM_POWERPC_PGTABLE_H
+ #define _ASM_POWERPC_PGTABLE_H
+
++#include <linux/const.h>
+ #ifndef __ASSEMBLY__
+ #include <linux/mmdebug.h>
+ #include <linux/mmzone.h>
+diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
+index 4ba26dd..2d1137d 100644
+--- a/arch/powerpc/include/asm/pte-common.h
++++ b/arch/powerpc/include/asm/pte-common.h
+@@ -16,6 +16,9 @@
+ #ifndef _PAGE_EXEC
+ #define _PAGE_EXEC 0
+ #endif
++#ifndef _PAGE_NX
++#define _PAGE_NX 0
++#endif
+ #ifndef _PAGE_ENDIAN
+ #define _PAGE_ENDIAN 0
+ #endif
+@@ -53,13 +56,13 @@
+ #define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE()
+ #endif
+ #ifndef _PAGE_KERNEL_RO
+-#define _PAGE_KERNEL_RO (_PAGE_RO)
++#define _PAGE_KERNEL_RO (_PAGE_RO | _PAGE_NX)
+ #endif
+ #ifndef _PAGE_KERNEL_ROX
+ #define _PAGE_KERNEL_ROX (_PAGE_EXEC | _PAGE_RO)
+ #endif
+ #ifndef _PAGE_KERNEL_RW
+-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
++#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_NX)
+ #endif
+ #ifndef _PAGE_KERNEL_RWX
+ #define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_EXEC)
+@@ -142,15 +145,12 @@ static inline bool pte_user(pte_t pte)
+ * Note due to the way vm flags are laid out, the bits are XWR
+ */
+ #define PAGE_NONE __pgprot(_PAGE_BASE)
+-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \
+- _PAGE_EXEC)
+-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO)
+-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \
+- _PAGE_EXEC)
+-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO)
+-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \
+- _PAGE_EXEC)
++#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_NX)
++#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
++#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | _PAGE_NX)
++#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | _PAGE_EXEC)
++#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | _PAGE_NX)
++#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | _PAGE_EXEC)
+
+ #define __P000 PAGE_NONE
+ #define __P001 PAGE_READONLY
+@@ -171,11 +171,9 @@ static inline bool pte_user(pte_t pte)
+ #define __S111 PAGE_SHARED_X
+
+ /* Permission masks used for kernel mappings */
+-#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+-#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+- _PAGE_NO_CACHE)
+-#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+- _PAGE_NO_CACHE | _PAGE_GUARDED)
++#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_NX)
++#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
++#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
+ #define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
+ #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+ #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 9e1499f..4e03a24 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -270,6 +270,7 @@
+ #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
+ #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
+ #define DSISR_NOHPTE 0x40000000 /* no translation found */
++#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
+ #define DSISR_PROTFAULT 0x08000000 /* protection fault */
+ #define DSISR_ISSTORE 0x02000000 /* access was a store */
+ #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
+diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
+index 0d02c11..33a8f08 100644
+--- a/arch/powerpc/include/asm/smp.h
++++ b/arch/powerpc/include/asm/smp.h
+@@ -51,7 +51,7 @@ struct smp_ops_t {
+ int (*cpu_disable)(void);
+ void (*cpu_die)(unsigned int nr);
+ int (*cpu_bootable)(unsigned int nr);
+-};
++} __no_const;
+
+ extern void smp_send_debugger_break(void);
+ extern void start_secondary_resume(void);
+diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
+index fa37fe9..867d3cf 100644
+--- a/arch/powerpc/include/asm/spinlock.h
++++ b/arch/powerpc/include/asm/spinlock.h
+@@ -27,6 +27,7 @@
+ #include <asm/asm-compat.h>
+ #include <asm/synch.h>
+ #include <asm/ppc-opcode.h>
++#include <asm/atomic.h>
+
+ #ifdef CONFIG_PPC64
+ /* use 0x800000yy when locked, where yy == CPU number */
+@@ -228,13 +229,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
+ __asm__ __volatile__(
+ "1: " PPC_LWARX(%0,0,%1,1) "\n"
+ __DO_SIGN_EXTEND
+-" addic. %0,%0,1\n\
+- ble- 2f\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" addico. %0,%0,1\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" addic. %0,%0,1\n"
++#endif
++
++"3:\n"
++ "ble- 4f\n"
+ PPC405_ERR77(0,%1)
+ " stwcx. %0,0,%1\n\
+ bne- 1b\n"
+ PPC_ACQUIRE_BARRIER
+-"2:" : "=&r" (tmp)
++"4:"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b,4b)
++#endif
++
++ : "=&r" (tmp)
+ : "r" (&rw->lock)
+ : "cr0", "xer", "memory");
+
+@@ -310,11 +327,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
+ __asm__ __volatile__(
+ "# read_unlock\n\t"
+ PPC_RELEASE_BARRIER
+-"1: lwarx %0,0,%1\n\
+- addic %0,%0,-1\n"
++"1: lwarx %0,0,%1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" addico. %0,%0,-1\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" addic. %0,%0,-1\n"
++#endif
++
++"3:\n"
+ PPC405_ERR77(0,%1)
+ " stwcx. %0,0,%1\n\
+ bne- 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r"(tmp)
+ : "r"(&rw->lock)
+ : "cr0", "xer", "memory");
+diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
+index da3cdff..c774844 100644
+--- a/arch/powerpc/include/asm/string.h
++++ b/arch/powerpc/include/asm/string.h
+@@ -11,17 +11,17 @@
+ #define __HAVE_ARCH_MEMCMP
+ #define __HAVE_ARCH_MEMCHR
+
+-extern char * strcpy(char *,const char *);
+-extern char * strncpy(char *,const char *, __kernel_size_t);
+-extern __kernel_size_t strlen(const char *);
+-extern int strcmp(const char *,const char *);
+-extern int strncmp(const char *, const char *, __kernel_size_t);
+-extern char * strcat(char *, const char *);
++extern char * strcpy(char *,const char *) __nocapture(2);
++extern char * strncpy(char *,const char *, __kernel_size_t) __nocapture(2);
++extern __kernel_size_t strlen(const char *) __nocapture(1);
++extern int strcmp(const char *,const char *) __nocapture();
++extern int strncmp(const char *, const char *, __kernel_size_t) __nocapture(1, 2);
++extern char * strcat(char *, const char *) __nocapture(2);
+ extern void * memset(void *,int,__kernel_size_t);
+-extern void * memcpy(void *,const void *,__kernel_size_t);
+-extern void * memmove(void *,const void *,__kernel_size_t);
+-extern int memcmp(const void *,const void *,__kernel_size_t);
+-extern void * memchr(const void *,int,__kernel_size_t);
++extern void * memcpy(void *,const void *,__kernel_size_t) __nocapture(2);
++extern void * memmove(void *,const void *,__kernel_size_t) __nocapture(2);
++extern int memcmp(const void *,const void *,__kernel_size_t) __nocapture(1, 2);
++extern void * memchr(const void *,int,__kernel_size_t) __nocapture(1);
+
+ #endif /* __KERNEL__ */
+
+diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
+index 87e4b2d..c362390 100644
+--- a/arch/powerpc/include/asm/thread_info.h
++++ b/arch/powerpc/include/asm/thread_info.h
+@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
+ #if defined(CONFIG_PPC64)
+ #define TIF_ELF2ABI 18 /* function descriptors must die! */
+ #endif
++/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
++#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
+
+ /* as above, but as bit values */
+ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
+ #define _TIF_NOHZ (1<<TIF_NOHZ)
++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
+ #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+- _TIF_NOHZ)
++ _TIF_NOHZ | _TIF_GRSEC_SETXID)
+
+ #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index c266227..f3dc6bb 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -58,6 +58,7 @@
+
+ #endif
+
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) \
+ (__chk_user_ptr(addr), \
+ __access_ok((__force unsigned long)(addr), (size), get_fs()))
+@@ -303,43 +304,6 @@ do { \
+ extern unsigned long __copy_tofrom_user(void __user *to,
+ const void __user *from, unsigned long size);
+
+-#ifndef __powerpc64__
+-
+-static inline unsigned long copy_from_user(void *to,
+- const void __user *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_READ, from, n))) {
+- check_object_size(to, n, false);
+- return __copy_tofrom_user((__force void __user *)to, from, n);
+- }
+- memset(to, 0, n);
+- return n;
+-}
+-
+-static inline unsigned long copy_to_user(void __user *to,
+- const void *from, unsigned long n)
+-{
+- if (access_ok(VERIFY_WRITE, to, n)) {
+- check_object_size(from, n, true);
+- return __copy_tofrom_user(to, (__force void __user *)from, n);
+- }
+- return n;
+-}
+-
+-#else /* __powerpc64__ */
+-
+-#define __copy_in_user(to, from, size) \
+- __copy_tofrom_user((to), (from), (size))
+-
+-extern unsigned long copy_from_user(void *to, const void __user *from,
+- unsigned long n);
+-extern unsigned long copy_to_user(void __user *to, const void *from,
+- unsigned long n);
+-extern unsigned long copy_in_user(void __user *to, const void __user *from,
+- unsigned long n);
+-
+-#endif /* __powerpc64__ */
+-
+ static inline unsigned long __copy_from_user_inatomic(void *to,
+ const void __user *from, unsigned long n)
+ {
+@@ -412,6 +376,70 @@ static inline unsigned long __copy_to_user(void __user *to,
+ return __copy_to_user_inatomic(to, from, size);
+ }
+
++#ifndef __powerpc64__
++
++static inline unsigned long __must_check copy_from_user(void *to,
++ const void __user *from, unsigned long n)
++{
++ if ((long)n < 0)
++ return n;
++
++ if (likely(access_ok(VERIFY_READ, from, n))) {
++ check_object_size(to, n, false);
++ return __copy_tofrom_user((void __force_user *)to, from, n);
++ }
++ memset(to, 0, n);
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to,
++ const void *from, unsigned long n)
++{
++ if ((long)n < 0)
++ return n;
++
++ if (likely(access_ok(VERIFY_WRITE, to, n))) {
++ check_object_size(from, n, true);
++ return __copy_tofrom_user(to, (void __force_user *)from, n);
++ }
++ return n;
++}
++
++#else /* __powerpc64__ */
++
++#define __copy_in_user(to, from, size) \
++ __copy_tofrom_user((to), (from), (size))
++
++static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ if ((long)n < 0 || n > INT_MAX)
++ return n;
++
++ if (likely(access_ok(VERIFY_READ, from, n))) {
++ check_object_size(to, n, false);
++ n = __copy_from_user(to, from, n);
++ } else
++ memset(to, 0, n);
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ if ((long)n < 0 || n > INT_MAX)
++ return n;
++
++ if (likely(access_ok(VERIFY_WRITE, to, n))) {
++ check_object_size(from, n, true);
++ n = __copy_to_user(to, from, n);
++ }
++ return n;
++}
++
++extern unsigned long copy_in_user(void __user *to, const void __user *from,
++ unsigned long n);
++
++#endif /* __powerpc64__ */
++
+ extern unsigned long __clear_user(void __user *addr, unsigned long size);
+
+ static inline unsigned long clear_user(void __user *addr, unsigned long size)
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index 1925341..a1841ac 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -15,7 +15,7 @@ CFLAGS_btext.o += -fPIC
+ endif
+
+ CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+-CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
++CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+
+@@ -31,6 +31,8 @@ CFLAGS_REMOVE_ftrace.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_time.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+ endif
+
++CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
++
+ obj-y := cputable.o ptrace.o syscalls.o \
+ irq.o align.o signal_32.o pmc.o vdso.o \
+ process.o systbl.o idle.o \
+diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
+index 38a1f96..ed94e42 100644
+--- a/arch/powerpc/kernel/exceptions-64e.S
++++ b/arch/powerpc/kernel/exceptions-64e.S
+@@ -1010,6 +1010,7 @@ storage_fault_common:
+ std r14,_DAR(r1)
+ std r15,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
++ bl save_nvgprs
+ mr r4,r14
+ mr r5,r15
+ ld r14,PACA_EXGEN+EX_R14(r13)
+@@ -1018,8 +1019,7 @@ storage_fault_common:
+ cmpdi r3,0
+ bne- 1f
+ b ret_from_except_lite
+-1: bl save_nvgprs
+- mr r5,r3
++1: mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ld r4,_DAR(r1)
+ bl bad_page_fault
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 1ba82ea..f78bd700 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1445,10 +1445,10 @@ handle_page_fault:
+ 11: ld r4,_DAR(r1)
+ ld r5,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
++ bl save_nvgprs
+ bl do_page_fault
+ cmpdi r3,0
+ beq+ 12f
+- bl save_nvgprs
+ mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ lwz r4,_DAR(r1)
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 3c05c31..a8e6888 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -482,6 +482,8 @@ void migrate_irqs(void)
+ }
+ #endif
+
++extern void gr_handle_kernel_exploit(void);
++
+ static inline void check_stack_overflow(void)
+ {
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+@@ -494,6 +496,7 @@ static inline void check_stack_overflow(void)
+ pr_err("do_IRQ: stack overflow: %ld\n",
+ sp - sizeof(struct thread_info));
+ dump_stack();
++ gr_handle_kernel_exploit();
+ }
+ #endif
+ }
+diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
+index e785cc9..514488c 100644
+--- a/arch/powerpc/kernel/kprobes.c
++++ b/arch/powerpc/kernel/kprobes.c
+@@ -131,6 +131,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+ kcb->kprobe_saved_msr = regs->msr;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -139,6 +140,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr */
+ regs->link = (unsigned long)kretprobe_trampoline;
+ }
++#endif
+
+ static int __kprobes kprobe_handler(struct pt_regs *regs)
+ {
+@@ -547,6 +549,7 @@ int __init arch_init_kprobes(void)
+ return register_kprobe(&trampoline_p);
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
+@@ -554,3 +557,4 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+
+ return 0;
+ }
++#endif
+diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
+index 5a7a78f..c0e4207 100644
+--- a/arch/powerpc/kernel/module_32.c
++++ b/arch/powerpc/kernel/module_32.c
+@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
+ me->arch.core_plt_section = i;
+ }
+ if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
+- pr_err("Module doesn't contain .plt or .init.plt sections.\n");
++	pr_err("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
+ return -ENOEXEC;
+ }
+
+@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
+
+ pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
+ /* Init, or core PLT? */
+- if (location >= mod->core_layout.base
+- && location < mod->core_layout.base + mod->core_layout.size)
++ if ((location >= mod->core_layout.base_rx && location < mod->core_layout.base_rx + mod->core_layout.size_rx) ||
++ (location >= mod->core_layout.base_rw && location < mod->core_layout.base_rw + mod->core_layout.size_rw))
+ entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
+- else
++ else if ((location >= mod->init_layout.base_rx && location < mod->init_layout.base_rx + mod->init_layout.size_rx) ||
++ (location >= mod->init_layout.base_rw && location < mod->init_layout.base_rw + mod->init_layout.size_rw))
+ entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
++ else {
++ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
++ return ~0UL;
++ }
+
+ /* Find this entry, or if that fails, the next avail. entry */
+ while (entry->jump[0]) {
+@@ -301,7 +306,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ int module_finalize_ftrace(struct module *module, const Elf_Shdr *sechdrs)
+ {
+- module->arch.tramp = do_plt_call(module->core_layout.base,
++ module->arch.tramp = do_plt_call(module->core_layout.base_rx,
+ (unsigned long)ftrace_caller,
+ sechdrs, module);
+ if (!module->arch.tramp)
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 49a680d..2514bbcb 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -1375,8 +1375,8 @@ void show_regs(struct pt_regs * regs)
+ * Lookup NIP late so we have the best change of getting the
+ * above info out without failing
+ */
+- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
+- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
++ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
++ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
+ #endif
+ show_stack(current, (unsigned long *) regs->gpr[1]);
+ if (!user_mode(regs))
+@@ -1897,10 +1897,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+ newsp = stack[0];
+ ip = stack[STACK_FRAME_LR_SAVE];
+ if (!firstframe || ip != lr) {
+- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
++ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((ip == rth) && curr_frame >= 0) {
+- pr_cont(" (%pS)",
++ pr_cont(" (%pA)",
+ (void *)current->ret_stack[curr_frame].ret);
+ curr_frame--;
+ }
+@@ -1920,7 +1920,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+ struct pt_regs *regs = (struct pt_regs *)
+ (sp + STACK_FRAME_OVERHEAD);
+ lr = regs->link;
+- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
++ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
+ regs->trap, (void *)regs->nip, (void *)lr);
+ firstframe = 1;
+ }
+@@ -1957,13 +1957,6 @@ void notrace __ppc64_runlatch_off(void)
+ }
+ #endif /* CONFIG_PPC64 */
+
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+- return sp & ~0xf;
+-}
+-
+ static inline unsigned long brk_rnd(void)
+ {
+ unsigned long rnd = 0;
+diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
+index 5c8f12f..98047fb 100644
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -3151,7 +3151,7 @@ static int do_seccomp(struct pt_regs *regs)
+ * have already loaded -ENOSYS into r3, or seccomp has put
+ * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
+ */
+- if (__secure_computing(NULL))
++ if (secure_computing(NULL))
+ return -1;
+
+ /*
+@@ -3169,6 +3169,10 @@ static int do_seccomp(struct pt_regs *regs)
+ static inline int do_seccomp(struct pt_regs *regs) { return 0; }
+ #endif /* CONFIG_SECCOMP */
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ /**
+ * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
+ * @regs: the pt_regs of the task to trace (current)
+@@ -3192,6 +3196,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
+ {
+ user_exit();
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ /*
+ * The tracer may decide to abort the syscall, if so tracehook
+ * will return !0. Note that the tracer may also just change
+@@ -3210,6 +3219,7 @@ long do_syscall_trace_enter(struct pt_regs *regs)
+ if (regs->gpr[0] >= NR_syscalls)
+ goto skip;
+
++
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->gpr[0]);
+
+@@ -3241,6 +3251,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
+ {
+ int step;
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ audit_syscall_exit(regs);
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index 27aa913..dc0d9f5 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -1006,7 +1006,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+ /* Save user registers on the stack */
+ frame = &rt_sf->uc.uc_mcontext;
+ addr = frame;
+- if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
++ if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base != ~0UL) {
+ sigret = 0;
+ tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
+ } else {
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index 96698fd..fe57485 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -791,7 +791,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
+ tsk->thread.fp_state.fpscr = 0;
+
+ /* Set up to return from userspace. */
+- if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base) {
++ if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base != ~0UL) {
+ regs->link = tsk->mm->context.vdso_base + vdso64_rt_sigtramp;
+ } else {
+ err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 023a462..9d940854 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -37,6 +37,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/ratelimit.h>
+ #include <linux/context_tracking.h>
++#include <linux/uaccess.h>
+
+ #include <asm/emulated_ops.h>
+ #include <asm/pgtable.h>
+@@ -146,6 +147,8 @@ static unsigned long oops_begin(struct pt_regs *regs)
+ }
+ NOKPROBE_SYMBOL(oops_begin);
+
++extern void gr_handle_kernel_exploit(void);
++
+ static void oops_end(unsigned long flags, struct pt_regs *regs,
+ int signr)
+ {
+@@ -195,6 +198,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
++
++ gr_handle_kernel_exploit();
++
+ do_exit(signr);
+ }
+ NOKPROBE_SYMBOL(oops_end);
+@@ -1162,6 +1168,26 @@ void program_check_exception(struct pt_regs *regs)
+ enum ctx_state prev_state = exception_enter();
+ unsigned int reason = get_reason(regs);
+
++#ifdef CONFIG_PAX_REFCOUNT
++ unsigned int bkpt;
++ const struct exception_table_entry *entry;
++
++ if (reason & REASON_ILLEGAL) {
++ /* Check if PaX bad instruction */
++ if (!probe_kernel_address((const void *)regs->nip, bkpt) && bkpt == 0xc00b00) {
++ current->thread.trap_nr = 0;
++ pax_report_refcount_error(regs, NULL);
++ /* fixup_exception() for PowerPC does not exist, simulate its job */
++ if ((entry = search_exception_tables(regs->nip)) != NULL) {
++ regs->nip = entry->fixup;
++ return;
++ }
++ /* fixup_exception() could not handle */
++ goto bail;
++ }
++ }
++#endif
++
+ /* We can now get here via a FP Unavailable exception if the core
+ * has no FPU, in that case the reason flags will be 0 */
+
+diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
+index 4111d30..fa5e7be 100644
+--- a/arch/powerpc/kernel/vdso.c
++++ b/arch/powerpc/kernel/vdso.c
+@@ -35,6 +35,7 @@
+ #include <asm/vdso.h>
+ #include <asm/vdso_datapage.h>
+ #include <asm/setup.h>
++#include <asm/mman.h>
+
+ #undef DEBUG
+
+@@ -180,7 +181,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ vdso_base = VDSO32_MBASE;
+ #endif
+
+- current->mm->context.vdso_base = 0;
++ current->mm->context.vdso_base = ~0UL;
+
+ /* vDSO has a problem and was disabled, just don't "enable" it for the
+ * process
+@@ -201,7 +202,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ vdso_base = get_unmapped_area(NULL, vdso_base,
+ (vdso_pages << PAGE_SHIFT) +
+ ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
+- 0, 0);
++ 0, MAP_PRIVATE | MAP_EXECUTABLE);
+ if (IS_ERR_VALUE(vdso_base)) {
+ rc = vdso_base;
+ goto fail_mmapsem;
+diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
+index 5eea6f3..5d10396 100644
+--- a/arch/powerpc/lib/usercopy_64.c
++++ b/arch/powerpc/lib/usercopy_64.c
+@@ -9,22 +9,6 @@
+ #include <linux/module.h>
+ #include <asm/uaccess.h>
+
+-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_READ, from, n)))
+- n = __copy_from_user(to, from, n);
+- else
+- memset(to, 0, n);
+- return n;
+-}
+-
+-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_WRITE, to, n)))
+- n = __copy_to_user(to, from, n);
+- return n;
+-}
+-
+ unsigned long copy_in_user(void __user *to, const void __user *from,
+ unsigned long n)
+ {
+@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
+ return n;
+ }
+
+-EXPORT_SYMBOL(copy_from_user);
+-EXPORT_SYMBOL(copy_to_user);
+ EXPORT_SYMBOL(copy_in_user);
+
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index d0b137d..af92bde 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -34,6 +34,10 @@
+ #include <linux/context_tracking.h>
+ #include <linux/hugetlb.h>
+ #include <linux/uaccess.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
++#include <linux/unistd.h>
+
+ #include <asm/firmware.h>
+ #include <asm/page.h>
+@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->nip = fault address)
++ *
++ * returns 1 when task should be killed
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int __user *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * Check whether the instruction at regs->nip is a store using
+ * an update addressing form which will update r1.
+@@ -227,7 +258,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
+ * indicate errors in DSISR but can validly be set in SRR1.
+ */
+ if (trap == 0x400)
+- error_code &= 0x48200000;
++ error_code &= 0x58200000;
+ else
+ is_write = error_code & DSISR_ISSTORE;
+ #else
+@@ -384,12 +415,16 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
+ * "undefined". Of those that can be set, this is the only
+ * one which seems bad.
+ */
+- if (error_code & 0x10000000)
++ if (error_code & DSISR_GUARDED)
+ /* Guarded storage error. */
+ goto bad_area;
+ #endif /* CONFIG_8xx */
+
+ if (is_exec) {
++#ifdef CONFIG_PPC_STD_MMU
++ if (error_code & DSISR_GUARDED)
++ goto bad_area;
++#endif
+ /*
+ * Allow execution from readable areas if the MMU does not
+ * provide separate controls over reading and executing.
+@@ -484,6 +519,23 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
+ bad_area_nosemaphore:
+ /* User mode accesses cause a SIGSEGV */
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++#ifdef CONFIG_PPC_STD_MMU
++ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
++#else
++ if (is_exec && regs->nip == address) {
++#endif
++ switch (pax_handle_fetch_fault(regs)) {
++ }
++
++ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ _exception(SIGSEGV, regs, code, address);
+ goto bail;
+ }
+diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
+index 2f1e443..de888bf 100644
+--- a/arch/powerpc/mm/mmap.c
++++ b/arch/powerpc/mm/mmap.c
+@@ -194,6 +194,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ unsigned long random_factor = 0UL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
+@@ -205,9 +209,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ } else {
+ mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ }
+ }
+diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
+index 2b27458..7c7c59b 100644
+--- a/arch/powerpc/mm/slice.c
++++ b/arch/powerpc/mm/slice.c
+@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
+ if ((mm->task_size - len) < addr)
+ return 0;
+ vma = find_vma(mm, addr);
+- return (!vma || (addr + len) <= vma->vm_start);
++ return check_heap_stack_gap(vma, addr, len, 0);
+ }
+
+ static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
+@@ -276,6 +276,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
+ info.align_offset = 0;
+
+ addr = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ addr += mm->delta_mmap;
++#endif
++
+ while (addr < TASK_SIZE) {
+ info.low_limit = addr;
+ if (!slice_scan_available(addr, available, 1, &addr))
+@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+ if (fixed && addr > (mm->task_size - len))
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
++ addr = 0;
++#endif
++
+ /* If hint, make sure it matches our alignment restrictions */
+ if (!fixed && addr) {
+ addr = _ALIGN_UP(addr, 1ul << pshift);
+@@ -555,10 +566,10 @@ unsigned long arch_get_unmapped_area(struct file *filp,
+ }
+
+ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
+- const unsigned long addr0,
+- const unsigned long len,
+- const unsigned long pgoff,
+- const unsigned long flags)
++ unsigned long addr0,
++ unsigned long len,
++ unsigned long pgoff,
++ unsigned long flags)
+ {
+ return slice_get_unmapped_area(addr0, len, flags,
+ current->mm->context.user_psize, 1);
+diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
+index 0625446..139a0aa 100644
+--- a/arch/powerpc/platforms/cell/spufs/file.c
++++ b/arch/powerpc/platforms/cell/spufs/file.c
+@@ -263,9 +263,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ return VM_FAULT_NOPAGE;
+ }
+
+-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
++static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
+ unsigned long address,
+- void *buf, int len, int write)
++ void *buf, size_t len, int write)
+ {
+ struct spu_context *ctx = vma->vm_file->private_data;
+ unsigned long offset = address - vma->vm_start;
+diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
+index 26c5d5be..a308c28 100644
+--- a/arch/s390/Kconfig.debug
++++ b/arch/s390/Kconfig.debug
+@@ -9,6 +9,7 @@ config S390_PTDUMP
+ bool "Export kernel pagetable layout to userspace via debugfs"
+ depends on DEBUG_KERNEL
+ select DEBUG_FS
++ depends on !GRKERNSEC_KMEM
+ ---help---
+ Say Y here if you want to show the kernel pagetable layout in a
+ debugfs file. This information is only useful for kernel developers
+diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
+index d28cc2f..a937312 100644
+--- a/arch/s390/include/asm/atomic.h
++++ b/arch/s390/include/asm/atomic.h
+@@ -342,4 +342,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
+ #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* __ARCH_S390_ATOMIC__ */
+diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
+index 05219a5..032f5f0 100644
+--- a/arch/s390/include/asm/cache.h
++++ b/arch/s390/include/asm/cache.h
+@@ -9,8 +9,10 @@
+ #ifndef __ARCH_S390_CACHE_H
+ #define __ARCH_S390_CACHE_H
+
+-#define L1_CACHE_BYTES 256
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 8
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define NET_SKB_PAD 32
+
+ #define __read_mostly __section(.data..read_mostly)
+diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
+index 1736c7d..261351c 100644
+--- a/arch/s390/include/asm/elf.h
++++ b/arch/s390/include/asm/elf.h
+@@ -167,6 +167,13 @@ extern unsigned int vdso_enabled;
+ (STACK_TOP / 3 * 2) : \
+ (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. */
+
+diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
+index c4a93d6..4d2a9b4 100644
+--- a/arch/s390/include/asm/exec.h
++++ b/arch/s390/include/asm/exec.h
+@@ -7,6 +7,6 @@
+ #ifndef __ASM_EXEC_H
+ #define __ASM_EXEC_H
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ #endif /* __ASM_EXEC_H */
+diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
+index 52d7c87..577d292 100644
+--- a/arch/s390/include/asm/uaccess.h
++++ b/arch/s390/include/asm/uaccess.h
+@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
+ __range_ok((unsigned long)(addr), (size)); \
+ })
+
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) __access_ok(addr, size)
+
+ /*
+@@ -337,6 +338,10 @@ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ return __copy_to_user(to, from, n);
+ }
+
+@@ -360,10 +365,14 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+- unsigned int sz = __compiletime_object_size(to);
++ size_t sz = __compiletime_object_size(to);
+
+ might_fault();
+- if (unlikely(sz != -1 && sz < n)) {
++
++ if ((long)n < 0)
++ return n;
++
++ if (unlikely(sz != (size_t)-1 && sz < n)) {
+ if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
+ else
+diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
+index fdb4042..b72ae72 100644
+--- a/arch/s390/kernel/kprobes.c
++++ b/arch/s390/kernel/kprobes.c
+@@ -269,6 +269,7 @@ static void pop_kprobe(struct kprobe_ctlblk *kcb)
+ }
+ NOKPROBE_SYMBOL(pop_kprobe);
+
++#ifdef CONFIG_KRETPROBES
+ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
+ {
+ ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
+@@ -277,6 +278,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
+ regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
+ }
+ NOKPROBE_SYMBOL(arch_prepare_kretprobe);
++#endif
+
+ static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
+ {
+@@ -740,8 +742,10 @@ int __init arch_init_kprobes(void)
+ return register_kprobe(&trampoline);
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int arch_trampoline_kprobe(struct kprobe *p)
+ {
+ return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
+ }
+ NOKPROBE_SYMBOL(arch_trampoline_kprobe);
++#endif
+diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
+index fbc0789..e7962a1 100644
+--- a/arch/s390/kernel/module.c
++++ b/arch/s390/kernel/module.c
+@@ -163,11 +163,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+
+ /* Increase core size by size of got & plt and set start
+ offsets for got and plt. */
+- me->core_layout.size = ALIGN(me->core_layout.size, 4);
+- me->arch.got_offset = me->core_layout.size;
+- me->core_layout.size += me->arch.got_size;
+- me->arch.plt_offset = me->core_layout.size;
+- me->core_layout.size += me->arch.plt_size;
++ me->core_layout.size_rw = ALIGN(me->core_layout.size_rw, 4);
++ me->arch.got_offset = me->core_layout.size_rw;
++ me->core_layout.size_rw += me->arch.got_size;
++ me->arch.plt_offset = me->core_layout.size_rx;
++ me->core_layout.size_rx += me->arch.plt_size;
+ return 0;
+ }
+
+@@ -283,7 +283,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ if (info->got_initialized == 0) {
+ Elf_Addr *gotent;
+
+- gotent = me->core_layout.base + me->arch.got_offset +
++ gotent = me->core_layout.base_rw + me->arch.got_offset +
+ info->got_offset;
+ *gotent = val;
+ info->got_initialized = 1;
+@@ -306,7 +306,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ rc = apply_rela_bits(loc, val, 0, 64, 0);
+ else if (r_type == R_390_GOTENT ||
+ r_type == R_390_GOTPLTENT) {
+- val += (Elf_Addr) me->core_layout.base - loc;
++ val += (Elf_Addr) me->core_layout.base_rw - loc;
+ rc = apply_rela_bits(loc, val, 1, 32, 1);
+ }
+ break;
+@@ -319,7 +319,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
+ if (info->plt_initialized == 0) {
+ unsigned int *ip;
+- ip = me->core_layout.base + me->arch.plt_offset +
++ ip = me->core_layout.base_rx + me->arch.plt_offset +
+ info->plt_offset;
+ ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
+ ip[1] = 0x100a0004;
+@@ -338,7 +338,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ val - loc + 0xffffUL < 0x1ffffeUL) ||
+ (r_type == R_390_PLT32DBL &&
+ val - loc + 0xffffffffULL < 0x1fffffffeULL)))
+- val = (Elf_Addr) me->core_layout.base +
++ val = (Elf_Addr) me->core_layout.base_rx +
+ me->arch.plt_offset +
+ info->plt_offset;
+ val += rela->r_addend - loc;
+@@ -360,7 +360,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ case R_390_GOTOFF32: /* 32 bit offset to GOT. */
+ case R_390_GOTOFF64: /* 64 bit offset to GOT. */
+ val = val + rela->r_addend -
+- ((Elf_Addr) me->core_layout.base + me->arch.got_offset);
++ ((Elf_Addr) me->core_layout.base_rw + me->arch.got_offset);
+ if (r_type == R_390_GOTOFF16)
+ rc = apply_rela_bits(loc, val, 0, 16, 0);
+ else if (r_type == R_390_GOTOFF32)
+@@ -370,7 +370,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ break;
+ case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
+ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
+- val = (Elf_Addr) me->core_layout.base + me->arch.got_offset +
++ val = (Elf_Addr) me->core_layout.base_rw + me->arch.got_offset +
+ rela->r_addend - loc;
+ if (r_type == R_390_GOTPC)
+ rc = apply_rela_bits(loc, val, 1, 32, 0);
+diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
+index bba4fa7..9c32b3c 100644
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -217,13 +217,6 @@ unsigned long get_wchan(struct task_struct *p)
+ return 0;
+ }
+
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+- return sp & ~0xf;
+-}
+-
+ static inline unsigned long brk_rnd(void)
+ {
+ return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
+diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
+index eb9df28..7b686ba 100644
+--- a/arch/s390/mm/mmap.c
++++ b/arch/s390/mm/mmap.c
+@@ -201,9 +201,9 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
+ }
+
+ static unsigned long
+-s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
+- const unsigned long len, const unsigned long pgoff,
+- const unsigned long flags)
++s390_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
++ unsigned long len, unsigned long pgoff,
++ unsigned long flags)
+ {
+ struct mm_struct *mm = current->mm;
+ unsigned long area;
+@@ -230,6 +230,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ unsigned long random_factor = 0UL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
+@@ -239,9 +243,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = mmap_base_legacy(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = s390_get_unmapped_area;
+ } else {
+ mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = s390_get_unmapped_area_topdown;
+ }
+ }
+diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
+index ae3d59f..f65f075 100644
+--- a/arch/score/include/asm/cache.h
++++ b/arch/score/include/asm/cache.h
+@@ -1,7 +1,9 @@
+ #ifndef _ASM_SCORE_CACHE_H
+ #define _ASM_SCORE_CACHE_H
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 4
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* _ASM_SCORE_CACHE_H */
+diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
+index f9f3cd5..58ff438 100644
+--- a/arch/score/include/asm/exec.h
++++ b/arch/score/include/asm/exec.h
+@@ -1,6 +1,6 @@
+ #ifndef _ASM_SCORE_EXEC_H
+ #define _ASM_SCORE_EXEC_H
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) (x)
+
+ #endif /* _ASM_SCORE_EXEC_H */
+diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
+index aae9480..93e40a4 100644
+--- a/arch/score/kernel/process.c
++++ b/arch/score/kernel/process.c
+@@ -114,8 +114,3 @@ unsigned long get_wchan(struct task_struct *task)
+
+ return task_pt_regs(task)->cp0_epc;
+ }
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- return sp;
+-}
+diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
+index ef9e555..331bd29 100644
+--- a/arch/sh/include/asm/cache.h
++++ b/arch/sh/include/asm/cache.h
+@@ -9,10 +9,11 @@
+ #define __ASM_SH_CACHE_H
+ #ifdef __KERNEL__
+
++#include <linux/const.h>
+ #include <linux/init.h>
+ #include <cpu/cache.h>
+
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
+diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
+index 83acbf3..fa67491 100644
+--- a/arch/sh/kernel/kprobes.c
++++ b/arch/sh/kernel/kprobes.c
+@@ -72,6 +72,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ if (*p->addr == BREAKPOINT_INSTRUCTION)
+@@ -79,6 +80,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+
+ return 0;
+ }
++#endif
+
+ /**
+ * If an illegal slot instruction exception occurs for an address
+@@ -203,6 +205,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+ }
+
+ /* Called with kretprobe_lock held */
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -211,6 +214,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr */
+ regs->pr = (unsigned long)kretprobe_trampoline;
+ }
++#endif
+
+ static int __kprobes kprobe_handler(struct pt_regs *regs)
+ {
+diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
+index 6777177..d44b592 100644
+--- a/arch/sh/mm/mmap.c
++++ b/arch/sh/mm/mmap.c
+@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int do_colour_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ if (flags & MAP_FIXED) {
+@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ if (filp || (flags & MAP_SHARED))
+ do_colour_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_colour_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+ info.flags = 0;
+ info.length = len;
+- info.low_limit = TASK_UNMAPPED_BASE;
++ info.low_limit = mm->mmap_base;
+ info.high_limit = TASK_SIZE;
+ info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+@@ -77,14 +81,15 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ }
+
+ unsigned long
+-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+- const unsigned long len, const unsigned long pgoff,
+- const unsigned long flags)
++arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0,
++ unsigned long len, unsigned long pgoff,
++ unsigned long flags)
+ {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
+ int do_colour_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ if (flags & MAP_FIXED) {
+@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ if (filp || (flags & MAP_SHARED))
+ do_colour_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* requesting a specific address */
+ if (addr) {
+ if (do_colour_align)
+@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ VM_BUG_ON(addr != -ENOMEM);
+ info.flags = 0;
+ info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ info.low_limit += mm->delta_mmap;
++#endif
++
+ info.high_limit = TASK_SIZE;
+ addr = vm_unmapped_area(&info);
+ }
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index 165ecdd..2bac5bf 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -38,6 +38,7 @@ config SPARC
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
+ select MODULES_USE_ELF_RELA
++ select HAVE_GCC_PLUGINS
+ select ODD_RT_SIGACTION
+ select OLD_SIGSUSPEND
+ select ARCH_HAS_SG_CHAIN
+diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
+index 24827a3..5dd45ac4 100644
+--- a/arch/sparc/include/asm/atomic_64.h
++++ b/arch/sparc/include/asm/atomic_64.h
+@@ -15,18 +15,38 @@
+ #define ATOMIC64_INIT(i) { (i) }
+
+ #define atomic_read(v) READ_ONCE((v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return READ_ONCE(v->counter);
++}
+ #define atomic64_read(v) READ_ONCE((v)->counter)
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return READ_ONCE(v->counter);
++}
+
+ #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ WRITE_ONCE(v->counter, i);
++}
+ #define atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i))
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ WRITE_ONCE(v->counter, i);
++}
+
+-#define ATOMIC_OP(op) \
+-void atomic_##op(int, atomic_t *); \
+-void atomic64_##op(long, atomic64_t *);
++#define __ATOMIC_OP(op, suffix) \
++void atomic_##op##suffix(int, atomic##suffix##_t *); \
++void atomic64_##op##suffix(long, atomic64##suffix##_t *);
+
+-#define ATOMIC_OP_RETURN(op) \
+-int atomic_##op##_return(int, atomic_t *); \
+-long atomic64_##op##_return(long, atomic64_t *);
++#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
++
++#define __ATOMIC_OP_RETURN(op, suffix) \
++int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
++long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
++
++#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
+
+ #define ATOMIC_FETCH_OP(op) \
+ int atomic_fetch_##op(int, atomic_t *); \
+@@ -47,13 +67,23 @@ ATOMIC_OPS(xor)
+ #undef ATOMIC_OPS
+ #undef ATOMIC_FETCH_OP
+ #undef ATOMIC_OP_RETURN
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+
+ #define atomic_dec_return(v) atomic_sub_return(1, v)
+ #define atomic64_dec_return(v) atomic64_sub_return(1, v)
+
+ #define atomic_inc_return(v) atomic_add_return(1, v)
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+ #define atomic64_inc_return(v) atomic64_add_return(1, v)
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ return atomic64_add_return_unchecked(1, v);
++}
+
+ /*
+ * atomic_inc_and_test - increment and test
+@@ -64,6 +94,10 @@ ATOMIC_OPS(xor)
+ * other cases.
+ */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_inc_return_unchecked(v) == 0;
++}
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+ #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+@@ -73,25 +107,60 @@ ATOMIC_OPS(xor)
+ #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
+
+ #define atomic_inc(v) atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+ #define atomic64_inc(v) atomic64_add(1, v)
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_add_unchecked(1, v);
++}
+
+ #define atomic_dec(v) atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+ #define atomic64_dec(v) atomic64_sub(1, v)
++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_sub_unchecked(1, v);
++}
+
+ #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
+ #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
+
+ #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
+
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%icc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+@@ -101,21 +170,42 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+
+ #define atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
++ long new)
++{
++ return cmpxchg(&(v->counter), old, new);
++}
++
+ #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++ return xchg(&v->counter, new);
++}
+
+ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic64_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%xcc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic64_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
+index 5bb6991..5c2132e 100644
+--- a/arch/sparc/include/asm/cache.h
++++ b/arch/sparc/include/asm/cache.h
+@@ -7,10 +7,12 @@
+ #ifndef _SPARC_CACHE_H
+ #define _SPARC_CACHE_H
+
++#include <linux/const.h>
++
+ #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES 32
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #ifdef CONFIG_SPARC32
+ #define SMP_CACHE_BYTES_SHIFT 5
+diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
+index a24e41f..47677ff 100644
+--- a/arch/sparc/include/asm/elf_32.h
++++ b/arch/sparc/include/asm/elf_32.h
+@@ -114,6 +114,13 @@ typedef struct {
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This can NOT be done in userspace
+ on Sparc. */
+diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
+index 3f2d403..4385ed9 100644
+--- a/arch/sparc/include/asm/elf_64.h
++++ b/arch/sparc/include/asm/elf_64.h
+@@ -190,6 +190,13 @@ typedef struct {
+ #define ELF_ET_DYN_BASE 0x0000010000000000UL
+ #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
++#endif
++
+ extern unsigned long sparc64_elf_hwcap;
+ #define ELF_HWCAP sparc64_elf_hwcap
+
+diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
+index 0346c7e..c5c25b9 100644
+--- a/arch/sparc/include/asm/pgalloc_32.h
++++ b/arch/sparc/include/asm/pgalloc_32.h
+@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
+ }
+
+ #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
++#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
+
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
+index 3529f13..d98a28c 100644
+--- a/arch/sparc/include/asm/pgalloc_64.h
++++ b/arch/sparc/include/asm/pgalloc_64.h
+@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
+ }
+
+ #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
++#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
+
+ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
+ }
+
+ #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
++#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
+
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
+index 59ba6f6..4518128 100644
+--- a/arch/sparc/include/asm/pgtable.h
++++ b/arch/sparc/include/asm/pgtable.h
+@@ -5,4 +5,8 @@
+ #else
+ #include <asm/pgtable_32.h>
+ #endif
++
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #endif
+diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
+index ce6f569..593b043 100644
+--- a/arch/sparc/include/asm/pgtable_32.h
++++ b/arch/sparc/include/asm/pgtable_32.h
+@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
+ #define PAGE_SHARED SRMMU_PAGE_SHARED
+ #define PAGE_COPY SRMMU_PAGE_COPY
+ #define PAGE_READONLY SRMMU_PAGE_RDONLY
++#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
++#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
++#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
+ #define PAGE_KERNEL SRMMU_PAGE_KERNEL
+
+ /* Top-level page directory - dummy used by init-mm.
+@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
+
+ /* xwr */
+ #define __P000 PAGE_NONE
+-#define __P001 PAGE_READONLY
+-#define __P010 PAGE_COPY
+-#define __P011 PAGE_COPY
++#define __P001 PAGE_READONLY_NOEXEC
++#define __P010 PAGE_COPY_NOEXEC
++#define __P011 PAGE_COPY_NOEXEC
+ #define __P100 PAGE_READONLY
+ #define __P101 PAGE_READONLY
+ #define __P110 PAGE_COPY
+ #define __P111 PAGE_COPY
+
+ #define __S000 PAGE_NONE
+-#define __S001 PAGE_READONLY
+-#define __S010 PAGE_SHARED
+-#define __S011 PAGE_SHARED
++#define __S001 PAGE_READONLY_NOEXEC
++#define __S010 PAGE_SHARED_NOEXEC
++#define __S011 PAGE_SHARED_NOEXEC
+ #define __S100 PAGE_READONLY
+ #define __S101 PAGE_READONLY
+ #define __S110 PAGE_SHARED
+diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
+index ae51a11..eadfd03 100644
+--- a/arch/sparc/include/asm/pgtsrmmu.h
++++ b/arch/sparc/include/asm/pgtsrmmu.h
+@@ -111,6 +111,11 @@
+ SRMMU_EXEC | SRMMU_REF)
+ #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
+ SRMMU_EXEC | SRMMU_REF)
++
++#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
++#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++
+ #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
+ SRMMU_DIRTY | SRMMU_REF)
+
+diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
+index 29d64b1..4272fe8 100644
+--- a/arch/sparc/include/asm/setup.h
++++ b/arch/sparc/include/asm/setup.h
+@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
+ void handle_ld_nf(u32 insn, struct pt_regs *regs);
+
+ /* init_64.c */
+-extern atomic_t dcpage_flushes;
+-extern atomic_t dcpage_flushes_xcall;
++extern atomic_unchecked_t dcpage_flushes;
++extern atomic_unchecked_t dcpage_flushes_xcall;
+
+ extern int sysctl_tsb_ratio;
+ #endif
+diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
+index 07c9f2e..352fff0 100644
+--- a/arch/sparc/include/asm/spinlock_64.h
++++ b/arch/sparc/include/asm/spinlock_64.h
+@@ -103,7 +103,12 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
+ __asm__ __volatile__ (
+ "1: ldsw [%2], %0\n"
+ " brlz,pn %0, 2f\n"
+-"4: add %0, 1, %1\n"
++"4: addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -116,7 +121,7 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
+ " .previous"
+ : "=&r" (tmp1), "=&r" (tmp2)
+ : "r" (lock)
+- : "memory");
++ : "memory", "cc");
+ }
+
+ static inline int arch_read_trylock(arch_rwlock_t *lock)
+@@ -127,7 +132,12 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
+ "1: ldsw [%2], %0\n"
+ " brlz,a,pn %0, 2f\n"
+ " mov 0, %0\n"
+-" add %0, 1, %1\n"
++" addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -146,7 +156,12 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
+
+ __asm__ __volatile__(
+ "1: lduw [%2], %0\n"
+-" sub %0, 1, %1\n"
++" subcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%xcc, 1b\n"
+diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
+index 229475f..2fca9163 100644
+--- a/arch/sparc/include/asm/thread_info_32.h
++++ b/arch/sparc/include/asm/thread_info_32.h
+@@ -48,6 +48,7 @@ struct thread_info {
+ struct reg_window32 reg_window[NSWINS]; /* align for ldd! */
+ unsigned long rwbuf_stkptrs[NSWINS];
+ unsigned long w_saved;
++ unsigned long lowest_stack;
+ };
+
+ /*
+diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
+index 3d7b925..493ce82 100644
+--- a/arch/sparc/include/asm/thread_info_64.h
++++ b/arch/sparc/include/asm/thread_info_64.h
+@@ -59,6 +59,8 @@ struct thread_info {
+ struct pt_regs *kern_una_regs;
+ unsigned int kern_una_insn;
+
++ unsigned long lowest_stack;
++
+ unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
+ __attribute__ ((aligned(64)));
+ };
+@@ -180,12 +182,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
+ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+ /* flag bit 4 is available */
+ #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
+-/* flag bit 6 is available */
++#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
+ #define TIF_32BIT 7 /* 32-bit binary */
+ #define TIF_NOHZ 8 /* in adaptive nohz mode */
+ #define TIF_SECCOMP 9 /* secure computing */
+ #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
+ #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
++
+ /* NOTE: Thread flags >= 12 should be ones we have no interest
+ * in using in assembly, else we can't use the mask as
+ * an immediate value in instructions such as andcc.
+@@ -205,12 +208,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
+ #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+ #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
+
+ #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
+ _TIF_DO_NOTIFY_RESUME_MASK | \
+ _TIF_NEED_RESCHED)
+ #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
+
++#define _TIF_WORK_SYSCALL \
++ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
++ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
++
+ #define is_32bit_task() (test_thread_flag(TIF_32BIT))
+
+ /*
+diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
+index bd56c28..4b63d83 100644
+--- a/arch/sparc/include/asm/uaccess.h
++++ b/arch/sparc/include/asm/uaccess.h
+@@ -1,5 +1,6 @@
+ #ifndef ___ASM_SPARC_UACCESS_H
+ #define ___ASM_SPARC_UACCESS_H
++
+ #if defined(__sparc__) && defined(__arch64__)
+ #include <asm/uaccess_64.h>
+ #else
+diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
+index ea55f86..dbf15cf 100644
+--- a/arch/sparc/include/asm/uaccess_32.h
++++ b/arch/sparc/include/asm/uaccess_32.h
+@@ -47,6 +47,7 @@
+ #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
+ #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+ #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) \
+ ({ (void)(type); __access_ok((unsigned long)(addr), size); })
+
+@@ -248,6 +249,9 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
+
+ static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (n && __access_ok((unsigned long) to, n)) {
+ check_object_size(from, n, true);
+ return __copy_user(to, (__force void __user *) from, n);
+@@ -257,12 +261,18 @@ static inline unsigned long copy_to_user(void __user *to, const void *from, unsi
+
+ static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ check_object_size(from, n, true);
+ return __copy_user(to, (__force void __user *) from, n);
+ }
+
+ static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (n && __access_ok((unsigned long) from, n)) {
+ check_object_size(to, n, false);
+ return __copy_user((__force void __user *) to, from, n);
+@@ -274,6 +284,9 @@ static inline unsigned long copy_from_user(void *to, const void __user *from, un
+
+ static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ return __copy_user((__force void __user *) to, from, n);
+ }
+
+diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
+index 5373136..c528f7e 100644
+--- a/arch/sparc/include/asm/uaccess_64.h
++++ b/arch/sparc/include/asm/uaccess_64.h
+@@ -10,6 +10,7 @@
+ #include <linux/compiler.h>
+ #include <linux/string.h>
+ #include <linux/thread_info.h>
++#include <linux/kernel.h>
+ #include <asm/asi.h>
+ #include <asm/spitfire.h>
+ #include <asm-generic/uaccess-unaligned.h>
+@@ -77,6 +78,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
+ return 1;
+ }
+
++static inline int access_ok_noprefault(int type, const void __user * addr, unsigned long size)
++{
++ return 1;
++}
++
+ static inline int access_ok(int type, const void __user * addr, unsigned long size)
+ {
+ return 1;
+@@ -191,6 +197,9 @@ unsigned long __must_check ___copy_from_user(void *to,
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long size)
+ {
++ if ((long)size < 0 || size > INT_MAX)
++ return size;
++
+ check_object_size(to, size, false);
+
+ return ___copy_from_user(to, from, size);
+@@ -203,6 +212,9 @@ unsigned long __must_check ___copy_to_user(void __user *to,
+ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long size)
+ {
++ if ((long)size < 0 || size > INT_MAX)
++ return size;
++
+ check_object_size(from, size, true);
+
+ return ___copy_to_user(to, from, size);
+diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
+index fa3c02d..c9a6309 100644
+--- a/arch/sparc/kernel/Makefile
++++ b/arch/sparc/kernel/Makefile
+@@ -4,7 +4,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ extra-y := head_$(BITS).o
+
+diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
+index b0377db..1da3b53 100644
+--- a/arch/sparc/kernel/kprobes.c
++++ b/arch/sparc/kernel/kprobes.c
+@@ -499,6 +499,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+ * value kept in ri->ret_addr so we don't need to keep adjusting it
+ * back and forth.
+ */
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -508,6 +509,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ regs->u_regs[UREG_RETPC] =
+ ((unsigned long)kretprobe_trampoline) - 8;
+ }
++#endif
+
+ /*
+ * Called when the probe at kretprobe trampoline is hit
+diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
+index b7780a5..28315f0 100644
+--- a/arch/sparc/kernel/process_32.c
++++ b/arch/sparc/kernel/process_32.c
+@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
+
+ printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
+ r->psr, r->pc, r->npc, r->y, print_tainted());
+- printk("PC: <%pS>\n", (void *) r->pc);
++ printk("PC: <%pA>\n", (void *) r->pc);
+ printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
+ r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
+ printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
+ r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
+- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
++ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
+
+ printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
+@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+ rw = (struct reg_window32 *) fp;
+ pc = rw->ins[7];
+ printk("[%08lx : ", pc);
+- printk("%pS ] ", (void *) pc);
++ printk("%pA ] ", (void *) pc);
+ fp = rw->ins[6];
+ } while (++count < 16);
+ printk("\n");
+diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
+index 47ff558..2333c8a 100644
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
+ printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
+ rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
+ if (regs->tstate & TSTATE_PRIV)
+- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
++ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
+ }
+
+ void show_regs(struct pt_regs *regs)
+@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
+
+ printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
+ regs->tpc, regs->tnpc, regs->y, print_tainted());
+- printk("TPC: <%pS>\n", (void *) regs->tpc);
++ printk("TPC: <%pA>\n", (void *) regs->tpc);
+ printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
+ regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
+ regs->u_regs[3]);
+@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
+ printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
+ regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
+ regs->u_regs[15]);
+- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
++ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
+ show_regwindow(regs);
+ show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
+ }
+@@ -278,7 +278,7 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+ ((tp && tp->task) ? tp->task->pid : -1));
+
+ if (gp->tstate & TSTATE_PRIV) {
+- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
++ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
+ (void *) gp->tpc,
+ (void *) gp->o7,
+ (void *) gp->i7,
+diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
+index 79cc0d1..46d6233 100644
+--- a/arch/sparc/kernel/prom_common.c
++++ b/arch/sparc/kernel/prom_common.c
+@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
+
+ unsigned int prom_early_allocated __initdata;
+
+-static struct of_pdt_ops prom_sparc_ops __initdata = {
++static const struct of_pdt_ops prom_sparc_ops __initconst = {
+ .nextprop = prom_common_nextprop,
+ .getproplen = prom_getproplen,
+ .getproperty = prom_getproperty,
+diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
+index ac082dd..7170942 100644
+--- a/arch/sparc/kernel/ptrace_64.c
++++ b/arch/sparc/kernel/ptrace_64.c
+@@ -1068,6 +1068,10 @@ long arch_ptrace(struct task_struct *child, long request,
+ return ret;
+ }
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+ {
+ int ret = 0;
+@@ -1078,6 +1082,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+ if (test_thread_flag(TIF_NOHZ))
+ user_exit();
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ ret = tracehook_report_syscall_entry(regs);
+
+@@ -1096,6 +1105,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
+ if (test_thread_flag(TIF_NOHZ))
+ user_exit();
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ audit_syscall_exit(regs);
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index 8182f7c..a5ab37f 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -895,7 +895,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
+ return;
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes);
++ atomic_inc_unchecked(&dcpage_flushes);
+ #endif
+
+ this_cpu = get_cpu();
+@@ -919,7 +919,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
+ xcall_deliver(data0, __pa(pg_addr),
+ (u64) pg_addr, cpumask_of(cpu));
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes_xcall);
++ atomic_inc_unchecked(&dcpage_flushes_xcall);
+ #endif
+ }
+ }
+@@ -938,7 +938,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+ preempt_disable();
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes);
++ atomic_inc_unchecked(&dcpage_flushes);
+ #endif
+ data0 = 0;
+ pg_addr = page_address(page);
+@@ -955,7 +955,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+ xcall_deliver(data0, __pa(pg_addr),
+ (u64) pg_addr, cpu_online_mask);
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes_xcall);
++ atomic_inc_unchecked(&dcpage_flushes_xcall);
+ #endif
+ }
+ __local_flush_dcache_page(page);
+diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
+index 646988d..b88905f 100644
+--- a/arch/sparc/kernel/sys_sparc_32.c
++++ b/arch/sparc/kernel/sys_sparc_32.c
+@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ if (len > TASK_SIZE - PAGE_SIZE)
+ return -ENOMEM;
+ if (!addr)
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+
+ info.flags = 0;
+ info.length = len;
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index fe8b8ee..3f17a96 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ struct vm_area_struct * vma;
+ unsigned long task_size = TASK_SIZE;
+ int do_color_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOR_ALIGN(addr, pgoff);
+@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+ info.flags = 0;
+ info.length = len;
+- info.low_limit = TASK_UNMAPPED_BASE;
++ info.low_limit = mm->mmap_base;
+ info.high_limit = min(task_size, VA_EXCLUDE_START);
+ info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+
+ if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+ VM_BUG_ON(addr != -ENOMEM);
+ info.low_limit = VA_EXCLUDE_END;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ info.low_limit += mm->delta_mmap;
++#endif
++
+ info.high_limit = task_size;
+ addr = vm_unmapped_area(&info);
+ }
+@@ -141,15 +152,16 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ }
+
+ unsigned long
+-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+- const unsigned long len, const unsigned long pgoff,
+- const unsigned long flags)
++arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0,
++ unsigned long len, unsigned long pgoff,
++ unsigned long flags)
+ {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long task_size = STACK_TOP32;
+ unsigned long addr = addr0;
+ int do_color_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ /* This should only ever run for 32-bit processes. */
+@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* requesting a specific address */
+ if (addr) {
+ if (do_color_align)
+@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ info.high_limit = mm->mmap_base;
+ info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+
+ /*
+@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ VM_BUG_ON(addr != -ENOMEM);
+ info.flags = 0;
+ info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ info.low_limit += mm->delta_mmap;
++#endif
++
+ info.high_limit = STACK_TOP32;
+ addr = vm_unmapped_area(&info);
+ }
+@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
+ EXPORT_SYMBOL(get_fb_unmapped_area);
+
+ /* Essentially the same as PowerPC. */
+-static unsigned long mmap_rnd(void)
++static unsigned long mmap_rnd(struct mm_struct *mm)
+ {
+ unsigned long rnd = 0UL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE) {
+ unsigned long val = get_random_long();
+ if (test_thread_flag(TIF_32BIT))
+@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
+
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+- unsigned long random_factor = mmap_rnd();
++ unsigned long random_factor = mmap_rnd(mm);
+ unsigned long gap;
+
+ /*
+@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ gap == RLIM_INFINITY ||
+ sysctl_legacy_va_layout) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ } else {
+ /* We know it's 32-bit */
+@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ gap = (task_size / 6 * 5);
+
+ mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ }
+ }
+diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
+index c4a1b5c..c5e0ef3 100644
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
+ #endif
+ .align 32
+ 1: ldx [%g6 + TI_FLAGS], %l5
+- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
++ andcc %l5, _TIF_WORK_SYSCALL, %g0
+ be,pt %icc, rtrap
+ nop
+ call syscall_trace_leave
+@@ -230,7 +230,7 @@ linux_sparc_syscall32:
+
+ srl %i3, 0, %o3 ! IEU0
+ srl %i2, 0, %o2 ! IEU0 Group
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
++ andcc %l0, _TIF_WORK_SYSCALL, %g0
+ bne,pn %icc, linux_syscall_trace32 ! CTI
+ mov %i0, %l5 ! IEU1
+ 5: call %l7 ! CTI Group brk forced
+@@ -254,7 +254,7 @@ linux_sparc_syscall:
+
+ mov %i3, %o3 ! IEU1
+ mov %i4, %o4 ! IEU0 Group
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
++ andcc %l0, _TIF_WORK_SYSCALL, %g0
+ bne,pn %icc, linux_syscall_trace ! CTI Group
+ mov %i0, %l5 ! IEU0
+ 2: call %l7 ! CTI Group brk forced
+@@ -269,7 +269,7 @@ ret_sys_call:
+
+ cmp %o0, -ERESTART_RESTARTBLOCK
+ bgeu,pn %xcc, 1f
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
++ andcc %l0, _TIF_WORK_SYSCALL, %g0
+ ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
+
+ 2:
+diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
+index 4f21df7..0a374da 100644
+--- a/arch/sparc/kernel/traps_32.c
++++ b/arch/sparc/kernel/traps_32.c
+@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
+ #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
+ #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
+
++extern void gr_handle_kernel_exploit(void);
++
+ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+ {
+ static int die_counter;
+@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+ count++ < 30 &&
+ (((unsigned long) rw) >= PAGE_OFFSET) &&
+ !(((unsigned long) rw) & 0x7)) {
+- printk("Caller[%08lx]: %pS\n", rw->ins[7],
++ printk("Caller[%08lx]: %pA\n", rw->ins[7],
+ (void *) rw->ins[7]);
+ rw = (struct reg_window32 *)rw->ins[6];
+ }
+ }
+ printk("Instruction DUMP:");
+ instruction_dump ((unsigned long *) regs->pc);
+- if(regs->psr & PSR_PS)
++ if(regs->psr & PSR_PS) {
++ gr_handle_kernel_exploit();
+ do_exit(SIGKILL);
++ }
+ do_exit(SIGSEGV);
+ }
+
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index 4094a51..4a360da 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
+ i + 1,
+ p->trapstack[i].tstate, p->trapstack[i].tpc,
+ p->trapstack[i].tnpc, p->trapstack[i].tt);
+- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
++ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
+ }
+ }
+
+@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
+
+ lvl -= 0x100;
+ if (regs->tstate & TSTATE_PRIV) {
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_error(regs, NULL);
++#endif
++
+ sprintf(buffer, "Kernel bad sw trap %lx", lvl);
+ die_if_kernel(buffer, regs);
+ }
+@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
+ void bad_trap_tl1(struct pt_regs *regs, long lvl)
+ {
+ char buffer[32];
+-
++
+ if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
+ 0, lvl, SIGTRAP) == NOTIFY_STOP)
+ return;
+
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_error(regs, NULL);
++#endif
++
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+ sprintf (buffer, "Bad trap %lx at tl>0", lvl);
+@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
+ regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
+ printk("%s" "ERROR(%d): ",
+ (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
+- printk("TPC<%pS>\n", (void *) regs->tpc);
++ printk("TPC<%pA>\n", (void *) regs->tpc);
+ printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
+ (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+ (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
+@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
+ smp_processor_id(),
+ (type & 0x1) ? 'I' : 'D',
+ regs->tpc);
+- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
+ panic("Irrecoverable Cheetah+ parity error.");
+ }
+
+@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
+ smp_processor_id(),
+ (type & 0x1) ? 'I' : 'D',
+ regs->tpc);
+- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
+ }
+
+ struct sun4v_error_entry {
+@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
+ /*0x38*/u64 reserved_5;
+ };
+
+-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
+-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
++static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
++static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
+
+ static const char *sun4v_err_type_to_str(u8 type)
+ {
+@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
+ }
+
+ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
+- int cpu, const char *pfx, atomic_t *ocnt)
++ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
+ {
+ u64 *raw_ptr = (u64 *) ent;
+ u32 attrs;
+@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
+
+ show_regs(regs);
+
+- if ((cnt = atomic_read(ocnt)) != 0) {
+- atomic_set(ocnt, 0);
++ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
++ atomic_set_unchecked(ocnt, 0);
+ wmb();
+ printk("%s: Queue overflowed %d times.\n",
+ pfx, cnt);
+@@ -2048,7 +2059,7 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
+ */
+ void sun4v_resum_overflow(struct pt_regs *regs)
+ {
+- atomic_inc(&sun4v_resum_oflow_cnt);
++ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
+ }
+
+ /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
+@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
+ /* XXX Actually even this can make not that much sense. Perhaps
+ * XXX we should just pull the plug and panic directly from here?
+ */
+- atomic_inc(&sun4v_nonresum_oflow_cnt);
++ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
+ }
+
+ static void sun4v_tlb_error(struct pt_regs *regs)
+@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
+
+ printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
+ printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
++ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
+ (void *) regs->u_regs[UREG_I7]);
+ printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
+ "pte[%lx] error[%lx]\n",
+@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
+
+ printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
+ printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
++ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
+ (void *) regs->u_regs[UREG_I7]);
+ printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
+ "pte[%lx] error[%lx]\n",
+@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+ fp = (unsigned long)sf->fp + STACK_BIAS;
+ }
+
+- printk(" [%016lx] %pS\n", pc, (void *) pc);
++ printk(" [%016lx] %pA\n", pc, (void *) pc);
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+ int index = tsk->curr_ret_stack;
+ if (tsk->ret_stack && index >= graph) {
+ pc = tsk->ret_stack[index - graph].ret;
+- printk(" [%016lx] %pS\n", pc, (void *) pc);
++ printk(" [%016lx] %pA\n", pc, (void *) pc);
+ graph++;
+ }
+ }
+@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
+ return (struct reg_window *) (fp + STACK_BIAS);
+ }
+
++extern void gr_handle_kernel_exploit(void);
++
+ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+ {
+ static int die_counter;
+@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+ while (rw &&
+ count++ < 30 &&
+ kstack_valid(tp, (unsigned long) rw)) {
+- printk("Caller[%016lx]: %pS\n", rw->ins[7],
++ printk("Caller[%016lx]: %pA\n", rw->ins[7],
+ (void *) rw->ins[7]);
+
+ rw = kernel_stack_up(rw);
+@@ -2429,8 +2442,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+ }
+ if (panic_on_oops)
+ panic("Fatal exception");
+- if (regs->tstate & TSTATE_PRIV)
++ if (regs->tstate & TSTATE_PRIV) {
++ gr_handle_kernel_exploit();
+ do_exit(SIGKILL);
++ }
+ do_exit(SIGSEGV);
+ }
+ EXPORT_SYMBOL(die_if_kernel);
+diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
+index 52c00d9..6f8aa4e 100644
+--- a/arch/sparc/kernel/unaligned_64.c
++++ b/arch/sparc/kernel/unaligned_64.c
+@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
+ static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
+
+ if (__ratelimit(&ratelimit)) {
+- printk("Kernel unaligned access at TPC[%lx] %pS\n",
++ printk("Kernel unaligned access at TPC[%lx] %pA\n",
+ regs->tpc, (void *) regs->tpc);
+ }
+ }
+diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
+index 69912d2..6c0c227 100644
+--- a/arch/sparc/lib/Makefile
++++ b/arch/sparc/lib/Makefile
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi -DST_DIV0=0x02
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ lib-$(CONFIG_SPARC32) += ashrdi3.o
+ lib-$(CONFIG_SPARC32) += memcpy.o memset.o
+diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
+index 1c6a1bd..93e9698 100644
+--- a/arch/sparc/lib/atomic_64.S
++++ b/arch/sparc/lib/atomic_64.S
+@@ -17,11 +17,22 @@
+ * barriers.
+ */
+
+-#define ATOMIC_OP(op) \
+-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
++#ifdef CONFIG_PAX_REFCOUNT
++#define __REFCOUNT_OP(op) op##cc
++#define __OVERFLOW_IOP tvs %icc, 6;
++#define __OVERFLOW_XOP tvs %xcc, 6;
++#else
++#define __REFCOUNT_OP(op) op
++#define __OVERFLOW_IOP
++#define __OVERFLOW_XOP
++#endif
++
++#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
++ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
+ BACKOFF_SETUP(%o2); \
+ 1: lduw [%o1], %g1; \
+- op %g1, %o0, %g7; \
++ asm_op %g1, %o0, %g7; \
++ post_op \
+ cas [%o1], %g1, %g7; \
+ cmp %g1, %g7; \
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
+@@ -29,14 +40,18 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ retl; \
+ nop; \
+ 2: BACKOFF_SPIN(%o2, %o3, 1b); \
+-ENDPROC(atomic_##op); \
+-EXPORT_SYMBOL(atomic_##op);
++ENDPROC(atomic_##op##suffix); \
++EXPORT_SYMBOL(atomic_##op##suffix);
+
+-#define ATOMIC_OP_RETURN(op) \
+-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
++#define ATOMIC_OP(op) __ATOMIC_OP(op, , op, ) \
++ __ATOMIC_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
++
++#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
++ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
+ BACKOFF_SETUP(%o2); \
+ 1: lduw [%o1], %g1; \
+- op %g1, %o0, %g7; \
++ asm_op %g1, %o0, %g7; \
++ post_op \
+ cas [%o1], %g1, %g7; \
+ cmp %g1, %g7; \
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
+@@ -44,8 +59,11 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+ retl; \
+ sra %g1, 0, %o0; \
+ 2: BACKOFF_SPIN(%o2, %o3, 1b); \
+-ENDPROC(atomic_##op##_return); \
+-EXPORT_SYMBOL(atomic_##op##_return);
++ENDPROC(atomic_##op##_return##suffix); \
++EXPORT_SYMBOL(atomic_##op##_return##suffix)
++
++#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \
++ __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
+
+ #define ATOMIC_FETCH_OP(op) \
+ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+@@ -77,13 +95,16 @@ ATOMIC_OPS(xor)
+ #undef ATOMIC_OPS
+ #undef ATOMIC_FETCH_OP
+ #undef ATOMIC_OP_RETURN
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+
+-#define ATOMIC64_OP(op) \
+-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
++#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
++ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
+ BACKOFF_SETUP(%o2); \
+ 1: ldx [%o1], %g1; \
+- op %g1, %o0, %g7; \
++ asm_op %g1, %o0, %g7; \
++ post_op \
+ casx [%o1], %g1, %g7; \
+ cmp %g1, %g7; \
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
+@@ -91,14 +112,18 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ retl; \
+ nop; \
+ 2: BACKOFF_SPIN(%o2, %o3, 1b); \
+-ENDPROC(atomic64_##op); \
+-EXPORT_SYMBOL(atomic64_##op);
++ENDPROC(atomic64_##op##suffix); \
++EXPORT_SYMBOL(atomic64_##op##suffix);
+
+-#define ATOMIC64_OP_RETURN(op) \
+-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
++#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , op, ) \
++ __ATOMIC64_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
++
++#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
++ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
+ BACKOFF_SETUP(%o2); \
+ 1: ldx [%o1], %g1; \
+- op %g1, %o0, %g7; \
++ asm_op %g1, %o0, %g7; \
++ post_op \
+ casx [%o1], %g1, %g7; \
+ cmp %g1, %g7; \
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
+@@ -106,8 +131,11 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+ retl; \
+ op %g1, %o0, %o0; \
+ 2: BACKOFF_SPIN(%o2, %o3, 1b); \
+-ENDPROC(atomic64_##op##_return); \
+-EXPORT_SYMBOL(atomic64_##op##_return);
++ENDPROC(atomic64_##op##_return##suffix); \
++EXPORT_SYMBOL(atomic64_##op##_return##suffix);
++
++#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \
++ __ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
+
+ #define ATOMIC64_FETCH_OP(op) \
+ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+@@ -139,7 +167,12 @@ ATOMIC64_OPS(xor)
+ #undef ATOMIC64_OPS
+ #undef ATOMIC64_FETCH_OP
+ #undef ATOMIC64_OP_RETURN
++#undef __ATOMIC64_OP_RETURN
+ #undef ATOMIC64_OP
++#undef __ATOMIC64_OP
++#undef __OVERFLOW_XOP
++#undef __OVERFLOW_IOP
++#undef __REFCOUNT_OP
+
+ ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
+index 30c3ecc..736f015 100644
+--- a/arch/sparc/mm/Makefile
++++ b/arch/sparc/mm/Makefile
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
+ obj-y += fault_$(BITS).o
+diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
+index 4714061..bad7f9a 100644
+--- a/arch/sparc/mm/fault_32.c
++++ b/arch/sparc/mm/fault_32.c
+@@ -22,6 +22,9 @@
+ #include <linux/interrupt.h>
+ #include <linux/kdebug.h>
+ #include <linux/uaccess.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
+ return safe_compute_effective_address(regs, insn);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ INIT_LIST_HEAD(&vma->anon_vma_chain);
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->pc);
++ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned int addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->pc);
++
++ if (err)
++ break;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
++ unsigned int addr;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, bajmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ else
++ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(ba, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr, save, call;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->pc = call_dl_resolve;
++ regs->npc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->pc-4));
++ err |= get_user(call, (unsigned int *)regs->pc);
++ err |= get_user(nop, (unsigned int *)(regs->pc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
++
++ regs->u_regs[UREG_RETPC] = regs->pc;
++ regs->pc = dl_resolve;
++ regs->npc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
+ int text_fault)
+ {
+@@ -226,6 +500,24 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
+index 643c149..845c113 100644
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -23,6 +23,9 @@
+ #include <linux/percpu.h>
+ #include <linux/context_tracking.h>
+ #include <linux/uaccess.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
+ printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
+ regs->tpc);
+ printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
+- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
++ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
+ printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
+ dump_stack();
+ unhandled_fault(regs->tpc, current, regs);
+@@ -276,6 +279,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
+ show_regs(regs);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ INIT_LIST_HEAD(&vma->anon_vma_chain);
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->tpc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->tpc);
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->tpc);
++
++ if (err)
++ break;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
++ unsigned long addr;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, bajmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++ else
++ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #4 */
++ unsigned int sethi, mov1, call, mov2;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(call, (unsigned int *)(regs->tpc+8));
++ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ mov1 == 0x8210000FU &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ mov2 == 0x9E100001U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
++ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #5 */
++ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
++ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x82106000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x83287020U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #6 */
++ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
++ err |= get_user(or, (unsigned int *)(regs->tpc+16));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ sllx == 0x83287020U &&
++ (or & 0xFFFFE000U) == 0x8A116000U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++ unsigned int save, call;
++ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->tpc = call_dl_resolve;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++
++ /* PaX: 64-bit PLT stub */
++ err = get_user(sethi1, (unsigned int *)addr);
++ err |= get_user(sethi2, (unsigned int *)(addr+4));
++ err |= get_user(or1, (unsigned int *)(addr+8));
++ err |= get_user(or2, (unsigned int *)(addr+12));
++ err |= get_user(sllx, (unsigned int *)(addr+16));
++ err |= get_user(add, (unsigned int *)(addr+20));
++ err |= get_user(jmpl, (unsigned int *)(addr+24));
++ err |= get_user(nop, (unsigned int *)(addr+28));
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x88112000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x89293020U &&
++ add == 0x8A010005U &&
++ jmpl == 0x89C14000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G4] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
++ regs->u_regs[UREG_G4] = addr + 24;
++ addr = regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->tpc-4));
++ err |= get_user(call, (unsigned int *)regs->tpc);
++ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ dl_resolve &= 0xFFFFFFFFUL;
++
++ regs->u_regs[UREG_RETPC] = regs->tpc;
++ regs->tpc = dl_resolve;
++ regs->tnpc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (ba & 0xFFF00000U) == 0x30600000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ {
+ enum ctx_state prev_state = exception_enter();
+@@ -350,6 +813,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ if (!vma)
+ goto bad_area;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ /* PaX: detect ITLB misses on non-exec pages */
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
++ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
++ {
++ if (address != regs->tpc)
++ goto good_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Pure DTLB misses do not tell us whether the fault causing
+ * load/store/atomic was a write or not, it only says that there
+ * was no match. So in such a case we (carefully) read the
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index 988acc8b..f26345c 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -26,8 +26,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+- unsigned long flags)
++ unsigned long flags,
++ unsigned long offset)
+ {
++ struct mm_struct *mm = current->mm;
+ unsigned long task_size = TASK_SIZE;
+ struct vm_unmapped_area_info info;
+
+@@ -36,15 +38,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+
+ info.flags = 0;
+ info.length = len;
+- info.low_limit = TASK_UNMAPPED_BASE;
++ info.low_limit = mm->mmap_base;
+ info.high_limit = min(task_size, VA_EXCLUDE_START);
+ info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+ info.align_offset = 0;
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+
+ if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+ VM_BUG_ON(addr != -ENOMEM);
+ info.low_limit = VA_EXCLUDE_END;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ info.low_limit += mm->delta_mmap;
++#endif
++
+ info.high_limit = task_size;
+ addr = vm_unmapped_area(&info);
+ }
+@@ -53,10 +62,11 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+ }
+
+ static unsigned long
+-hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+- const unsigned long len,
+- const unsigned long pgoff,
+- const unsigned long flags)
++hugetlb_get_unmapped_area_topdown(struct file *filp, unsigned long addr0,
++ unsigned long len,
++ unsigned long pgoff,
++ unsigned long flags,
++ unsigned long offset)
+ {
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
+@@ -71,6 +81,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ info.high_limit = mm->mmap_base;
+ info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+ info.align_offset = 0;
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+
+ /*
+@@ -83,6 +94,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ VM_BUG_ON(addr != -ENOMEM);
+ info.flags = 0;
+ info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ info.low_limit += mm->delta_mmap;
++#endif
++
+ info.high_limit = STACK_TOP32;
+ addr = vm_unmapped_area(&info);
+ }
+@@ -97,6 +114,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long task_size = TASK_SIZE;
++ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
+
+ if (test_thread_flag(TIF_32BIT))
+ task_size = STACK_TOP32;
+@@ -112,19 +130,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ return addr;
+ }
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = ALIGN(addr, HPAGE_SIZE);
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
+ return hugetlb_get_unmapped_area_bottomup(file, addr, len,
+- pgoff, flags);
++ pgoff, flags, offset);
+ else
+ return hugetlb_get_unmapped_area_topdown(file, addr, len,
+- pgoff, flags);
++ pgoff, flags, offset);
+ }
+
+ pte_t *huge_pte_alloc(struct mm_struct *mm,
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 37aa537..06b756c 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -189,9 +189,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
+ int num_kernel_image_mappings;
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+-atomic_t dcpage_flushes = ATOMIC_INIT(0);
++atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
+ #ifdef CONFIG_SMP
+-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
++atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
+ #endif
+ #endif
+
+@@ -199,7 +199,7 @@ inline void flush_dcache_page_impl(struct page *page)
+ {
+ BUG_ON(tlb_type == hypervisor);
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes);
++ atomic_inc_unchecked(&dcpage_flushes);
+ #endif
+
+ #ifdef DCACHE_ALIASING_POSSIBLE
+@@ -462,10 +462,10 @@ void mmu_info(struct seq_file *m)
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+ seq_printf(m, "DCPageFlushes\t: %d\n",
+- atomic_read(&dcpage_flushes));
++ atomic_read_unchecked(&dcpage_flushes));
+ #ifdef CONFIG_SMP
+ seq_printf(m, "DCPageFlushesXC\t: %d\n",
+- atomic_read(&dcpage_flushes_xcall));
++ atomic_read_unchecked(&dcpage_flushes_xcall));
+ #endif /* CONFIG_SMP */
+ #endif /* CONFIG_DEBUG_DCFLUSH */
+ }
+diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
+index 4583c03..5e074bb 100644
+--- a/arch/tile/Kconfig
++++ b/arch/tile/Kconfig
+@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
+ config KEXEC
+ bool "kexec system call"
+ select KEXEC_CORE
++ depends on !GRKERNSEC_KMEM
+ ---help---
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
+index 4cefa0c..98d8b83 100644
+--- a/arch/tile/include/asm/atomic_64.h
++++ b/arch/tile/include/asm/atomic_64.h
+@@ -195,6 +195,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif /* _ASM_TILE_ATOMIC_64_H */
+diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
+index 4810e48..08b733b 100644
+--- a/arch/tile/include/asm/cache.h
++++ b/arch/tile/include/asm/cache.h
+@@ -15,11 +15,12 @@
+ #ifndef _ASM_TILE_CACHE_H
+ #define _ASM_TILE_CACHE_H
+
++#include <linux/const.h>
+ #include <arch/chip.h>
+
+ /* bytes per L1 data cache line */
+ #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /* bytes per L2 cache line */
+ #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
+diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
+index a77369e..7ba6ecd 100644
+--- a/arch/tile/include/asm/uaccess.h
++++ b/arch/tile/include/asm/uaccess.h
+@@ -428,9 +428,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
+ const void __user *from,
+ unsigned long n)
+ {
+- int sz = __compiletime_object_size(to);
++ size_t sz = __compiletime_object_size(to);
+
+- if (likely(sz == -1 || sz >= n))
++ if (likely(sz == (size_t)-1 || sz >= n))
+ n = _copy_from_user(to, from, n);
+ else if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
+diff --git a/arch/tile/kernel/kprobes.c b/arch/tile/kernel/kprobes.c
+index c68694b..12bf0cb 100644
+--- a/arch/tile/kernel/kprobes.c
++++ b/arch/tile/kernel/kprobes.c
+@@ -430,6 +430,7 @@ static void __used kretprobe_trampoline_holder(void)
+
+ void kretprobe_trampoline(void);
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -438,6 +439,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr */
+ regs->lr = (unsigned long)kretprobe_trampoline;
+ }
++#endif
+
+ /*
+ * Called when the probe at kretprobe trampoline is hit.
+@@ -507,6 +509,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ return 1;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
+@@ -514,6 +517,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+
+ return 0;
+ }
++#endif
+
+ static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *)kretprobe_trampoline,
+diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
+index 77ceaa3..3630dea 100644
+--- a/arch/tile/mm/hugetlbpage.c
++++ b/arch/tile/mm/hugetlbpage.c
+@@ -174,6 +174,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+ info.high_limit = TASK_SIZE;
+ info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+ info.align_offset = 0;
++ info.threadstack_offset = 0;
+ return vm_unmapped_area(&info);
+ }
+
+@@ -191,6 +192,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+ info.high_limit = current->mm->mmap_base;
+ info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+ info.align_offset = 0;
++ info.threadstack_offset = 0;
+ addr = vm_unmapped_area(&info);
+
+ /*
+diff --git a/arch/um/Makefile b/arch/um/Makefile
+index 0ca46ede..8d7fd38 100644
+--- a/arch/um/Makefile
++++ b/arch/um/Makefile
+@@ -73,6 +73,8 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \
+ -D_FILE_OFFSET_BITS=64 -idirafter $(srctree)/include \
+ -idirafter $(obj)/include -D__KERNEL__ -D__UM_HOST__
+
++USER_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(USER_CFLAGS))
++
+ #This will adjust *FLAGS accordingly to the platform.
+ include $(ARCH_DIR)/Makefile-os-$(OS)
+
+diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
+index 6208702..00292c8 100644
+--- a/arch/um/drivers/line.c
++++ b/arch/um/drivers/line.c
+@@ -377,7 +377,7 @@ int setup_one_line(struct line *lines, int n, char *init,
+ struct tty_driver *driver = line->driver->driver;
+ int err = -EINVAL;
+
+- if (line->port.count) {
++ if (atomic_read(&line->port.count)) {
+ *error_out = "Device is already open";
+ goto out;
+ }
+diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
+index 19e1bdd..3665b77 100644
+--- a/arch/um/include/asm/cache.h
++++ b/arch/um/include/asm/cache.h
+@@ -1,6 +1,7 @@
+ #ifndef __UM_CACHE_H
+ #define __UM_CACHE_H
+
++#include <linux/const.h>
+
+ #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
+ # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
+@@ -12,6 +13,6 @@
+ # define L1_CACHE_SHIFT 5
+ #endif
+
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif
+diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
+index 2e0a6b1..a64d0f5 100644
+--- a/arch/um/include/asm/kmap_types.h
++++ b/arch/um/include/asm/kmap_types.h
+@@ -8,6 +8,6 @@
+
+ /* No more #include "asm/arch/kmap_types.h" ! */
+
+-#define KM_TYPE_NR 14
++#define KM_TYPE_NR 15
+
+ #endif
+diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
+index f878bec..ca09300 100644
+--- a/arch/um/include/asm/page.h
++++ b/arch/um/include/asm/page.h
+@@ -14,6 +14,9 @@
+ #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+ #define PAGE_MASK (~(PAGE_SIZE-1))
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #ifndef __ASSEMBLY__
+
+ struct page;
+diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
+index bae8523..ba9484b 100644
+--- a/arch/um/include/asm/pgtable-3level.h
++++ b/arch/um/include/asm/pgtable-3level.h
+@@ -58,6 +58,7 @@
+ #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
+ #define pud_populate(mm, pud, pmd) \
+ set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
++#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
+
+ #ifdef CONFIG_64BIT
+ #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
+diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
+index 034b42c7..5c186ce 100644
+--- a/arch/um/kernel/process.c
++++ b/arch/um/kernel/process.c
+@@ -343,22 +343,6 @@ int singlestepping(void * t)
+ return 2;
+ }
+
+-/*
+- * Only x86 and x86_64 have an arch_align_stack().
+- * All other arches have "#define arch_align_stack(x) (x)"
+- * in their asm/exec.h
+- * As this is included in UML from asm-um/system-generic.h,
+- * we can use it to behave as the subarch does.
+- */
+-#ifndef arch_align_stack
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() % 8192;
+- return sp & ~0xf;
+-}
+-#endif
+-
+ unsigned long get_wchan(struct task_struct *p)
+ {
+ unsigned long stack_page, sp, ip;
+diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
+index ad8f795..2c7eec6 100644
+--- a/arch/unicore32/include/asm/cache.h
++++ b/arch/unicore32/include/asm/cache.h
+@@ -12,8 +12,10 @@
+ #ifndef __UNICORE_CACHE_H__
+ #define __UNICORE_CACHE_H__
+
+-#define L1_CACHE_SHIFT (5)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#include <linux/const.h>
++
++#define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index bada636..1775eac 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -39,14 +39,13 @@ config X86
+ select ARCH_MIGHT_HAVE_PC_SERIO
+ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
+- select ARCH_SUPPORTS_INT128 if X86_64
++ select ARCH_SUPPORTS_INT128 if X86_64 && !PAX_SIZE_OVERFLOW_EXTRA && !PAX_SIZE_OVERFLOW
+ select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
+ select ARCH_USE_BUILTIN_BSWAP
+ select ARCH_USE_CMPXCHG_LOCKREF if X86_64
+ select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USE_QUEUED_SPINLOCKS
+ select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP
+- select ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ select ARCH_WANT_FRAME_POINTERS
+ select ARCH_WANT_IPC_PARSE_VERSION if X86_32
+ select BUILDTIME_EXTABLE_SORT
+@@ -94,7 +93,7 @@ config X86
+ select HAVE_ARCH_WITHIN_STACK_FRAMES
+ select HAVE_EBPF_JIT if X86_64
+ select HAVE_ARCH_VMAP_STACK if X86_64
+- select HAVE_CC_STACKPROTECTOR
++ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
+ select HAVE_CMPXCHG_DOUBLE
+ select HAVE_CMPXCHG_LOCAL
+ select HAVE_CONTEXT_TRACKING if X86_64
+@@ -136,6 +135,7 @@ config X86
+ select HAVE_NMI
+ select HAVE_OPROFILE
+ select HAVE_OPTPROBES
++ select HAVE_PAX_INITIFY_INIT_EXIT if GCC_PLUGINS
+ select HAVE_PCSPKR_PLATFORM
+ select HAVE_PERF_EVENTS
+ select HAVE_PERF_EVENTS_NMI
+@@ -190,11 +190,13 @@ config MMU
+ def_bool y
+
+ config ARCH_MMAP_RND_BITS_MIN
+- default 28 if 64BIT
++ default 28 if 64BIT && !PAX_PER_CPU_PGD
++ default 27 if 64BIT && PAX_PER_CPU_PGD
+ default 8
+
+ config ARCH_MMAP_RND_BITS_MAX
+- default 32 if 64BIT
++ default 32 if 64BIT && !PAX_PER_CPU_PGD
++ default 27 if 64BIT && PAX_PER_CPU_PGD
+ default 16
+
+ config ARCH_MMAP_RND_COMPAT_BITS_MIN
+@@ -296,7 +298,7 @@ config X86_64_SMP
+
+ config X86_32_LAZY_GS
+ def_bool y
+- depends on X86_32 && !CC_STACKPROTECTOR
++ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
+
+ config ARCH_SUPPORTS_UPROBES
+ def_bool y
+@@ -690,6 +692,7 @@ config SCHED_OMIT_FRAME_POINTER
+
+ menuconfig HYPERVISOR_GUEST
+ bool "Linux guest support"
++ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
+ ---help---
+ Say Y here to enable options for running Linux under various hyper-
+ visors. This option enables basic hypervisor detection and platform
+@@ -1090,6 +1093,7 @@ config VM86
+
+ config X86_16BIT
+ bool "Enable support for 16-bit segments" if EXPERT
++ depends on !GRKERNSEC
+ default y
+ depends on MODIFY_LDT_SYSCALL
+ ---help---
+@@ -1244,6 +1248,7 @@ choice
+
+ config NOHIGHMEM
+ bool "off"
++ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+ ---help---
+ Linux can use up to 64 Gigabytes of physical memory on x86 systems.
+ However, the address space of 32-bit x86 processors is only 4
+@@ -1280,6 +1285,7 @@ config NOHIGHMEM
+
+ config HIGHMEM4G
+ bool "4GB"
++ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+ ---help---
+ Select this if you have a 32-bit processor and between 1 and 4
+ gigabytes of physical RAM.
+@@ -1332,7 +1338,7 @@ config PAGE_OFFSET
+ hex
+ default 0xB0000000 if VMSPLIT_3G_OPT
+ default 0x80000000 if VMSPLIT_2G
+- default 0x78000000 if VMSPLIT_2G_OPT
++ default 0x70000000 if VMSPLIT_2G_OPT
+ default 0x40000000 if VMSPLIT_1G
+ default 0xC0000000
+ depends on X86_32
+@@ -1353,7 +1359,6 @@ config X86_PAE
+
+ config ARCH_PHYS_ADDR_T_64BIT
+ def_bool y
+- depends on X86_64 || X86_PAE
+
+ config ARCH_DMA_ADDR_T_64BIT
+ def_bool y
+@@ -1484,7 +1489,7 @@ config ARCH_PROC_KCORE_TEXT
+
+ config ILLEGAL_POINTER_VALUE
+ hex
+- default 0 if X86_32
++ default 0xfffff000 if X86_32
+ default 0xdead000000000000 if X86_64
+
+ source "mm/Kconfig"
+@@ -1807,6 +1812,7 @@ source kernel/Kconfig.hz
+ config KEXEC
+ bool "kexec system call"
+ select KEXEC_CORE
++ depends on !GRKERNSEC_KMEM
+ ---help---
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+@@ -1934,7 +1940,7 @@ config RELOCATABLE
+
+ config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel image (KASLR)"
+- depends on RELOCATABLE
++ depends on RELOCATABLE && BROKEN_SECURITY
+ default n
+ ---help---
+ In support of Kernel Address Space Layout Randomization (KASLR),
+@@ -1978,7 +1984,9 @@ config X86_NEED_RELOCS
+
+ config PHYSICAL_ALIGN
+ hex "Alignment value to which kernel should be aligned"
+- default "0x200000"
++ default "0x1000000"
++ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
++ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
+ range 0x2000 0x1000000 if X86_32
+ range 0x200000 0x1000000 if X86_64
+ ---help---
+@@ -2093,6 +2101,7 @@ config COMPAT_VDSO
+ def_bool n
+ prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
+ depends on X86_32 || IA32_EMULATION
++ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+ ---help---
+ Certain buggy versions of glibc will crash if they are
+ presented with a 32-bit vDSO that is not mapped at the address
+@@ -2133,15 +2142,6 @@ choice
+
+ If unsure, select "Emulate".
+
+- config LEGACY_VSYSCALL_NATIVE
+- bool "Native"
+- help
+- Actual executable code is located in the fixed vsyscall
+- address mapping, implementing time() efficiently. Since
+- this makes the mapping executable, it can be used during
+- security vulnerability exploitation (traditionally as
+- ROP gadgets). This configuration is not recommended.
+-
+ config LEGACY_VSYSCALL_EMULATE
+ bool "Emulate"
+ help
+@@ -2222,6 +2222,22 @@ config MODIFY_LDT_SYSCALL
+
+ Saying 'N' here may make sense for embedded or server kernels.
+
++config DEFAULT_MODIFY_LDT_SYSCALL
++ bool "Allow userspace to modify the LDT by default"
++ default y
++
++ ---help---
++ Modifying the LDT (Local Descriptor Table) may be needed to run a
++ 16-bit or segmented code such as Dosemu or Wine. This is done via
++ a system call which is not needed to run portable applications,
++ and which can sometimes be abused to exploit some weaknesses of
++ the architecture, opening new vulnerabilities.
++
++ For this reason this option allows one to enable or disable the
++ feature at runtime. It is recommended to say 'N' here to leave
++ the system protected, and to enable it at runtime only if needed
++ by setting the sys.kernel.modify_ldt sysctl.
++
+ source "kernel/livepatch/Kconfig"
+
+ endmenu
+diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+index 3ba5ff2..44bdacc 100644
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
+@@ -329,7 +329,7 @@ config X86_PPRO_FENCE
+
+ config X86_F00F_BUG
+ def_bool y
+- depends on M586MMX || M586TSC || M586 || M486
++ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
+
+ config X86_INVD_BUG
+ def_bool y
+@@ -337,7 +337,7 @@ config X86_INVD_BUG
+
+ config X86_ALIGNMENT_16
+ def_bool y
+- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+
+ config X86_INTEL_USERCOPY
+ def_bool y
+@@ -379,7 +379,7 @@ config X86_CMPXCHG64
+ # generates cmov.
+ config X86_CMOV
+ def_bool y
+- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+
+ config X86_MINIMUM_CPU_FAMILY
+ int
+diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
+index 67eec55..1a5c1ab 100644
+--- a/arch/x86/Kconfig.debug
++++ b/arch/x86/Kconfig.debug
+@@ -55,6 +55,7 @@ config X86_PTDUMP
+ tristate "Export kernel pagetable layout to userspace via debugfs"
+ depends on DEBUG_KERNEL
+ select DEBUG_FS
++ depends on !GRKERNSEC_KMEM
+ select X86_PTDUMP_CORE
+ ---help---
+ Say Y here if you want to show the kernel pagetable layout in a
+@@ -84,6 +85,7 @@ config DEBUG_RODATA_TEST
+
+ config DEBUG_WX
+ bool "Warn on W+X mappings at boot"
++ depends on BROKEN
+ select X86_PTDUMP_CORE
+ ---help---
+ Generate a warning if any W+X mappings are found at boot.
+@@ -111,7 +113,7 @@ config DEBUG_WX
+
+ config DEBUG_SET_MODULE_RONX
+ bool "Set loadable kernel module data as NX and text as RO"
+- depends on MODULES
++ depends on MODULES && BROKEN
+ ---help---
+ This option helps catch unintended modifications to loadable
+ kernel module's text and read-only data. It also prevents execution
+@@ -353,6 +355,7 @@ config X86_DEBUG_FPU
+ config PUNIT_ATOM_DEBUG
+ tristate "ATOM Punit debug driver"
+ select DEBUG_FS
++ depends on !GRKERNSEC_KMEM
+ select IOSF_MBI
+ ---help---
+ This is a debug driver, which gets the power states
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 2d44933..86ecceb 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -75,9 +75,6 @@ ifeq ($(CONFIG_X86_32),y)
+ # CPU-specific tuning. Anything which can be shared with UML should go here.
+ include arch/x86/Makefile_32.cpu
+ KBUILD_CFLAGS += $(cflags-y)
+-
+- # temporary until string.h is fixed
+- KBUILD_CFLAGS += -ffreestanding
+ else
+ BITS := 64
+ UTS_MACHINE := x86_64
+@@ -126,6 +123,9 @@ else
+ KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
+ endif
+
++# temporary until string.h is fixed
++KBUILD_CFLAGS += -ffreestanding
++
+ ifdef CONFIG_X86_X32
+ x32_ld_ok := $(call try-run,\
+ /bin/echo -e '1: .quad 1b' | \
+@@ -192,6 +192,7 @@ archheaders:
+ $(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
+
+ archprepare:
++ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
+ ifeq ($(CONFIG_KEXEC_FILE),y)
+ $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
+ endif
+@@ -278,3 +279,9 @@ define archhelp
+ echo ' FDARGS="..." arguments for the booted kernel'
+ echo ' FDINITRD=file initrd for the booted kernel'
+ endef
++
++define OLD_LD
++
++*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
++*** Please upgrade your binutils to 2.18 or newer
++endef
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index 12ea8f8..46969be 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -11,6 +11,7 @@
+
+ KASAN_SANITIZE := n
+ OBJECT_FILES_NON_STANDARD := y
++GCC_PLUGINS := n
+
+ # Kernel does not boot with kcov instrumentation here.
+ # One of the problems observed was insertion of __sanitizer_cov_trace_pc()
+diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
+index 0d41d68..2d6120c 100644
+--- a/arch/x86/boot/bitops.h
++++ b/arch/x86/boot/bitops.h
+@@ -28,7 +28,7 @@ static inline bool variable_test_bit(int nr, const void *addr)
+ bool v;
+ const u32 *p = (const u32 *)addr;
+
+- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
++ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+ return v;
+ }
+
+@@ -39,7 +39,7 @@ static inline bool variable_test_bit(int nr, const void *addr)
+
+ static inline void set_bit(int nr, void *addr)
+ {
+- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
++ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
+ }
+
+ #endif /* BOOT_BITOPS_H */
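Editor's note: the bitops.h hunk adds volatile to both asm statements. Under the GCC extended-asm rules, an asm that has output operands is assumed to have no other side effects, so the compiler is free to merge identical instances, hoist them out of loops, or drop one whose result is unused; volatile keeps each instance where it was written. A small userspace-flavoured illustration of the difference (rdtsc is used only because the effect is easy to observe; it is not what the boot code does):

    /* Two back-to-back calls of the plain version may be folded into one
     * rdtsc, making their delta 0; the volatile version is emitted twice. */
    static inline unsigned long long rdtsc_plain(void)
    {
            unsigned int lo, hi;
            asm("rdtsc" : "=a" (lo), "=d" (hi));
            return ((unsigned long long)hi << 32) | lo;
    }

    static inline unsigned long long rdtsc_volatile(void)
    {
            unsigned int lo, hi;
            asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
            return ((unsigned long long)hi << 32) | lo;
    }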
+diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
+index e5612f3..e755d05 100644
+--- a/arch/x86/boot/boot.h
++++ b/arch/x86/boot/boot.h
+@@ -84,7 +84,7 @@ static inline void io_delay(void)
+ static inline u16 ds(void)
+ {
+ u16 seg;
+- asm("movw %%ds,%0" : "=rm" (seg));
++ asm volatile("movw %%ds,%0" : "=rm" (seg));
+ return seg;
+ }
+
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 34d9e15..c26e047 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -18,6 +18,7 @@
+
+ KASAN_SANITIZE := n
+ OBJECT_FILES_NON_STANDARD := y
++GCC_PLUGINS := n
+
+ # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+ KCOV_INSTRUMENT := n
+@@ -35,6 +36,23 @@ KBUILD_CFLAGS += -mno-mmx -mno-sse
+ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
+ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
+
++ifdef CONFIG_DEBUG_INFO
++ifdef CONFIG_DEBUG_INFO_SPLIT
++KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
++else
++KBUILD_CFLAGS += -g
++endif
++KBUILD_AFLAGS += -Wa,--gdwarf-2
++endif
++ifdef CONFIG_DEBUG_INFO_DWARF4
++KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
++endif
++
++ifdef CONFIG_DEBUG_INFO_REDUCED
++KBUILD_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly) \
++ $(call cc-option,-fno-var-tracking)
++endif
++
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+ UBSAN_SANITIZE :=n
+diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
+index a53440e..c3dbf1e 100644
+--- a/arch/x86/boot/compressed/efi_stub_32.S
++++ b/arch/x86/boot/compressed/efi_stub_32.S
+@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
+ * parameter 2, ..., param n. To make things easy, we save the return
+ * address of efi_call_phys in a global variable.
+ */
+- popl %ecx
+- movl %ecx, saved_return_addr(%edx)
+- /* get the function pointer into ECX*/
+- popl %ecx
+- movl %ecx, efi_rt_function_ptr(%edx)
++ popl saved_return_addr(%edx)
++ popl efi_rt_function_ptr(%edx)
+
+ /*
+ * 3. Call the physical function.
+ */
+- call *%ecx
++ call *efi_rt_function_ptr(%edx)
+
+ /*
+ * 4. Balance the stack. And because EAX contain the return value,
+@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
+ 1: popl %edx
+ subl $1b, %edx
+
+- movl efi_rt_function_ptr(%edx), %ecx
+- pushl %ecx
++ pushl efi_rt_function_ptr(%edx)
+
+ /*
+ * 10. Push the saved return address onto the stack and return.
+ */
+- movl saved_return_addr(%edx), %ecx
+- pushl %ecx
+- ret
++ jmpl *saved_return_addr(%edx)
+ ENDPROC(efi_call_phys)
+ .previous
+
+diff --git a/arch/x86/boot/compressed/efi_stub_64.S b/arch/x86/boot/compressed/efi_stub_64.S
+index 99494dff..7fa59bf 100644
+--- a/arch/x86/boot/compressed/efi_stub_64.S
++++ b/arch/x86/boot/compressed/efi_stub_64.S
+@@ -2,4 +2,5 @@
+ #include <asm/msr.h>
+ #include <asm/processor-flags.h>
+
++#define efi_call efi_call_early
+ #include "../../platform/efi/efi_stub_64.S"
+diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
+index 630384a..278e788 100644
+--- a/arch/x86/boot/compressed/efi_thunk_64.S
++++ b/arch/x86/boot/compressed/efi_thunk_64.S
+@@ -189,8 +189,8 @@ efi_gdt64:
+ .long 0 /* Filled out by user */
+ .word 0
+ .quad 0x0000000000000000 /* NULL descriptor */
+- .quad 0x00af9a000000ffff /* __KERNEL_CS */
+- .quad 0x00cf92000000ffff /* __KERNEL_DS */
++ .quad 0x00af9b000000ffff /* __KERNEL_CS */
++ .quad 0x00cf93000000ffff /* __KERNEL_DS */
+ .quad 0x0080890000000000 /* TS descriptor */
+ .quad 0x0000000000000000 /* TS continued */
+ efi_gdt64_end:
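Editor's note: this efi_thunk_64.S hunk (and the matching head_64.S hunk further down) flips the low bit of the access byte in the __KERNEL_CS and __KERNEL_DS descriptors (0x9a -> 0x9b, 0x92 -> 0x93). That bit is the architectural "accessed" flag; when it is clear, the CPU sets it itself by writing into the descriptor table the first time the segment is loaded, and pre-setting it avoids that hardware write, presumably so the table can stay read-only under KERNEXEC. A small decode of the four access bytes following the Intel SDM segment-descriptor layout, included only as a reading aid:

    #include <stdio.h>

    int main(void)
    {
            unsigned char access[] = { 0x9a, 0x9b, 0x92, 0x93 };

            for (int i = 0; i < 4; i++) {
                    unsigned char a = access[i];
                    printf("0x%02x: present=%d dpl=%d code=%d accessed=%d\n",
                           a, a >> 7, (a >> 5) & 3, (a >> 3) & 1, a & 1);
            }
            return 0;
    }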
+diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
+index fd0b6a2..7206864 100644
+--- a/arch/x86/boot/compressed/head_32.S
++++ b/arch/x86/boot/compressed/head_32.S
+@@ -169,10 +169,10 @@ preferred_addr:
+ addl %eax, %ebx
+ notl %eax
+ andl %eax, %ebx
+- cmpl $LOAD_PHYSICAL_ADDR, %ebx
++ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
+ jge 1f
+ #endif
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ 1:
+
+ /* Target address to relocate to for decompression */
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index efdfba2..af6d962 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -103,10 +103,10 @@ ENTRY(startup_32)
+ addl %eax, %ebx
+ notl %eax
+ andl %eax, %ebx
+- cmpl $LOAD_PHYSICAL_ADDR, %ebx
++ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
+ jge 1f
+ #endif
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ 1:
+
+ /* Target address to relocate to for decompression */
+@@ -333,10 +333,10 @@ preferred_addr:
+ addq %rax, %rbp
+ notq %rax
+ andq %rax, %rbp
+- cmpq $LOAD_PHYSICAL_ADDR, %rbp
++ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
+ jge 1f
+ #endif
+- movq $LOAD_PHYSICAL_ADDR, %rbp
++ movq $____LOAD_PHYSICAL_ADDR, %rbp
+ 1:
+
+ /* Target address to relocate to for decompression */
+@@ -444,8 +444,8 @@ gdt:
+ .long gdt
+ .word 0
+ .quad 0x0000000000000000 /* NULL descriptor */
+- .quad 0x00af9a000000ffff /* __KERNEL_CS */
+- .quad 0x00cf92000000ffff /* __KERNEL_DS */
++ .quad 0x00af9b000000ffff /* __KERNEL_CS */
++ .quad 0x00cf93000000ffff /* __KERNEL_DS */
+ .quad 0x0080890000000000 /* TS descriptor */
+ .quad 0x0000000000000000 /* TS continued */
+ gdt_end:
+@@ -465,7 +465,7 @@ efi32_config:
+ .global efi64_config
+ efi64_config:
+ .fill 4,8,0
+- .quad efi_call
++ .quad efi_call_early
+ .byte 1
+ #endif /* CONFIG_EFI_STUB */
+
+diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+index b3c5a5f0..596115e 100644
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -176,13 +176,17 @@ static void handle_relocations(void *output, unsigned long output_len,
+ int *reloc;
+ unsigned long delta, map, ptr;
+ unsigned long min_addr = (unsigned long)output;
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ unsigned long max_addr = min_addr + (VO___bss_start - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR);
++#else
+ unsigned long max_addr = min_addr + (VO___bss_start - VO__text);
++#endif
+
+ /*
+ * Calculate the delta between where vmlinux was linked to load
+ * and where it was actually loaded.
+ */
+- delta = min_addr - LOAD_PHYSICAL_ADDR;
++ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
+
+ /*
+ * The kernel contains a table of relocation addresses. Those
+@@ -199,7 +203,7 @@ static void handle_relocations(void *output, unsigned long output_len,
+ * from __START_KERNEL_map.
+ */
+ if (IS_ENABLED(CONFIG_X86_64))
+- delta = virt_addr - LOAD_PHYSICAL_ADDR;
++ delta = virt_addr - ____LOAD_PHYSICAL_ADDR;
+
+ if (!delta) {
+ debug_putstr("No relocation needed... ");
+@@ -274,7 +278,7 @@ static void parse_elf(void *output)
+ Elf32_Ehdr ehdr;
+ Elf32_Phdr *phdrs, *phdr;
+ #endif
+- void *dest;
++ void *dest, *prev;
+ int i;
+
+ memcpy(&ehdr, output, sizeof(ehdr));
+@@ -301,11 +305,14 @@ static void parse_elf(void *output)
+ case PT_LOAD:
+ #ifdef CONFIG_RELOCATABLE
+ dest = output;
+- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
++ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
+ #else
+ dest = (void *)(phdr->p_paddr);
+ #endif
+ memmove(dest, output + phdr->p_offset, phdr->p_filesz);
++ if (i)
++ memset(prev, 0xff, dest - prev);
++ prev = dest + phdr->p_filesz;
+ break;
+ default: /* Ignore other PT_* */ break;
+ }
+@@ -337,7 +344,11 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
+ unsigned char *output,
+ unsigned long output_len)
+ {
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ const unsigned long kernel_total_size = VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR;
++#else
+ const unsigned long kernel_total_size = VO__end - VO__text;
++#endif
+ unsigned long virt_addr = (unsigned long)output;
+
+ /* Retain x86 boot parameters pointer passed from startup_32/64. */
+@@ -395,7 +406,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
+ error("Destination address too large");
+ #endif
+ #ifndef CONFIG_RELOCATABLE
+- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
++ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
+ error("Destination address does not match LOAD_PHYSICAL_ADDR");
+ if ((unsigned long)output != virt_addr)
+ error("Destination virtual address changed when not relocatable");
+diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
+index 56589d0..f2085be 100644
+--- a/arch/x86/boot/compressed/pagetable.c
++++ b/arch/x86/boot/compressed/pagetable.c
+@@ -14,6 +14,7 @@
+ */
+ #define __pa(x) ((unsigned long)(x))
+ #define __va(x) ((void *)((unsigned long)(x)))
++#undef CONFIG_PAX_KERNEXEC
+
+ #include "misc.h"
+
+diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
+index 4ad7d70..c703963 100644
+--- a/arch/x86/boot/cpucheck.c
++++ b/arch/x86/boot/cpucheck.c
+@@ -126,9 +126,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+ u32 ecx = MSR_K7_HWCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax &= ~(1 << 15);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ get_cpuflags(); /* Make sure it really did something */
+ err = check_cpuflags();
+@@ -141,9 +141,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+ u32 ecx = MSR_VIA_FCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax |= (1<<1)|(1<<7);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ set_bit(X86_FEATURE_CX8, cpu.flags);
+ err = check_cpuflags();
+@@ -154,12 +154,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+ u32 eax, edx;