-rw-r--r--  4.8.8/1007_linux-4.8.8.patch  1846
-rw-r--r--  4.8.9/0000_README (renamed from 4.8.8/0000_README)  6
-rw-r--r--  4.8.9/1008_linux-4.8.9.patch  3119
-rw-r--r--  4.8.9/4420_grsecurity-3.1-4.8.9-201611192033.patch (renamed from 4.8.8/4420_grsecurity-3.1-4.8.8-201611150756.patch)  250
-rw-r--r--  4.8.9/4425_grsec_remove_EI_PAX.patch (renamed from 4.8.8/4425_grsec_remove_EI_PAX.patch)  0
-rw-r--r--  4.8.9/4427_force_XATTR_PAX_tmpfs.patch (renamed from 4.8.8/4427_force_XATTR_PAX_tmpfs.patch)  0
-rw-r--r--  4.8.9/4430_grsec-remove-localversion-grsec.patch (renamed from 4.8.8/4430_grsec-remove-localversion-grsec.patch)  0
-rw-r--r--  4.8.9/4435_grsec-mute-warnings.patch (renamed from 4.8.8/4435_grsec-mute-warnings.patch)  0
-rw-r--r--  4.8.9/4440_grsec-remove-protected-paths.patch (renamed from 4.8.8/4440_grsec-remove-protected-paths.patch)  0
-rw-r--r--  4.8.9/4450_grsec-kconfig-default-gids.patch (renamed from 4.8.8/4450_grsec-kconfig-default-gids.patch)  0
-rw-r--r--  4.8.9/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 4.8.8/4465_selinux-avc_audit-log-curr_ip.patch)  0
-rw-r--r--  4.8.9/4470_disable-compat_vdso.patch (renamed from 4.8.8/4470_disable-compat_vdso.patch)  0
-rw-r--r--  4.8.9/4475_emutramp_default_on.patch (renamed from 4.8.8/4475_emutramp_default_on.patch)  0
13 files changed, 3278 insertions, 1943 deletions
diff --git a/4.8.8/1007_linux-4.8.8.patch b/4.8.8/1007_linux-4.8.8.patch
deleted file mode 100644
index 35fb91c..0000000
--- a/4.8.8/1007_linux-4.8.8.patch
+++ /dev/null
@@ -1,1846 +0,0 @@
-diff --git a/Makefile b/Makefile
-index 4d0f28c..8f18daa 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,6 +1,6 @@
- VERSION = 4
- PATCHLEVEL = 8
--SUBLEVEL = 7
-+SUBLEVEL = 8
- EXTRAVERSION =
- NAME = Psychotic Stoned Sheep
-
-diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
-index ee655ed..1e8fceb 100644
---- a/arch/powerpc/include/asm/checksum.h
-+++ b/arch/powerpc/include/asm/checksum.h
-@@ -53,10 +53,8 @@ static inline __sum16 csum_fold(__wsum sum)
- return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
- }
-
--static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
-- unsigned short len,
-- unsigned short proto,
-- __wsum sum)
-+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
-+ __u8 proto, __wsum sum)
- {
- #ifdef __powerpc64__
- unsigned long s = (__force u32)sum;
-@@ -83,10 +81,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
--static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
-- unsigned short len,
-- unsigned short proto,
-- __wsum sum)
-+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
-+ __u8 proto, __wsum sum)
- {
- return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
- }
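
The prototype change above aligns powerpc with the generic declarations in asm-generic/checksum.h: len widens from unsigned short to __u32 and proto narrows to __u8. A rough userspace model of the generic little-endian fallback (illustrative only, not the powerpc-optimized code in this hunk) shows what the pair computes:

#include <stdint.h>
#include <stdio.h>

/* Userspace model of csum_fold()/csum_tcpudp_nofold(); little-endian
 * pseudo-header layout assumed, input values below are made up. */
static uint16_t csum_fold(uint32_t sum)
{
    sum = (sum & 0xffff) + (sum >> 16);  /* fold carries into low 16 bits */
    sum = (sum & 0xffff) + (sum >> 16);  /* absorb the possible new carry */
    return (uint16_t)~sum;
}

static uint32_t csum_tcpudp_nofold(uint32_t saddr, uint32_t daddr,
                                   uint32_t len, uint8_t proto, uint32_t sum)
{
    uint64_t s = sum;

    s += saddr;
    s += daddr;
    s += (uint32_t)(proto + len) << 8;   /* little-endian pseudo header */
    s = (s & 0xffffffffULL) + (s >> 32); /* fold 64 bits down to 32 */
    s = (s & 0xffffffffULL) + (s >> 32);
    return (uint32_t)s;
}

int main(void)
{
    /* csum_tcpudp_magic() is just csum_fold() of the nofold sum */
    printf("%04x\n", csum_fold(csum_tcpudp_nofold(0x0a000001, 0x0a000002,
                                                  20, 6, 0)));
    return 0;
}
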
-diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
-index 9dbfcc0..5ff64af 100644
---- a/drivers/infiniband/ulp/ipoib/ipoib.h
-+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
-@@ -63,6 +63,8 @@ enum ipoib_flush_level {
-
- enum {
- IPOIB_ENCAP_LEN = 4,
-+ IPOIB_PSEUDO_LEN = 20,
-+ IPOIB_HARD_LEN = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN,
-
- IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
- IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
-@@ -134,15 +136,21 @@ struct ipoib_header {
- u16 reserved;
- };
-
--struct ipoib_cb {
-- struct qdisc_skb_cb qdisc_cb;
-- u8 hwaddr[INFINIBAND_ALEN];
-+struct ipoib_pseudo_header {
-+ u8 hwaddr[INFINIBAND_ALEN];
- };
-
--static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
-+static inline void skb_add_pseudo_hdr(struct sk_buff *skb)
- {
-- BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
-- return (struct ipoib_cb *)skb->cb;
-+ char *data = skb_push(skb, IPOIB_PSEUDO_LEN);
-+
-+ /*
-+ * only the ipoib header is present now, make room for a dummy
-+ * pseudo header and set skb field accordingly
-+ */
-+ memset(data, 0, IPOIB_PSEUDO_LEN);
-+ skb_reset_mac_header(skb);
-+ skb_pull(skb, IPOIB_HARD_LEN);
- }
-
- /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
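
The block above is the core of the IPoIB fix: the destination hardware address used to live in skb->cb, which the qdisc layer is free to overwrite, so it now travels inside the skb data as a dummy pseudo header. A standalone sketch of the resulting layout math (INFINIBAND_ALEN is 20 per <linux/if_infiniband.h>; the rest comes from the hunk above):

#include <stdio.h>

/*
 * skb->data after ipoib_hard_header():
 *
 *   [ pseudo hdr: 20 B hwaddr ][ ipoib hdr: 4 B proto/reserved ][ payload ]
 *   |<--------------- IPOIB_HARD_LEN = 24 bytes --------------->|
 *
 * ipoib_start_xmit() pulls the pseudo header back off immediately; the
 * queueing paths skb_push() it again before requeueing, as later hunks show.
 */
#define INFINIBAND_ALEN   20
#define IPOIB_ENCAP_LEN    4
#define IPOIB_PSEUDO_LEN  INFINIBAND_ALEN
#define IPOIB_HARD_LEN    (IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN)

int main(void)
{
    printf("hard_header_len reported to the stack: %d\n", IPOIB_HARD_LEN);
    return 0;
}
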
-diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
-index 4ad297d..339a1ee 100644
---- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
-+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
-@@ -63,6 +63,8 @@ MODULE_PARM_DESC(cm_data_debug_level,
- #define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
- #define IPOIB_CM_RX_UPDATE_MASK (0x3)
-
-+#define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
-+
- static struct ib_qp_attr ipoib_cm_err_attr = {
- .qp_state = IB_QPS_ERR
- };
-@@ -146,15 +148,15 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
- struct sk_buff *skb;
- int i;
-
-- skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
-+ skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
- if (unlikely(!skb))
- return NULL;
-
- /*
-- * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
-+ * IPoIB adds a IPOIB_ENCAP_LEN byte header, this will align the
- * IP header to a multiple of 16.
- */
-- skb_reserve(skb, 12);
-+ skb_reserve(skb, IPOIB_CM_RX_RESERVE);
-
- mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
- DMA_FROM_DEVICE);
-@@ -624,9 +626,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
- if (wc->byte_len < IPOIB_CM_COPYBREAK) {
- int dlen = wc->byte_len;
-
-- small_skb = dev_alloc_skb(dlen + 12);
-+ small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
- if (small_skb) {
-- skb_reserve(small_skb, 12);
-+ skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
- ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
- dlen, DMA_FROM_DEVICE);
- skb_copy_from_linear_data(skb, small_skb->data, dlen);
-@@ -663,8 +665,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
-
- copied:
- skb->protocol = ((struct ipoib_header *) skb->data)->proto;
-- skb_reset_mac_header(skb);
-- skb_pull(skb, IPOIB_ENCAP_LEN);
-+ skb_add_pseudo_hdr(skb);
-
- ++dev->stats.rx_packets;
- dev->stats.rx_bytes += skb->len;
-diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
-index be11d5d..830fecb 100644
---- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
-+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
-@@ -128,16 +128,15 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
-
- buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
-
-- skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
-+ skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
- if (unlikely(!skb))
- return NULL;
-
- /*
-- * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
-- * header. So we need 4 more bytes to get to 48 and align the
-- * IP header to a multiple of 16.
-+ * the IP header will be at IPOIP_HARD_LEN + IB_GRH_BYTES, that is
-+ * 64 bytes aligned
- */
-- skb_reserve(skb, 4);
-+ skb_reserve(skb, sizeof(struct ipoib_pseudo_header));
-
- mapping = priv->rx_ring[id].mapping;
- mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
-@@ -253,8 +252,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
- skb_pull(skb, IB_GRH_BYTES);
-
- skb->protocol = ((struct ipoib_header *) skb->data)->proto;
-- skb_reset_mac_header(skb);
-- skb_pull(skb, IPOIB_ENCAP_LEN);
-+ skb_add_pseudo_hdr(skb);
-
- ++dev->stats.rx_packets;
- dev->stats.rx_bytes += skb->len;
-diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
-index cc1c1b0..823a528 100644
---- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
-+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
-@@ -925,9 +925,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
- ipoib_neigh_free(neigh);
- goto err_drop;
- }
-- if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
-+ if (skb_queue_len(&neigh->queue) <
-+ IPOIB_MAX_PATH_REC_QUEUE) {
-+ /* put pseudoheader back on for next time */
-+ skb_push(skb, IPOIB_PSEUDO_LEN);
- __skb_queue_tail(&neigh->queue, skb);
-- else {
-+ } else {
- ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
- skb_queue_len(&neigh->queue));
- goto err_drop;
-@@ -964,7 +967,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
- }
-
- static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
-- struct ipoib_cb *cb)
-+ struct ipoib_pseudo_header *phdr)
- {
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ipoib_path *path;
-@@ -972,16 +975,18 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
-
- spin_lock_irqsave(&priv->lock, flags);
-
-- path = __path_find(dev, cb->hwaddr + 4);
-+ path = __path_find(dev, phdr->hwaddr + 4);
- if (!path || !path->valid) {
- int new_path = 0;
-
- if (!path) {
-- path = path_rec_create(dev, cb->hwaddr + 4);
-+ path = path_rec_create(dev, phdr->hwaddr + 4);
- new_path = 1;
- }
- if (path) {
- if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
-+ /* put pseudoheader back on for next time */
-+ skb_push(skb, IPOIB_PSEUDO_LEN);
- __skb_queue_tail(&path->queue, skb);
- } else {
- ++dev->stats.tx_dropped;
-@@ -1009,10 +1014,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
- be16_to_cpu(path->pathrec.dlid));
-
- spin_unlock_irqrestore(&priv->lock, flags);
-- ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
-+ ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
- return;
- } else if ((path->query || !path_rec_start(dev, path)) &&
- skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
-+ /* put pseudoheader back on for next time */
-+ skb_push(skb, IPOIB_PSEUDO_LEN);
- __skb_queue_tail(&path->queue, skb);
- } else {
- ++dev->stats.tx_dropped;
-@@ -1026,13 +1033,15 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
- {
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ipoib_neigh *neigh;
-- struct ipoib_cb *cb = ipoib_skb_cb(skb);
-+ struct ipoib_pseudo_header *phdr;
- struct ipoib_header *header;
- unsigned long flags;
-
-+ phdr = (struct ipoib_pseudo_header *) skb->data;
-+ skb_pull(skb, sizeof(*phdr));
- header = (struct ipoib_header *) skb->data;
-
-- if (unlikely(cb->hwaddr[4] == 0xff)) {
-+ if (unlikely(phdr->hwaddr[4] == 0xff)) {
- /* multicast, arrange "if" according to probability */
- if ((header->proto != htons(ETH_P_IP)) &&
- (header->proto != htons(ETH_P_IPV6)) &&
-@@ -1045,13 +1054,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
- return NETDEV_TX_OK;
- }
- /* Add in the P_Key for multicast*/
-- cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
-- cb->hwaddr[9] = priv->pkey & 0xff;
-+ phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
-+ phdr->hwaddr[9] = priv->pkey & 0xff;
-
-- neigh = ipoib_neigh_get(dev, cb->hwaddr);
-+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
- if (likely(neigh))
- goto send_using_neigh;
-- ipoib_mcast_send(dev, cb->hwaddr, skb);
-+ ipoib_mcast_send(dev, phdr->hwaddr, skb);
- return NETDEV_TX_OK;
- }
-
-@@ -1060,16 +1069,16 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
- case htons(ETH_P_IP):
- case htons(ETH_P_IPV6):
- case htons(ETH_P_TIPC):
-- neigh = ipoib_neigh_get(dev, cb->hwaddr);
-+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
- if (unlikely(!neigh)) {
-- neigh_add_path(skb, cb->hwaddr, dev);
-+ neigh_add_path(skb, phdr->hwaddr, dev);
- return NETDEV_TX_OK;
- }
- break;
- case htons(ETH_P_ARP):
- case htons(ETH_P_RARP):
- /* for unicast ARP and RARP should always perform path find */
-- unicast_arp_send(skb, dev, cb);
-+ unicast_arp_send(skb, dev, phdr);
- return NETDEV_TX_OK;
- default:
- /* ethertype not supported by IPoIB */
-@@ -1086,11 +1095,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
- goto unref;
- }
- } else if (neigh->ah) {
-- ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
-+ ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(phdr->hwaddr));
- goto unref;
- }
-
- if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
-+ /* put pseudoheader back on for next time */
-+ skb_push(skb, sizeof(*phdr));
- spin_lock_irqsave(&priv->lock, flags);
- __skb_queue_tail(&neigh->queue, skb);
- spin_unlock_irqrestore(&priv->lock, flags);
-@@ -1122,8 +1133,8 @@ static int ipoib_hard_header(struct sk_buff *skb,
- unsigned short type,
- const void *daddr, const void *saddr, unsigned len)
- {
-+ struct ipoib_pseudo_header *phdr;
- struct ipoib_header *header;
-- struct ipoib_cb *cb = ipoib_skb_cb(skb);
-
- header = (struct ipoib_header *) skb_push(skb, sizeof *header);
-
-@@ -1132,12 +1143,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
-
- /*
- * we don't rely on dst_entry structure, always stuff the
-- * destination address into skb->cb so we can figure out where
-+ * destination address into skb hard header so we can figure out where
- * to send the packet later.
- */
-- memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
-+ phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
-+ memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
-
-- return sizeof *header;
-+ return IPOIB_HARD_LEN;
- }
-
- static void ipoib_set_mcast_list(struct net_device *dev)
-@@ -1759,7 +1771,7 @@ void ipoib_setup(struct net_device *dev)
-
- dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
-
-- dev->hard_header_len = IPOIB_ENCAP_LEN;
-+ dev->hard_header_len = IPOIB_HARD_LEN;
- dev->addr_len = INFINIBAND_ALEN;
- dev->type = ARPHRD_INFINIBAND;
- dev->tx_queue_len = ipoib_sendq_size * 2;
-diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
-index d3394b6..1909dd2 100644
---- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
-+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
-@@ -796,9 +796,11 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
- __ipoib_mcast_add(dev, mcast);
- list_add_tail(&mcast->list, &priv->multicast_list);
- }
-- if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
-+ if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
-+ /* put pseudoheader back on for next time */
-+ skb_push(skb, sizeof(struct ipoib_pseudo_header));
- skb_queue_tail(&mcast->pkt_queue, skb);
-- else {
-+ } else {
- ++dev->stats.tx_dropped;
- dev_kfree_skb_any(skb);
- }
-diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
-index 692ee24..3474de5 100644
---- a/drivers/net/ethernet/freescale/fec_main.c
-+++ b/drivers/net/ethernet/freescale/fec_main.c
-@@ -913,13 +913,11 @@ fec_restart(struct net_device *ndev)
- * enet-mac reset will reset mac address registers too,
- * so need to reconfigure it.
- */
-- if (fep->quirks & FEC_QUIRK_ENET_MAC) {
-- memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
-- writel((__force u32)cpu_to_be32(temp_mac[0]),
-- fep->hwp + FEC_ADDR_LOW);
-- writel((__force u32)cpu_to_be32(temp_mac[1]),
-- fep->hwp + FEC_ADDR_HIGH);
-- }
-+ memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
-+ writel((__force u32)cpu_to_be32(temp_mac[0]),
-+ fep->hwp + FEC_ADDR_LOW);
-+ writel((__force u32)cpu_to_be32(temp_mac[1]),
-+ fep->hwp + FEC_ADDR_HIGH);
-
- /* Clear any outstanding interrupt. */
- writel(0xffffffff, fep->hwp + FEC_IEVENT);
-@@ -1432,14 +1430,14 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
- skb_put(skb, pkt_len - 4);
- data = skb->data;
-
-+ if (!is_copybreak && need_swap)
-+ swap_buffer(data, pkt_len);
-+
- #if !defined(CONFIG_M5272)
- if (fep->quirks & FEC_QUIRK_HAS_RACC)
- data = skb_pull_inline(skb, 2);
- #endif
-
-- if (!is_copybreak && need_swap)
-- swap_buffer(data, pkt_len);
--
- /* Extract the enhanced buffer descriptor */
- ebdp = NULL;
- if (fep->bufdesc_ex)
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
-index 132cea6..e3be7e4 100644
---- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
-@@ -127,7 +127,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
- /* For TX we use the same irq per
- ring we assigned for the RX */
- struct mlx4_en_cq *rx_cq;
--
-+ int xdp_index;
-+
-+ /* The xdp tx irq must align with the rx ring that forwards to
-+ * it, so reindex these from 0. This should only happen when
-+ * tx_ring_num is not a multiple of rx_ring_num.
-+ */
-+ xdp_index = (priv->xdp_ring_num - priv->tx_ring_num) + cq_idx;
-+ if (xdp_index >= 0)
-+ cq_idx = xdp_index;
- cq_idx = cq_idx % priv->rx_ring_num;
- rx_cq = priv->rx_cq[cq_idx];
- cq->vector = rx_cq->vector;
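
The reindexing above is easiest to see with concrete numbers. A standalone sketch (hypothetical ring counts; assumes the XDP TX rings occupy the tail of the TX ring array, as the comment in the hunk implies):

#include <stdio.h>

int main(void)
{
    int tx_ring_num = 16, xdp_ring_num = 8, rx_ring_num = 8;

    for (int cq_idx = 0; cq_idx < tx_ring_num; cq_idx++) {
        int idx = cq_idx;
        int xdp_index = (xdp_ring_num - tx_ring_num) + cq_idx;

        if (xdp_index >= 0)   /* XDP ring: renumber from 0 so it pairs */
            idx = xdp_index;  /* with the RX ring that forwards to it  */
        idx = idx % rx_ring_num;
        printf("tx cq %2d -> rx cq %d%s\n", cq_idx, idx,
               xdp_index >= 0 ? " (xdp)" : "");
    }
    return 0;
}

Regular TX CQs 0-7 keep the plain modulo mapping; XDP CQs 8-15 are renumbered 0-7 so each shares the IRQ of the RX ring that feeds it.
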
-diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
-index 3c20e87..16af1ce 100644
---- a/drivers/net/geneve.c
-+++ b/drivers/net/geneve.c
-@@ -453,7 +453,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
-
- skb_gro_pull(skb, gh_len);
- skb_gro_postpull_rcsum(skb, gh, gh_len);
-- pp = ptype->callbacks.gro_receive(head, skb);
-+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
- flush = 0;
-
- out_unlock:
-diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
-index 3ba29fc..c4d9653 100644
---- a/drivers/net/hyperv/netvsc_drv.c
-+++ b/drivers/net/hyperv/netvsc_drv.c
-@@ -624,15 +624,18 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
- packet->total_data_buflen);
-
- skb->protocol = eth_type_trans(skb, net);
-- if (csum_info) {
-- /* We only look at the IP checksum here.
-- * Should we be dropping the packet if checksum
-- * failed? How do we deal with other checksums - TCP/UDP?
-- */
-- if (csum_info->receive.ip_checksum_succeeded)
-+
-+ /* skb is already created with CHECKSUM_NONE */
-+ skb_checksum_none_assert(skb);
-+
-+ /*
-+ * In Linux, the IP checksum is always checked.
-+ * Do L4 checksum offload if enabled and present.
-+ */
-+ if (csum_info && (net->features & NETIF_F_RXCSUM)) {
-+ if (csum_info->receive.tcp_checksum_succeeded ||
-+ csum_info->receive.udp_checksum_succeeded)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-- else
-- skb->ip_summed = CHECKSUM_NONE;
- }
-
- if (vlan_tci & VLAN_TAG_PRESENT)
-diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
-index 351e701..b72ddc6 100644
---- a/drivers/net/macsec.c
-+++ b/drivers/net/macsec.c
-@@ -397,6 +397,14 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
- #define DEFAULT_ENCRYPT false
- #define DEFAULT_ENCODING_SA 0
-
-+static bool send_sci(const struct macsec_secy *secy)
-+{
-+ const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
-+
-+ return tx_sc->send_sci ||
-+ (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
-+}
-+
- static sci_t make_sci(u8 *addr, __be16 port)
- {
- sci_t sci;
-@@ -437,15 +445,15 @@ static unsigned int macsec_extra_len(bool sci_present)
-
- /* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
- static void macsec_fill_sectag(struct macsec_eth_header *h,
-- const struct macsec_secy *secy, u32 pn)
-+ const struct macsec_secy *secy, u32 pn,
-+ bool sci_present)
- {
- const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
-
-- memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci));
-+ memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
- h->eth.h_proto = htons(ETH_P_MACSEC);
-
-- if (tx_sc->send_sci ||
-- (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) {
-+ if (sci_present) {
- h->tci_an |= MACSEC_TCI_SC;
- memcpy(&h->secure_channel_id, &secy->sci,
- sizeof(h->secure_channel_id));
-@@ -650,6 +658,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
- struct macsec_tx_sc *tx_sc;
- struct macsec_tx_sa *tx_sa;
- struct macsec_dev *macsec = macsec_priv(dev);
-+ bool sci_present;
- u32 pn;
-
- secy = &macsec->secy;
-@@ -687,7 +696,8 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
-
- unprotected_len = skb->len;
- eth = eth_hdr(skb);
-- hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci));
-+ sci_present = send_sci(secy);
-+ hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present));
- memmove(hh, eth, 2 * ETH_ALEN);
-
- pn = tx_sa_update_pn(tx_sa, secy);
-@@ -696,7 +706,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
- kfree_skb(skb);
- return ERR_PTR(-ENOLINK);
- }
-- macsec_fill_sectag(hh, secy, pn);
-+ macsec_fill_sectag(hh, secy, pn, sci_present);
- macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
-
- skb_put(skb, secy->icv_len);
-@@ -726,10 +736,10 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
- skb_to_sgvec(skb, sg, 0, skb->len);
-
- if (tx_sc->encrypt) {
-- int len = skb->len - macsec_hdr_len(tx_sc->send_sci) -
-+ int len = skb->len - macsec_hdr_len(sci_present) -
- secy->icv_len;
- aead_request_set_crypt(req, sg, sg, len, iv);
-- aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci));
-+ aead_request_set_ad(req, macsec_hdr_len(sci_present));
- } else {
- aead_request_set_crypt(req, sg, sg, 0, iv);
- aead_request_set_ad(req, skb->len - secy->icv_len);
-diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
-index c6f6683..f424b86 100644
---- a/drivers/net/phy/phy.c
-+++ b/drivers/net/phy/phy.c
-@@ -608,6 +608,21 @@ void phy_start_machine(struct phy_device *phydev)
- }
-
- /**
-+ * phy_trigger_machine - trigger the state machine to run
-+ *
-+ * @phydev: the phy_device struct
-+ *
-+ * Description: There has been a change in state which requires that the
-+ * state machine runs.
-+ */
-+
-+static void phy_trigger_machine(struct phy_device *phydev)
-+{
-+ cancel_delayed_work_sync(&phydev->state_queue);
-+ queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
-+}
-+
-+/**
- * phy_stop_machine - stop the PHY state machine tracking
- * @phydev: target phy_device struct
- *
-@@ -639,6 +654,8 @@ static void phy_error(struct phy_device *phydev)
- mutex_lock(&phydev->lock);
- phydev->state = PHY_HALTED;
- mutex_unlock(&phydev->lock);
-+
-+ phy_trigger_machine(phydev);
- }
-
- /**
-@@ -800,8 +817,7 @@ void phy_change(struct work_struct *work)
- }
-
- /* reschedule state queue work to run as soon as possible */
-- cancel_delayed_work_sync(&phydev->state_queue);
-- queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
-+ phy_trigger_machine(phydev);
- return;
-
- ignore:
-@@ -890,6 +906,8 @@ void phy_start(struct phy_device *phydev)
- /* if phy was suspended, bring the physical link up again */
- if (do_resume)
- phy_resume(phydev);
-+
-+ phy_trigger_machine(phydev);
- }
- EXPORT_SYMBOL(phy_start);
-
-diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
-index 6e65832..5ae664c 100644
---- a/drivers/net/vxlan.c
-+++ b/drivers/net/vxlan.c
-@@ -584,7 +584,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
- }
- }
-
-- pp = eth_gro_receive(head, skb);
-+ pp = call_gro_receive(eth_gro_receive, head, skb);
- flush = 0;
-
- out:
-diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
-index d637c93..58a97d4 100644
---- a/drivers/ptp/ptp_chardev.c
-+++ b/drivers/ptp/ptp_chardev.c
-@@ -193,6 +193,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
- if (err)
- break;
-
-+ memset(&precise_offset, 0, sizeof(precise_offset));
- ts = ktime_to_timespec64(xtstamp.device);
- precise_offset.device.sec = ts.tv_sec;
- precise_offset.device.nsec = ts.tv_nsec;
-diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
-index ca86c88..3aaea71 100644
---- a/drivers/scsi/megaraid/megaraid_sas.h
-+++ b/drivers/scsi/megaraid/megaraid_sas.h
-@@ -2233,7 +2233,7 @@ struct megasas_instance_template {
- };
-
- #define MEGASAS_IS_LOGICAL(scp) \
-- (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
-+ ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
-
- #define MEGASAS_DEV_INDEX(scp) \
- (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
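
The added outer parentheses are not cosmetic: ?: binds looser than &&, so the old expansion swallows any condition the macro is combined with, and the megasas_queue_command() hunk below adds exactly such a combination. A standalone demo with made-up values:

#include <stdio.h>

#define MAX_PD_CHANNELS     2
#define IS_LOGICAL_OLD(ch)  (ch) < MAX_PD_CHANNELS ? 0 : 1
#define IS_LOGICAL_NEW(ch)  (((ch) < MAX_PD_CHANNELS) ? 0 : 1)

int main(void)
{
    int is_sync_cache = 0;  /* some other SCSI opcode */
    int channel = 3;        /* a logical-device channel */

    /* parses as (is_sync_cache && channel < 2) ? 0 : 1 -> 1, i.e. true
     * for every command that is not SYNCHRONIZE_CACHE */
    printf("old: %d\n", is_sync_cache && IS_LOGICAL_OLD(channel));

    /* parses as intended: is_sync_cache && (...) -> 0 */
    printf("new: %d\n", is_sync_cache && IS_LOGICAL_NEW(channel));
    return 0;
}
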
-diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
-index c1ed25a..71e4899 100644
---- a/drivers/scsi/megaraid/megaraid_sas_base.c
-+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
-@@ -1713,16 +1713,13 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
- goto out_done;
- }
-
-- switch (scmd->cmnd[0]) {
-- case SYNCHRONIZE_CACHE:
-- /*
-- * FW takes care of flush cache on its own
-- * No need to send it down
-- */
-+ /*
-+ * FW takes care of flush cache on its own for Virtual Disk.
-+ * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
-+ */
-+ if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
- scmd->result = DID_OK << 16;
- goto out_done;
-- default:
-- break;
- }
-
- return instance->instancet->build_and_issue_cmd(instance, scmd);
-diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
-index 6443cfb..dc3b596 100644
---- a/drivers/usb/dwc3/gadget.c
-+++ b/drivers/usb/dwc3/gadget.c
-@@ -789,6 +789,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
- req->trb = trb;
- req->trb_dma = dwc3_trb_dma_offset(dep, trb);
- req->first_trb_index = dep->trb_enqueue;
-+ dep->queued_requests++;
- }
-
- dwc3_ep_inc_enq(dep);
-@@ -841,8 +842,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
-
- trb->ctrl |= DWC3_TRB_CTRL_HWO;
-
-- dep->queued_requests++;
--
- trace_dwc3_prepare_trb(dep, trb);
- }
-
-@@ -1963,7 +1962,9 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
- unsigned int s_pkt = 0;
- unsigned int trb_status;
-
-- dep->queued_requests--;
-+ if (req->trb == trb)
-+ dep->queued_requests--;
-+
- trace_dwc3_complete_trb(dep, trb);
-
- /*
-diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index e8d79d4..e942c67 100644
---- a/include/linux/netdevice.h
-+++ b/include/linux/netdevice.h
-@@ -2154,7 +2154,10 @@ struct napi_gro_cb {
- /* Used to determine if flush_id can be ignored */
- u8 is_atomic:1;
-
-- /* 5 bit hole */
-+ /* Number of gro_receive callbacks this packet already went through */
-+ u8 recursion_counter:4;
-+
-+ /* 1 bit hole */
-
- /* used to support CHECKSUM_COMPLETE for tunneling protocols */
- __wsum csum;
-@@ -2165,6 +2168,40 @@ struct napi_gro_cb {
-
- #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
-
-+#define GRO_RECURSION_LIMIT 15
-+static inline int gro_recursion_inc_test(struct sk_buff *skb)
-+{
-+ return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
-+}
-+
-+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
-+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
-+ struct sk_buff **head,
-+ struct sk_buff *skb)
-+{
-+ if (unlikely(gro_recursion_inc_test(skb))) {
-+ NAPI_GRO_CB(skb)->flush |= 1;
-+ return NULL;
-+ }
-+
-+ return cb(head, skb);
-+}
-+
-+typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
-+ struct sk_buff *);
-+static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
-+ struct sock *sk,
-+ struct sk_buff **head,
-+ struct sk_buff *skb)
-+{
-+ if (unlikely(gro_recursion_inc_test(skb))) {
-+ NAPI_GRO_CB(skb)->flush |= 1;
-+ return NULL;
-+ }
-+
-+ return cb(sk, head, skb);
-+}
-+
- struct packet_type {
- __be16 type; /* This is really htons(ether_type). */
- struct net_device *dev; /* NULL is wildcarded here */
-@@ -3862,7 +3899,7 @@ struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
- ldev = netdev_all_lower_get_next(dev, &(iter)))
-
- #define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
-- for (iter = (dev)->all_adj_list.lower.next, \
-+ for (iter = &(dev)->all_adj_list.lower, \
- ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
- ldev; \
- ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
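
recursion_counter above is a 4-bit field, so GRO_RECURSION_LIMIT of 15 is the largest value it can hold: once an skb has passed through 15 nested gro_receive callbacks, call_gro_receive() marks it for flushing instead of descending further. A toy userspace model of the guard (not kernel code):

#include <stdio.h>

#define GRO_RECURSION_LIMIT 15

struct cb { unsigned int recursion_counter:4; unsigned int flush:1; };

static int gro_recursion_inc_test(struct cb *cb)
{
    return ++cb->recursion_counter == GRO_RECURSION_LIMIT;
}

/* stand-in for a tunnel gro_receive that re-enters the inner protocol */
static void nested_gro_receive(struct cb *cb, int depth)
{
    if (gro_recursion_inc_test(cb)) {
        cb->flush = 1;                  /* stop merging, just flush */
        printf("flushed at depth %d\n", depth);
        return;
    }
    nested_gro_receive(cb, depth + 1);  /* maliciously deep encapsulation */
}

int main(void)
{
    struct cb cb = { 0, 0 };
    nested_gro_receive(&cb, 1);         /* terminates at depth 15 */
    return 0;
}

The geneve, vxlan, 8021q, fou, gre, inet, ipv6 and eth_gro_receive hunks elsewhere in this patch are call sites converted to go through this wrapper.
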
-diff --git a/include/net/ip.h b/include/net/ip.h
-index 9742b92..156b0c1 100644
---- a/include/net/ip.h
-+++ b/include/net/ip.h
-@@ -549,7 +549,7 @@ int ip_options_rcv_srr(struct sk_buff *skb);
- */
-
- void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
--void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
-+void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset);
- int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
- struct ipcm_cookie *ipc, bool allow_ipv6);
- int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
-@@ -571,7 +571,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
-
- static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
- {
-- ip_cmsg_recv_offset(msg, skb, 0);
-+ ip_cmsg_recv_offset(msg, skb, 0, 0);
- }
-
- bool icmp_global_allow(void);
-diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
-index d97305d..0a2d270 100644
---- a/include/net/ip6_route.h
-+++ b/include/net/ip6_route.h
-@@ -32,6 +32,7 @@ struct route_info {
- #define RT6_LOOKUP_F_SRCPREF_TMP 0x00000008
- #define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010
- #define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
-+#define RT6_LOOKUP_F_IGNORE_LINKSTATE 0x00000040
-
- /* We do not (yet ?) support IPv6 jumbograms (RFC 2675)
- * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header
-diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
-index 262f037..5a78be5 100644
---- a/include/uapi/linux/rtnetlink.h
-+++ b/include/uapi/linux/rtnetlink.h
-@@ -350,7 +350,7 @@ struct rtnexthop {
- #define RTNH_F_OFFLOAD 8 /* offloaded route */
- #define RTNH_F_LINKDOWN 16 /* carrier-down on nexthop */
-
--#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN)
-+#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD)
-
- /* Macros to handle hexthops */
-
-diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
-index 8de138d..f2531ad 100644
---- a/net/8021q/vlan.c
-+++ b/net/8021q/vlan.c
-@@ -664,7 +664,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
-
- skb_gro_pull(skb, sizeof(*vhdr));
- skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
-- pp = ptype->callbacks.gro_receive(head, skb);
-+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
-
- out_unlock:
- rcu_read_unlock();
-diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
-index c5fea93..2136e45 100644
---- a/net/bridge/br_multicast.c
-+++ b/net/bridge/br_multicast.c
-@@ -972,13 +972,12 @@ static void br_multicast_enable(struct bridge_mcast_own_query *query)
- mod_timer(&query->timer, jiffies);
- }
-
--void br_multicast_enable_port(struct net_bridge_port *port)
-+static void __br_multicast_enable_port(struct net_bridge_port *port)
- {
- struct net_bridge *br = port->br;
-
-- spin_lock(&br->multicast_lock);
- if (br->multicast_disabled || !netif_running(br->dev))
-- goto out;
-+ return;
-
- br_multicast_enable(&port->ip4_own_query);
- #if IS_ENABLED(CONFIG_IPV6)
-@@ -987,8 +986,14 @@ void br_multicast_enable_port(struct net_bridge_port *port)
- if (port->multicast_router == MDB_RTR_TYPE_PERM &&
- hlist_unhashed(&port->rlist))
- br_multicast_add_router(br, port);
-+}
-
--out:
-+void br_multicast_enable_port(struct net_bridge_port *port)
-+{
-+ struct net_bridge *br = port->br;
-+
-+ spin_lock(&br->multicast_lock);
-+ __br_multicast_enable_port(port);
- spin_unlock(&br->multicast_lock);
- }
-
-@@ -1994,8 +1999,9 @@ static void br_multicast_start_querier(struct net_bridge *br,
-
- int br_multicast_toggle(struct net_bridge *br, unsigned long val)
- {
-- int err = 0;
- struct net_bridge_mdb_htable *mdb;
-+ struct net_bridge_port *port;
-+ int err = 0;
-
- spin_lock_bh(&br->multicast_lock);
- if (br->multicast_disabled == !val)
-@@ -2023,10 +2029,9 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
- goto rollback;
- }
-
-- br_multicast_start_querier(br, &br->ip4_own_query);
--#if IS_ENABLED(CONFIG_IPV6)
-- br_multicast_start_querier(br, &br->ip6_own_query);
--#endif
-+ br_multicast_open(br);
-+ list_for_each_entry(port, &br->port_list, list)
-+ __br_multicast_enable_port(port);
-
- unlock:
- spin_unlock_bh(&br->multicast_lock);
-diff --git a/net/core/dev.c b/net/core/dev.c
-index ea63120..44b3ba4 100644
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -3035,6 +3035,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
- }
- return head;
- }
-+EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
-
- static void qdisc_pkt_len_init(struct sk_buff *skb)
- {
-@@ -4496,6 +4497,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
- NAPI_GRO_CB(skb)->flush = 0;
- NAPI_GRO_CB(skb)->free = 0;
- NAPI_GRO_CB(skb)->encap_mark = 0;
-+ NAPI_GRO_CB(skb)->recursion_counter = 0;
- NAPI_GRO_CB(skb)->is_fou = 0;
- NAPI_GRO_CB(skb)->is_atomic = 1;
- NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
-@@ -5500,10 +5502,14 @@ struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
- {
- struct netdev_adjacent *lower;
-
-- lower = list_first_or_null_rcu(&dev->all_adj_list.lower,
-- struct netdev_adjacent, list);
-+ lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
-+
-+ if (&lower->list == &dev->all_adj_list.lower)
-+ return NULL;
-+
-+ *iter = &lower->list;
-
-- return lower ? lower->dev : NULL;
-+ return lower->dev;
- }
- EXPORT_SYMBOL(netdev_all_lower_get_next_rcu);
-
-@@ -5578,6 +5584,7 @@ static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
-
- static int __netdev_adjacent_dev_insert(struct net_device *dev,
- struct net_device *adj_dev,
-+ u16 ref_nr,
- struct list_head *dev_list,
- void *private, bool master)
- {
-@@ -5587,7 +5594,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
- adj = __netdev_find_adj(adj_dev, dev_list);
-
- if (adj) {
-- adj->ref_nr++;
-+ adj->ref_nr += ref_nr;
- return 0;
- }
-
-@@ -5597,7 +5604,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
-
- adj->dev = adj_dev;
- adj->master = master;
-- adj->ref_nr = 1;
-+ adj->ref_nr = ref_nr;
- adj->private = private;
- dev_hold(adj_dev);
-
-@@ -5636,6 +5643,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
-
- static void __netdev_adjacent_dev_remove(struct net_device *dev,
- struct net_device *adj_dev,
-+ u16 ref_nr,
- struct list_head *dev_list)
- {
- struct netdev_adjacent *adj;
-@@ -5648,10 +5656,10 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
- BUG();
- }
-
-- if (adj->ref_nr > 1) {
-- pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
-- adj->ref_nr-1);
-- adj->ref_nr--;
-+ if (adj->ref_nr > ref_nr) {
-+ pr_debug("%s to %s ref_nr-%d = %d\n", dev->name, adj_dev->name,
-+ ref_nr, adj->ref_nr-ref_nr);
-+ adj->ref_nr -= ref_nr;
- return;
- }
-
-@@ -5670,21 +5678,22 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
-
- static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
- struct net_device *upper_dev,
-+ u16 ref_nr,
- struct list_head *up_list,
- struct list_head *down_list,
- void *private, bool master)
- {
- int ret;
-
-- ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
-- master);
-+ ret = __netdev_adjacent_dev_insert(dev, upper_dev, ref_nr, up_list,
-+ private, master);
- if (ret)
- return ret;
-
-- ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
-- false);
-+ ret = __netdev_adjacent_dev_insert(upper_dev, dev, ref_nr, down_list,
-+ private, false);
- if (ret) {
-- __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
-+ __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
- return ret;
- }
-
-@@ -5692,9 +5701,10 @@ static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
- }
-
- static int __netdev_adjacent_dev_link(struct net_device *dev,
-- struct net_device *upper_dev)
-+ struct net_device *upper_dev,
-+ u16 ref_nr)
- {
-- return __netdev_adjacent_dev_link_lists(dev, upper_dev,
-+ return __netdev_adjacent_dev_link_lists(dev, upper_dev, ref_nr,
- &dev->all_adj_list.upper,
- &upper_dev->all_adj_list.lower,
- NULL, false);
-@@ -5702,17 +5712,19 @@ static int __netdev_adjacent_dev_link(struct net_device *dev,
-
- static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
- struct net_device *upper_dev,
-+ u16 ref_nr,
- struct list_head *up_list,
- struct list_head *down_list)
- {
-- __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
-- __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
-+ __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
-+ __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
- }
-
- static void __netdev_adjacent_dev_unlink(struct net_device *dev,
-- struct net_device *upper_dev)
-+ struct net_device *upper_dev,
-+ u16 ref_nr)
- {
-- __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
-+ __netdev_adjacent_dev_unlink_lists(dev, upper_dev, ref_nr,
- &dev->all_adj_list.upper,
- &upper_dev->all_adj_list.lower);
- }
-@@ -5721,17 +5733,17 @@ static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
- struct net_device *upper_dev,
- void *private, bool master)
- {
-- int ret = __netdev_adjacent_dev_link(dev, upper_dev);
-+ int ret = __netdev_adjacent_dev_link(dev, upper_dev, 1);
-
- if (ret)
- return ret;
-
-- ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
-+ ret = __netdev_adjacent_dev_link_lists(dev, upper_dev, 1,
- &dev->adj_list.upper,
- &upper_dev->adj_list.lower,
- private, master);
- if (ret) {
-- __netdev_adjacent_dev_unlink(dev, upper_dev);
-+ __netdev_adjacent_dev_unlink(dev, upper_dev, 1);
- return ret;
- }
-
-@@ -5741,8 +5753,8 @@ static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
- static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
- struct net_device *upper_dev)
- {
-- __netdev_adjacent_dev_unlink(dev, upper_dev);
-- __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
-+ __netdev_adjacent_dev_unlink(dev, upper_dev, 1);
-+ __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
- &dev->adj_list.upper,
- &upper_dev->adj_list.lower);
- }
-@@ -5795,7 +5807,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
- list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
- pr_debug("Interlinking %s with %s, non-neighbour\n",
- i->dev->name, j->dev->name);
-- ret = __netdev_adjacent_dev_link(i->dev, j->dev);
-+ ret = __netdev_adjacent_dev_link(i->dev, j->dev, i->ref_nr);
- if (ret)
- goto rollback_mesh;
- }
-@@ -5805,7 +5817,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
- list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
- pr_debug("linking %s's upper device %s with %s\n",
- upper_dev->name, i->dev->name, dev->name);
-- ret = __netdev_adjacent_dev_link(dev, i->dev);
-+ ret = __netdev_adjacent_dev_link(dev, i->dev, i->ref_nr);
- if (ret)
- goto rollback_upper_mesh;
- }
-@@ -5814,7 +5826,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
- list_for_each_entry(i, &dev->all_adj_list.lower, list) {
- pr_debug("linking %s's lower device %s with %s\n", dev->name,
- i->dev->name, upper_dev->name);
-- ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
-+ ret = __netdev_adjacent_dev_link(i->dev, upper_dev, i->ref_nr);
- if (ret)
- goto rollback_lower_mesh;
- }
-@@ -5832,7 +5844,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
- list_for_each_entry(i, &dev->all_adj_list.lower, list) {
- if (i == to_i)
- break;
-- __netdev_adjacent_dev_unlink(i->dev, upper_dev);
-+ __netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
- }
-
- i = NULL;
-@@ -5842,7 +5854,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
- list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
- if (i == to_i)
- break;
-- __netdev_adjacent_dev_unlink(dev, i->dev);
-+ __netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
- }
-
- i = j = NULL;
-@@ -5854,7 +5866,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
- list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
- if (i == to_i && j == to_j)
- break;
-- __netdev_adjacent_dev_unlink(i->dev, j->dev);
-+ __netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
- }
- if (i == to_i)
- break;
-@@ -5934,16 +5946,16 @@ void netdev_upper_dev_unlink(struct net_device *dev,
- */
- list_for_each_entry(i, &dev->all_adj_list.lower, list)
- list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
-- __netdev_adjacent_dev_unlink(i->dev, j->dev);
-+ __netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
-
- /* remove also the devices itself from lower/upper device
- * list
- */
- list_for_each_entry(i, &dev->all_adj_list.lower, list)
-- __netdev_adjacent_dev_unlink(i->dev, upper_dev);
-+ __netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
-
- list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
-- __netdev_adjacent_dev_unlink(dev, i->dev);
-+ __netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
-
- call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
- &changeupper_info.info);
-diff --git a/net/core/pktgen.c b/net/core/pktgen.c
-index bbd118b..306b8f0 100644
---- a/net/core/pktgen.c
-+++ b/net/core/pktgen.c
-@@ -216,8 +216,8 @@
- #define M_QUEUE_XMIT 2 /* Inject packet into qdisc */
-
- /* If lock -- protects updating of if_list */
--#define if_lock(t) spin_lock(&(t->if_lock));
--#define if_unlock(t) spin_unlock(&(t->if_lock));
-+#define if_lock(t) mutex_lock(&(t->if_lock));
-+#define if_unlock(t) mutex_unlock(&(t->if_lock));
-
- /* Used to help with determining the pkts on receive */
- #define PKTGEN_MAGIC 0xbe9be955
-@@ -423,7 +423,7 @@ struct pktgen_net {
- };
-
- struct pktgen_thread {
-- spinlock_t if_lock; /* for list of devices */
-+ struct mutex if_lock; /* for list of devices */
- struct list_head if_list; /* All device here */
- struct list_head th_list;
- struct task_struct *tsk;
-@@ -2010,11 +2010,13 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
- {
- struct pktgen_thread *t;
-
-+ mutex_lock(&pktgen_thread_lock);
-+
- list_for_each_entry(t, &pn->pktgen_threads, th_list) {
- struct pktgen_dev *pkt_dev;
-
-- rcu_read_lock();
-- list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
-+ if_lock(t);
-+ list_for_each_entry(pkt_dev, &t->if_list, list) {
- if (pkt_dev->odev != dev)
- continue;
-
-@@ -2029,8 +2031,9 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
- dev->name);
- break;
- }
-- rcu_read_unlock();
-+ if_unlock(t);
- }
-+ mutex_unlock(&pktgen_thread_lock);
- }
-
- static int pktgen_device_event(struct notifier_block *unused,
-@@ -2286,7 +2289,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
-
- static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
- {
-- pkt_dev->pkt_overhead = LL_RESERVED_SPACE(pkt_dev->odev);
-+ pkt_dev->pkt_overhead = 0;
- pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
- pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
- pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
-@@ -2777,13 +2780,13 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
- }
-
- static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
-- struct pktgen_dev *pkt_dev,
-- unsigned int extralen)
-+ struct pktgen_dev *pkt_dev)
- {
-+ unsigned int extralen = LL_RESERVED_SPACE(dev);
- struct sk_buff *skb = NULL;
-- unsigned int size = pkt_dev->cur_pkt_size + 64 + extralen +
-- pkt_dev->pkt_overhead;
-+ unsigned int size;
-
-+ size = pkt_dev->cur_pkt_size + 64 + extralen + pkt_dev->pkt_overhead;
- if (pkt_dev->flags & F_NODE) {
- int node = pkt_dev->node >= 0 ? pkt_dev->node : numa_node_id();
-
-@@ -2796,8 +2799,9 @@ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
- skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
- }
-
-+ /* the caller pre-fetches from skb->data and reserves for the mac hdr */
- if (likely(skb))
-- skb_reserve(skb, LL_RESERVED_SPACE(dev));
-+ skb_reserve(skb, extralen - 16);
-
- return skb;
- }
-@@ -2830,16 +2834,14 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
- mod_cur_headers(pkt_dev);
- queue_map = pkt_dev->cur_queue_map;
-
-- datalen = (odev->hard_header_len + 16) & ~0xf;
--
-- skb = pktgen_alloc_skb(odev, pkt_dev, datalen);
-+ skb = pktgen_alloc_skb(odev, pkt_dev);
- if (!skb) {
- sprintf(pkt_dev->result, "No memory");
- return NULL;
- }
-
- prefetchw(skb->data);
-- skb_reserve(skb, datalen);
-+ skb_reserve(skb, 16);
-
- /* Reserve for ethernet and IP header */
- eth = (__u8 *) skb_push(skb, 14);
-@@ -2959,7 +2961,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
- mod_cur_headers(pkt_dev);
- queue_map = pkt_dev->cur_queue_map;
-
-- skb = pktgen_alloc_skb(odev, pkt_dev, 16);
-+ skb = pktgen_alloc_skb(odev, pkt_dev);
- if (!skb) {
- sprintf(pkt_dev->result, "No memory");
- return NULL;
-@@ -3763,7 +3765,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
- return -ENOMEM;
- }
-
-- spin_lock_init(&t->if_lock);
-+ mutex_init(&t->if_lock);
- t->cpu = cpu;
-
- INIT_LIST_HEAD(&t->if_list);
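
The headroom shuffle above is clearer with plain-ethernet numbers: LL_RESERVED_SPACE() rounds the link-layer headroom up to a 16-byte boundary (HH_DATA_MOD), pktgen_alloc_skb() now keeps 16 of those bytes for its caller, and fill_packet_ipv4() reserves them and pushes the 14-byte ethernet header, leaving the IP header 16-byte aligned. A worked sketch (plain ethernet, needed_headroom of 0 assumed):

#include <stdio.h>

int main(void)
{
    int hard_header_len = 14, needed_headroom = 0;
    int extralen = (hard_header_len + needed_headroom + 15) & ~15;  /* 16 */

    int data = (extralen - 16) + 16;  /* both skb_reserve() calls: 16 */
    int eth  = data - 14;             /* skb_push() of the eth header: 2 */
    int ip   = eth + 14;              /* 16, a multiple of 16 */
    printf("eth hdr at %d, ip hdr at %d (16-byte aligned)\n", eth, ip);
    return 0;
}
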
-diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
-index 66dff5e..02acfff 100644
---- a/net/ethernet/eth.c
-+++ b/net/ethernet/eth.c
-@@ -439,7 +439,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
-
- skb_gro_pull(skb, sizeof(*eh));
- skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
-- pp = ptype->callbacks.gro_receive(head, skb);
-+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
-
- out_unlock:
- rcu_read_unlock();
-diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
-index 55513e6..eebbc0f 100644
---- a/net/ipv4/af_inet.c
-+++ b/net/ipv4/af_inet.c
-@@ -1388,7 +1388,7 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
- skb_gro_pull(skb, sizeof(*iph));
- skb_set_transport_header(skb, skb_gro_offset(skb));
-
-- pp = ops->callbacks.gro_receive(head, skb);
-+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
-
- out_unlock:
- rcu_read_unlock();
-diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
-index 321d57f..5351b61 100644
---- a/net/ipv4/fou.c
-+++ b/net/ipv4/fou.c
-@@ -249,7 +249,7 @@ static struct sk_buff **fou_gro_receive(struct sock *sk,
- if (!ops || !ops->callbacks.gro_receive)
- goto out_unlock;
-
-- pp = ops->callbacks.gro_receive(head, skb);
-+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
-
- out_unlock:
- rcu_read_unlock();
-@@ -441,7 +441,7 @@ static struct sk_buff **gue_gro_receive(struct sock *sk,
- if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
- goto out_unlock;
-
-- pp = ops->callbacks.gro_receive(head, skb);
-+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
- flush = 0;
-
- out_unlock:
-diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
-index ecd1e09..6871f59 100644
---- a/net/ipv4/gre_offload.c
-+++ b/net/ipv4/gre_offload.c
-@@ -227,7 +227,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
- /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
- skb_gro_postpull_rcsum(skb, greh, grehlen);
-
-- pp = ptype->callbacks.gro_receive(head, skb);
-+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
- flush = 0;
-
- out_unlock:
-diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
-index 71a52f4d..11ef96e 100644
---- a/net/ipv4/ip_sockglue.c
-+++ b/net/ipv4/ip_sockglue.c
-@@ -98,7 +98,7 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
- }
-
- static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
-- int offset)
-+ int tlen, int offset)
- {
- __wsum csum = skb->csum;
-
-@@ -106,8 +106,9 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
- return;
-
- if (offset != 0)
-- csum = csum_sub(csum, csum_partial(skb_transport_header(skb),
-- offset, 0));
-+ csum = csum_sub(csum,
-+ csum_partial(skb_transport_header(skb) + tlen,
-+ offset, 0));
-
- put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
- }
-@@ -153,7 +154,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
- }
-
- void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
-- int offset)
-+ int tlen, int offset)
- {
- struct inet_sock *inet = inet_sk(skb->sk);
- unsigned int flags = inet->cmsg_flags;
-@@ -216,7 +217,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
- }
-
- if (flags & IP_CMSG_CHECKSUM)
-- ip_cmsg_recv_checksum(msg, skb, offset);
-+ ip_cmsg_recv_checksum(msg, skb, tlen, offset);
- }
- EXPORT_SYMBOL(ip_cmsg_recv_offset);
-
-diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
-index 1cb67de..80bc36b 100644
---- a/net/ipv4/sysctl_net_ipv4.c
-+++ b/net/ipv4/sysctl_net_ipv4.c
-@@ -96,11 +96,11 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
- container_of(table->data, struct net, ipv4.ping_group_range.range);
- unsigned int seq;
- do {
-- seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
-+ seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
-
- *low = data[0];
- *high = data[1];
-- } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
-+ } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
- }
-
- /* Update system visible IP port range */
-@@ -109,10 +109,10 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
- kgid_t *data = table->data;
- struct net *net =
- container_of(table->data, struct net, ipv4.ping_group_range.range);
-- write_seqlock(&net->ipv4.ip_local_ports.lock);
-+ write_seqlock(&net->ipv4.ping_group_range.lock);
- data[0] = low;
- data[1] = high;
-- write_sequnlock(&net->ipv4.ip_local_ports.lock);
-+ write_sequnlock(&net->ipv4.ping_group_range.lock);
- }
-
- /* Validate changes from /proc interface. */
-diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
-index 5fdcb8d..c0d71e7 100644
---- a/net/ipv4/udp.c
-+++ b/net/ipv4/udp.c
-@@ -1327,7 +1327,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
- *addr_len = sizeof(*sin);
- }
- if (inet->cmsg_flags)
-- ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr) + off);
-+ ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off);
-
- err = copied;
- if (flags & MSG_TRUNC)
-diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
-index 81f253b..6de9f97 100644
---- a/net/ipv4/udp_offload.c
-+++ b/net/ipv4/udp_offload.c
-@@ -293,7 +293,7 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
-
- skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
- skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
-- pp = udp_sk(sk)->gro_receive(sk, head, skb);
-+ pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
-
- out_unlock:
- rcu_read_unlock();
-diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
-index 2f1f5d4..f5432d6 100644
---- a/net/ipv6/addrconf.c
-+++ b/net/ipv6/addrconf.c
-@@ -2995,7 +2995,7 @@ static void init_loopback(struct net_device *dev)
- * lo device down, release this obsolete dst and
- * reallocate a new router for ifa.
- */
-- if (sp_ifa->rt->dst.obsolete > 0) {
-+ if (!atomic_read(&sp_ifa->rt->rt6i_ref)) {
- ip6_rt_put(sp_ifa->rt);
- sp_ifa->rt = NULL;
- } else {
-diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
-index 22e90e5..a09418b 100644
---- a/net/ipv6/ip6_offload.c
-+++ b/net/ipv6/ip6_offload.c
-@@ -243,7 +243,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
-
- skb_gro_postpull_rcsum(skb, iph, nlen);
-
-- pp = ops->callbacks.gro_receive(head, skb);
-+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
-
- out_unlock:
- rcu_read_unlock();
-diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
-index 888543d..41489f3 100644
---- a/net/ipv6/ip6_tunnel.c
-+++ b/net/ipv6/ip6_tunnel.c
-@@ -155,6 +155,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
- hash = HASH(&any, local);
- for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
- if (ipv6_addr_equal(local, &t->parms.laddr) &&
-+ ipv6_addr_any(&t->parms.raddr) &&
- (t->dev->flags & IFF_UP))
- return t;
- }
-@@ -162,6 +163,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
- hash = HASH(remote, &any);
- for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
- if (ipv6_addr_equal(remote, &t->parms.raddr) &&
-+ ipv6_addr_any(&t->parms.laddr) &&
- (t->dev->flags & IFF_UP))
- return t;
- }
-@@ -1132,6 +1134,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
- if (err)
- return err;
-
-+ skb->protocol = htons(ETH_P_IPV6);
- skb_push(skb, sizeof(struct ipv6hdr));
- skb_reset_network_header(skb);
- ipv6h = ipv6_hdr(skb);
-diff --git a/net/ipv6/route.c b/net/ipv6/route.c
-index 269218a..23153ac 100644
---- a/net/ipv6/route.c
-+++ b/net/ipv6/route.c
-@@ -656,7 +656,8 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
- struct net_device *dev = rt->dst.dev;
-
- if (dev && !netif_carrier_ok(dev) &&
-- idev->cnf.ignore_routes_with_linkdown)
-+ idev->cnf.ignore_routes_with_linkdown &&
-+ !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
- goto out;
-
- if (rt6_check_expired(rt))
-@@ -1050,6 +1051,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
- int strict = 0;
-
- strict |= flags & RT6_LOOKUP_F_IFACE;
-+ strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
- if (net->ipv6.devconf_all->forwarding == 0)
- strict |= RT6_LOOKUP_F_REACHABLE;
-
-@@ -1783,7 +1785,7 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net,
- };
- struct fib6_table *table;
- struct rt6_info *rt;
-- int flags = RT6_LOOKUP_F_IFACE;
-+ int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;
-
- table = fib6_get_table(net, cfg->fc_table);
- if (!table)
-diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
-index 94f4f89..fc67822 100644
---- a/net/ipv6/tcp_ipv6.c
-+++ b/net/ipv6/tcp_ipv6.c
-@@ -1193,6 +1193,16 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
- return NULL;
- }
-
-+static void tcp_v6_restore_cb(struct sk_buff *skb)
-+{
-+ /* We need to move header back to the beginning if xfrm6_policy_check()
-+ * and tcp_v6_fill_cb() are going to be called again.
-+ * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
-+ */
-+ memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
-+ sizeof(struct inet6_skb_parm));
-+}
-+
- /* The socket must have it's spinlock held when we get
- * here, unless it is a TCP_LISTEN socket.
- *
-@@ -1322,6 +1332,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
- np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
- if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
- skb_set_owner_r(opt_skb, sk);
-+ tcp_v6_restore_cb(opt_skb);
- opt_skb = xchg(&np->pktoptions, opt_skb);
- } else {
- __kfree_skb(opt_skb);
-@@ -1355,15 +1366,6 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
- TCP_SKB_CB(skb)->sacked = 0;
- }
-
--static void tcp_v6_restore_cb(struct sk_buff *skb)
--{
-- /* We need to move header back to the beginning if xfrm6_policy_check()
-- * and tcp_v6_fill_cb() are going to be called again.
-- */
-- memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
-- sizeof(struct inet6_skb_parm));
--}
--
- static int tcp_v6_rcv(struct sk_buff *skb)
- {
- const struct tcphdr *th;
-diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
-index 19ac3a1..c2a8656 100644
---- a/net/ipv6/udp.c
-+++ b/net/ipv6/udp.c
-@@ -427,7 +427,8 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
-
- if (is_udp4) {
- if (inet->cmsg_flags)
-- ip_cmsg_recv(msg, skb);
-+ ip_cmsg_recv_offset(msg, skb,
-+ sizeof(struct udphdr), off);
- } else {
- if (np->rxopt.all)
- ip6_datagram_recv_specific_ctl(sk, msg, skb);
-diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
-index 627f898..62bea45 100644
---- a/net/netlink/af_netlink.c
-+++ b/net/netlink/af_netlink.c
-@@ -1832,7 +1832,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
- /* Record the max length of recvmsg() calls for future allocations */
- nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
- nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
-- 16384);
-+ SKB_WITH_OVERHEAD(32768));
-
- copied = data_skb->len;
- if (len < copied) {
-@@ -2083,8 +2083,9 @@ static int netlink_dump(struct sock *sk)
-
- if (alloc_min_size < nlk->max_recvmsg_len) {
- alloc_size = nlk->max_recvmsg_len;
-- skb = alloc_skb(alloc_size, GFP_KERNEL |
-- __GFP_NOWARN | __GFP_NORETRY);
-+ skb = alloc_skb(alloc_size,
-+ (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
-+ __GFP_NOWARN | __GFP_NORETRY);
- }
- if (!skb) {
- alloc_size = alloc_min_size;
-diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
-index 33a4697..d2238b2 100644
---- a/net/packet/af_packet.c
-+++ b/net/packet/af_packet.c
-@@ -250,7 +250,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
- static int packet_direct_xmit(struct sk_buff *skb)
- {
- struct net_device *dev = skb->dev;
-- netdev_features_t features;
-+ struct sk_buff *orig_skb = skb;
- struct netdev_queue *txq;
- int ret = NETDEV_TX_BUSY;
-
-@@ -258,9 +258,8 @@ static int packet_direct_xmit(struct sk_buff *skb)
- !netif_carrier_ok(dev)))
- goto drop;
-
-- features = netif_skb_features(skb);
-- if (skb_needs_linearize(skb, features) &&
-- __skb_linearize(skb))
-+ skb = validate_xmit_skb_list(skb, dev);
-+ if (skb != orig_skb)
- goto drop;
-
- txq = skb_get_tx_queue(dev, skb);
-@@ -280,7 +279,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
- return ret;
- drop:
- atomic_long_inc(&dev->tx_dropped);
-- kfree_skb(skb);
-+ kfree_skb_list(skb);
- return NET_XMIT_DROP;
- }
-
-@@ -3952,6 +3951,7 @@ static int packet_notifier(struct notifier_block *this,
- }
- if (msg == NETDEV_UNREGISTER) {
- packet_cached_dev_reset(po);
-+ fanout_release(sk);
- po->ifindex = -1;
- if (po->prot_hook.dev)
- dev_put(po->prot_hook.dev);
-diff --git a/net/sched/act_api.c b/net/sched/act_api.c
-index d09d068..027ddf4 100644
---- a/net/sched/act_api.c
-+++ b/net/sched/act_api.c
-@@ -341,22 +341,25 @@ int tcf_register_action(struct tc_action_ops *act,
- if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
- return -EINVAL;
-
-+ /* We have to register pernet ops before making the action ops visible,
-+ * otherwise tcf_action_init_1() could get a partially initialized
-+ * netns.
-+ */
-+ ret = register_pernet_subsys(ops);
-+ if (ret)
-+ return ret;
-+
- write_lock(&act_mod_lock);
- list_for_each_entry(a, &act_base, head) {
- if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
- write_unlock(&act_mod_lock);
-+ unregister_pernet_subsys(ops);
- return -EEXIST;
- }
- }
- list_add_tail(&act->head, &act_base);
- write_unlock(&act_mod_lock);
-
-- ret = register_pernet_subsys(ops);
-- if (ret) {
-- tcf_unregister_action(act, ops);
-- return ret;
-- }
--
- return 0;
- }
- EXPORT_SYMBOL(tcf_register_action);
-@@ -367,8 +370,6 @@ int tcf_unregister_action(struct tc_action_ops *act,
- struct tc_action_ops *a;
- int err = -ENOENT;
-
-- unregister_pernet_subsys(ops);
--
- write_lock(&act_mod_lock);
- list_for_each_entry(a, &act_base, head) {
- if (a == act) {
-@@ -378,6 +379,8 @@ int tcf_unregister_action(struct tc_action_ops *act,
- }
- }
- write_unlock(&act_mod_lock);
-+ if (!err)
-+ unregister_pernet_subsys(ops);
- return err;
- }
- EXPORT_SYMBOL(tcf_unregister_action);
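
The reordering above is an instance of a general publish/unpublish rule: an object must be fully initialized before it is added to a list that other code searches, and must be removed from that list before its backing state is torn down. A condensed sketch of the shape, with hypothetical names (init_backing_state(), already_listed(), remove_if_listed()) standing in for the action-ops specifics:

static LIST_HEAD(thing_list);
static DEFINE_RWLOCK(thing_lock);

int register_thing(struct thing *t)
{
	int ret;

	ret = init_backing_state(t);	/* cf. register_pernet_subsys() */
	if (ret)
		return ret;

	write_lock(&thing_lock);
	if (already_listed(t)) {
		write_unlock(&thing_lock);
		teardown_backing_state(t);	/* undo before reporting */
		return -EEXIST;
	}
	list_add_tail(&t->head, &thing_list);
	write_unlock(&thing_lock);
	return 0;
}

int unregister_thing(struct thing *t)
{
	int err = remove_if_listed(t);	/* unlink under thing_lock */

	if (!err)
		teardown_backing_state(t);	/* only once unreachable */
	return err;
}
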
-diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
-index 691409d..4ffc6c1 100644
---- a/net/sched/act_vlan.c
-+++ b/net/sched/act_vlan.c
-@@ -36,6 +36,12 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
- bstats_update(&v->tcf_bstats, skb);
- action = v->tcf_action;
-
-+ /* Ensure 'data' points at mac_header prior calling vlan manipulating
-+ * functions.
-+ */
-+ if (skb_at_tc_ingress(skb))
-+ skb_push_rcsum(skb, skb->mac_len);
-+
- switch (v->tcfv_action) {
- case TCA_VLAN_ACT_POP:
- err = skb_vlan_pop(skb);
-@@ -57,6 +63,9 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
- action = TC_ACT_SHOT;
- v->tcf_qstats.drops++;
- unlock:
-+ if (skb_at_tc_ingress(skb))
-+ skb_pull_rcsum(skb, skb->mac_len);
-+
- spin_unlock(&v->tcf_lock);
- return action;
- }
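
The push/pull pair above is needed because at tc ingress skb->data points just past the MAC header, while skb_vlan_pop()/skb_vlan_push() expect it to sit at the MAC header; the *_rcsum variants keep a CHECKSUM_COMPLETE value consistent while the header moves. The balanced shape, sketched out of context:

if (skb_at_tc_ingress(skb))
	skb_push_rcsum(skb, skb->mac_len);	/* data -> mac header */

err = skb_vlan_pop(skb);			/* or skb_vlan_push(...) */

if (skb_at_tc_ingress(skb))
	skb_pull_rcsum(skb, skb->mac_len);	/* data -> network header */
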
-diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
-index a7c5645..74bed5e 100644
---- a/net/sched/cls_api.c
-+++ b/net/sched/cls_api.c
-@@ -344,7 +344,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
- if (err == 0) {
- struct tcf_proto *next = rtnl_dereference(tp->next);
-
-- tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
-+ tfilter_notify(net, skb, n, tp,
-+ t->tcm_handle, RTM_DELTFILTER);
- if (tcf_destroy(tp, false))
- RCU_INIT_POINTER(*back, next);
- }
-diff --git a/net/sctp/output.c b/net/sctp/output.c
-index 31b7bc3..8192990 100644
---- a/net/sctp/output.c
-+++ b/net/sctp/output.c
-@@ -417,6 +417,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
- __u8 has_data = 0;
- int gso = 0;
- int pktcount = 0;
-+ int auth_len = 0;
- struct dst_entry *dst;
- unsigned char *auth = NULL; /* pointer to auth in skb data */
-
-@@ -505,7 +506,12 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
- list_for_each_entry(chunk, &packet->chunk_list, list) {
- int padded = WORD_ROUND(chunk->skb->len);
-
-- if (pkt_size + padded > tp->pathmtu)
-+ if (chunk == packet->auth)
-+ auth_len = padded;
-+ else if (auth_len + padded + packet->overhead >
-+ tp->pathmtu)
-+ goto nomem;
-+ else if (pkt_size + padded > tp->pathmtu)
- break;
- pkt_size += padded;
- }
-diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
-index d88bb2b..920469e 100644
---- a/net/sctp/sm_statefuns.c
-+++ b/net/sctp/sm_statefuns.c
-@@ -3422,6 +3422,12 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
- return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
- commands);
-
-+ /* Report violation if chunk len overflows */
-+ ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
-+ if (ch_end > skb_tail_pointer(skb))
-+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
-+ commands);
-+
- /* Now that we know we at least have a chunk header,
- * do things that are type appropriate.
- */
-@@ -3453,12 +3459,6 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
- }
- }
-
-- /* Report violation if chunk len overflows */
-- ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
-- if (ch_end > skb_tail_pointer(skb))
-- return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
-- commands);
--
- ch = (sctp_chunkhdr_t *) ch_end;
- } while (ch_end < skb_tail_pointer(skb));
-
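
The hunk above moves the length sanity check ahead of any use of the chunk, which is the standard rule for walking length-prefixed records: validate that the advertised (word-rounded) length stays inside the buffer before acting on the record, otherwise a crafted length can push the cursor past the tail. A self-contained userspace analogue (types and names are illustrative, not SCTP's):

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>

struct rec_hdr { uint8_t type, flags; uint16_t length; };

static int walk_records(const uint8_t *buf, size_t len)
{
	const uint8_t *p = buf, *tail = buf + len;

	while ((size_t)(tail - p) >= sizeof(struct rec_hdr)) {
		const struct rec_hdr *h = (const struct rec_hdr *)p;
		size_t rlen = ((size_t)ntohs(h->length) + 3) & ~(size_t)3;

		/* reject before any use of the record body */
		if (rlen < sizeof(struct rec_hdr) ||
		    rlen > (size_t)(tail - p))
			return -1;
		/* ... record body is now known to be in bounds ... */
		p += rlen;
	}
	return 0;
}
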
-diff --git a/net/sctp/socket.c b/net/sctp/socket.c
-index 8ed2d99..baccbf3 100644
---- a/net/sctp/socket.c
-+++ b/net/sctp/socket.c
-@@ -4683,7 +4683,7 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
- static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
- int __user *optlen)
- {
-- if (len <= 0)
-+ if (len == 0)
- return -EINVAL;
- if (len > sizeof(struct sctp_event_subscribe))
- len = sizeof(struct sctp_event_subscribe);
-@@ -6426,6 +6426,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
- if (get_user(len, optlen))
- return -EFAULT;
-
-+ if (len < 0)
-+ return -EINVAL;
-+
- lock_sock(sk);
-
- switch (optname) {
-diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
-index a5fc9dd..a56c5e6 100644
---- a/net/switchdev/switchdev.c
-+++ b/net/switchdev/switchdev.c
-@@ -774,6 +774,9 @@ int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
- int err;
-
-+ if (!netif_is_bridge_port(dev))
-+ return -EOPNOTSUPP;
-+
- err = switchdev_port_attr_get(dev, &attr);
- if (err && err != -EOPNOTSUPP)
- return err;
-@@ -929,6 +932,9 @@ int switchdev_port_bridge_setlink(struct net_device *dev,
- struct nlattr *afspec;
- int err = 0;
-
-+ if (!netif_is_bridge_port(dev))
-+ return -EOPNOTSUPP;
-+
- protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
- IFLA_PROTINFO);
- if (protinfo) {
-@@ -962,6 +968,9 @@ int switchdev_port_bridge_dellink(struct net_device *dev,
- {
- struct nlattr *afspec;
-
-+ if (!netif_is_bridge_port(dev))
-+ return -EOPNOTSUPP;
-+
- afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
- IFLA_AF_SPEC);
- if (afspec)
diff --git a/4.8.8/0000_README b/4.8.9/0000_README
index 59f6fd4..1062a0b 100644
--- a/4.8.8/0000_README
+++ b/4.8.9/0000_README
@@ -2,11 +2,11 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 1007_linux-4.8.8.patch
+Patch: 1008_linux-4.8.9.patch
From: http://www.kernel.org
-Desc: Linux 4.8.8
+Desc: Linux 4.8.9
-Patch: 4420_grsecurity-3.1-4.8.8-201611150756.patch
+Patch: 4420_grsecurity-3.1-4.8.9-201611192033.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/4.8.9/1008_linux-4.8.9.patch b/4.8.9/1008_linux-4.8.9.patch
new file mode 100644
index 0000000..c526740
--- /dev/null
+++ b/4.8.9/1008_linux-4.8.9.patch
@@ -0,0 +1,3119 @@
+diff --git a/Makefile b/Makefile
+index 8f18daa..c1519ab 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 8
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Psychotic Stoned Sheep
+
+diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
+index c10390d..f927b8d 100644
+--- a/arch/arc/kernel/time.c
++++ b/arch/arc/kernel/time.c
+@@ -152,17 +152,14 @@ static cycle_t arc_read_rtc(struct clocksource *cs)
+ cycle_t full;
+ } stamp;
+
+- /*
+- * hardware has an internal state machine which tracks readout of
+- * low/high and updates the CTRL.status if
+- * - interrupt/exception taken between the two reads
+- * - high increments after low has been read
+- */
+- do {
+- stamp.low = read_aux_reg(AUX_RTC_LOW);
+- stamp.high = read_aux_reg(AUX_RTC_HIGH);
+- status = read_aux_reg(AUX_RTC_CTRL);
+- } while (!(status & _BITUL(31)));
++
++ __asm__ __volatile(
++ "1: \n"
++ " lr %0, [AUX_RTC_LOW] \n"
++ " lr %1, [AUX_RTC_HIGH] \n"
++ " lr %2, [AUX_RTC_CTRL] \n"
++ " bbit0.nt %2, 31, 1b \n"
++ : "=r" (stamp.low), "=r" (stamp.high), "=r" (status));
+
+ return stamp.full;
+ }
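
Either form of the readout above implements the same protocol: a 64-bit counter exposed as two 32-bit halves is read low then high, and the read is retried whenever the hardware's status bit says the snapshot may be torn (an interrupt hit between the reads, or the high word moved after the low word was sampled). A userspace analogue of the retry discipline, with a sequence counter standing in for the CTRL.status bit (layout hypothetical):

#include <stdatomic.h>
#include <stdint.h>

struct split_counter {
	_Atomic uint32_t seq;		/* odd while the writer updates */
	_Atomic uint32_t lo, hi;
};

static uint64_t read_split(struct split_counter *c)
{
	uint32_t s0, s1, lo, hi;

	do {
		s0 = atomic_load(&c->seq);
		lo = atomic_load(&c->lo);
		hi = atomic_load(&c->hi);
		s1 = atomic_load(&c->seq);
	} while ((s0 & 1) || s0 != s1);	/* mid-update or torn: retry */

	return ((uint64_t)hi << 32) | lo;
}
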
+diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
+index 9288851..20afc65 100644
+--- a/arch/arc/mm/dma.c
++++ b/arch/arc/mm/dma.c
+@@ -105,31 +105,6 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
+ __free_pages(page, get_order(size));
+ }
+
+-static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t dma_addr, size_t size,
+- unsigned long attrs)
+-{
+- unsigned long user_count = vma_pages(vma);
+- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+- unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
+- unsigned long off = vma->vm_pgoff;
+- int ret = -ENXIO;
+-
+- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+-
+- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+- return ret;
+-
+- if (off < count && user_count <= (count - off)) {
+- ret = remap_pfn_range(vma, vma->vm_start,
+- pfn + off,
+- user_count << PAGE_SHIFT,
+- vma->vm_page_prot);
+- }
+-
+- return ret;
+-}
+-
+ /*
+ * streaming DMA Mapping API...
+ * CPU accesses page via normal paddr, thus needs to explicitly made
+@@ -218,7 +193,6 @@ static int arc_dma_supported(struct device *dev, u64 dma_mask)
+ struct dma_map_ops arc_dma_ops = {
+ .alloc = arc_dma_alloc,
+ .free = arc_dma_free,
+- .mmap = arc_dma_mmap,
+ .map_page = arc_dma_map_page,
+ .map_sg = arc_dma_map_sg,
+ .sync_single_for_device = arc_dma_sync_single_for_device,
+diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
+index 794bebb..28f03ca 100644
+--- a/arch/s390/hypfs/hypfs_diag.c
++++ b/arch/s390/hypfs/hypfs_diag.c
+@@ -363,11 +363,11 @@ static void *diag204_store(void)
+ static int diag224_get_name_table(void)
+ {
+ /* memory must be below 2GB */
+- diag224_cpu_names = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
++ diag224_cpu_names = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
+ if (!diag224_cpu_names)
+ return -ENOMEM;
+ if (diag224(diag224_cpu_names)) {
+- free_page((unsigned long) diag224_cpu_names);
++ kfree(diag224_cpu_names);
+ return -EOPNOTSUPP;
+ }
+ EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
+@@ -376,7 +376,7 @@ static int diag224_get_name_table(void)
+
+ static void diag224_delete_name_table(void)
+ {
+- free_page((unsigned long) diag224_cpu_names);
++ kfree(diag224_cpu_names);
+ }
+
+ static int diag224_idx2name(int index, char *name)
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index 602af69..0332317 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -192,7 +192,7 @@ struct task_struct;
+ struct mm_struct;
+ struct seq_file;
+
+-typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
++typedef int (*dump_trace_func_t)(void *data, unsigned long address);
+ void dump_trace(dump_trace_func_t func, void *data,
+ struct task_struct *task, unsigned long sp);
+
+diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
+index 518f615..6693383 100644
+--- a/arch/s390/kernel/dumpstack.c
++++ b/arch/s390/kernel/dumpstack.c
+@@ -38,10 +38,10 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
+ if (sp < low || sp > high - sizeof(*sf))
+ return sp;
+ sf = (struct stack_frame *) sp;
+- if (func(data, sf->gprs[8], 0))
+- return sp;
+ /* Follow the backchain. */
+ while (1) {
++ if (func(data, sf->gprs[8]))
++ return sp;
+ low = sp;
+ sp = sf->back_chain;
+ if (!sp)
+@@ -49,8 +49,6 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
+ if (sp <= low || sp > high - sizeof(*sf))
+ return sp;
+ sf = (struct stack_frame *) sp;
+- if (func(data, sf->gprs[8], 1))
+- return sp;
+ }
+ /* Zero backchain detected, check for interrupt frame. */
+ sp = (unsigned long) (sf + 1);
+@@ -58,7 +56,7 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
+ return sp;
+ regs = (struct pt_regs *) sp;
+ if (!user_mode(regs)) {
+- if (func(data, regs->psw.addr, 1))
++ if (func(data, regs->psw.addr))
+ return sp;
+ }
+ low = sp;
+@@ -92,7 +90,7 @@ struct return_address_data {
+ int depth;
+ };
+
+-static int __return_address(void *data, unsigned long address, int reliable)
++static int __return_address(void *data, unsigned long address)
+ {
+ struct return_address_data *rd = data;
+
+@@ -111,12 +109,9 @@ unsigned long return_address(int depth)
+ }
+ EXPORT_SYMBOL_GPL(return_address);
+
+-static int show_address(void *data, unsigned long address, int reliable)
++static int show_address(void *data, unsigned long address)
+ {
+- if (reliable)
+- printk(" [<%016lx>] %pSR \n", address, (void *)address);
+- else
+- printk("([<%016lx>] %pSR)\n", address, (void *)address);
++ printk("([<%016lx>] %pSR)\n", address, (void *)address);
+ return 0;
+ }
+
+diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
+index 955a7b6..17431f6 100644
+--- a/arch/s390/kernel/perf_event.c
++++ b/arch/s390/kernel/perf_event.c
+@@ -222,7 +222,7 @@ static int __init service_level_perf_register(void)
+ }
+ arch_initcall(service_level_perf_register);
+
+-static int __perf_callchain_kernel(void *data, unsigned long address, int reliable)
++static int __perf_callchain_kernel(void *data, unsigned long address)
+ {
+ struct perf_callchain_entry_ctx *entry = data;
+
+diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
+index 355db9d..44f84b2 100644
+--- a/arch/s390/kernel/stacktrace.c
++++ b/arch/s390/kernel/stacktrace.c
+@@ -27,12 +27,12 @@ static int __save_address(void *data, unsigned long address, int nosched)
+ return 1;
+ }
+
+-static int save_address(void *data, unsigned long address, int reliable)
++static int save_address(void *data, unsigned long address)
+ {
+ return __save_address(data, address, 0);
+ }
+
+-static int save_address_nosched(void *data, unsigned long address, int reliable)
++static int save_address_nosched(void *data, unsigned long address)
+ {
+ return __save_address(data, address, 1);
+ }
+diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
+index 9a4de45..16f4c39 100644
+--- a/arch/s390/oprofile/init.c
++++ b/arch/s390/oprofile/init.c
+@@ -13,7 +13,7 @@
+ #include <linux/init.h>
+ #include <asm/processor.h>
+
+-static int __s390_backtrace(void *data, unsigned long address, int reliable)
++static int __s390_backtrace(void *data, unsigned long address)
+ {
+ unsigned int *depth = data;
+
+diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
+index 9976fce..77f28ce 100644
+--- a/arch/x86/entry/Makefile
++++ b/arch/x86/entry/Makefile
+@@ -5,8 +5,8 @@
+ OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y
+ OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
+
+-CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
+-CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,)
++CFLAGS_syscall_64.o += -Wno-override-init
++CFLAGS_syscall_32.o += -Wno-override-init
+ obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
+ obj-y += common.o
+
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index d99ca57..fbd1944 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -453,7 +453,6 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
+ polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
+
+ mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+- acpi_penalize_sci_irq(bus_irq, trigger, polarity);
+
+ /*
+ * stash over-ride to indicate we've been here
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index caea575..60746ef 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -662,7 +662,7 @@ static int ghes_proc(struct ghes *ghes)
+ ghes_do_proc(ghes, ghes->estatus);
+ out:
+ ghes_clear_estatus(ghes);
+- return rc;
++ return 0;
+ }
+
+ static void ghes_add_timer(struct ghes *ghes)
+diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
+index bc3d914..c983bf7 100644
+--- a/drivers/acpi/pci_link.c
++++ b/drivers/acpi/pci_link.c
+@@ -87,7 +87,6 @@ struct acpi_pci_link {
+
+ static LIST_HEAD(acpi_link_list);
+ static DEFINE_MUTEX(acpi_link_lock);
+-static int sci_irq = -1, sci_penalty;
+
+ /* --------------------------------------------------------------------------
+ PCI Link Device Management
+@@ -497,13 +496,25 @@ static int acpi_irq_get_penalty(int irq)
+ {
+ int penalty = 0;
+
+- if (irq == sci_irq)
+- penalty += sci_penalty;
++ /*
++ * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
++ * with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
++ * use for PCI IRQs.
++ */
++ if (irq == acpi_gbl_FADT.sci_interrupt) {
++ u32 type = irq_get_trigger_type(irq) & IRQ_TYPE_SENSE_MASK;
++
++ if (type != IRQ_TYPE_LEVEL_LOW)
++ penalty += PIRQ_PENALTY_ISA_ALWAYS;
++ else
++ penalty += PIRQ_PENALTY_PCI_USING;
++ }
+
+ if (irq < ACPI_MAX_ISA_IRQS)
+ return penalty + acpi_isa_irq_penalty[irq];
+
+- return penalty + acpi_irq_pci_sharing_penalty(irq);
++ penalty += acpi_irq_pci_sharing_penalty(irq);
++ return penalty;
+ }
+
+ int __init acpi_irq_penalty_init(void)
+@@ -608,10 +619,6 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
+ acpi_device_bid(link->device));
+ return -ENODEV;
+ } else {
+- if (link->irq.active < ACPI_MAX_ISA_IRQS)
+- acpi_isa_irq_penalty[link->irq.active] +=
+- PIRQ_PENALTY_PCI_USING;
+-
+ printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
+ acpi_device_name(link->device),
+ acpi_device_bid(link->device), link->irq.active);
+@@ -842,7 +849,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
+ continue;
+
+ if (used)
+- new_penalty = acpi_isa_irq_penalty[irq] +
++ new_penalty = acpi_irq_get_penalty(irq) +
+ PIRQ_PENALTY_ISA_USED;
+ else
+ new_penalty = 0;
+@@ -864,7 +871,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
+ void acpi_penalize_isa_irq(int irq, int active)
+ {
+ if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
+- acpi_isa_irq_penalty[irq] +=
++ acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) +
+ (active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
+ }
+
+@@ -874,17 +881,6 @@ bool acpi_isa_irq_available(int irq)
+ acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS);
+ }
+
+-void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
+-{
+- sci_irq = irq;
+-
+- if (trigger == ACPI_MADT_TRIGGER_LEVEL &&
+- polarity == ACPI_MADT_POLARITY_ACTIVE_LOW)
+- sci_penalty = PIRQ_PENALTY_PCI_USING;
+- else
+- sci_penalty = PIRQ_PENALTY_ISA_ALWAYS;
+-}
+-
+ /*
+ * Over-ride default table to reserve additional IRQs for use by ISA
+ * e.g. acpi_irq_isa=5
+diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
+index 8348272..100be55 100644
+--- a/drivers/block/drbd/drbd_main.c
++++ b/drivers/block/drbd/drbd_main.c
+@@ -1871,7 +1871,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
+ drbd_update_congested(connection);
+ }
+ do {
+- rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
++ rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
+ if (rv == -EAGAIN) {
+ if (we_should_drop_the_connection(connection, sock))
+ break;
+diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
+index 0f7d28a..4431129 100644
+--- a/drivers/char/agp/intel-gtt.c
++++ b/drivers/char/agp/intel-gtt.c
+@@ -845,8 +845,6 @@ void intel_gtt_insert_page(dma_addr_t addr,
+ unsigned int flags)
+ {
+ intel_private.driver->write_entry(addr, pg, flags);
+- if (intel_private.driver->chipset_flush)
+- intel_private.driver->chipset_flush();
+ }
+ EXPORT_SYMBOL(intel_gtt_insert_page);
+
+diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
+index 340f96e..9203f2d 100644
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -84,14 +84,14 @@ static size_t rng_buffer_size(void)
+
+ static void add_early_randomness(struct hwrng *rng)
+ {
++ unsigned char bytes[16];
+ int bytes_read;
+- size_t size = min_t(size_t, 16, rng_buffer_size());
+
+ mutex_lock(&reading_mutex);
+- bytes_read = rng_get_data(rng, rng_buffer, size, 1);
++ bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+ mutex_unlock(&reading_mutex);
+ if (bytes_read > 0)
+- add_device_randomness(rng_buffer, bytes_read);
++ add_device_randomness(bytes, bytes_read);
+ }
+
+ static inline void cleanup_rng(struct kref *kref)
+diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
+index 80ae2a5..20b1055 100644
+--- a/drivers/clk/clk-qoriq.c
++++ b/drivers/clk/clk-qoriq.c
+@@ -700,7 +700,6 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
+ struct mux_hwclock *hwc,
+ const struct clk_ops *ops,
+ unsigned long min_rate,
+- unsigned long max_rate,
+ unsigned long pct80_rate,
+ const char *fmt, int idx)
+ {
+@@ -729,8 +728,6 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
+ continue;
+ if (rate < min_rate)
+ continue;
+- if (rate > max_rate)
+- continue;
+
+ parent_names[j] = div->name;
+ hwc->parent_to_clksel[j] = i;
+@@ -762,7 +759,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
+ struct mux_hwclock *hwc;
+ const struct clockgen_pll_div *div;
+ unsigned long plat_rate, min_rate;
+- u64 max_rate, pct80_rate;
++ u64 pct80_rate;
+ u32 clksel;
+
+ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
+@@ -790,8 +787,8 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
+ return NULL;
+ }
+
+- max_rate = clk_get_rate(div->clk);
+- pct80_rate = max_rate * 8;
++ pct80_rate = clk_get_rate(div->clk);
++ pct80_rate *= 8;
+ do_div(pct80_rate, 10);
+
+ plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);
+@@ -801,7 +798,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
+ else
+ min_rate = plat_rate / 2;
+
+- return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
++ return create_mux_common(cg, hwc, &cmux_ops, min_rate,
+ pct80_rate, "cg-cmux%d", idx);
+ }
+
+@@ -816,7 +813,7 @@ static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
+ hwc->reg = cg->regs + 0x20 * idx + 0x10;
+ hwc->info = cg->info.hwaccel[idx];
+
+- return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
++ return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0,
+ "cg-hwaccel%d", idx);
+ }
+
+diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
+index 0fa91f3..bdf8b97 100644
+--- a/drivers/clk/samsung/clk-exynos-audss.c
++++ b/drivers/clk/samsung/clk-exynos-audss.c
+@@ -82,7 +82,6 @@ static const struct of_device_id exynos_audss_clk_of_match[] = {
+ .data = (void *)TYPE_EXYNOS5420, },
+ {},
+ };
+-MODULE_DEVICE_TABLE(of, exynos_audss_clk_of_match);
+
+ static void exynos_audss_clk_teardown(void)
+ {
+diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
+index 4f87f3e..c184eb8 100644
+--- a/drivers/clocksource/timer-sun5i.c
++++ b/drivers/clocksource/timer-sun5i.c
+@@ -152,13 +152,6 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
+ return IRQ_HANDLED;
+ }
+
+-static cycle_t sun5i_clksrc_read(struct clocksource *clksrc)
+-{
+- struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc);
+-
+- return ~readl(cs->timer.base + TIMER_CNTVAL_LO_REG(1));
+-}
+-
+ static int sun5i_rate_cb_clksrc(struct notifier_block *nb,
+ unsigned long event, void *data)
+ {
+@@ -217,13 +210,8 @@ static int __init sun5i_setup_clocksource(struct device_node *node,
+ writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
+ base + TIMER_CTL_REG(1));
+
+- cs->clksrc.name = node->name;
+- cs->clksrc.rating = 340;
+- cs->clksrc.read = sun5i_clksrc_read;
+- cs->clksrc.mask = CLOCKSOURCE_MASK(32);
+- cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+-
+- ret = clocksource_register_hz(&cs->clksrc, rate);
++ ret = clocksource_mmio_init(base + TIMER_CNTVAL_LO_REG(1), node->name,
++ rate, 340, 32, clocksource_mmio_readl_down);
+ if (ret) {
+ pr_err("Couldn't register clock source.\n");
+ goto err_remove_notifier;
+diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
+index 1ed6132..cd5dc27 100644
+--- a/drivers/gpio/gpio-mvebu.c
++++ b/drivers/gpio/gpio-mvebu.c
+@@ -293,10 +293,10 @@ static void mvebu_gpio_irq_ack(struct irq_data *d)
+ {
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
+- u32 mask = d->mask;
++ u32 mask = ~(1 << (d->irq - gc->irq_base));
+
+ irq_gc_lock(gc);
+- writel_relaxed(~mask, mvebu_gpioreg_edge_cause(mvchip));
++ writel_relaxed(mask, mvebu_gpioreg_edge_cause(mvchip));
+ irq_gc_unlock(gc);
+ }
+
+@@ -305,7 +305,7 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+- u32 mask = d->mask;
++ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+ ct->mask_cache_priv &= ~mask;
+@@ -319,7 +319,8 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+- u32 mask = d->mask;
++
++ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+ ct->mask_cache_priv |= mask;
+@@ -332,7 +333,8 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+- u32 mask = d->mask;
++
++ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+ ct->mask_cache_priv &= ~mask;
+@@ -345,7 +347,8 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+- u32 mask = d->mask;
++
++ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+ ct->mask_cache_priv |= mask;
+@@ -459,7 +462,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
+ for (i = 0; i < mvchip->chip.ngpio; i++) {
+ int irq;
+
+- irq = irq_find_mapping(mvchip->domain, i);
++ irq = mvchip->irqbase + i;
+
+ if (!(cause & (1 << i)))
+ continue;
+@@ -652,7 +655,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
+ struct irq_chip_type *ct;
+ struct clk *clk;
+ unsigned int ngpios;
+- bool have_irqs;
+ int soc_variant;
+ int i, cpu, id;
+ int err;
+@@ -663,9 +665,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
+ else
+ soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION;
+
+- /* Some gpio controllers do not provide irq support */
+- have_irqs = of_irq_count(np) != 0;
+-
+ mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip),
+ GFP_KERNEL);
+ if (!mvchip)
+@@ -698,8 +697,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
+ mvchip->chip.get = mvebu_gpio_get;
+ mvchip->chip.direction_output = mvebu_gpio_direction_output;
+ mvchip->chip.set = mvebu_gpio_set;
+- if (have_irqs)
+- mvchip->chip.to_irq = mvebu_gpio_to_irq;
++ mvchip->chip.to_irq = mvebu_gpio_to_irq;
+ mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
+ mvchip->chip.ngpio = ngpios;
+ mvchip->chip.can_sleep = false;
+@@ -760,30 +758,34 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
+ devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
+
+ /* Some gpio controllers do not provide irq support */
+- if (!have_irqs)
++ if (!of_irq_count(np))
+ return 0;
+
+- mvchip->domain =
+- irq_domain_add_linear(np, ngpios, &irq_generic_chip_ops, NULL);
+- if (!mvchip->domain) {
+- dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
+- mvchip->chip.label);
+- return -ENODEV;
++ /* Setup the interrupt handlers. Each chip can have up to 4
++ * interrupt handlers, with each handler dealing with 8 GPIO
++ * pins. */
++ for (i = 0; i < 4; i++) {
++ int irq = platform_get_irq(pdev, i);
++
++ if (irq < 0)
++ continue;
++ irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
++ mvchip);
+ }
+
+- err = irq_alloc_domain_generic_chips(
+- mvchip->domain, ngpios, 2, np->name, handle_level_irq,
+- IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_LEVEL, 0, 0);
+- if (err) {
+- dev_err(&pdev->dev, "couldn't allocate irq chips %s (DT).\n",
+- mvchip->chip.label);
+- goto err_domain;
++ mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
++ if (mvchip->irqbase < 0) {
++ dev_err(&pdev->dev, "no irqs\n");
++ return mvchip->irqbase;
++ }
++
++ gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase,
++ mvchip->membase, handle_level_irq);
++ if (!gc) {
++ dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
++ return -ENOMEM;
+ }
+
+- /* NOTE: The common accessors cannot be used because of the percpu
+- * access to the mask registers
+- */
+- gc = irq_get_domain_generic_chip(mvchip->domain, 0);
+ gc->private = mvchip;
+ ct = &gc->chip_types[0];
+ ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
+@@ -801,23 +803,27 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
+ ct->handler = handle_edge_irq;
+ ct->chip.name = mvchip->chip.label;
+
+- /* Setup the interrupt handlers. Each chip can have up to 4
+- * interrupt handlers, with each handler dealing with 8 GPIO
+- * pins.
+- */
+- for (i = 0; i < 4; i++) {
+- int irq = platform_get_irq(pdev, i);
++ irq_setup_generic_chip(gc, IRQ_MSK(ngpios), 0,
++ IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
+
+- if (irq < 0)
+- continue;
+- irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
+- mvchip);
++ /* Setup irq domain on top of the generic chip. */
++ mvchip->domain = irq_domain_add_simple(np, mvchip->chip.ngpio,
++ mvchip->irqbase,
++ &irq_domain_simple_ops,
++ mvchip);
++ if (!mvchip->domain) {
++ dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
++ mvchip->chip.label);
++ err = -ENODEV;
++ goto err_generic_chip;
+ }
+
+ return 0;
+
+-err_domain:
+- irq_domain_remove(mvchip->domain);
++err_generic_chip:
++ irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST,
++ IRQ_LEVEL | IRQ_NOPROBE);
++ kfree(gc);
+
+ return err;
+ }
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index e3fc901..a28feb3 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -26,18 +26,14 @@
+
+ #include "gpiolib.h"
+
+-static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
++static int of_gpiochip_match_node(struct gpio_chip *chip, void *data)
+ {
+- struct of_phandle_args *gpiospec = data;
+-
+- return chip->gpiodev->dev.of_node == gpiospec->np &&
+- chip->of_xlate(chip, gpiospec, NULL) >= 0;
++ return chip->gpiodev->dev.of_node == data;
+ }
+
+-static struct gpio_chip *of_find_gpiochip_by_xlate(
+- struct of_phandle_args *gpiospec)
++static struct gpio_chip *of_find_gpiochip_by_node(struct device_node *np)
+ {
+- return gpiochip_find(gpiospec, of_gpiochip_match_node_and_xlate);
++ return gpiochip_find(np, of_gpiochip_match_node);
+ }
+
+ static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
+@@ -83,7 +79,7 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
+ return ERR_PTR(ret);
+ }
+
+- chip = of_find_gpiochip_by_xlate(&gpiospec);
++ chip = of_find_gpiochip_by_node(gpiospec.np);
+ if (!chip) {
+ desc = ERR_PTR(-EPROBE_DEFER);
+ goto out;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+index 2057683..892d60f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+@@ -395,11 +395,8 @@ static int acp_hw_fini(void *handle)
+ {
+ int i, ret;
+ struct device *dev;
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- /* return early if no ACP */
+- if (!adev->acp.acp_genpd)
+- return 0;
++ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < ACP_DEVS ; i++) {
+ dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 414a160..9aa533c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -605,7 +605,6 @@ static int __init amdgpu_init(void)
+ {
+ amdgpu_sync_init();
+ amdgpu_fence_slab_init();
+- amd_sched_fence_slab_init();
+ if (vgacon_text_force()) {
+ DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
+ return -EINVAL;
+@@ -625,7 +624,6 @@ static void __exit amdgpu_exit(void)
+ drm_pci_exit(driver, pdriver);
+ amdgpu_unregister_atpx_handler();
+ amdgpu_sync_fini();
+- amd_sched_fence_slab_fini();
+ amdgpu_fence_slab_fini();
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index c82b95b8..0b109ae 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -68,7 +68,6 @@ int amdgpu_fence_slab_init(void)
+
+ void amdgpu_fence_slab_fini(void)
+ {
+- rcu_barrier();
+ kmem_cache_destroy(amdgpu_fence_slab);
+ }
+ /*
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 1ed64ae..e24a8af 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -99,8 +99,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
+
+ if ((amdgpu_runtime_pm != 0) &&
+ amdgpu_has_atpx() &&
+- (amdgpu_is_atpx_hybrid() ||
+- amdgpu_has_atpx_dgpu_power_cntl()) &&
+ ((flags & AMD_IS_APU) == 0))
+ flags |= AMD_IS_PX;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index e86ca39..80120fa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1654,6 +1654,5 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
+ fence_put(adev->vm_manager.ids[i].first);
+ amdgpu_sync_free(&adev->vm_manager.ids[i].active);
+ fence_put(id->flushed_updates);
+- fence_put(id->last_flush);
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+index ffe1f85..963a24d 100644
+--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+@@ -34,6 +34,9 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
+ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
+ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
+
++struct kmem_cache *sched_fence_slab;
++atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
++
+ /* Initialize a given run queue struct */
+ static void amd_sched_rq_init(struct amd_sched_rq *rq)
+ {
+@@ -615,6 +618,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
+ INIT_LIST_HEAD(&sched->ring_mirror_list);
+ spin_lock_init(&sched->job_list_lock);
+ atomic_set(&sched->hw_rq_count, 0);
++ if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
++ sched_fence_slab = kmem_cache_create(
++ "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
++ SLAB_HWCACHE_ALIGN, NULL);
++ if (!sched_fence_slab)
++ return -ENOMEM;
++ }
+
+ /* Each scheduler will run on a seperate kernel thread */
+ sched->thread = kthread_run(amd_sched_main, sched, sched->name);
+@@ -635,4 +645,6 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
+ {
+ if (sched->thread)
+ kthread_stop(sched->thread);
++ if (atomic_dec_and_test(&sched_fence_slab_ref))
++ kmem_cache_destroy(sched_fence_slab);
+ }
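
The slab handling above is the shared-singleton pattern: the first scheduler instance creates the global fence cache, the last one destroys it, and an atomic counter arbitrates. A condensed sketch of the same shape (names hypothetical); note the small window in which a second caller can pass the increment before the first has finished creating the cache, which users of this pattern have to tolerate or close with a lock:

static struct kmem_cache *shared_cache;
static atomic_t shared_cache_ref = ATOMIC_INIT(0);

int shared_cache_get(size_t objsize)
{
	if (atomic_inc_return(&shared_cache_ref) == 1) {
		shared_cache = kmem_cache_create("shared_cache", objsize,
						 0, SLAB_HWCACHE_ALIGN, NULL);
		if (!shared_cache) {
			atomic_dec(&shared_cache_ref);
			return -ENOMEM;
		}
	}
	return 0;
}

void shared_cache_put(void)
{
	if (atomic_dec_and_test(&shared_cache_ref))
		kmem_cache_destroy(shared_cache);
}
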
+diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+index 51068e6..7cbbbfb 100644
+--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
++++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+@@ -30,6 +30,9 @@
+ struct amd_gpu_scheduler;
+ struct amd_sched_rq;
+
++extern struct kmem_cache *sched_fence_slab;
++extern atomic_t sched_fence_slab_ref;
++
+ /**
+ * A scheduler entity is a wrapper around a job queue or a group
+ * of other entities. Entities take turns emitting jobs from their
+@@ -142,9 +145,6 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity);
+ void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+
+-int amd_sched_fence_slab_init(void);
+-void amd_sched_fence_slab_fini(void);
+-
+ struct amd_sched_fence *amd_sched_fence_create(
+ struct amd_sched_entity *s_entity, void *owner);
+ void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
+diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
+index 93ad2e1..6b63bea 100644
+--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
++++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
+@@ -27,25 +27,6 @@
+ #include <drm/drmP.h>
+ #include "gpu_scheduler.h"
+
+-static struct kmem_cache *sched_fence_slab;
+-
+-int amd_sched_fence_slab_init(void)
+-{
+- sched_fence_slab = kmem_cache_create(
+- "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+- SLAB_HWCACHE_ALIGN, NULL);
+- if (!sched_fence_slab)
+- return -ENOMEM;
+-
+- return 0;
+-}
+-
+-void amd_sched_fence_slab_fini(void)
+-{
+- rcu_barrier();
+- kmem_cache_destroy(sched_fence_slab);
+-}
+-
+ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
+ void *owner)
+ {
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index d46fa22..5de36d8 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -1490,6 +1490,8 @@ static int i915_drm_suspend(struct drm_device *dev)
+
+ dev_priv->suspend_count++;
+
++ intel_display_set_init_power(dev_priv, false);
++
+ intel_csr_ucode_suspend(dev_priv);
+
+ out:
+@@ -1506,8 +1508,6 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
+
+ disable_rpm_wakeref_asserts(dev_priv);
+
+- intel_display_set_init_power(dev_priv, false);
+-
+ fw_csr = !IS_BROXTON(dev_priv) &&
+ suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
+ /*
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index e26f889..63462f2 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -9737,29 +9737,6 @@ static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
+ bxt_set_cdclk(to_i915(dev), req_cdclk);
+ }
+
+-static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
+- int pixel_rate)
+-{
+- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+-
+- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+- if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
+- pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+-
+- /* BSpec says "Do not use DisplayPort with CDCLK less than
+- * 432 MHz, audio enabled, port width x4, and link rate
+- * HBR2 (5.4 GHz), or else there may be audio corruption or
+- * screen corruption."
+- */
+- if (intel_crtc_has_dp_encoder(crtc_state) &&
+- crtc_state->has_audio &&
+- crtc_state->port_clock >= 540000 &&
+- crtc_state->lane_count == 4)
+- pixel_rate = max(432000, pixel_rate);
+-
+- return pixel_rate;
+-}
+-
+ /* compute the max rate for new configuration */
+ static int ilk_max_pixel_rate(struct drm_atomic_state *state)
+ {
+@@ -9785,9 +9762,9 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state)
+
+ pixel_rate = ilk_pipe_pixel_rate(crtc_state);
+
+- if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv))
+- pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state,
+- pixel_rate);
++ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
++ if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
++ pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+
+ intel_state->min_pixclk[i] = pixel_rate;
+ }
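
Worked numbers for the 95% rule retained above: with IPS enabled, a pipe running at 400000 kHz is reported as DIV_ROUND_UP(400000 * 100, 95) = 421053 kHz, so any cdclk chosen at or above that figure satisfies 421053 * 95 / 100 >= 400000, i.e. the true pixel rate never exceeds 95% of cdclk.
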
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index 1421270..c3aa9e6 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -1759,50 +1759,6 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
+ intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+ }
+
+-static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
+- enum port port)
+-{
+- const struct ddi_vbt_port_info *info =
+- &dev_priv->vbt.ddi_port_info[port];
+- u8 ddc_pin;
+-
+- if (info->alternate_ddc_pin) {
+- DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
+- info->alternate_ddc_pin, port_name(port));
+- return info->alternate_ddc_pin;
+- }
+-
+- switch (port) {
+- case PORT_B:
+- if (IS_BROXTON(dev_priv))
+- ddc_pin = GMBUS_PIN_1_BXT;
+- else
+- ddc_pin = GMBUS_PIN_DPB;
+- break;
+- case PORT_C:
+- if (IS_BROXTON(dev_priv))
+- ddc_pin = GMBUS_PIN_2_BXT;
+- else
+- ddc_pin = GMBUS_PIN_DPC;
+- break;
+- case PORT_D:
+- if (IS_CHERRYVIEW(dev_priv))
+- ddc_pin = GMBUS_PIN_DPD_CHV;
+- else
+- ddc_pin = GMBUS_PIN_DPD;
+- break;
+- default:
+- MISSING_CASE(port);
+- ddc_pin = GMBUS_PIN_DPB;
+- break;
+- }
+-
+- DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
+- ddc_pin, port_name(port));
+-
+- return ddc_pin;
+-}
+-
+ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector)
+ {
+@@ -1812,6 +1768,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum port port = intel_dig_port->port;
++ uint8_t alternate_ddc_pin;
+
+ DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
+ port_name(port));
+@@ -1829,10 +1786,12 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ connector->doublescan_allowed = 0;
+ connector->stereo_allowed = 1;
+
+- intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
+-
+ switch (port) {
+ case PORT_B:
++ if (IS_BROXTON(dev_priv))
++ intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
++ else
++ intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
+ /*
+ * On BXT A0/A1, sw needs to activate DDIA HPD logic and
+ * interrupts to check the external panel connection.
+@@ -1843,17 +1802,46 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ intel_encoder->hpd_pin = HPD_PORT_B;
+ break;
+ case PORT_C:
++ if (IS_BROXTON(dev_priv))
++ intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT;
++ else
++ intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
+ intel_encoder->hpd_pin = HPD_PORT_C;
+ break;
+ case PORT_D:
++ if (WARN_ON(IS_BROXTON(dev_priv)))
++ intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED;
++ else if (IS_CHERRYVIEW(dev_priv))
++ intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV;
++ else
++ intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
+ intel_encoder->hpd_pin = HPD_PORT_D;
+ break;
+ case PORT_E:
++ /* On SKL PORT E doesn't have seperate GMBUS pin
++ * We rely on VBT to set a proper alternate GMBUS pin. */
++ alternate_ddc_pin =
++ dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
++ switch (alternate_ddc_pin) {
++ case DDC_PIN_B:
++ intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
++ break;
++ case DDC_PIN_C:
++ intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
++ break;
++ case DDC_PIN_D:
++ intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
++ break;
++ default:
++ MISSING_CASE(alternate_ddc_pin);
++ }
+ intel_encoder->hpd_pin = HPD_PORT_E;
+ break;
++ case PORT_A:
++ intel_encoder->hpd_pin = HPD_PORT_A;
++ /* Internal port only for eDP. */
+ default:
+- MISSING_CASE(port);
+- return;
++ BUG();
+ }
+
+ if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index edd2d03..554ca71 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -104,14 +104,6 @@ static const char radeon_family_name[][16] = {
+ "LAST",
+ };
+
+-#if defined(CONFIG_VGA_SWITCHEROO)
+-bool radeon_has_atpx_dgpu_power_cntl(void);
+-bool radeon_is_atpx_hybrid(void);
+-#else
+-static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+-static inline bool radeon_is_atpx_hybrid(void) { return false; }
+-#endif
+-
+ #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
+ #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
+
+@@ -168,11 +160,6 @@ static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
+
+ if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
+ rdev->flags &= ~RADEON_IS_PX;
+-
+- /* disable PX is the system doesn't support dGPU power control or hybrid gfx */
+- if (!radeon_is_atpx_hybrid() &&
+- !radeon_has_atpx_dgpu_power_cntl())
+- rdev->flags &= ~RADEON_IS_PX;
+ }
+
+ /**
+diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
+index ce69048..da3fb06 100644
+--- a/drivers/iio/accel/st_accel_core.c
++++ b/drivers/iio/accel/st_accel_core.c
+@@ -743,8 +743,8 @@ static int st_accel_read_raw(struct iio_dev *indio_dev,
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+- *val = adata->current_fullscale->gain / 1000000;
+- *val2 = adata->current_fullscale->gain % 1000000;
++ *val = 0;
++ *val2 = adata->current_fullscale->gain;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = adata->odr;
+@@ -763,13 +763,9 @@ static int st_accel_write_raw(struct iio_dev *indio_dev,
+ int err;
+
+ switch (mask) {
+- case IIO_CHAN_INFO_SCALE: {
+- int gain;
+-
+- gain = val * 1000000 + val2;
+- err = st_sensors_set_fullscale_by_gain(indio_dev, gain);
++ case IIO_CHAN_INFO_SCALE:
++ err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
+ break;
+- }
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val2)
+ return -EINVAL;
+diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+index b5beea53..dc33c1d 100644
+--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
++++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+@@ -30,26 +30,26 @@ static struct {
+ u32 usage_id;
+ int unit; /* 0 for default others from HID sensor spec */
+ int scale_val0; /* scale, whole number */
+- int scale_val1; /* scale, fraction in nanos */
++ int scale_val1; /* scale, fraction in micros */
+ } unit_conversion[] = {
+- {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650000},
++ {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650},
+ {HID_USAGE_SENSOR_ACCEL_3D,
+ HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD, 1, 0},
+ {HID_USAGE_SENSOR_ACCEL_3D,
+- HID_USAGE_SENSOR_UNITS_G, 9, 806650000},
++ HID_USAGE_SENSOR_UNITS_G, 9, 806650},
+
+- {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453293},
++ {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453},
+ {HID_USAGE_SENSOR_GYRO_3D,
+ HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND, 1, 0},
+ {HID_USAGE_SENSOR_GYRO_3D,
+- HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453293},
++ HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453},
+
+- {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000000},
++ {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000},
+ {HID_USAGE_SENSOR_COMPASS_3D, HID_USAGE_SENSOR_UNITS_GAUSS, 1, 0},
+
+- {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453293},
++ {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453},
+ {HID_USAGE_SENSOR_INCLINOMETER_3D,
+- HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453293},
++ HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453},
+ {HID_USAGE_SENSOR_INCLINOMETER_3D,
+ HID_USAGE_SENSOR_UNITS_RADIANS, 1, 0},
+
+@@ -57,7 +57,7 @@ static struct {
+ {HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0},
+
+ {HID_USAGE_SENSOR_PRESSURE, 0, 100, 0},
+- {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000000},
++ {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000},
+ };
+
+ static int pow_10(unsigned power)
+@@ -266,15 +266,15 @@ EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value);
+ /*
+ * This fuction applies the unit exponent to the scale.
+ * For example:
+- * 9.806650000 ->exp:2-> val0[980]val1[665000000]
+- * 9.000806000 ->exp:2-> val0[900]val1[80600000]
+- * 0.174535293 ->exp:2-> val0[17]val1[453529300]
+- * 1.001745329 ->exp:0-> val0[1]val1[1745329]
+- * 1.001745329 ->exp:2-> val0[100]val1[174532900]
+- * 1.001745329 ->exp:4-> val0[10017]val1[453290000]
+- * 9.806650000 ->exp:-2-> val0[0]val1[98066500]
++ * 9.806650 ->exp:2-> val0[980]val1[665000]
++ * 9.000806 ->exp:2-> val0[900]val1[80600]
++ * 0.174535 ->exp:2-> val0[17]val1[453500]
++ * 1.001745 ->exp:0-> val0[1]val1[1745]
++ * 1.001745 ->exp:2-> val0[100]val1[174500]
++ * 1.001745 ->exp:4-> val0[10017]val1[450000]
++ * 9.806650 ->exp:-2-> val0[0]val1[98066]
+ */
+-static void adjust_exponent_nano(int *val0, int *val1, int scale0,
++static void adjust_exponent_micro(int *val0, int *val1, int scale0,
+ int scale1, int exp)
+ {
+ int i;
+@@ -285,32 +285,32 @@ static void adjust_exponent_nano(int *val0, int *val1, int scale0,
+ if (exp > 0) {
+ *val0 = scale0 * pow_10(exp);
+ res = 0;
+- if (exp > 9) {
++ if (exp > 6) {
+ *val1 = 0;
+ return;
+ }
+ for (i = 0; i < exp; ++i) {
+- x = scale1 / pow_10(8 - i);
++ x = scale1 / pow_10(5 - i);
+ res += (pow_10(exp - 1 - i) * x);
+- scale1 = scale1 % pow_10(8 - i);
++ scale1 = scale1 % pow_10(5 - i);
+ }
+ *val0 += res;
+ *val1 = scale1 * pow_10(exp);
+ } else if (exp < 0) {
+ exp = abs(exp);
+- if (exp > 9) {
++ if (exp > 6) {
+ *val0 = *val1 = 0;
+ return;
+ }
+ *val0 = scale0 / pow_10(exp);
+ rem = scale0 % pow_10(exp);
+ res = 0;
+- for (i = 0; i < (9 - exp); ++i) {
+- x = scale1 / pow_10(8 - i);
+- res += (pow_10(8 - exp - i) * x);
+- scale1 = scale1 % pow_10(8 - i);
++ for (i = 0; i < (6 - exp); ++i) {
++ x = scale1 / pow_10(5 - i);
++ res += (pow_10(5 - exp - i) * x);
++ scale1 = scale1 % pow_10(5 - i);
+ }
+- *val1 = rem * pow_10(9 - exp) + res;
++ *val1 = rem * pow_10(6 - exp) + res;
+ } else {
+ *val0 = scale0;
+ *val1 = scale1;
+@@ -332,14 +332,14 @@ int hid_sensor_format_scale(u32 usage_id,
+ unit_conversion[i].unit == attr_info->units) {
+ exp = hid_sensor_convert_exponent(
+ attr_info->unit_expo);
+- adjust_exponent_nano(val0, val1,
++ adjust_exponent_micro(val0, val1,
+ unit_conversion[i].scale_val0,
+ unit_conversion[i].scale_val1, exp);
+ break;
+ }
+ }
+
+- return IIO_VAL_INT_PLUS_NANO;
++ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ EXPORT_SYMBOL(hid_sensor_format_scale);
+
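
The exponent adjustment above can be cross-checked with plain 64-bit arithmetic: (val0, val1) encodes val0 + val1/1e6, and applying a base-10 exponent e multiplies that value by 10^e while keeping the same encoding. The driver does the per-digit juggling to stay inside 32-bit ints; a standalone check using a wider intermediate (illustrative only, not the driver's code):

#include <stdio.h>

static void apply_exp(long long v0, long long v1, int e,
		      long long *r0, long long *r1)
{
	long long micros = v0 * 1000000LL + v1;	/* value in micro-units */

	for (; e > 0; e--)
		micros *= 10;
	for (; e < 0; e++)
		micros /= 10;
	*r0 = micros / 1000000LL;
	*r1 = micros % 1000000LL;
}

int main(void)
{
	long long r0, r1;

	apply_exp(9, 806650, 2, &r0, &r1);	/* 9.806650 * 10^2 */
	printf("%lld.%06lld\n", r0, r1);	/* 980.665000, as in the table */
	return 0;
}
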
+diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
+index 32a5946..2d5282e 100644
+--- a/drivers/iio/common/st_sensors/st_sensors_core.c
++++ b/drivers/iio/common/st_sensors/st_sensors_core.c
+@@ -619,7 +619,7 @@ EXPORT_SYMBOL(st_sensors_sysfs_sampling_frequency_avail);
+ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+- int i, len = 0, q, r;
++ int i, len = 0;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+@@ -628,10 +628,8 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
+ if (sdata->sensor_settings->fs.fs_avl[i].num == 0)
+ break;
+
+- q = sdata->sensor_settings->fs.fs_avl[i].gain / 1000000;
+- r = sdata->sensor_settings->fs.fs_avl[i].gain % 1000000;
+-
+- len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ", q, r);
++ len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
++ sdata->sensor_settings->fs.fs_avl[i].gain);
+ }
+ mutex_unlock(&indio_dev->mlock);
+ buf[len - 1] = '\n';
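
A quick check of the two output formats in this hunk: with a gain of 9806650 micro-units, the quotient/remainder form prints the intended value, while the fixed "0.%06u" form cannot represent anything at or above 1.0:

unsigned int gain = 9806650;

printf("%u.%06u\n", gain / 1000000, gain % 1000000);	/* 9.806650 */
printf("0.%06u\n", gain);				/* 0.9806650 -- wrong */
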
+diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
+index a97e802c..b98b9d9 100644
+--- a/drivers/iio/orientation/hid-sensor-rotation.c
++++ b/drivers/iio/orientation/hid-sensor-rotation.c
+@@ -335,7 +335,6 @@ static struct platform_driver hid_dev_rot_platform_driver = {
+ .id_table = hid_dev_rot_ids,
+ .driver = {
+ .name = KBUILD_MODNAME,
+- .pm = &hid_sensor_pm_ops,
+ },
+ .probe = hid_dev_rot_probe,
+ .remove = hid_dev_rot_remove,
+diff --git a/drivers/input/rmi4/rmi_i2c.c b/drivers/input/rmi4/rmi_i2c.c
+index 1ebc2c1..6f2e0e4 100644
+--- a/drivers/input/rmi4/rmi_i2c.c
++++ b/drivers/input/rmi4/rmi_i2c.c
+@@ -221,21 +221,6 @@ static const struct of_device_id rmi_i2c_of_match[] = {
+ MODULE_DEVICE_TABLE(of, rmi_i2c_of_match);
+ #endif
+
+-static void rmi_i2c_regulator_bulk_disable(void *data)
+-{
+- struct rmi_i2c_xport *rmi_i2c = data;
+-
+- regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
+- rmi_i2c->supplies);
+-}
+-
+-static void rmi_i2c_unregister_transport(void *data)
+-{
+- struct rmi_i2c_xport *rmi_i2c = data;
+-
+- rmi_unregister_transport_device(&rmi_i2c->xport);
+-}
+-
+ static int rmi_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+ {
+@@ -279,12 +264,6 @@ static int rmi_i2c_probe(struct i2c_client *client,
+ if (retval < 0)
+ return retval;
+
+- retval = devm_add_action_or_reset(&client->dev,
+- rmi_i2c_regulator_bulk_disable,
+- rmi_i2c);
+- if (retval)
+- return retval;
+-
+ of_property_read_u32(client->dev.of_node, "syna,startup-delay-ms",
+ &rmi_i2c->startup_delay);
+
+@@ -315,11 +294,6 @@ static int rmi_i2c_probe(struct i2c_client *client,
+ client->addr);
+ return retval;
+ }
+- retval = devm_add_action_or_reset(&client->dev,
+- rmi_i2c_unregister_transport,
+- rmi_i2c);
+- if (retval)
+- return retval;
+
+ retval = rmi_i2c_init_irq(client);
+ if (retval < 0)
+@@ -330,6 +304,17 @@ static int rmi_i2c_probe(struct i2c_client *client,
+ return 0;
+ }
+
++static int rmi_i2c_remove(struct i2c_client *client)
++{
++ struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
++
++ rmi_unregister_transport_device(&rmi_i2c->xport);
++ regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
++ rmi_i2c->supplies);
++
++ return 0;
++}
++
+ #ifdef CONFIG_PM_SLEEP
+ static int rmi_i2c_suspend(struct device *dev)
+ {
+@@ -446,6 +431,7 @@ static struct i2c_driver rmi_i2c_driver = {
+ },
+ .id_table = rmi_id,
+ .probe = rmi_i2c_probe,
++ .remove = rmi_i2c_remove,
+ };
+
+ module_i2c_driver(rmi_i2c_driver);
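
The devm_add_action_or_reset() calls being dropped in this hunk implement managed cleanup: a teardown callback is bound to the device's lifetime, so probe error paths and driver removal share one path, and the callback runs immediately if the registration itself fails. The general shape, with hypothetical driver names:

static void my_disable_supplies(void *data)
{
	struct my_ctx *ctx = data;

	regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
}

/* in probe(), right after regulator_bulk_enable() succeeds: */
ret = devm_add_action_or_reset(&client->dev, my_disable_supplies, ctx);
if (ret)
	return ret;	/* the callback has already run */
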
+diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c
+index 4ebef60..55bd1b3 100644
+--- a/drivers/input/rmi4/rmi_spi.c
++++ b/drivers/input/rmi4/rmi_spi.c
+@@ -396,13 +396,6 @@ static inline int rmi_spi_of_probe(struct spi_device *spi,
+ }
+ #endif
+
+-static void rmi_spi_unregister_transport(void *data)
+-{
+- struct rmi_spi_xport *rmi_spi = data;
+-
+- rmi_unregister_transport_device(&rmi_spi->xport);
+-}
+-
+ static int rmi_spi_probe(struct spi_device *spi)
+ {
+ struct rmi_spi_xport *rmi_spi;
+@@ -471,11 +464,6 @@ static int rmi_spi_probe(struct spi_device *spi)
+ dev_err(&spi->dev, "failed to register transport.\n");
+ return retval;
+ }
+- retval = devm_add_action_or_reset(&spi->dev,
+- rmi_spi_unregister_transport,
+- rmi_spi);
+- if (retval)
+- return retval;
+
+ retval = rmi_spi_init_irq(spi);
+ if (retval < 0)
+@@ -485,6 +473,15 @@ static int rmi_spi_probe(struct spi_device *spi)
+ return 0;
+ }
+
++static int rmi_spi_remove(struct spi_device *spi)
++{
++ struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
++
++ rmi_unregister_transport_device(&rmi_spi->xport);
++
++ return 0;
++}
++
+ #ifdef CONFIG_PM_SLEEP
+ static int rmi_spi_suspend(struct device *dev)
+ {
+@@ -580,6 +577,7 @@ static struct spi_driver rmi_spi_driver = {
+ },
+ .id_table = rmi_id,
+ .probe = rmi_spi_probe,
++ .remove = rmi_spi_remove,
+ };
+
+ module_spi_driver(rmi_spi_driver);
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 822fc4a..96de97a 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1654,9 +1654,6 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
+
+ free_pagetable(&dom->domain);
+
+- if (dom->domain.id)
+- domain_id_free(dom->domain.id);
+-
+ kfree(dom);
+ }
+
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 1257b0b..ebb5bf3 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -1711,7 +1711,6 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
+ if (!iommu->domains || !iommu->domain_ids)
+ return;
+
+-again:
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
+ struct dmar_domain *domain;
+@@ -1724,19 +1723,10 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
+
+ domain = info->domain;
+
+- __dmar_remove_one_dev_info(info);
++ dmar_remove_one_dev_info(domain, info->dev);
+
+- if (!domain_type_is_vm_or_si(domain)) {
+- /*
+- * The domain_exit() function can't be called under
+- * device_domain_lock, as it takes this lock itself.
+- * So release the lock here and re-run the loop
+- * afterwards.
+- */
+- spin_unlock_irqrestore(&device_domain_lock, flags);
++ if (!domain_type_is_vm_or_si(domain))
+ domain_exit(domain);
+- goto again;
+- }
+ }
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
+index f50e51c..def8ca1 100644
+--- a/drivers/iommu/io-pgtable-arm-v7s.c
++++ b/drivers/iommu/io-pgtable-arm-v7s.c
+@@ -633,10 +633,6 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
+ {
+ struct arm_v7s_io_pgtable *data;
+
+-#ifdef PHYS_OFFSET
+- if (upper_32_bits(PHYS_OFFSET))
+- return NULL;
+-#endif
+ if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS)
+ return NULL;
+
+diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
+index f73e108..bf890c3 100644
+--- a/drivers/media/usb/dvb-usb/dib0700_core.c
++++ b/drivers/media/usb/dvb-usb/dib0700_core.c
+@@ -677,7 +677,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
+ struct dvb_usb_device *d = purb->context;
+ struct dib0700_rc_response *poll_reply;
+ enum rc_type protocol;
+- u32 keycode;
++ u32 uninitialized_var(keycode);
+ u8 toggle;
+
+ deb_info("%s()\n", __func__);
+@@ -719,8 +719,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
+ poll_reply->nec.data == 0x00 &&
+ poll_reply->nec.not_data == 0xff) {
+ poll_reply->data_state = 2;
+- rc_repeat(d->rc_dev);
+- goto resubmit;
++ break;
+ }
+
+ if ((poll_reply->nec.data ^ poll_reply->nec.not_data) != 0xff) {
+diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
+index 75b9d4a..e9e6ea3 100644
+--- a/drivers/misc/mei/bus-fixup.c
++++ b/drivers/misc/mei/bus-fixup.c
+@@ -178,7 +178,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
+
+ ret = 0;
+ bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
+- if (bytes_recv < if_version_length) {
++ if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
+ dev_err(bus->dev, "Could not read IF version\n");
+ ret = -EIO;
+ goto err;
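The replacement test keeps a separate bytes_recv < 0 arm ahead of the size comparison. That arm is load-bearing in C: sizeof() yields an unsigned size_t, so comparing a negative signed result against it converts the negative value to a huge unsigned number, and a size-only check would miss the error. A standalone demonstration (plain user-space C, not kernel code):

        #include <stdio.h>

        int main(void)
        {
                long bytes_recv = -5;           /* stands in for an -EIO return */

                /* -5 converts to 0xff...fb for the comparison, so this is false */
                if (bytes_recv < sizeof(long))
                        printf("error caught\n");
                else
                        printf("error missed by the size-only check\n");
                return 0;
        }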
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 6ef1e3c..c57eb32 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -26,8 +26,6 @@
+ #include "mmc_ops.h"
+ #include "sd_ops.h"
+
+-#define DEFAULT_CMD6_TIMEOUT_MS 500
+-
+ static const unsigned int tran_exp[] = {
+ 10000, 100000, 1000000, 10000000,
+ 0, 0, 0, 0
+@@ -573,7 +571,6 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
+ card->erased_byte = 0x0;
+
+ /* eMMC v4.5 or later */
+- card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS;
+ if (card->ext_csd.rev >= 6) {
+ card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
+
+diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
+index 44ecebd..d839147 100644
+--- a/drivers/mmc/host/mxs-mmc.c
++++ b/drivers/mmc/host/mxs-mmc.c
+@@ -661,13 +661,13 @@ static int mxs_mmc_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, mmc);
+
+- spin_lock_init(&host->lock);
+-
+ ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
+ dev_name(&pdev->dev), host);
+ if (ret)
+ goto out_free_dma;
+
++ spin_lock_init(&host->lock);
++
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto out_free_dma;
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index 90ed2e1..8ef44a2a 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -647,7 +647,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
+ if (msm_host->pwr_irq < 0) {
+ dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n",
+ msm_host->pwr_irq);
+- ret = msm_host->pwr_irq;
+ goto clk_disable;
+ }
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 6eb8f07..a8a022a 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2269,8 +2269,10 @@ static bool sdhci_request_done(struct sdhci_host *host)
+
+ for (i = 0; i < SDHCI_MAX_MRQS; i++) {
+ mrq = host->mrqs_done[i];
+- if (mrq)
++ if (mrq) {
++ host->mrqs_done[i] = NULL;
+ break;
++ }
+ }
+
+ if (!mrq) {
+@@ -2301,17 +2303,6 @@ static bool sdhci_request_done(struct sdhci_host *host)
+ * upon error conditions.
+ */
+ if (sdhci_needs_reset(host, mrq)) {
+- /*
+- * Do not finish until command and data lines are available for
+- * reset. Note there can only be one other mrq, so it cannot
+- * also be in mrqs_done, otherwise host->cmd and host->data_cmd
+- * would both be null.
+- */
+- if (host->cmd || host->data_cmd) {
+- spin_unlock_irqrestore(&host->lock, flags);
+- return true;
+- }
+-
+ /* Some controllers need this kick or reset won't work here */
+ if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
+ /* This is to force an update */
+@@ -2319,8 +2310,10 @@ static bool sdhci_request_done(struct sdhci_host *host)
+
+ /* Spec says we should do both at the same time, but Ricoh
+ controllers do not like that. */
+- sdhci_do_reset(host, SDHCI_RESET_CMD);
+- sdhci_do_reset(host, SDHCI_RESET_DATA);
++ if (!host->cmd)
++ sdhci_do_reset(host, SDHCI_RESET_CMD);
++ if (!host->data_cmd)
++ sdhci_do_reset(host, SDHCI_RESET_DATA);
+
+ host->pending_reset = false;
+ }
+@@ -2328,8 +2321,6 @@ static bool sdhci_request_done(struct sdhci_host *host)
+ if (!sdhci_has_requests(host))
+ sdhci_led_deactivate(host);
+
+- host->mrqs_done[i] = NULL;
+-
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+
+@@ -2509,6 +2500,9 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
+ if (!host->data) {
+ struct mmc_command *data_cmd = host->data_cmd;
+
++ if (data_cmd)
++ host->data_cmd = NULL;
++
+ /*
+ * The "data complete" interrupt is also used to
+ * indicate that a busy state has ended. See comment
+@@ -2516,13 +2510,11 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
+ */
+ if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
+ if (intmask & SDHCI_INT_DATA_TIMEOUT) {
+- host->data_cmd = NULL;
+ data_cmd->error = -ETIMEDOUT;
+ sdhci_finish_mrq(host, data_cmd->mrq);
+ return;
+ }
+ if (intmask & SDHCI_INT_DATA_END) {
+- host->data_cmd = NULL;
+ /*
+ * Some cards handle busy-end interrupt
+ * before the command completed, so make
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 6b46a37..c74d164 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -9001,7 +9001,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ return 0;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
+- 0, 0, nlflags, filter_mask, NULL);
++ nlflags, 0, 0, filter_mask, NULL);
+ }
+
+ /* Hardware supports L4 tunnel length of 128B (=2^7) which includes
+diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
+index 6f9563a..83deda4 100644
+--- a/drivers/nfc/mei_phy.c
++++ b/drivers/nfc/mei_phy.c
+@@ -133,7 +133,7 @@ static int mei_nfc_if_version(struct nfc_mei_phy *phy)
+ return -ENOMEM;
+
+ bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply, if_version_length);
+- if (bytes_recv < 0 || bytes_recv < if_version_length) {
++ if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
+ pr_err("Could not read IF version\n");
+ r = -EIO;
+ goto err;
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index da134a0..60f7eab 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1531,9 +1531,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
+ return 0;
+ }
+
+-static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
++static void nvme_disable_io_queues(struct nvme_dev *dev)
+ {
+- int pass;
++ int pass, queues = dev->online_queues - 1;
+ unsigned long timeout;
+ u8 opcode = nvme_admin_delete_sq;
+
+@@ -1678,7 +1678,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
+
+ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
+ {
+- int i, queues;
++ int i;
+ u32 csts = -1;
+
+ del_timer_sync(&dev->watchdog_timer);
+@@ -1689,7 +1689,6 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
+ csts = readl(dev->bar + NVME_REG_CSTS);
+ }
+
+- queues = dev->online_queues - 1;
+ for (i = dev->queue_count - 1; i > 0; i--)
+ nvme_suspend_queue(dev->queues[i]);
+
+@@ -1701,7 +1700,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
+ if (dev->queue_count)
+ nvme_suspend_queue(dev->queues[0]);
+ } else {
+- nvme_disable_io_queues(dev, queues);
++ nvme_disable_io_queues(dev);
+ nvme_disable_admin_queue(dev, shutdown);
+ }
+ nvme_pci_disable(dev);
+diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
+index 9526e34..66c4d8f 100644
+--- a/drivers/pci/setup-res.c
++++ b/drivers/pci/setup-res.c
+@@ -121,14 +121,6 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
+ return -EINVAL;
+ }
+
+- /*
+- * If we have a shadow copy in RAM, the PCI device doesn't respond
+- * to the shadow range, so we don't need to claim it, and upstream
+- * bridges don't need to route the range to the device.
+- */
+- if (res->flags & IORESOURCE_ROM_SHADOW)
+- return 0;
+-
+ root = pci_find_parent_resource(dev, res);
+ if (!root) {
+ dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n",
+diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+index 5d1e505c3..7f77007 100644
+--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
++++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+@@ -844,6 +844,6 @@ static struct platform_driver iproc_gpio_driver = {
+
+ static int __init iproc_gpio_init(void)
+ {
+- return platform_driver_register(&iproc_gpio_driver);
++ return platform_driver_probe(&iproc_gpio_driver, iproc_gpio_probe);
+ }
+ arch_initcall_sync(iproc_gpio_init);
+diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+index c8deb8b..35783db 100644
+--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
++++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+@@ -741,6 +741,6 @@ static struct platform_driver nsp_gpio_driver = {
+
+ static int __init nsp_gpio_init(void)
+ {
+- return platform_driver_register(&nsp_gpio_driver);
++ return platform_driver_probe(&nsp_gpio_driver, nsp_gpio_probe);
+ }
+ arch_initcall_sync(nsp_gpio_init);
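Both Broadcom GPIO initcalls above switch to platform_driver_probe(), which binds only the devices already registered when the initcall runs and, when the probe routine is marked __init, allows its memory to be discarded afterwards; the cost is that such drivers opt out of deferred probing and never bind later-added devices. A generic sketch of the pattern (my_gpio_* are placeholder names):

        static int __init my_gpio_init(void)
        {
                /* probes matches present right now; devices appearing
                 * later are not bound, unlike platform_driver_register() */
                return platform_driver_probe(&my_gpio_driver, my_gpio_probe);
        }
        arch_initcall_sync(my_gpio_init);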
+diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
+index bc31504..0fe8fad 100644
+--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
+@@ -1634,15 +1634,12 @@ static int chv_pinctrl_remove(struct platform_device *pdev)
+ }
+
+ #ifdef CONFIG_PM_SLEEP
+-static int chv_pinctrl_suspend_noirq(struct device *dev)
++static int chv_pinctrl_suspend(struct device *dev)
+ {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
+- unsigned long flags;
+ int i;
+
+- raw_spin_lock_irqsave(&chv_lock, flags);
+-
+ pctrl->saved_intmask = readl(pctrl->regs + CHV_INTMASK);
+
+ for (i = 0; i < pctrl->community->npins; i++) {
+@@ -1663,20 +1660,15 @@ static int chv_pinctrl_suspend_noirq(struct device *dev)
+ ctx->padctrl1 = readl(reg);
+ }
+
+- raw_spin_unlock_irqrestore(&chv_lock, flags);
+-
+ return 0;
+ }
+
+-static int chv_pinctrl_resume_noirq(struct device *dev)
++static int chv_pinctrl_resume(struct device *dev)
+ {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
+- unsigned long flags;
+ int i;
+
+- raw_spin_lock_irqsave(&chv_lock, flags);
+-
+ /*
+ * Mask all interrupts before restoring per-pin configuration
+ * registers because we don't know in which state BIOS left them
+@@ -1721,15 +1713,12 @@ static int chv_pinctrl_resume_noirq(struct device *dev)
+ chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
+ chv_writel(pctrl->saved_intmask, pctrl->regs + CHV_INTMASK);
+
+- raw_spin_unlock_irqrestore(&chv_lock, flags);
+-
+ return 0;
+ }
+ #endif
+
+ static const struct dev_pm_ops chv_pinctrl_pm_ops = {
+- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend_noirq,
+- chv_pinctrl_resume_noirq)
++ SET_LATE_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend, chv_pinctrl_resume)
+ };
+
+ static const struct acpi_device_id chv_pinctrl_acpi_match[] = {
+diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c
+index 2df07ee..feac457 100644
+--- a/drivers/platform/x86/toshiba-wmi.c
++++ b/drivers/platform/x86/toshiba-wmi.c
+@@ -24,15 +24,14 @@
+ #include <linux/acpi.h>
+ #include <linux/input.h>
+ #include <linux/input/sparse-keymap.h>
+-#include <linux/dmi.h>
+
+ MODULE_AUTHOR("Azael Avalos");
+ MODULE_DESCRIPTION("Toshiba WMI Hotkey Driver");
+ MODULE_LICENSE("GPL");
+
+-#define WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
++#define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
+
+-MODULE_ALIAS("wmi:"WMI_EVENT_GUID);
++MODULE_ALIAS("wmi:"TOSHIBA_WMI_EVENT_GUID);
+
+ static struct input_dev *toshiba_wmi_input_dev;
+
+@@ -64,16 +63,6 @@ static void toshiba_wmi_notify(u32 value, void *context)
+ kfree(response.pointer);
+ }
+
+-static struct dmi_system_id toshiba_wmi_dmi_table[] __initdata = {
+- {
+- .ident = "Toshiba laptop",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+- },
+- },
+- {}
+-};
+-
+ static int __init toshiba_wmi_input_setup(void)
+ {
+ acpi_status status;
+@@ -92,7 +81,7 @@ static int __init toshiba_wmi_input_setup(void)
+ if (err)
+ goto err_free_dev;
+
+- status = wmi_install_notify_handler(WMI_EVENT_GUID,
++ status = wmi_install_notify_handler(TOSHIBA_WMI_EVENT_GUID,
+ toshiba_wmi_notify, NULL);
+ if (ACPI_FAILURE(status)) {
+ err = -EIO;
+@@ -106,7 +95,7 @@ static int __init toshiba_wmi_input_setup(void)
+ return 0;
+
+ err_remove_notifier:
+- wmi_remove_notify_handler(WMI_EVENT_GUID);
++ wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID);
+ err_free_keymap:
+ sparse_keymap_free(toshiba_wmi_input_dev);
+ err_free_dev:
+@@ -116,7 +105,7 @@ static int __init toshiba_wmi_input_setup(void)
+
+ static void toshiba_wmi_input_destroy(void)
+ {
+- wmi_remove_notify_handler(WMI_EVENT_GUID);
++ wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID);
+ sparse_keymap_free(toshiba_wmi_input_dev);
+ input_unregister_device(toshiba_wmi_input_dev);
+ }
+@@ -125,8 +114,7 @@ static int __init toshiba_wmi_init(void)
+ {
+ int ret;
+
+- if (!wmi_has_guid(WMI_EVENT_GUID) ||
+- !dmi_check_system(toshiba_wmi_dmi_table))
++ if (!wmi_has_guid(TOSHIBA_WMI_EVENT_GUID))
+ return -ENODEV;
+
+ ret = toshiba_wmi_input_setup();
+@@ -142,7 +130,7 @@ static int __init toshiba_wmi_init(void)
+
+ static void __exit toshiba_wmi_exit(void)
+ {
+- if (wmi_has_guid(WMI_EVENT_GUID))
++ if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID))
+ toshiba_wmi_input_destroy();
+ }
+
+diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
+index 8895f77..b4478cc 100644
+--- a/drivers/rtc/rtc-pcf2123.c
++++ b/drivers/rtc/rtc-pcf2123.c
+@@ -182,8 +182,7 @@ static ssize_t pcf2123_show(struct device *dev, struct device_attribute *attr,
+ }
+
+ static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr,
+- const char *buffer, size_t count)
+-{
++ const char *buffer, size_t count) {
+ struct pcf2123_sysfs_reg *r;
+ unsigned long reg;
+ unsigned long val;
+@@ -200,7 +199,7 @@ static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr,
+ if (ret)
+ return ret;
+
+- ret = pcf2123_write_reg(dev, reg, val);
++ pcf2123_write_reg(dev, reg, val);
+ if (ret < 0)
+ return -EIO;
+ return count;
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index 920c421..752b5c9 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -792,7 +792,6 @@ static void alua_rtpg_work(struct work_struct *work)
+ WARN_ON(pg->flags & ALUA_PG_RUN_RTPG);
+ WARN_ON(pg->flags & ALUA_PG_RUN_STPG);
+ spin_unlock_irqrestore(&pg->lock, flags);
+- kref_put(&pg->kref, release_port_group);
+ return;
+ }
+ if (pg->flags & ALUA_SYNC_STPG)
+@@ -890,7 +889,6 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
+ /* Do not queue if the worker is already running */
+ if (!(pg->flags & ALUA_PG_RUNNING)) {
+ kref_get(&pg->kref);
+- sdev = NULL;
+ start_queue = 1;
+ }
+ }
+@@ -902,8 +900,7 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
+ if (start_queue &&
+ !queue_delayed_work(alua_wq, &pg->rtpg_work,
+ msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
+- if (sdev)
+- scsi_device_put(sdev);
++ scsi_device_put(sdev);
+ kref_put(&pg->kref, release_port_group);
+ }
+ }
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index 46c0f5e..4cb7990 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -1273,9 +1273,9 @@ scsih_target_alloc(struct scsi_target *starget)
+ sas_target_priv_data->handle = raid_device->handle;
+ sas_target_priv_data->sas_address = raid_device->wwid;
+ sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
++ sas_target_priv_data->raid_device = raid_device;
+ if (ioc->is_warpdrive)
+- sas_target_priv_data->raid_device = raid_device;
+- raid_device->starget = starget;
++ raid_device->starget = starget;
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ return 0;
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index e46e2c5..2674f4c 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -2341,8 +2341,6 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
+ {
+ scsi_qla_host_t *vha = shost_priv(shost);
+
+- if (test_bit(UNLOADING, &vha->dpc_flags))
+- return 1;
+ if (!vha->host)
+ return 1;
+ if (time > vha->hw->loop_reset_delay * HZ)
+diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
+index 5ab49a7..7043eb0 100644
+--- a/drivers/staging/comedi/drivers/ni_tio.c
++++ b/drivers/staging/comedi/drivers/ni_tio.c
+@@ -207,8 +207,7 @@ static int ni_tio_clock_period_ps(const struct ni_gpct *counter,
+ * clock period is specified by user with prescaling
+ * already taken into account.
+ */
+- *period_ps = counter->clock_period_ps;
+- return 0;
++ return counter->clock_period_ps;
+ }
+
+ switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) {
+diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
+index 98d9473..24c348d 100644
+--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
++++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
+@@ -655,7 +655,6 @@ static void ad5933_work(struct work_struct *work)
+ __be16 buf[2];
+ int val[2];
+ unsigned char status;
+- int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ if (st->state == AD5933_CTRL_INIT_START_FREQ) {
+@@ -663,22 +662,19 @@ static void ad5933_work(struct work_struct *work)
+ ad5933_cmd(st, AD5933_CTRL_START_SWEEP);
+ st->state = AD5933_CTRL_START_SWEEP;
+ schedule_delayed_work(&st->work, st->poll_time_jiffies);
+- goto out;
++ mutex_unlock(&indio_dev->mlock);
++ return;
+ }
+
+- ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
+- if (ret)
+- goto out;
++ ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
+
+ if (status & AD5933_STAT_DATA_VALID) {
+ int scan_count = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+- ret = ad5933_i2c_read(st->client,
++ ad5933_i2c_read(st->client,
+ test_bit(1, indio_dev->active_scan_mask) ?
+ AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
+ scan_count * 2, (u8 *)buf);
+- if (ret)
+- goto out;
+
+ if (scan_count == 2) {
+ val[0] = be16_to_cpu(buf[0]);
+@@ -690,7 +686,8 @@ static void ad5933_work(struct work_struct *work)
+ } else {
+ /* no data available - try again later */
+ schedule_delayed_work(&st->work, st->poll_time_jiffies);
+- goto out;
++ mutex_unlock(&indio_dev->mlock);
++ return;
+ }
+
+ if (status & AD5933_STAT_SWEEP_DONE) {
+@@ -703,7 +700,7 @@ static void ad5933_work(struct work_struct *work)
+ ad5933_cmd(st, AD5933_CTRL_INC_FREQ);
+ schedule_delayed_work(&st->work, st->poll_time_jiffies);
+ }
+-out:
++
+ mutex_unlock(&indio_dev->mlock);
+ }
+
+diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
+index 499952c..a324322 100644
+--- a/drivers/staging/nvec/nvec_ps2.c
++++ b/drivers/staging/nvec/nvec_ps2.c
+@@ -106,12 +106,13 @@ static int nvec_mouse_probe(struct platform_device *pdev)
+ {
+ struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
+ struct serio *ser_dev;
++ char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 };
+
+- ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
++ ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL);
+ if (!ser_dev)
+ return -ENOMEM;
+
+- ser_dev->id.type = SERIO_8042;
++ ser_dev->id.type = SERIO_PS_PSTHRU;
+ ser_dev->write = ps2_sendcommand;
+ ser_dev->start = ps2_startstreaming;
+ ser_dev->stop = ps2_stopstreaming;
+@@ -126,6 +127,9 @@ static int nvec_mouse_probe(struct platform_device *pdev)
+
+ serio_register_port(ser_dev);
+
++ /* mouse reset */
++ nvec_write_async(nvec, mouse_reset, sizeof(mouse_reset));
++
+ return 0;
+ }
+
+diff --git a/drivers/staging/sm750fb/ddk750_reg.h b/drivers/staging/sm750fb/ddk750_reg.h
+index 4ed6d8d..9552479 100644
+--- a/drivers/staging/sm750fb/ddk750_reg.h
++++ b/drivers/staging/sm750fb/ddk750_reg.h
+@@ -601,13 +601,13 @@
+
+ #define PANEL_PLANE_TL 0x08001C
+ #define PANEL_PLANE_TL_TOP_SHIFT 16
+-#define PANEL_PLANE_TL_TOP_MASK (0x7ff << 16)
+-#define PANEL_PLANE_TL_LEFT_MASK 0x7ff
++#define PANEL_PLANE_TL_TOP_MASK (0xeff << 16)
++#define PANEL_PLANE_TL_LEFT_MASK 0xeff
+
+ #define PANEL_PLANE_BR 0x080020
+ #define PANEL_PLANE_BR_BOTTOM_SHIFT 16
+-#define PANEL_PLANE_BR_BOTTOM_MASK (0x7ff << 16)
+-#define PANEL_PLANE_BR_RIGHT_MASK 0x7ff
++#define PANEL_PLANE_BR_BOTTOM_MASK (0xeff << 16)
++#define PANEL_PLANE_BR_RIGHT_MASK 0xeff
+
+ #define PANEL_HORIZONTAL_TOTAL 0x080024
+ #define PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT 16
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 21aeac5..8bbde52 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -2026,7 +2026,6 @@ static void atmel_serial_pm(struct uart_port *port, unsigned int state,
+ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+ {
+- struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ unsigned long flags;
+ unsigned int old_mode, mode, imr, quot, baud;
+
+@@ -2130,29 +2129,11 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
+ mode |= ATMEL_US_USMODE_RS485;
+ } else if (termios->c_cflag & CRTSCTS) {
+ /* RS232 with hardware handshake (RTS/CTS) */
+- if (atmel_use_fifo(port) &&
+- !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
+- /*
+- * with ATMEL_US_USMODE_HWHS set, the controller will
+- * be able to drive the RTS pin high/low when the RX
+- * FIFO is above RXFTHRES/below RXFTHRES2.
+- * It will also disable the transmitter when the CTS
+- * pin is high.
+- * This mode is not activated if CTS pin is a GPIO
+- * because in this case, the transmitter is always
+- * disabled (there must be an internal pull-up
+- * responsible for this behaviour).
+- * If the RTS pin is a GPIO, the controller won't be
+- * able to drive it according to the FIFO thresholds,
+- * but it will be handled by the driver.
+- */
+- mode |= ATMEL_US_USMODE_HWHS;
++ if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) {
++ dev_info(port->dev, "not enabling hardware flow control because DMA is used");
++ termios->c_cflag &= ~CRTSCTS;
+ } else {
+- /*
+- * For platforms without FIFO, the flow control is
+- * handled by the driver.
+- */
+- mode |= ATMEL_US_USMODE_NORMAL;
++ mode |= ATMEL_US_USMODE_HWHS;
+ }
+ } else {
+ /* RS232 without hardware handshake */
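In the version shown here, atmel_set_termios() refuses hardware handshake outright when reception uses DMA without a FIFO, clearing CRTSCTS behind the caller's back. For context, CRTSCTS reaches the driver from user space through termios; a minimal caller that would exercise this branch (/dev/ttyS1 is a placeholder path):

        #include <fcntl.h>
        #include <termios.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = open("/dev/ttyS1", O_RDWR | O_NOCTTY);
                struct termios tio;

                if (fd < 0)
                        return 1;
                tcgetattr(fd, &tio);
                tio.c_cflag |= CRTSCTS;         /* request RTS/CTS handshake */
                tcsetattr(fd, TCSANOW, &tio);   /* the driver may clear it again */
                close(fd);
                return 0;
        }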
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 3ca9fdb..0f3f62e 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -946,6 +946,8 @@ static int wait_serial_change(struct acm *acm, unsigned long arg)
+ DECLARE_WAITQUEUE(wait, current);
+ struct async_icount old, new;
+
++ if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD))
++ return -EINVAL;
+ do {
+ spin_lock_irq(&acm->read_lock);
+ old = acm->oldcount;
+@@ -1173,8 +1175,6 @@ static int acm_probe(struct usb_interface *intf,
+ if (quirks == IGNORE_DEVICE)
+ return -ENODEV;
+
+- memset(&h, 0x00, sizeof(struct usb_cdc_parsed_header));
+-
+ num_rx_buf = (quirks == SINGLE_RX_URB) ? 1 : ACM_NR;
+
+ /* handle quirks deadly to normal probing*/
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 2d47010..35d0924 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -669,14 +669,15 @@ static int dwc3_core_init(struct dwc3 *dwc)
+ return 0;
+
+ err4:
+- phy_power_off(dwc->usb3_generic_phy);
++ phy_power_off(dwc->usb2_generic_phy);
+
+ err3:
+- phy_power_off(dwc->usb2_generic_phy);
++ phy_power_off(dwc->usb3_generic_phy);
+
+ err2:
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
++ dwc3_core_exit(dwc);
+
+ err1:
+ usb_phy_shutdown(dwc->usb2_phy);
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index f590ada..9b9e71f 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -585,6 +585,14 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
+
+ req->length = length;
+
++ /* throttle high/super speed IRQ rate back slightly */
++ if (gadget_is_dualspeed(dev->gadget))
++ req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
++ dev->gadget->speed == USB_SPEED_SUPER)) &&
++ !list_empty(&dev->tx_reqs))
++ ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
++ : 0;
++
+ retval = usb_ep_queue(in, req, GFP_ATOMIC);
+ switch (retval) {
+ default:
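The no_interrupt expression added above is interrupt moderation: at high or super speed, while the TX queue is non-empty, only every qmult-th request asks for a completion IRQ. A minimal model of the predicate (standalone C; the value 5 is the driver's usual qmult module-parameter default, assumed here rather than taken from this hunk):

        #include <stdio.h>

        /* returns 1 when this request's completion IRQ may be skipped */
        static int suppress_completion_irq(int fast_speed, int queue_nonempty,
                                           int tx_qlen, int qmult)
        {
                return (fast_speed && queue_nonempty)
                        ? ((tx_qlen % qmult) != 0) : 0;
        }

        int main(void)
        {
                int qlen;

                for (qlen = 1; qlen <= 10; qlen++)  /* prints 1 1 1 1 0 1 1 1 1 0 */
                        printf("%d ", suppress_completion_irq(1, 1, qlen, 5));
                printf("\n");
                return 0;
        }

So roughly one completion interrupt services qmult queued packets, cutting IRQ load at the fast speeds where back-to-back completions would otherwise dominate.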
+diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
+index 74265b2..6abb83c 100644
+--- a/drivers/watchdog/watchdog_core.c
++++ b/drivers/watchdog/watchdog_core.c
+@@ -349,7 +349,7 @@ int devm_watchdog_register_device(struct device *dev,
+ struct watchdog_device **rcwdd;
+ int ret;
+
+- rcwdd = devres_alloc(devm_watchdog_unregister_device, sizeof(*rcwdd),
++ rcwdd = devres_alloc(devm_watchdog_unregister_device, sizeof(*wdd),
+ GFP_KERNEL);
+ if (!rcwdd)
+ return -ENOMEM;
+diff --git a/fs/coredump.c b/fs/coredump.c
+index eb9c92c..281b768 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -1,7 +1,6 @@
+ #include <linux/slab.h>
+ #include <linux/file.h>
+ #include <linux/fdtable.h>
+-#include <linux/freezer.h>
+ #include <linux/mm.h>
+ #include <linux/stat.h>
+ #include <linux/fcntl.h>
+@@ -424,9 +423,7 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
+ if (core_waiters > 0) {
+ struct core_thread *ptr;
+
+- freezer_do_not_count();
+ wait_for_completion(&core_state->startup);
+- freezer_count();
+ /*
+ * Wait for all the threads to become inactive, so that
+ * all the thread context (extended register state, like
+diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
+index 150c5a1..b629730 100644
+--- a/fs/nfs/nfs4session.c
++++ b/fs/nfs/nfs4session.c
+@@ -178,14 +178,12 @@ static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid,
+ __must_hold(&tbl->slot_tbl_lock)
+ {
+ struct nfs4_slot *slot;
+- int ret;
+
+ slot = nfs4_lookup_slot(tbl, slotid);
+- ret = PTR_ERR_OR_ZERO(slot);
+- if (!ret)
+- *seq_nr = slot->seq_nr;
+-
+- return ret;
++ if (IS_ERR(slot))
++ return PTR_ERR(slot);
++ *seq_nr = slot->seq_nr;
++ return 0;
+ }
+
+ /*
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index 67d1d3e..c5eaf2f 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -318,7 +318,6 @@ struct pci_dev;
+ int acpi_pci_irq_enable (struct pci_dev *dev);
+ void acpi_penalize_isa_irq(int irq, int active);
+ bool acpi_isa_irq_available(int irq);
+-void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
+ void acpi_pci_irq_disable (struct pci_dev *dev);
+
+ extern int ec_read(u8 addr, u8 *val);
+diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
+index 1d18af0..c46d2aa 100644
+--- a/include/linux/frontswap.h
++++ b/include/linux/frontswap.h
+@@ -106,9 +106,8 @@ static inline void frontswap_invalidate_area(unsigned type)
+
+ static inline void frontswap_init(unsigned type, unsigned long *map)
+ {
+-#ifdef CONFIG_FRONTSWAP
+- __frontswap_init(type, map);
+-#endif
++ if (frontswap_enabled())
++ __frontswap_init(type, map);
+ }
+
+ #endif /* _LINUX_FRONTSWAP_H */
+diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
+index 3584bc8..d6917b8 100644
+--- a/include/linux/sunrpc/svc_rdma.h
++++ b/include/linux/sunrpc/svc_rdma.h
+@@ -86,7 +86,6 @@ struct svc_rdma_op_ctxt {
+ unsigned long flags;
+ enum dma_data_direction direction;
+ int count;
+- unsigned int mapped_sges;
+ struct ib_sge sge[RPCSVC_MAXPAGES];
+ struct page *pages[RPCSVC_MAXPAGES];
+ };
+@@ -194,14 +193,6 @@ struct svcxprt_rdma {
+
+ #define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
+
+-/* Track DMA maps for this transport and context */
+-static inline void svc_rdma_count_mappings(struct svcxprt_rdma *rdma,
+- struct svc_rdma_op_ctxt *ctxt)
+-{
+- ctxt->mapped_sges++;
+- atomic_inc(&rdma->sc_dma_used);
+-}
+-
+ /* svc_rdma_backchannel.c */
+ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
+ struct rpcrdma_msg *rmsgp,
+diff --git a/lib/genalloc.c b/lib/genalloc.c
+index 144fe6b..0a11396 100644
+--- a/lib/genalloc.c
++++ b/lib/genalloc.c
+@@ -292,7 +292,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
+ struct gen_pool_chunk *chunk;
+ unsigned long addr = 0;
+ int order = pool->min_alloc_order;
+- int nbits, start_bit, end_bit, remain;
++ int nbits, start_bit = 0, end_bit, remain;
+
+ #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+@@ -307,7 +307,6 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
+ if (size > atomic_read(&chunk->avail))
+ continue;
+
+- start_bit = 0;
+ end_bit = chunk_size(chunk) >> order;
+ retry:
+ start_bit = algo(chunk->bits, end_bit, start_bit,
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 0ddce6a..770d83e 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1826,17 +1826,11 @@ static void return_unused_surplus_pages(struct hstate *h,
+ * is not the case is if a reserve map was changed between calls. It
+ * is the responsibility of the caller to notice the difference and
+ * take appropriate action.
+- *
+- * vma_add_reservation is used in error paths where a reservation must
+- * be restored when a newly allocated huge page must be freed. It is
+- * to be called after calling vma_needs_reservation to determine if a
+- * reservation exists.
+ */
+ enum vma_resv_mode {
+ VMA_NEEDS_RESV,
+ VMA_COMMIT_RESV,
+ VMA_END_RESV,
+- VMA_ADD_RESV,
+ };
+ static long __vma_reservation_common(struct hstate *h,
+ struct vm_area_struct *vma, unsigned long addr,
+@@ -1862,14 +1856,6 @@ static long __vma_reservation_common(struct hstate *h,
+ region_abort(resv, idx, idx + 1);
+ ret = 0;
+ break;
+- case VMA_ADD_RESV:
+- if (vma->vm_flags & VM_MAYSHARE)
+- ret = region_add(resv, idx, idx + 1);
+- else {
+- region_abort(resv, idx, idx + 1);
+- ret = region_del(resv, idx, idx + 1);
+- }
+- break;
+ default:
+ BUG();
+ }
+@@ -1917,56 +1903,6 @@ static void vma_end_reservation(struct hstate *h,
+ (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
+ }
+
+-static long vma_add_reservation(struct hstate *h,
+- struct vm_area_struct *vma, unsigned long addr)
+-{
+- return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
+-}
+-
+-/*
+- * This routine is called to restore a reservation on error paths. In the
+- * specific error paths, a huge page was allocated (via alloc_huge_page)
+- * and is about to be freed. If a reservation for the page existed,
+- * alloc_huge_page would have consumed the reservation and set PagePrivate
+- * in the newly allocated page. When the page is freed via free_huge_page,
+- * the global reservation count will be incremented if PagePrivate is set.
+- * However, free_huge_page can not adjust the reserve map. Adjust the
+- * reserve map here to be consistent with global reserve count adjustments
+- * to be made by free_huge_page.
+- */
+-static void restore_reserve_on_error(struct hstate *h,
+- struct vm_area_struct *vma, unsigned long address,
+- struct page *page)
+-{
+- if (unlikely(PagePrivate(page))) {
+- long rc = vma_needs_reservation(h, vma, address);
+-
+- if (unlikely(rc < 0)) {
+- /*
+- * Rare out of memory condition in reserve map
+- * manipulation. Clear PagePrivate so that
+- * global reserve count will not be incremented
+- * by free_huge_page. This will make it appear
+- * as though the reservation for this page was
+- * consumed. This may prevent the task from
+- * faulting in the page at a later time. This
+- * is better than inconsistent global huge page
+- * accounting of reserve counts.
+- */
+- ClearPagePrivate(page);
+- } else if (rc) {
+- rc = vma_add_reservation(h, vma, address);
+- if (unlikely(rc < 0))
+- /*
+- * See above comment about rare out of
+- * memory condition.
+- */
+- ClearPagePrivate(page);
+- } else
+- vma_end_reservation(h, vma, address);
+- }
+-}
+-
+ struct page *alloc_huge_page(struct vm_area_struct *vma,
+ unsigned long addr, int avoid_reserve)
+ {
+@@ -3562,7 +3498,6 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
+ spin_unlock(ptl);
+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+ out_release_all:
+- restore_reserve_on_error(h, vma, address, new_page);
+ put_page(new_page);
+ out_release_old:
+ put_page(old_page);
+@@ -3745,7 +3680,6 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ spin_unlock(ptl);
+ backout_unlocked:
+ unlock_page(page);
+- restore_reserve_on_error(h, vma, address, page);
+ put_page(page);
+ goto out;
+ }
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 19e796d..de88f33 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1112,10 +1112,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+ }
+
+ if (!PageHuge(p) && PageTransHuge(hpage)) {
+- lock_page(p);
+- if (!PageAnon(p) || unlikely(split_huge_page(p))) {
+- unlock_page(p);
+- if (!PageAnon(p))
++ lock_page(hpage);
++ if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
++ unlock_page(hpage);
++ if (!PageAnon(hpage))
+ pr_err("Memory failure: %#lx: non anonymous thp\n",
+ pfn);
+ else
+@@ -1126,7 +1126,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+ put_hwpoison_page(p);
+ return -EBUSY;
+ }
+- unlock_page(p);
++ unlock_page(hpage);
++ get_hwpoison_page(p);
++ put_hwpoison_page(hpage);
+ VM_BUG_ON_PAGE(!page_count(p), p);
+ hpage = compound_head(p);
+ }
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 38aa5e0..971fc83 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1483,8 +1483,6 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
+ copy_highpage(newpage, oldpage);
+ flush_dcache_page(newpage);
+
+- __SetPageLocked(newpage);
+- __SetPageSwapBacked(newpage);
+ SetPageUptodate(newpage);
+ set_page_private(newpage, swap_index);
+ SetPageSwapCache(newpage);
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 329b038..71f0b28 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -533,8 +533,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
+
+ s = create_cache(cache_name, root_cache->object_size,
+ root_cache->size, root_cache->align,
+- root_cache->flags & CACHE_CREATE_MASK,
+- root_cache->ctor, memcg, root_cache);
++ root_cache->flags, root_cache->ctor,
++ memcg, root_cache);
+ /*
+ * If we could not create a memcg cache, do not complain, because
+ * that's not critical at all as we can always proceed with the root
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index bf262e4..2657acc 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2218,8 +2218,6 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
+ swab32s(&swap_header->info.version);
+ swab32s(&swap_header->info.last_page);
+ swab32s(&swap_header->info.nr_badpages);
+- if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
+- return 0;
+ for (i = 0; i < swap_header->info.nr_badpages; i++)
+ swab32s(&swap_header->info.badpages[i]);
+ }
+diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
+index 3e9667e..3940b5d 100644
+--- a/net/batman-adv/originator.c
++++ b/net/batman-adv/originator.c
+@@ -537,7 +537,7 @@ batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
+ if (bat_priv->algo_ops->neigh.hardif_init)
+ bat_priv->algo_ops->neigh.hardif_init(hardif_neigh);
+
+- hlist_add_head_rcu(&hardif_neigh->list, &hard_iface->neigh_list);
++ hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
+
+ out:
+ spin_unlock_bh(&hard_iface->neigh_list_lock);
+diff --git a/net/ceph/ceph_fs.c b/net/ceph/ceph_fs.c
+index dcbe67f..7d54e94 100644
+--- a/net/ceph/ceph_fs.c
++++ b/net/ceph/ceph_fs.c
+@@ -34,8 +34,7 @@ void ceph_file_layout_from_legacy(struct ceph_file_layout *fl,
+ fl->stripe_count = le32_to_cpu(legacy->fl_stripe_count);
+ fl->object_size = le32_to_cpu(legacy->fl_object_size);
+ fl->pool_id = le32_to_cpu(legacy->fl_pg_pool);
+- if (fl->pool_id == 0 && fl->stripe_unit == 0 &&
+- fl->stripe_count == 0 && fl->object_size == 0)
++ if (fl->pool_id == 0)
+ fl->pool_id = -1;
+ }
+ EXPORT_SYMBOL(ceph_file_layout_from_legacy);
+diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
+index 1df2c8d..aa5847a 100644
+--- a/net/netfilter/nf_log.c
++++ b/net/netfilter/nf_log.c
+@@ -420,7 +420,7 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
+ char buf[NFLOGGER_NAME_LEN];
+ int r = 0;
+ int tindex = (unsigned long)table->extra1;
+- struct net *net = table->extra2;
++ struct net *net = current->nsproxy->net_ns;
+
+ if (write) {
+ struct ctl_table tmp = *table;
+@@ -474,6 +474,7 @@ static int netfilter_log_sysctl_init(struct net *net)
+ 3, "%d", i);
+ nf_log_sysctl_table[i].procname =
+ nf_log_sysctl_fnames[i];
++ nf_log_sysctl_table[i].data = NULL;
+ nf_log_sysctl_table[i].maxlen = NFLOGGER_NAME_LEN;
+ nf_log_sysctl_table[i].mode = 0644;
+ nf_log_sysctl_table[i].proc_handler =
+@@ -483,9 +484,6 @@ static int netfilter_log_sysctl_init(struct net *net)
+ }
+ }
+
+- for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
+- table[i].extra2 = net;
+-
+ net->nf.nf_log_dir_header = register_net_sysctl(net,
+ "net/netfilter/nf_log",
+ table);
+diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
+index 2761377..892b5e1 100644
+--- a/net/sunrpc/xprtrdma/frwr_ops.c
++++ b/net/sunrpc/xprtrdma/frwr_ops.c
+@@ -44,20 +44,18 @@
+ * being done.
+ *
+ * When the underlying transport disconnects, MRs are left in one of
+- * four states:
++ * three states:
+ *
+ * INVALID: The MR was not in use before the QP entered ERROR state.
++ * (Or, the LOCAL_INV WR has not completed or flushed yet).
+ *
+- * VALID: The MR was registered before the QP entered ERROR state.
+- *
+- * FLUSHED_FR: The MR was being registered when the QP entered ERROR
+- * state, and the pending WR was flushed.
++ * STALE: The MR was being registered or unregistered when the QP
++ * entered ERROR state, and the pending WR was flushed.
+ *
+- * FLUSHED_LI: The MR was being invalidated when the QP entered ERROR
+- * state, and the pending WR was flushed.
++ * VALID: The MR was registered before the QP entered ERROR state.
+ *
+- * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered
+- * with ib_dereg_mr and then are re-initialized. Because MR recovery
++ * When frwr_op_map encounters STALE and VALID MRs, they are recovered
++ * with ib_dereg_mr and then are re-initialized. Because MR recovery
+ * allocates fresh resources, it is deferred to a workqueue, and the
+ * recovered MRs are placed back on the rb_mws list when recovery is
+ * complete. frwr_op_map allocates another MR for the current RPC while
+@@ -177,15 +175,12 @@ __frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
+ static void
+ frwr_op_recover_mr(struct rpcrdma_mw *mw)
+ {
+- enum rpcrdma_frmr_state state = mw->frmr.fr_state;
+ struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+ int rc;
+
+ rc = __frwr_reset_mr(ia, mw);
+- if (state != FRMR_FLUSHED_LI)
+- ib_dma_unmap_sg(ia->ri_device,
+- mw->mw_sg, mw->mw_nents, mw->mw_dir);
++ ib_dma_unmap_sg(ia->ri_device, mw->mw_sg, mw->mw_nents, mw->mw_dir);
+ if (rc)
+ goto out_release;
+
+@@ -266,8 +261,10 @@ frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
+ }
+
+ static void
+-__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
++__frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_frmr *frmr,
++ const char *wr)
+ {
++ frmr->fr_state = FRMR_IS_STALE;
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
+ wr, ib_wc_status_msg(wc->status),
+@@ -290,8 +287,7 @@ frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
+ if (wc->status != IB_WC_SUCCESS) {
+ cqe = wc->wr_cqe;
+ frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
+- frmr->fr_state = FRMR_FLUSHED_FR;
+- __frwr_sendcompletion_flush(wc, "fastreg");
++ __frwr_sendcompletion_flush(wc, frmr, "fastreg");
+ }
+ }
+
+@@ -311,8 +307,7 @@ frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
+ if (wc->status != IB_WC_SUCCESS) {
+ cqe = wc->wr_cqe;
+ frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
+- frmr->fr_state = FRMR_FLUSHED_LI;
+- __frwr_sendcompletion_flush(wc, "localinv");
++ __frwr_sendcompletion_flush(wc, frmr, "localinv");
+ }
+ }
+
+@@ -332,11 +327,9 @@ frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
+ /* WARNING: Only wr_cqe and status are reliable at this point */
+ cqe = wc->wr_cqe;
+ frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
+- if (wc->status != IB_WC_SUCCESS) {
+- frmr->fr_state = FRMR_FLUSHED_LI;
+- __frwr_sendcompletion_flush(wc, "localinv");
+- }
+- complete(&frmr->fr_linv_done);
++ if (wc->status != IB_WC_SUCCESS)
++ __frwr_sendcompletion_flush(wc, frmr, "localinv");
++ complete_all(&frmr->fr_linv_done);
+ }
+
+ /* Post a REG_MR Work Request to register a memory region
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+index cd0c558..a2a7519 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+@@ -129,7 +129,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
+ ret = -EIO;
+ goto out_unmap;
+ }
+- svc_rdma_count_mappings(rdma, ctxt);
++ atomic_inc(&rdma->sc_dma_used);
+
+ memset(&send_wr, 0, sizeof(send_wr));
+ ctxt->cqe.done = svc_rdma_wc_send;
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index ad1df97..2c25606 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -159,7 +159,7 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
+ ctxt->sge[pno].addr);
+ if (ret)
+ goto err;
+- svc_rdma_count_mappings(xprt, ctxt);
++ atomic_inc(&xprt->sc_dma_used);
+
+ ctxt->sge[pno].lkey = xprt->sc_pd->local_dma_lkey;
+ ctxt->sge[pno].length = len;
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+index 3b95b19..54d53330 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+@@ -280,7 +280,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
+ if (ib_dma_mapping_error(xprt->sc_cm_id->device,
+ sge[sge_no].addr))
+ goto err;
+- svc_rdma_count_mappings(xprt, ctxt);
++ atomic_inc(&xprt->sc_dma_used);
+ sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
+ ctxt->count++;
+ sge_off = 0;
+@@ -489,7 +489,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
+ ctxt->sge[0].length, DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
+ goto err;
+- svc_rdma_count_mappings(rdma, ctxt);
++ atomic_inc(&rdma->sc_dma_used);
+
+ ctxt->direction = DMA_TO_DEVICE;
+
+@@ -505,7 +505,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
+ if (ib_dma_mapping_error(rdma->sc_cm_id->device,
+ ctxt->sge[sge_no].addr))
+ goto err;
+- svc_rdma_count_mappings(rdma, ctxt);
++ atomic_inc(&rdma->sc_dma_used);
+ ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
+ ctxt->sge[sge_no].length = sge_bytes;
+ }
+@@ -523,9 +523,23 @@ static int send_reply(struct svcxprt_rdma *rdma,
+ ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
+ ctxt->count++;
+ rqstp->rq_respages[page_no] = NULL;
++ /*
++ * If there are more pages than SGE, terminate SGE
++ * list so that svc_rdma_unmap_dma doesn't attempt to
++ * unmap garbage.
++ */
++ if (page_no+1 >= sge_no)
++ ctxt->sge[page_no+1].length = 0;
+ }
+ rqstp->rq_next_page = rqstp->rq_respages + 1;
+
++ /* The loop above bumps sc_dma_used for each sge. The
++ * xdr_buf.tail gets a separate sge, but resides in the
++ * same page as xdr_buf.head. Don't count it twice.
++ */
++ if (sge_no > ctxt->count)
++ atomic_dec(&rdma->sc_dma_used);
++
+ if (sge_no > rdma->sc_max_sge) {
+ pr_err("svcrdma: Too many sges (%d)\n", sge_no);
+ goto err;
+@@ -621,7 +635,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
+ ret = send_reply(rdma, rqstp, res_page, rdma_resp, vec,
+ inline_bytes);
+ if (ret < 0)
+- goto err0;
++ goto err1;
+
+ svc_rdma_put_req_map(rdma, vec);
+ dprintk("svcrdma: send_reply returns %d\n", ret);
+@@ -678,7 +692,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
+ svc_rdma_put_context(ctxt, 1);
+ return;
+ }
+- svc_rdma_count_mappings(xprt, ctxt);
++ atomic_inc(&xprt->sc_dma_used);
+
+ /* Prepare SEND WR */
+ memset(&err_wr, 0, sizeof(err_wr));
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index 924271c..dd94401 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -198,7 +198,6 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
+
+ out:
+ ctxt->count = 0;
+- ctxt->mapped_sges = 0;
+ ctxt->frmr = NULL;
+ return ctxt;
+
+@@ -222,27 +221,22 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
+ void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
+ {
+ struct svcxprt_rdma *xprt = ctxt->xprt;
+- struct ib_device *device = xprt->sc_cm_id->device;
+- u32 lkey = xprt->sc_pd->local_dma_lkey;
+- unsigned int i, count;
+-
+- for (count = 0, i = 0; i < ctxt->mapped_sges; i++) {
++ int i;
++ for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
+ /*
+ * Unmap the DMA addr in the SGE if the lkey matches
+ * the local_dma_lkey, otherwise, ignore it since it is
+ * an FRMR lkey and will be unmapped later when the
+ * last WR that uses it completes.
+ */
+- if (ctxt->sge[i].lkey == lkey) {
+- count++;
+- ib_dma_unmap_page(device,
++ if (ctxt->sge[i].lkey == xprt->sc_pd->local_dma_lkey) {
++ atomic_dec(&xprt->sc_dma_used);
++ ib_dma_unmap_page(xprt->sc_cm_id->device,
+ ctxt->sge[i].addr,
+ ctxt->sge[i].length,
+ ctxt->direction);
+ }
+ }
+- ctxt->mapped_sges = 0;
+- atomic_sub(count, &xprt->sc_dma_used);
+ }
+
+ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
+@@ -606,7 +600,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
+ DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
+ goto err_put_ctxt;
+- svc_rdma_count_mappings(xprt, ctxt);
++ atomic_inc(&xprt->sc_dma_used);
+ ctxt->sge[sge_no].addr = pa;
+ ctxt->sge[sge_no].length = PAGE_SIZE;
+ ctxt->sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
+diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
+index edc0344..a71b0f5 100644
+--- a/net/sunrpc/xprtrdma/xprt_rdma.h
++++ b/net/sunrpc/xprtrdma/xprt_rdma.h
+@@ -207,8 +207,7 @@ struct rpcrdma_rep {
+ enum rpcrdma_frmr_state {
+ FRMR_IS_INVALID, /* ready to be used */
+ FRMR_IS_VALID, /* in use */
+- FRMR_FLUSHED_FR, /* flushed FASTREG WR */
+- FRMR_FLUSHED_LI, /* flushed LOCALINV WR */
++ FRMR_IS_STALE, /* failed completion */
+ };
+
+ struct rpcrdma_frmr {
+diff --git a/sound/core/info.c b/sound/core/info.c
+index 8ab72e0..895362a 100644
+--- a/sound/core/info.c
++++ b/sound/core/info.c
+@@ -325,15 +325,10 @@ static ssize_t snd_info_text_entry_write(struct file *file,
+ size_t next;
+ int err = 0;
+
+- if (!entry->c.text.write)
+- return -EIO;
+ pos = *offset;
+ if (!valid_pos(pos, count))
+ return -EIO;
+ next = pos + count;
+- /* don't handle too large text inputs */
+- if (next > 16 * 1024)
+- return -EIO;
+ mutex_lock(&entry->access);
+ buf = data->wbuffer;
+ if (!buf) {
+@@ -371,9 +366,7 @@ static int snd_info_seq_show(struct seq_file *seq, void *p)
+ struct snd_info_private_data *data = seq->private;
+ struct snd_info_entry *entry = data->entry;
+
+- if (!entry->c.text.read) {
+- return -EIO;
+- } else {
++ if (entry->c.text.read) {
+ data->rbuffer->buffer = (char *)seq; /* XXX hack! */
+ entry->c.text.read(entry, data->rbuffer);
+ }
+diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
+index 3670086b..e07807d 100644
+--- a/sound/soc/codecs/cs4270.c
++++ b/sound/soc/codecs/cs4270.c
+@@ -148,11 +148,11 @@ SND_SOC_DAPM_OUTPUT("AOUTR"),
+ };
+
+ static const struct snd_soc_dapm_route cs4270_dapm_routes[] = {
+- { "Capture", NULL, "AINL" },
+- { "Capture", NULL, "AINR" },
++ { "Capture", NULL, "AINA" },
++ { "Capture", NULL, "AINB" },
+
+- { "AOUTL", NULL, "Playback" },
+- { "AOUTR", NULL, "Playback" },
++ { "AOUTA", NULL, "Playback" },
++ { "AOUTB", NULL, "Playback" },
+ };
+
+ /**
+diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
+index 7b7a380..e3e7641 100644
+--- a/sound/soc/intel/skylake/skl.c
++++ b/sound/soc/intel/skylake/skl.c
+@@ -785,7 +785,8 @@ static void skl_remove(struct pci_dev *pci)
+
+ release_firmware(skl->tplg);
+
+- pm_runtime_get_noresume(&pci->dev);
++ if (pci_dev_run_wake(pci))
++ pm_runtime_get_noresume(&pci->dev);
+
+ /* codec removal, invoke bus_device_remove */
+ snd_hdac_ext_bus_device_remove(ebus);
+diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
+index 03c18db..44f170c 100644
+--- a/sound/soc/sunxi/sun4i-codec.c
++++ b/sound/soc/sunxi/sun4i-codec.c
+@@ -738,11 +738,11 @@ static struct snd_soc_card *sun4i_codec_create_card(struct device *dev)
+
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+- return ERR_PTR(-ENOMEM);
++ return NULL;
+
+ card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
+ if (!card->dai_link)
+- return ERR_PTR(-ENOMEM);
++ return NULL;
+
+ card->dev = dev;
+ card->name = "sun4i-codec";
+@@ -842,8 +842,7 @@ static int sun4i_codec_probe(struct platform_device *pdev)
+ }
+
+ card = sun4i_codec_create_card(&pdev->dev);
+- if (IS_ERR(card)) {
+- ret = PTR_ERR(card);
++ if (!card) {
+ dev_err(&pdev->dev, "Failed to create our card\n");
+ goto err_unregister_codec;
+ }
+diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
+index 4ad1eac..7aee954 100644
+--- a/tools/perf/ui/browsers/hists.c
++++ b/tools/perf/ui/browsers/hists.c
+@@ -595,8 +595,7 @@ int hist_browser__run(struct hist_browser *browser, const char *help)
+ u64 nr_entries;
+ hbt->timer(hbt->arg);
+
+- if (hist_browser__has_filter(browser) ||
+- symbol_conf.report_hierarchy)
++ if (hist_browser__has_filter(browser))
+ hist_browser__update_nr_entries(browser);
+
+ nr_entries = hist_browser__nr_entries(browser);
+diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c
+index 1eef0ae..b4bf769 100644
+--- a/tools/power/cpupower/utils/cpufreq-set.c
++++ b/tools/power/cpupower/utils/cpufreq-set.c
+@@ -296,7 +296,7 @@ int cmd_freq_set(int argc, char **argv)
+ struct cpufreq_affected_cpus *cpus;
+
+ if (!bitmask_isbitset(cpus_chosen, cpu) ||
+- cpupower_is_cpu_online(cpu) != 1)
++ cpupower_is_cpu_online(cpu))
+ continue;
+
+ cpus = cpufreq_get_related_cpus(cpu);
+@@ -316,7 +316,10 @@ int cmd_freq_set(int argc, char **argv)
+ cpu <= bitmask_last(cpus_chosen); cpu++) {
+
+ if (!bitmask_isbitset(cpus_chosen, cpu) ||
+- cpupower_is_cpu_online(cpu) != 1)
++ cpupower_is_cpu_online(cpu))
++ continue;
++
++ if (cpupower_is_cpu_online(cpu) != 1)
+ continue;
+
+ printf(_("Setting cpu: %d\n"), cpu);
+diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
+index d1b080c..3bad3c5 100644
+--- a/virt/kvm/arm/vgic/vgic-mmio.c
++++ b/virt/kvm/arm/vgic/vgic-mmio.c
+@@ -453,33 +453,17 @@ struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
+ return container_of(dev, struct vgic_io_device, dev);
+ }
+
+-static bool check_region(const struct kvm *kvm,
+- const struct vgic_register_region *region,
++static bool check_region(const struct vgic_register_region *region,
+ gpa_t addr, int len)
+ {
+- int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
+-
+- switch (len) {
+- case sizeof(u8):
+- flags = VGIC_ACCESS_8bit;
+- break;
+- case sizeof(u32):
+- flags = VGIC_ACCESS_32bit;
+- break;
+- case sizeof(u64):
+- flags = VGIC_ACCESS_64bit;
+- break;
+- default:
+- return false;
+- }
+-
+- if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
+- if (!region->bits_per_irq)
+- return true;
+-
+- /* Do we access a non-allocated IRQ? */
+- return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
+- }
++ if ((region->access_flags & VGIC_ACCESS_8bit) && len == 1)
++ return true;
++ if ((region->access_flags & VGIC_ACCESS_32bit) &&
++ len == sizeof(u32) && !(addr & 3))
++ return true;
++ if ((region->access_flags & VGIC_ACCESS_64bit) &&
++ len == sizeof(u64) && !(addr & 7))
++ return true;
+
+ return false;
+ }
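The check_region() variant shown here spells alignment out as !(addr & 3) and !(addr & 7). For any power-of-two length this is the standard idiom: (addr & (len - 1)) == 0 holds exactly when addr is a multiple of len, which is also what the IS_ALIGNED(addr, len) form in the other branch of this hunk expands to. For example, addr = 0x1004 passes the 4-byte test (0x1004 & 3 == 0) but fails the 8-byte one (0x1004 & 7 == 4).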
+@@ -493,7 +477,7 @@ static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+
+ region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
+ addr - iodev->base_addr);
+- if (!region || !check_region(vcpu->kvm, region, addr, len)) {
++ if (!region || !check_region(region, addr, len)) {
+ memset(val, 0, len);
+ return 0;
+ }
+@@ -526,7 +510,10 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+
+ region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
+ addr - iodev->base_addr);
+- if (!region || !check_region(vcpu->kvm, region, addr, len))
++ if (!region)
++ return 0;
++
++ if (!check_region(region, addr, len))
+ return 0;
+
+ switch (iodev->iodev_type) {
+diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
+index ba63d91..0b3ecf9 100644
+--- a/virt/kvm/arm/vgic/vgic-mmio.h
++++ b/virt/kvm/arm/vgic/vgic-mmio.h
+@@ -50,15 +50,15 @@ extern struct kvm_io_device_ops kvm_io_gic_ops;
+ #define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1)
+
+ /*
+- * (addr & mask) gives us the _byte_ offset for the INT ID.
+- * We multiply this by 8 the get the _bit_ offset, then divide this by
+- * the number of bits to learn the actual INT ID.
+- * But instead of a division (which requires a "long long div" implementation),
+- * we shift by the binary logarithm of <bits>.
+- * This assumes that <bits> is a power of two.
++ * (addr & mask) gives us the byte offset for the INT ID, so we want to
++ * divide this by 'bytes per irq' to get the INT ID, which is given
++ * by '(bits) / 8'. But we do this with fixed-point-arithmetic and
++ * take advantage of the fact that division by a fraction equals
++ * multiplication with the inverted fraction, and scale up both the
++ * numerator and denominator with 8 to support at most 64 bits per IRQ:
+ */
+ #define VGIC_ADDR_TO_INTID(addr, bits) (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
+- 8 >> ilog2(bits))
++ 64 / (bits) / 8)
+
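A quick hand check that the fixed-point form above agrees with the shift-based form it replaces in this hunk (scratch arithmetic, not part of the patch):

        byte offset   bits/IRQ   offset * 64 / bits / 8   offset * 8 >> ilog2(bits)
        32            8          32*64/8/8  = 32          32*8 >> 3 = 32
        4             1          4*64/1/8   = 32          4*8  >> 0 = 32

With 8 bits per IRQ each byte is one INT ID, and with 1 bit per IRQ byte 4 covers IRQs 32..39, so both forms land on the first IRQ described by that byte.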
+ /*
+ * Some VGIC registers store per-IRQ information, with a different number
diff --git a/4.8.8/4420_grsecurity-3.1-4.8.8-201611150756.patch b/4.8.9/4420_grsecurity-3.1-4.8.9-201611192033.patch
index e0579c2..f8ce60c 100644
--- a/4.8.8/4420_grsecurity-3.1-4.8.8-201611150756.patch
+++ b/4.8.9/4420_grsecurity-3.1-4.8.9-201611192033.patch
@@ -407,7 +407,7 @@ index ffab8b5..b8fcd61 100644
A toggle value indicating if modules are allowed to be loaded
diff --git a/Makefile b/Makefile
-index 8f18daa..a2e9eda 100644
+index c1519ab..90f06a0 100644
--- a/Makefile
+++ b/Makefile
@@ -302,7 +302,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -16544,7 +16544,7 @@ index 2ebb5e9..a0b0aa9 100644
};
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
-index 77f28ce..7714ca0 100644
+index 9976fce..bf5f3e0 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -15,3 +15,5 @@ obj-y += vsyscall/
@@ -25812,10 +25812,10 @@ index 0503f5b..f00b6e8 100644
obj-$(CONFIG_X86_64) += mcount_64.o
obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
-index fbd1944..7d27c3c 100644
+index d99ca57..b63aafc 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
-@@ -1357,7 +1357,7 @@ static void __init acpi_reduced_hw_init(void)
+@@ -1358,7 +1358,7 @@ static void __init acpi_reduced_hw_init(void)
* If your system is blacklisted here, but you find that acpi=force
* works for you, please contact linux-acpi@vger.kernel.org
*/
@@ -25824,7 +25824,7 @@ index fbd1944..7d27c3c 100644
/*
* Boxes that need ACPI disabled
*/
-@@ -1432,7 +1432,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
+@@ -1433,7 +1433,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
};
/* second table for DMI checks that should run after early-quirks */
@@ -41425,7 +41425,7 @@ index 6e9f14c..7f9a99d 100644
struct apei_exec_context {
u32 ip;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
-index 60746ef..02a1ddc 100644
+index caea575..4dd8306 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -483,7 +483,7 @@ static void __ghes_print_estatus(const char *pfx,
@@ -43580,7 +43580,7 @@ index 4cb8f21..fc2c3e2 100644
int rs_last_events; /* counter of read or write "events" (unit sectors)
* on the lower level device when we last looked. */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
-index 100be55..eead333 100644
+index 8348272..f2ddf22 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1363,7 +1363,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
@@ -44309,10 +44309,10 @@ index 0f64d14..4cf4d6b 100644
kfree(segment);
return -EFAULT;
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
-index 4431129..3983729 100644
+index 0f7d28a..d8576c6 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
-@@ -1418,8 +1418,8 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
+@@ -1420,8 +1420,8 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
@@ -46809,7 +46809,7 @@ index 39c01b9..ced138c 100644
static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
-index 9aa533c..2f39e50 100644
+index 414a160..d0bb0ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -588,9 +588,6 @@ static struct drm_driver kms_driver = {
@@ -46822,7 +46822,7 @@ index 9aa533c..2f39e50 100644
static struct pci_driver amdgpu_kms_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
-@@ -610,18 +607,20 @@ static int __init amdgpu_init(void)
+@@ -611,18 +608,20 @@ static int __init amdgpu_init(void)
return -EINVAL;
}
DRM_INFO("amdgpu kernel modesetting enabled.\n");
@@ -46847,7 +46847,7 @@ index 9aa533c..2f39e50 100644
+ drm_pci_exit(&kms_driver, &amdgpu_kms_pci_driver);
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
- amdgpu_fence_slab_fini();
+ amd_sched_fence_slab_fini();
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 51321e1..3c80c0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -46861,7 +46861,7 @@ index 51321e1..3c80c0b 100644
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
-index 80120fa..20c5411 100644
+index e86ca39..f935a89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -202,7 +202,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
@@ -47648,10 +47648,10 @@ index 47ef1ca..d352d38 100644
static const struct phm_master_table_header tonga_thermal_set_temperature_range_master = {
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
-index 963a24d..e5d0a91 100644
+index ffe1f85..7017bfc 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
-@@ -140,7 +140,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+@@ -137,7 +137,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
if (r)
return r;
@@ -47661,10 +47661,10 @@ index 963a24d..e5d0a91 100644
return 0;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
-index 7cbbbfb..a1e3949 100644
+index 51068e6..35b4c71 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
-@@ -47,7 +47,7 @@ struct amd_sched_entity {
+@@ -44,7 +44,7 @@ struct amd_sched_entity {
spinlock_t queue_lock;
struct kfifo job_queue;
@@ -47674,10 +47674,10 @@ index 7cbbbfb..a1e3949 100644
struct fence *dependency;
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
-index 6b63bea..d7aa8a9 100644
+index 93ad2e1..970aeca 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
-@@ -41,7 +41,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
+@@ -60,7 +60,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
fence->sched = entity->sched;
spin_lock_init(&fence->lock);
@@ -48339,7 +48339,7 @@ index 5e6a301..b6e143e 100644
/*
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
-index 5de36d8..7d7899c 100644
+index d46fa22..a93cd8a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -50,7 +50,7 @@
@@ -48604,10 +48604,10 @@ index 1c2aec3..f807515 100644
/**
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 63462f2..37eef36 100644
+index e26f889..5d197b7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -15569,13 +15569,13 @@ struct intel_quirk {
+@@ -15592,13 +15592,13 @@ struct intel_quirk {
int subsystem_vendor;
int subsystem_device;
void (*hook)(struct drm_device *dev);
@@ -48624,7 +48624,7 @@ index 63462f2..37eef36 100644
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
-@@ -15583,18 +15583,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+@@ -15606,18 +15606,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
return 1;
}
@@ -48655,7 +48655,7 @@ index 63462f2..37eef36 100644
.hook = quirk_invert_brightness,
},
};
-@@ -15677,7 +15679,7 @@ static void intel_init_quirks(struct drm_device *dev)
+@@ -15700,7 +15702,7 @@ static void intel_init_quirks(struct drm_device *dev)
q->hook(dev);
}
for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
@@ -49655,10 +49655,10 @@ index b79f3b0..a1fd177 100644
{
struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
-index 554ca71..e573a41 100644
+index edd2d03..62af79a 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
-@@ -1276,7 +1276,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
+@@ -1289,7 +1289,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
@@ -53086,7 +53086,7 @@ index 92e2243..8fd9092 100644
.ident = "Shift",
.matches = {
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
-index 96de97a..04eaea7 100644
+index 822fc4a..24fd1f4 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -791,11 +791,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
@@ -53356,7 +53356,7 @@ index 2db74eb..4bbcf9d 100644
smmu->pgsize_bitmap);
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
-index def8ca1..039660d 100644
+index f50e51c..02c0247 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -49,9 +49,6 @@
@@ -53422,7 +53422,7 @@ index def8ca1..039660d 100644
static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
void *cookie)
{
-@@ -658,11 +660,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
+@@ -662,11 +664,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
if (!data->l2_tables)
goto out_free_data;
@@ -53435,7 +53435,7 @@ index def8ca1..039660d 100644
/* We have to do this early for __arm_v7s_alloc_table to work... */
data->iop.cfg = *cfg;
-@@ -751,7 +749,7 @@ static struct iommu_gather_ops dummy_tlb_ops = {
+@@ -755,7 +753,7 @@ static struct iommu_gather_ops dummy_tlb_ops = {
static int __init arm_v7s_do_selftests(void)
{
@@ -53444,7 +53444,7 @@ index def8ca1..039660d 100644
struct io_pgtable_cfg cfg = {
.tlb = &dummy_tlb_ops,
.oas = 32,
-@@ -766,8 +764,8 @@ static int __init arm_v7s_do_selftests(void)
+@@ -770,8 +768,8 @@ static int __init arm_v7s_do_selftests(void)
cfg_cookie = &cfg;
@@ -53455,7 +53455,7 @@ index def8ca1..039660d 100644
pr_err("selftest: failed to allocate io pgtable ops\n");
return -EINVAL;
}
-@@ -776,13 +774,13 @@ static int __init arm_v7s_do_selftests(void)
+@@ -780,13 +778,13 @@ static int __init arm_v7s_do_selftests(void)
* Initial sanity checks.
* Empty page tables shouldn't provide any translations.
*/
@@ -53472,7 +53472,7 @@ index def8ca1..039660d 100644
return __FAIL(ops);
/*
-@@ -792,18 +790,18 @@ static int __init arm_v7s_do_selftests(void)
+@@ -796,18 +794,18 @@ static int __init arm_v7s_do_selftests(void)
i = find_first_bit(&cfg.pgsize_bitmap, BITS_PER_LONG);
while (i != BITS_PER_LONG) {
size = 1UL << i;
@@ -53494,7 +53494,7 @@ index def8ca1..039660d 100644
return __FAIL(ops);
iova += SZ_16M;
-@@ -817,14 +815,14 @@ static int __init arm_v7s_do_selftests(void)
+@@ -821,14 +819,14 @@ static int __init arm_v7s_do_selftests(void)
size = 1UL << __ffs(cfg.pgsize_bitmap);
while (i < loopnr) {
iova_start = i * SZ_16M;
@@ -53512,7 +53512,7 @@ index def8ca1..039660d 100644
!= (size + 42))
return __FAIL(ops);
i++;
-@@ -836,17 +834,17 @@ static int __init arm_v7s_do_selftests(void)
+@@ -840,17 +838,17 @@ static int __init arm_v7s_do_selftests(void)
while (i != BITS_PER_LONG) {
size = 1UL << i;
@@ -53534,7 +53534,7 @@ index def8ca1..039660d 100644
return __FAIL(ops);
iova += SZ_16M;
-@@ -854,7 +852,7 @@ static int __init arm_v7s_do_selftests(void)
+@@ -858,7 +856,7 @@ static int __init arm_v7s_do_selftests(void)
i = find_next_bit(&cfg.pgsize_bitmap, BITS_PER_LONG, i);
}
@@ -70888,10 +70888,10 @@ index 7d31179..a188713 100644
setup_timer(&pp->db_timer, pp_ping, (unsigned long)pp);
pp->db_delay = msecs_to_jiffies(delay_ms);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
-index 60f7eab..1e905da 100644
+index da134a0..e3236aa 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
-@@ -2053,7 +2053,7 @@ static int nvme_resume(struct device *dev)
+@@ -2054,7 +2054,7 @@ static int nvme_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
@@ -71831,6 +71831,19 @@ index b65ce75..d92001e 100644
{
unsigned int i;
struct ibm_struct *ibm;
+diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c
+index 2df07ee..92dca69 100644
+--- a/drivers/platform/x86/toshiba-wmi.c
++++ b/drivers/platform/x86/toshiba-wmi.c
+@@ -64,7 +64,7 @@ static void toshiba_wmi_notify(u32 value, void *context)
+ kfree(response.pointer);
+ }
+
+-static struct dmi_system_id toshiba_wmi_dmi_table[] __initdata = {
++static const struct dmi_system_id toshiba_wmi_dmi_table[] __initconst = {
+ {
+ .ident = "Toshiba laptop",
+ .matches = {
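A pattern that recurs throughout this grsecurity patch, as in the toshiba-wmi hunk above, is constifying DMI/quirk tables that are only ever read after setup. A minimal userspace sketch of the idea (struct and field names here are illustrative, not the kernel's):

#include <stdio.h>

struct dmi_match { const char *ident; const char *vendor; };

/* const: the table lands in a read-only section (.rodata), so a stray
 * or malicious write through a dangling pointer faults instead of
 * silently retargeting the matched entries. */
static const struct dmi_match quirk_table[] = {
	{ "Toshiba laptop", "TOSHIBA" },
	{ NULL, NULL }
};

int main(void)
{
	for (const struct dmi_match *m = quirk_table; m->ident; m++)
		printf("%s: %s\n", m->ident, m->vendor);
	return 0;
}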
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index 3151fd1..12c5b20 100644
--- a/drivers/pnp/base.h
@@ -75054,7 +75067,7 @@ index 750f82c..956cdf0 100644
int ret = param_set_int(val, kp);
struct MPT3SAS_ADAPTER *ioc;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
-index 4cb7990..66bfb63 100644
+index 46c0f5e..f3228ca 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -280,7 +280,7 @@ struct _scsi_io_transfer {
@@ -75330,7 +75343,7 @@ index 6ca0081..fbb9efd 100644
extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
extern void qla2x00_init_host_attr(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
-index 2674f4c..1e15020 100644
+index e46e2c5..c3cea11 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -301,12 +301,12 @@ struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
@@ -75361,7 +75374,7 @@ index 2674f4c..1e15020 100644
return;
}
}
-@@ -5381,8 +5383,9 @@ qla2x00_rst_aen(scsi_qla_host_t *vha)
+@@ -5383,8 +5385,9 @@ qla2x00_rst_aen(scsi_qla_host_t *vha)
* Context: Interrupt
***************************************************************************/
void
@@ -75372,7 +75385,7 @@ index 2674f4c..1e15020 100644
unsigned long cpu_flags = 0;
int start_dpc = 0;
int index;
-@@ -5644,7 +5647,7 @@ qla2x00_release_firmware(void)
+@@ -5646,7 +5649,7 @@ qla2x00_release_firmware(void)
}
static pci_ers_result_t
@@ -80265,10 +80278,45 @@ index 5e4fa92..39fe3d2 100644
struct tty_struct *tty;
struct tty_ldisc *ld;
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
-index ce86487..8ff3311 100644
+index ce86487..c643376 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
-@@ -343,7 +343,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
+@@ -143,7 +143,9 @@ static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
+ static void hvc_console_print(struct console *co, const char *b,
+ unsigned count)
+ {
+- char c[N_OUTBUF] __ALIGNED__;
++ char c_stack[N_OUTBUF] __ALIGNED__;
++ char *c_alloc = NULL;
++ char *c;
+ unsigned i = 0, n = 0;
+ int r, donecr = 0, index = co->index;
+
+@@ -155,8 +157,13 @@ static void hvc_console_print(struct console *co, const char *b,
+ if (vtermnos[index] == -1)
+ return;
+
++ if (slab_is_available())
++ c = c_alloc = kmalloc(N_OUTBUF, GFP_ATOMIC);
++ else
++ c = c_stack;
++
+ while (count > 0 || i > 0) {
+- if (count > 0 && i < sizeof(c)) {
++ if (count > 0 && i < sizeof(c_stack)) {
+ if (b[n] == '\n' && !donecr) {
+ c[i++] = '\r';
+ donecr = 1;
+@@ -179,6 +186,8 @@ static void hvc_console_print(struct console *co, const char *b,
+ }
+ }
+ }
++
++ kfree(c_alloc);
+ }
+
+ static struct tty_driver *hvc_console_device(struct console *c, int *index)
+@@ -343,7 +352,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
spin_lock_irqsave(&hp->port.lock, flags);
/* Check and then increment for fast path open. */
@@ -80277,7 +80325,7 @@ index ce86487..8ff3311 100644
spin_unlock_irqrestore(&hp->port.lock, flags);
hvc_kick();
return 0;
-@@ -398,7 +398,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
+@@ -398,7 +407,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
spin_lock_irqsave(&hp->port.lock, flags);
@@ -80286,7 +80334,7 @@ index ce86487..8ff3311 100644
spin_unlock_irqrestore(&hp->port.lock, flags);
/* We are done with the tty pointer now. */
tty_port_tty_set(&hp->port, NULL);
-@@ -420,9 +420,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
+@@ -420,9 +429,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
*/
tty_wait_until_sent(tty, HVC_CLOSE_WAIT);
} else {
@@ -80298,7 +80346,7 @@ index ce86487..8ff3311 100644
spin_unlock_irqrestore(&hp->port.lock, flags);
}
}
-@@ -452,12 +452,12 @@ static void hvc_hangup(struct tty_struct *tty)
+@@ -452,12 +461,12 @@ static void hvc_hangup(struct tty_struct *tty)
* open->hangup case this can be called after the final close so prevent
* that from happening for now.
*/
@@ -80313,7 +80361,7 @@ index ce86487..8ff3311 100644
spin_unlock_irqrestore(&hp->port.lock, flags);
tty_port_tty_set(&hp->port, NULL);
-@@ -505,7 +505,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
+@@ -505,7 +514,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
return -EPIPE;
/* FIXME what's this (unprotected) check for? */
@@ -101946,10 +101994,10 @@ index 56fb261..8c808f1 100644
struct inode *inode = NULL;
diff --git a/fs/coredump.c b/fs/coredump.c
-index 281b768..f39dcdf 100644
+index eb9c92c..235adec 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
-@@ -483,8 +483,8 @@ static void wait_for_dump_helpers(struct file *file)
+@@ -486,8 +486,8 @@ static void wait_for_dump_helpers(struct file *file)
struct pipe_inode_info *pipe = file->private_data;
pipe_lock(pipe);
@@ -101960,7 +102008,7 @@ index 281b768..f39dcdf 100644
wake_up_interruptible_sync(&pipe->wait);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
pipe_unlock(pipe);
-@@ -493,11 +493,11 @@ static void wait_for_dump_helpers(struct file *file)
+@@ -496,11 +496,11 @@ static void wait_for_dump_helpers(struct file *file)
* We actually want wait_event_freezable() but then we need
* to clear TIF_SIGPENDING and improve dump_interrupted().
*/
@@ -101975,7 +102023,7 @@ index 281b768..f39dcdf 100644
pipe_unlock(pipe);
}
-@@ -544,7 +544,9 @@ void do_coredump(const siginfo_t *siginfo)
+@@ -547,7 +547,9 @@ void do_coredump(const siginfo_t *siginfo)
/* require nonrelative corefile path and be extra careful */
bool need_suid_safe = false;
bool core_dumped = false;
@@ -101986,7 +102034,7 @@ index 281b768..f39dcdf 100644
struct coredump_params cprm = {
.siginfo = siginfo,
.regs = signal_pt_regs(),
-@@ -557,12 +559,17 @@ void do_coredump(const siginfo_t *siginfo)
+@@ -560,12 +562,14 @@ void do_coredump(const siginfo_t *siginfo)
.mm_flags = mm->flags,
};
@@ -101994,9 +102042,6 @@ index 281b768..f39dcdf 100644
+ audit_core_dumps(signr);
+
+ dumpable = __get_dumpable(cprm.mm_flags);
-+
-+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
-+ gr_handle_brute_attach(dumpable);
binfmt = mm->binfmt;
if (!binfmt || !binfmt->core_dump)
@@ -102006,7 +102051,7 @@ index 281b768..f39dcdf 100644
goto fail;
cred = prepare_creds();
-@@ -580,7 +587,7 @@ void do_coredump(const siginfo_t *siginfo)
+@@ -583,7 +587,7 @@ void do_coredump(const siginfo_t *siginfo)
need_suid_safe = true;
}
@@ -102015,7 +102060,7 @@ index 281b768..f39dcdf 100644
if (retval < 0)
goto fail_creds;
-@@ -623,7 +630,7 @@ void do_coredump(const siginfo_t *siginfo)
+@@ -626,7 +630,7 @@ void do_coredump(const siginfo_t *siginfo)
}
cprm.limit = RLIM_INFINITY;
@@ -102024,7 +102069,7 @@ index 281b768..f39dcdf 100644
if (core_pipe_limit && (core_pipe_limit < dump_count)) {
printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
task_tgid_vnr(current), current->comm);
-@@ -657,6 +664,8 @@ void do_coredump(const siginfo_t *siginfo)
+@@ -660,6 +664,8 @@ void do_coredump(const siginfo_t *siginfo)
int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
O_LARGEFILE | O_EXCL;
@@ -102033,7 +102078,7 @@ index 281b768..f39dcdf 100644
if (cprm.limit < binfmt->min_coredump)
goto fail_unlock;
-@@ -682,7 +691,7 @@ void do_coredump(const siginfo_t *siginfo)
+@@ -685,7 +691,7 @@ void do_coredump(const siginfo_t *siginfo)
* If it doesn't exist, that's fine. If there's some
* other problem, we'll catch it at the filp_open().
*/
@@ -102042,7 +102087,7 @@ index 281b768..f39dcdf 100644
set_fs(old_fs);
}
-@@ -763,7 +772,7 @@ close_fail:
+@@ -766,7 +772,7 @@ close_fail:
filp_close(cprm.file, NULL);
fail_dropcount:
if (ispipe)
@@ -102051,7 +102096,17 @@ index 281b768..f39dcdf 100644
fail_unlock:
kfree(cn.corename);
coredump_finish(mm, core_dumped);
-@@ -784,6 +793,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
+@@ -774,6 +780,9 @@ fail_unlock:
+ fail_creds:
+ put_cred(cred);
+ fail:
++ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
++ gr_handle_brute_attach(dumpable);
++
+ return;
+ }
+
+@@ -787,6 +796,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
struct file *file = cprm->file;
loff_t pos = file->f_pos;
ssize_t n;
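The fs/coredump.c hunks above move the grsecurity brute-force notification from the setup path to after the final fail: label, so it runs on every exit from do_coredump(). That placement leans on the kernel's cascading-label cleanup idiom; a self-contained C illustration:

#include <stdio.h>
#include <stdlib.h>

static int do_work(void)
{
	char *a = NULL, *b = NULL;
	int ret = -1;

	a = malloc(32);
	if (!a)
		goto fail;
	b = malloc(64);
	if (!b)
		goto fail_a;

	/* ... the actual work would go here ... */
	ret = 0;

	free(b);
fail_a:
	free(a);
fail:
	/* Code placed after the last label runs on every path, success or
	 * failure -- the slot do_coredump() now uses for the notification. */
	fprintf(stderr, "do_work: ret=%d\n", ret);
	return ret;
}

int main(void)
{
	return do_work() ? EXIT_FAILURE : EXIT_SUCCESS;
}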
@@ -139535,7 +139590,7 @@ index 7321ae9..f37a11e 100644
/*
* Mode for mapping cpus to pools.
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
-index d6917b8..e05ca83 100644
+index 3584bc8..0c8ec4c 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -54,15 +54,15 @@ extern unsigned int svcrdma_max_requests;
@@ -151872,7 +151927,7 @@ index 50b4ca6..cf64608 100644
pkmap_count[last_pkmap_nr] = 1;
set_page_address(page, (void *)vaddr);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index 770d83e..7cd013a 100644
+index 0ddce6a..3f23c4f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -38,7 +38,72 @@ int hugepages_treat_as_movable;
@@ -151949,7 +152004,7 @@ index 770d83e..7cd013a 100644
/*
* Minimum page order among possible hugepage sizes, set to a proper value
* at boot time.
-@@ -2830,6 +2895,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
+@@ -2894,6 +2959,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
@@ -151957,7 +152012,7 @@ index 770d83e..7cd013a 100644
struct hstate *h = &default_hstate;
unsigned long tmp = h->max_huge_pages;
int ret;
-@@ -2837,9 +2903,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
+@@ -2901,9 +2967,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
if (!hugepages_supported())
return -EOPNOTSUPP;
@@ -151971,7 +152026,7 @@ index 770d83e..7cd013a 100644
if (ret)
goto out;
-@@ -2874,6 +2941,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
+@@ -2938,6 +3005,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
struct hstate *h = &default_hstate;
unsigned long tmp;
int ret;
@@ -151979,7 +152034,7 @@ index 770d83e..7cd013a 100644
if (!hugepages_supported())
return -EOPNOTSUPP;
-@@ -2883,9 +2951,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
+@@ -2947,9 +3015,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
if (write && hstate_is_gigantic(h))
return -EINVAL;
@@ -151993,7 +152048,7 @@ index 770d83e..7cd013a 100644
if (ret)
goto out;
-@@ -3379,6 +3448,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3443,6 +3512,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
i_mmap_unlock_write(mapping);
}
@@ -152021,7 +152076,7 @@ index 770d83e..7cd013a 100644
/*
* Hugetlb_cow() should be called with page lock of the original hugepage held.
* Called with hugetlb_instantiation_mutex held and pte_page locked so we
-@@ -3492,6 +3582,11 @@ retry_avoidcopy:
+@@ -3556,6 +3646,11 @@ retry_avoidcopy:
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page, true);
hugepage_add_new_anon_rmap(new_page, vma, address);
@@ -152033,7 +152088,7 @@ index 770d83e..7cd013a 100644
/* Make the old page be freed below */
new_page = old_page;
}
-@@ -3665,6 +3760,10 @@ retry:
+@@ -3730,6 +3825,10 @@ retry:
&& (vma->vm_flags & VM_SHARED)));
set_huge_pte_at(mm, address, ptep, new_pte);
@@ -152044,7 +152099,7 @@ index 770d83e..7cd013a 100644
hugetlb_count_add(pages_per_huge_page(h), mm);
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
-@@ -3733,6 +3832,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3799,6 +3898,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
struct address_space *mapping;
int need_wait_lock = 0;
@@ -152055,7 +152110,7 @@ index 770d83e..7cd013a 100644
address &= huge_page_mask(h);
ptep = huge_pte_offset(mm, address);
-@@ -3750,6 +3853,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3816,6 +3919,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return VM_FAULT_OOM;
}
@@ -152372,7 +152427,7 @@ index dddead1..8832645 100644
}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
-index de88f33..f9d9816 100644
+index 19e796d..9c8fa80 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -64,7 +64,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -155232,7 +155287,7 @@ index 1ef3640..88c345d 100644
/*
diff --git a/mm/shmem.c b/mm/shmem.c
-index 971fc83..6afaf44 100644
+index 38aa5e0..9b3e13b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -34,7 +34,7 @@
@@ -155253,7 +155308,7 @@ index 971fc83..6afaf44 100644
/*
* shmem_fallocate communicates with shmem_fault or shmem_writepage via
-@@ -3255,6 +3255,24 @@ static int shmem_xattr_handler_set(const struct xattr_handler *handler,
+@@ -3257,6 +3257,24 @@ static int shmem_xattr_handler_set(const struct xattr_handler *handler,
return simple_xattr_set(&info->xattrs, name, value, size, flags);
}
@@ -155278,7 +155333,7 @@ index 971fc83..6afaf44 100644
static const struct xattr_handler shmem_security_xattr_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.get = shmem_xattr_handler_get,
-@@ -3267,6 +3285,14 @@ static const struct xattr_handler shmem_trusted_xattr_handler = {
+@@ -3269,6 +3287,14 @@ static const struct xattr_handler shmem_trusted_xattr_handler = {
.set = shmem_xattr_handler_set,
};
@@ -155293,7 +155348,7 @@ index 971fc83..6afaf44 100644
static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
&posix_acl_access_xattr_handler,
-@@ -3274,6 +3300,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+@@ -3276,6 +3302,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
#endif
&shmem_security_xattr_handler,
&shmem_trusted_xattr_handler,
@@ -155305,7 +155360,7 @@ index 971fc83..6afaf44 100644
NULL
};
-@@ -3653,8 +3684,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
+@@ -3655,8 +3686,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
int err = -ENOMEM;
/* Round up to L1_CACHE_BYTES to resist false sharing */
@@ -155648,7 +155703,7 @@ index 9653f2e..9b9e8cd 100644
if (slab_equal_or_root(cachep, s))
return cachep;
diff --git a/mm/slab_common.c b/mm/slab_common.c
-index 71f0b28..83ad94c 100644
+index 329b038..52e9e91 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -25,11 +25,35 @@
@@ -155826,14 +155881,12 @@ index 71f0b28..83ad94c 100644
sysfs_slab_remove(s);
#else
slab_kmem_cache_release(s);
-@@ -533,7 +585,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
-
+@@ -534,6 +586,7 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
s = create_cache(cache_name, root_cache->object_size,
root_cache->size, root_cache->align,
-- root_cache->flags, root_cache->ctor,
-+ root_cache->flags, root_cache->useroffset,
-+ root_cache->usersize, root_cache->ctor,
- memcg, root_cache);
+ root_cache->flags & CACHE_CREATE_MASK,
++ root_cache->useroffset, root_cache->usersize,
+ root_cache->ctor, memcg, root_cache);
/*
* If we could not create a memcg cache, do not complain, because
@@ -718,8 +771,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
@@ -156909,7 +156962,7 @@ index 75c63bb..a4dce20 100644
}
diff --git a/mm/swapfile.c b/mm/swapfile.c
-index 2657acc..7eedf77 100644
+index bf262e4..c5bc390 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -90,7 +90,7 @@ static DEFINE_MUTEX(swapon_mutex);
@@ -156950,7 +157003,7 @@ index 2657acc..7eedf77 100644
return 0;
}
-@@ -2543,7 +2543,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+@@ -2545,7 +2545,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
(frontswap_map) ? "FS" : "");
mutex_unlock(&swapon_mutex);
@@ -164201,7 +164254,7 @@ index 7a394df..bd91a8a 100644
table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
GFP_KERNEL);
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
-index aa5847a..763f663 100644
+index 1df2c8d..3e79332 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -411,7 +411,7 @@ static const struct file_operations nflog_file_ops = {
@@ -164214,7 +164267,7 @@ index aa5847a..763f663 100644
static int nf_log_proc_dostring(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -423,7 +423,7 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
- struct net *net = current->nsproxy->net_ns;
+ struct net *net = table->extra2;
if (write) {
- struct ctl_table tmp = *table;
@@ -164241,6 +164294,15 @@ index aa5847a..763f663 100644
mutex_unlock(&nf_log_mutex);
}
+@@ -459,7 +461,7 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
+ static int netfilter_log_sysctl_init(struct net *net)
+ {
+ int i;
+- struct ctl_table *table;
++ ctl_table_no_const *table;
+
+ table = nf_log_sysctl_table;
+ if (!net_eq(net, &init_net)) {
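The nf_log.c hunk above stops deriving the namespace from current->nsproxy inside the sysctl handler and instead reads the pointer stashed in the table entry (extra2) when the per-net table was registered, so the handler acts on the namespace that owns the table rather than on whichever task invokes it. A toy illustration of handler context carried in the table entry itself (all names here are illustrative, not the kernel API):

#include <stdio.h>

struct net { int id; };

/* Toy analog of struct ctl_table: the entry carries its own context. */
struct entry {
	const char *name;
	int (*handler)(struct entry *e, int write);
	void *extra2;		/* context pointer, set at registration time */
};

static int log_handler(struct entry *e, int write)
{
	struct net *net = e->extra2;	/* not derived from the caller */

	printf("%s %s in netns %d\n", write ? "write" : "read",
	       e->name, net->id);
	return 0;
}

int main(void)
{
	struct net init_net = { 1 }, other_net = { 2 };
	struct entry a = { "nf_log", log_handler, &init_net };
	struct entry b = { "nf_log", log_handler, &other_net };

	/* Whichever task invokes the handler, it acts on the owning netns. */
	a.handler(&a, 0);
	b.handler(&b, 1);
	return 0;
}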
diff --git a/net/netfilter/nf_nat_ftp.c b/net/netfilter/nf_nat_ftp.c
index e84a578..d76afaf 100644
--- a/net/netfilter/nf_nat_ftp.c
@@ -166576,7 +166638,7 @@ index c846ca9..d5968b4 100644
.proc_handler = read_reset_stat,
},
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
-index 2c25606..521a8e0 100644
+index ad1df97..83b90c7 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -200,7 +200,7 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
@@ -166616,7 +166678,7 @@ index 2c25606..521a8e0 100644
/* Build up the XDR from the receive buffers. */
rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
-index 54d53330..fd6c4ac 100644
+index 3b95b19..914e482 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -307,7 +307,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
@@ -166629,10 +166691,10 @@ index 54d53330..fd6c4ac 100644
goto err;
return write_len - bc;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
-index dd94401..9540398 100644
+index 924271c..e7a0ab3 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
-@@ -1298,7 +1298,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
+@@ -1304,7 +1304,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
spin_lock_bh(&xprt->sc_lock);
if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
spin_unlock_bh(&xprt->sc_lock);
diff --git a/4.8.8/4425_grsec_remove_EI_PAX.patch b/4.8.9/4425_grsec_remove_EI_PAX.patch
index 594598a..594598a 100644
--- a/4.8.8/4425_grsec_remove_EI_PAX.patch
+++ b/4.8.9/4425_grsec_remove_EI_PAX.patch
diff --git a/4.8.8/4427_force_XATTR_PAX_tmpfs.patch b/4.8.9/4427_force_XATTR_PAX_tmpfs.patch
index 2562d2f..2562d2f 100644
--- a/4.8.8/4427_force_XATTR_PAX_tmpfs.patch
+++ b/4.8.9/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/4.8.8/4430_grsec-remove-localversion-grsec.patch b/4.8.9/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/4.8.8/4430_grsec-remove-localversion-grsec.patch
+++ b/4.8.9/4430_grsec-remove-localversion-grsec.patch
diff --git a/4.8.8/4435_grsec-mute-warnings.patch b/4.8.9/4435_grsec-mute-warnings.patch
index 8929222..8929222 100644
--- a/4.8.8/4435_grsec-mute-warnings.patch
+++ b/4.8.9/4435_grsec-mute-warnings.patch
diff --git a/4.8.8/4440_grsec-remove-protected-paths.patch b/4.8.9/4440_grsec-remove-protected-paths.patch
index 741546d..741546d 100644
--- a/4.8.8/4440_grsec-remove-protected-paths.patch
+++ b/4.8.9/4440_grsec-remove-protected-paths.patch
diff --git a/4.8.8/4450_grsec-kconfig-default-gids.patch b/4.8.9/4450_grsec-kconfig-default-gids.patch
index 6fd0511..6fd0511 100644
--- a/4.8.8/4450_grsec-kconfig-default-gids.patch
+++ b/4.8.9/4450_grsec-kconfig-default-gids.patch
diff --git a/4.8.8/4465_selinux-avc_audit-log-curr_ip.patch b/4.8.9/4465_selinux-avc_audit-log-curr_ip.patch
index 7248385..7248385 100644
--- a/4.8.8/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/4.8.9/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/4.8.8/4470_disable-compat_vdso.patch b/4.8.9/4470_disable-compat_vdso.patch
index 1e4b84a..1e4b84a 100644
--- a/4.8.8/4470_disable-compat_vdso.patch
+++ b/4.8.9/4470_disable-compat_vdso.patch
diff --git a/4.8.8/4475_emutramp_default_on.patch b/4.8.9/4475_emutramp_default_on.patch
index 7b468ee..7b468ee 100644
--- a/4.8.8/4475_emutramp_default_on.patch
+++ b/4.8.9/4475_emutramp_default_on.patch