Diffstat (limited to '4.8.8/1007_linux-4.8.8.patch')
-rw-r--r--  4.8.8/1007_linux-4.8.8.patch  1846
1 file changed, 1846 insertions, 0 deletions
diff --git a/4.8.8/1007_linux-4.8.8.patch b/4.8.8/1007_linux-4.8.8.patch
new file mode 100644
index 0000000..35fb91c
--- /dev/null
+++ b/4.8.8/1007_linux-4.8.8.patch
@@ -0,0 +1,1846 @@
+diff --git a/Makefile b/Makefile
+index 4d0f28c..8f18daa 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 8
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Psychotic Stoned Sheep
+
+diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
+index ee655ed..1e8fceb 100644
+--- a/arch/powerpc/include/asm/checksum.h
++++ b/arch/powerpc/include/asm/checksum.h
+@@ -53,10 +53,8 @@ static inline __sum16 csum_fold(__wsum sum)
+ return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
+ }
+
+-static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+- unsigned short len,
+- unsigned short proto,
+- __wsum sum)
++static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
++ __u8 proto, __wsum sum)
+ {
+ #ifdef __powerpc64__
+ unsigned long s = (__force u32)sum;
+@@ -83,10 +81,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+-static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+- unsigned short len,
+- unsigned short proto,
+- __wsum sum)
++static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
++ __u8 proto, __wsum sum)
+ {
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+ }
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
+index 9dbfcc0..5ff64af 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib.h
++++ b/drivers/infiniband/ulp/ipoib/ipoib.h
+@@ -63,6 +63,8 @@ enum ipoib_flush_level {
+
+ enum {
+ IPOIB_ENCAP_LEN = 4,
++ IPOIB_PSEUDO_LEN = 20,
++ IPOIB_HARD_LEN = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN,
+
+ IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
+ IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
+@@ -134,15 +136,21 @@ struct ipoib_header {
+ u16 reserved;
+ };
+
+-struct ipoib_cb {
+- struct qdisc_skb_cb qdisc_cb;
+- u8 hwaddr[INFINIBAND_ALEN];
++struct ipoib_pseudo_header {
++ u8 hwaddr[INFINIBAND_ALEN];
+ };
+
+-static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
++static inline void skb_add_pseudo_hdr(struct sk_buff *skb)
+ {
+- BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
+- return (struct ipoib_cb *)skb->cb;
++ char *data = skb_push(skb, IPOIB_PSEUDO_LEN);
++
++ /*
++ * only the ipoib header is present now; make room for a dummy
++ * pseudo header and set the skb fields accordingly
++ */
++ memset(data, 0, IPOIB_PSEUDO_LEN);
++ skb_reset_mac_header(skb);
++ skb_pull(skb, IPOIB_HARD_LEN);
+ }
+
+ /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+index 4ad297d..339a1ee 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+@@ -63,6 +63,8 @@ MODULE_PARM_DESC(cm_data_debug_level,
+ #define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
+ #define IPOIB_CM_RX_UPDATE_MASK (0x3)
+
++#define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
++
+ static struct ib_qp_attr ipoib_cm_err_attr = {
+ .qp_state = IB_QPS_ERR
+ };
+@@ -146,15 +148,15 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
+ struct sk_buff *skb;
+ int i;
+
+- skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
++ skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
+ if (unlikely(!skb))
+ return NULL;
+
+ /*
+- * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
++ * IPoIB adds an IPOIB_ENCAP_LEN byte header; this will align the
+ * IP header to a multiple of 16.
+ */
+- skb_reserve(skb, 12);
++ skb_reserve(skb, IPOIB_CM_RX_RESERVE);
+
+ mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
+ DMA_FROM_DEVICE);
+@@ -624,9 +626,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
+ if (wc->byte_len < IPOIB_CM_COPYBREAK) {
+ int dlen = wc->byte_len;
+
+- small_skb = dev_alloc_skb(dlen + 12);
++ small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
+ if (small_skb) {
+- skb_reserve(small_skb, 12);
++ skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
+ ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
+ dlen, DMA_FROM_DEVICE);
+ skb_copy_from_linear_data(skb, small_skb->data, dlen);
+@@ -663,8 +665,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
+
+ copied:
+ skb->protocol = ((struct ipoib_header *) skb->data)->proto;
+- skb_reset_mac_header(skb);
+- skb_pull(skb, IPOIB_ENCAP_LEN);
++ skb_add_pseudo_hdr(skb);
+
+ ++dev->stats.rx_packets;
+ dev->stats.rx_bytes += skb->len;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+index be11d5d..830fecb 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+@@ -128,16 +128,15 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
+
+ buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+
+- skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
++ skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
+ if (unlikely(!skb))
+ return NULL;
+
+ /*
+- * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
+- * header. So we need 4 more bytes to get to 48 and align the
+- * IP header to a multiple of 16.
++ * the IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, that is
++ * 64 bytes aligned
+ */
+- skb_reserve(skb, 4);
++ skb_reserve(skb, sizeof(struct ipoib_pseudo_header));
+
+ mapping = priv->rx_ring[id].mapping;
+ mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
+@@ -253,8 +252,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
+ skb_pull(skb, IB_GRH_BYTES);
+
+ skb->protocol = ((struct ipoib_header *) skb->data)->proto;
+- skb_reset_mac_header(skb);
+- skb_pull(skb, IPOIB_ENCAP_LEN);
++ skb_add_pseudo_hdr(skb);
+
+ ++dev->stats.rx_packets;
+ dev->stats.rx_bytes += skb->len;
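The alignment asserted by the comments in the ipoib_cm.c and ipoib_ib.c hunks above can be checked with a few lines of arithmetic. The following is an illustrative, standalone C sketch, not kernel code; the constant values are copied from the enum this patch adds to ipoib.h:

#include <assert.h>
#include <stdio.h>

#define IB_GRH_BYTES     40  /* global route header left in the buffer by the HCA */
#define IPOIB_ENCAP_LEN   4  /* real on-the-wire IPoIB header */
#define IPOIB_PSEUDO_LEN 20  /* dummy header carrying the destination LL address */
#define IPOIB_HARD_LEN   (IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN)

int main(void)
{
	/* UD rx: skb_reserve(skb, IPOIB_PSEUDO_LEN), then the GRH and the
	 * encap header arrive in the buffer, so the IP header starts at
	 * 20 + 40 + 4 = 64 bytes, a multiple of 16. */
	unsigned int ud_ip_off = IPOIB_PSEUDO_LEN + IB_GRH_BYTES + IPOIB_ENCAP_LEN;

	/* CM rx: IPOIB_CM_RX_RESERVE = ALIGN(IPOIB_HARD_LEN, 16) -
	 * IPOIB_ENCAP_LEN = 28, and the encap header follows it, so the
	 * IP header lands on offset 32. */
	unsigned int cm_ip_off = (IPOIB_HARD_LEN + 15) & ~15u;

	assert(ud_ip_off == 64);
	assert(cm_ip_off % 16 == 0);
	printf("UD IP offset %u, CM IP offset %u\n", ud_ip_off, cm_ip_off);
	return 0;
}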
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index cc1c1b0..823a528 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -925,9 +925,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
+ ipoib_neigh_free(neigh);
+ goto err_drop;
+ }
+- if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
++ if (skb_queue_len(&neigh->queue) <
++ IPOIB_MAX_PATH_REC_QUEUE) {
++ /* put pseudoheader back on for next time */
++ skb_push(skb, IPOIB_PSEUDO_LEN);
+ __skb_queue_tail(&neigh->queue, skb);
+- else {
++ } else {
+ ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
+ skb_queue_len(&neigh->queue));
+ goto err_drop;
+@@ -964,7 +967,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
+ }
+
+ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
+- struct ipoib_cb *cb)
++ struct ipoib_pseudo_header *phdr)
+ {
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_path *path;
+@@ -972,16 +975,18 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+- path = __path_find(dev, cb->hwaddr + 4);
++ path = __path_find(dev, phdr->hwaddr + 4);
+ if (!path || !path->valid) {
+ int new_path = 0;
+
+ if (!path) {
+- path = path_rec_create(dev, cb->hwaddr + 4);
++ path = path_rec_create(dev, phdr->hwaddr + 4);
+ new_path = 1;
+ }
+ if (path) {
+ if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
++ /* put pseudoheader back on for next time */
++ skb_push(skb, IPOIB_PSEUDO_LEN);
+ __skb_queue_tail(&path->queue, skb);
+ } else {
+ ++dev->stats.tx_dropped;
+@@ -1009,10 +1014,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
+ be16_to_cpu(path->pathrec.dlid));
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+- ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
++ ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
+ return;
+ } else if ((path->query || !path_rec_start(dev, path)) &&
+ skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
++ /* put pseudoheader back on for next time */
++ skb_push(skb, IPOIB_PSEUDO_LEN);
+ __skb_queue_tail(&path->queue, skb);
+ } else {
+ ++dev->stats.tx_dropped;
+@@ -1026,13 +1033,15 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_neigh *neigh;
+- struct ipoib_cb *cb = ipoib_skb_cb(skb);
++ struct ipoib_pseudo_header *phdr;
+ struct ipoib_header *header;
+ unsigned long flags;
+
++ phdr = (struct ipoib_pseudo_header *) skb->data;
++ skb_pull(skb, sizeof(*phdr));
+ header = (struct ipoib_header *) skb->data;
+
+- if (unlikely(cb->hwaddr[4] == 0xff)) {
++ if (unlikely(phdr->hwaddr[4] == 0xff)) {
+ /* multicast, arrange "if" according to probability */
+ if ((header->proto != htons(ETH_P_IP)) &&
+ (header->proto != htons(ETH_P_IPV6)) &&
+@@ -1045,13 +1054,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ return NETDEV_TX_OK;
+ }
+ /* Add in the P_Key for multicast*/
+- cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+- cb->hwaddr[9] = priv->pkey & 0xff;
++ phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
++ phdr->hwaddr[9] = priv->pkey & 0xff;
+
+- neigh = ipoib_neigh_get(dev, cb->hwaddr);
++ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
+ if (likely(neigh))
+ goto send_using_neigh;
+- ipoib_mcast_send(dev, cb->hwaddr, skb);
++ ipoib_mcast_send(dev, phdr->hwaddr, skb);
+ return NETDEV_TX_OK;
+ }
+
+@@ -1060,16 +1069,16 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ case htons(ETH_P_TIPC):
+- neigh = ipoib_neigh_get(dev, cb->hwaddr);
++ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
+ if (unlikely(!neigh)) {
+- neigh_add_path(skb, cb->hwaddr, dev);
++ neigh_add_path(skb, phdr->hwaddr, dev);
+ return NETDEV_TX_OK;
+ }
+ break;
+ case htons(ETH_P_ARP):
+ case htons(ETH_P_RARP):
+ /* for unicast ARP and RARP should always perform path find */
+- unicast_arp_send(skb, dev, cb);
++ unicast_arp_send(skb, dev, phdr);
+ return NETDEV_TX_OK;
+ default:
+ /* ethertype not supported by IPoIB */
+@@ -1086,11 +1095,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ goto unref;
+ }
+ } else if (neigh->ah) {
+- ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
++ ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(phdr->hwaddr));
+ goto unref;
+ }
+
+ if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
++ /* put pseudoheader back on for next time */
++ skb_push(skb, sizeof(*phdr));
+ spin_lock_irqsave(&priv->lock, flags);
+ __skb_queue_tail(&neigh->queue, skb);
+ spin_unlock_irqrestore(&priv->lock, flags);
+@@ -1122,8 +1133,8 @@ static int ipoib_hard_header(struct sk_buff *skb,
+ unsigned short type,
+ const void *daddr, const void *saddr, unsigned len)
+ {
++ struct ipoib_pseudo_header *phdr;
+ struct ipoib_header *header;
+- struct ipoib_cb *cb = ipoib_skb_cb(skb);
+
+ header = (struct ipoib_header *) skb_push(skb, sizeof *header);
+
+@@ -1132,12 +1143,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
+
+ /*
+ * we don't rely on dst_entry structure, always stuff the
+- * destination address into skb->cb so we can figure out where
++ * destination address into skb hard header so we can figure out where
+ * to send the packet later.
+ */
+- memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
++ phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
++ memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
+
+- return sizeof *header;
++ return IPOIB_HARD_LEN;
+ }
+
+ static void ipoib_set_mcast_list(struct net_device *dev)
+@@ -1759,7 +1771,7 @@ void ipoib_setup(struct net_device *dev)
+
+ dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
+
+- dev->hard_header_len = IPOIB_ENCAP_LEN;
++ dev->hard_header_len = IPOIB_HARD_LEN;
+ dev->addr_len = INFINIBAND_ALEN;
+ dev->type = ARPHRD_INFINIBAND;
+ dev->tx_queue_len = ipoib_sendq_size * 2;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+index d3394b6..1909dd2 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -796,9 +796,11 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
+ __ipoib_mcast_add(dev, mcast);
+ list_add_tail(&mcast->list, &priv->multicast_list);
+ }
+- if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
++ if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
++ /* put pseudoheader back on for next time */
++ skb_push(skb, sizeof(struct ipoib_pseudo_header));
+ skb_queue_tail(&mcast->pkt_queue, skb);
+- else {
++ } else {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ }
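Taken together, these IPoIB hunks move the destination hardware address out of skb->cb and into a 20-byte pseudo header stacked in front of the 4-byte wire header: ipoib_hard_header() pushes both, ipoib_start_xmit() pulls the pseudo header off, and every path that re-queues the skb pushes it back so the next transmit attempt sees the same layout. A toy model of that lifecycle, tracking offsets only (toy_skb is a made-up stand-in, not a real sk_buff):

#include <assert.h>

#define IPOIB_ENCAP_LEN   4
#define IPOIB_PSEUDO_LEN 20

struct toy_skb { unsigned int data; };  /* offset of skb->data into the buffer */

static void push(struct toy_skb *s, unsigned int len) { s->data -= len; }
static void pull(struct toy_skb *s, unsigned int len) { s->data += len; }

int main(void)
{
	struct toy_skb skb = { .data = 64 };  /* headroom left by the stack */
	unsigned int start = skb.data;

	push(&skb, IPOIB_ENCAP_LEN);   /* ipoib_hard_header: wire header   */
	push(&skb, IPOIB_PSEUDO_LEN);  /* ipoib_hard_header: pseudo header */
	pull(&skb, IPOIB_PSEUDO_LEN);  /* ipoib_start_xmit consumes it     */
	push(&skb, IPOIB_PSEUDO_LEN);  /* re-queue path puts it back       */

	/* the queued skb again starts at the pseudo header */
	assert(skb.data == start - IPOIB_ENCAP_LEN - IPOIB_PSEUDO_LEN);
	return 0;
}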
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 692ee24..3474de5 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -913,13 +913,11 @@ fec_restart(struct net_device *ndev)
+ * enet-mac reset will reset mac address registers too,
+ * so need to reconfigure it.
+ */
+- if (fep->quirks & FEC_QUIRK_ENET_MAC) {
+- memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+- writel((__force u32)cpu_to_be32(temp_mac[0]),
+- fep->hwp + FEC_ADDR_LOW);
+- writel((__force u32)cpu_to_be32(temp_mac[1]),
+- fep->hwp + FEC_ADDR_HIGH);
+- }
++ memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
++ writel((__force u32)cpu_to_be32(temp_mac[0]),
++ fep->hwp + FEC_ADDR_LOW);
++ writel((__force u32)cpu_to_be32(temp_mac[1]),
++ fep->hwp + FEC_ADDR_HIGH);
+
+ /* Clear any outstanding interrupt. */
+ writel(0xffffffff, fep->hwp + FEC_IEVENT);
+@@ -1432,14 +1430,14 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+ skb_put(skb, pkt_len - 4);
+ data = skb->data;
+
++ if (!is_copybreak && need_swap)
++ swap_buffer(data, pkt_len);
++
+ #if !defined(CONFIG_M5272)
+ if (fep->quirks & FEC_QUIRK_HAS_RACC)
+ data = skb_pull_inline(skb, 2);
+ #endif
+
+- if (!is_copybreak && need_swap)
+- swap_buffer(data, pkt_len);
+-
+ /* Extract the enhanced buffer descriptor */
+ ebdp = NULL;
+ if (fep->bufdesc_ex)
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+index 132cea6..e3be7e4 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+@@ -127,7 +127,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+ /* For TX we use the same irq per
+ ring we assigned for the RX */
+ struct mlx4_en_cq *rx_cq;
+-
++ int xdp_index;
++
++ /* The xdp tx irq must align with the rx ring that forwards to
++ * it, so reindex these from 0. This should only happen when
++ * tx_ring_num is not a multiple of rx_ring_num.
++ */
++ xdp_index = (priv->xdp_ring_num - priv->tx_ring_num) + cq_idx;
++ if (xdp_index >= 0)
++ cq_idx = xdp_index;
+ cq_idx = cq_idx % priv->rx_ring_num;
+ rx_cq = priv->rx_cq[cq_idx];
+ cq->vector = rx_cq->vector;
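The reindexing in the mlx4 hunk above is easier to see with concrete numbers. An illustrative sketch with made-up ring counts (the helper name is hypothetical; in mlx4 the XDP TX rings occupy the tail of the TX range):

#include <stdio.h>

/* Mirrors the hunk above: rebase XDP TX completion queues to 0 so each
 * one shares the IRQ of the RX ring that forwards into it. */
static int tx_cq_to_rx_ring(int cq_idx, int tx_ring_num,
			    int xdp_ring_num, int rx_ring_num)
{
	int xdp_index = (xdp_ring_num - tx_ring_num) + cq_idx;

	if (xdp_index >= 0)        /* this CQ belongs to an XDP TX ring */
		cq_idx = xdp_index;
	return cq_idx % rx_ring_num;
}

int main(void)
{
	/* e.g. 16 TX rings, the last 8 of them XDP, and 8 RX rings:
	 * TX CQ 10 is XDP ring 2, so it maps to RX ring 2. */
	printf("%d\n", tx_cq_to_rx_ring(10, 16, 8, 8));
	return 0;
}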
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 3c20e87..16af1ce 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -453,7 +453,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
+
+ skb_gro_pull(skb, gh_len);
+ skb_gro_postpull_rcsum(skb, gh, gh_len);
+- pp = ptype->callbacks.gro_receive(head, skb);
++ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+ flush = 0;
+
+ out_unlock:
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 3ba29fc..c4d9653 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -624,15 +624,18 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
+ packet->total_data_buflen);
+
+ skb->protocol = eth_type_trans(skb, net);
+- if (csum_info) {
+- /* We only look at the IP checksum here.
+- * Should we be dropping the packet if checksum
+- * failed? How do we deal with other checksums - TCP/UDP?
+- */
+- if (csum_info->receive.ip_checksum_succeeded)
++
++ /* skb is already created with CHECKSUM_NONE */
++ skb_checksum_none_assert(skb);
++
++ /*
++ * In Linux, the IP checksum is always checked.
++ * Do L4 checksum offload if enabled and present.
++ */
++ if (csum_info && (net->features & NETIF_F_RXCSUM)) {
++ if (csum_info->receive.tcp_checksum_succeeded ||
++ csum_info->receive.udp_checksum_succeeded)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+- else
+- skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ if (vlan_tci & VLAN_TAG_PRESENT)
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 351e701..b72ddc6 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -397,6 +397,14 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
+ #define DEFAULT_ENCRYPT false
+ #define DEFAULT_ENCODING_SA 0
+
++static bool send_sci(const struct macsec_secy *secy)
++{
++ const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
++
++ return tx_sc->send_sci ||
++ (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
++}
++
+ static sci_t make_sci(u8 *addr, __be16 port)
+ {
+ sci_t sci;
+@@ -437,15 +445,15 @@ static unsigned int macsec_extra_len(bool sci_present)
+
+ /* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
+ static void macsec_fill_sectag(struct macsec_eth_header *h,
+- const struct macsec_secy *secy, u32 pn)
++ const struct macsec_secy *secy, u32 pn,
++ bool sci_present)
+ {
+ const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+
+- memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci));
++ memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
+ h->eth.h_proto = htons(ETH_P_MACSEC);
+
+- if (tx_sc->send_sci ||
+- (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) {
++ if (sci_present) {
+ h->tci_an |= MACSEC_TCI_SC;
+ memcpy(&h->secure_channel_id, &secy->sci,
+ sizeof(h->secure_channel_id));
+@@ -650,6 +658,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
+ struct macsec_tx_sc *tx_sc;
+ struct macsec_tx_sa *tx_sa;
+ struct macsec_dev *macsec = macsec_priv(dev);
++ bool sci_present;
+ u32 pn;
+
+ secy = &macsec->secy;
+@@ -687,7 +696,8 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
+
+ unprotected_len = skb->len;
+ eth = eth_hdr(skb);
+- hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci));
++ sci_present = send_sci(secy);
++ hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present));
+ memmove(hh, eth, 2 * ETH_ALEN);
+
+ pn = tx_sa_update_pn(tx_sa, secy);
+@@ -696,7 +706,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
+ kfree_skb(skb);
+ return ERR_PTR(-ENOLINK);
+ }
+- macsec_fill_sectag(hh, secy, pn);
++ macsec_fill_sectag(hh, secy, pn, sci_present);
+ macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
+
+ skb_put(skb, secy->icv_len);
+@@ -726,10 +736,10 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
+ skb_to_sgvec(skb, sg, 0, skb->len);
+
+ if (tx_sc->encrypt) {
+- int len = skb->len - macsec_hdr_len(tx_sc->send_sci) -
++ int len = skb->len - macsec_hdr_len(sci_present) -
+ secy->icv_len;
+ aead_request_set_crypt(req, sg, sg, len, iv);
+- aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci));
++ aead_request_set_ad(req, macsec_hdr_len(sci_present));
+ } else {
+ aead_request_set_crypt(req, sg, sg, 0, iv);
+ aead_request_set_ad(req, skb->len - secy->icv_len);
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index c6f6683..f424b86 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -608,6 +608,21 @@ void phy_start_machine(struct phy_device *phydev)
+ }
+
+ /**
++ * phy_trigger_machine - trigger the state machine to run
++ *
++ * @phydev: the phy_device struct
++ *
++ * Description: There has been a change in state which requires that the
++ * state machine runs.
++ */
++
++static void phy_trigger_machine(struct phy_device *phydev)
++{
++ cancel_delayed_work_sync(&phydev->state_queue);
++ queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
++}
++
++/**
+ * phy_stop_machine - stop the PHY state machine tracking
+ * @phydev: target phy_device struct
+ *
+@@ -639,6 +654,8 @@ static void phy_error(struct phy_device *phydev)
+ mutex_lock(&phydev->lock);
+ phydev->state = PHY_HALTED;
+ mutex_unlock(&phydev->lock);
++
++ phy_trigger_machine(phydev);
+ }
+
+ /**
+@@ -800,8 +817,7 @@ void phy_change(struct work_struct *work)
+ }
+
+ /* reschedule state queue work to run as soon as possible */
+- cancel_delayed_work_sync(&phydev->state_queue);
+- queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
++ phy_trigger_machine(phydev);
+ return;
+
+ ignore:
+@@ -890,6 +906,8 @@ void phy_start(struct phy_device *phydev)
+ /* if phy was suspended, bring the physical link up again */
+ if (do_resume)
+ phy_resume(phydev);
++
++ phy_trigger_machine(phydev);
+ }
+ EXPORT_SYMBOL(phy_start);
+
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 6e65832..5ae664c 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -584,7 +584,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
+ }
+ }
+
+- pp = eth_gro_receive(head, skb);
++ pp = call_gro_receive(eth_gro_receive, head, skb);
+ flush = 0;
+
+ out:
+diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
+index d637c93..58a97d4 100644
+--- a/drivers/ptp/ptp_chardev.c
++++ b/drivers/ptp/ptp_chardev.c
+@@ -193,6 +193,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
+ if (err)
+ break;
+
++ memset(&precise_offset, 0, sizeof(precise_offset));
+ ts = ktime_to_timespec64(xtstamp.device);
+ precise_offset.device.sec = ts.tv_sec;
+ precise_offset.device.nsec = ts.tv_nsec;
+diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
+index ca86c88..3aaea71 100644
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -2233,7 +2233,7 @@ struct megasas_instance_template {
+ };
+
+ #define MEGASAS_IS_LOGICAL(scp) \
+- (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
++ ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
+
+ #define MEGASAS_DEV_INDEX(scp) \
+ (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index c1ed25a..71e4899 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -1713,16 +1713,13 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+ goto out_done;
+ }
+
+- switch (scmd->cmnd[0]) {
+- case SYNCHRONIZE_CACHE:
+- /*
+- * FW takes care of flush cache on its own
+- * No need to send it down
+- */
++ /*
++ * FW takes care of flush cache on its own for Virtual Disk.
++ * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
++ */
++ if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
+ scmd->result = DID_OK << 16;
+ goto out_done;
+- default:
+- break;
+ }
+
+ return instance->instancet->build_and_issue_cmd(instance, scmd);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 6443cfb..dc3b596 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -789,6 +789,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
+ req->trb = trb;
+ req->trb_dma = dwc3_trb_dma_offset(dep, trb);
+ req->first_trb_index = dep->trb_enqueue;
++ dep->queued_requests++;
+ }
+
+ dwc3_ep_inc_enq(dep);
+@@ -841,8 +842,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
+
+ trb->ctrl |= DWC3_TRB_CTRL_HWO;
+
+- dep->queued_requests++;
+-
+ trace_dwc3_prepare_trb(dep, trb);
+ }
+
+@@ -1963,7 +1962,9 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ unsigned int s_pkt = 0;
+ unsigned int trb_status;
+
+- dep->queued_requests--;
++ if (req->trb == trb)
++ dep->queued_requests--;
++
+ trace_dwc3_complete_trb(dep, trb);
+
+ /*
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index e8d79d4..e942c67 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2154,7 +2154,10 @@ struct napi_gro_cb {
+ /* Used to determine if flush_id can be ignored */
+ u8 is_atomic:1;
+
+- /* 5 bit hole */
++ /* Number of gro_receive callbacks this packet already went through */
++ u8 recursion_counter:4;
++
++ /* 1 bit hole */
+
+ /* used to support CHECKSUM_COMPLETE for tunneling protocols */
+ __wsum csum;
+@@ -2165,6 +2168,40 @@ struct napi_gro_cb {
+
+ #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
+
++#define GRO_RECURSION_LIMIT 15
++static inline int gro_recursion_inc_test(struct sk_buff *skb)
++{
++ return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
++}
++
++typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
++static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
++ struct sk_buff **head,
++ struct sk_buff *skb)
++{
++ if (unlikely(gro_recursion_inc_test(skb))) {
++ NAPI_GRO_CB(skb)->flush |= 1;
++ return NULL;
++ }
++
++ return cb(head, skb);
++}
++
++typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
++ struct sk_buff *);
++static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
++ struct sock *sk,
++ struct sk_buff **head,
++ struct sk_buff *skb)
++{
++ if (unlikely(gro_recursion_inc_test(skb))) {
++ NAPI_GRO_CB(skb)->flush |= 1;
++ return NULL;
++ }
++
++ return cb(sk, head, skb);
++}
++
+ struct packet_type {
+ __be16 type; /* This is really htons(ether_type). */
+ struct net_device *dev; /* NULL is wildcarded here */
+@@ -3862,7 +3899,7 @@ struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
+ ldev = netdev_all_lower_get_next(dev, &(iter)))
+
+ #define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
+- for (iter = (dev)->all_adj_list.lower.next, \
++ for (iter = &(dev)->all_adj_list.lower, \
+ ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
+ ldev; \
+ ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
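These call_gro_receive() wrappers are what the geneve, vxlan, 802.1q, fou/gue, GRE, inet, ipv6 and UDP offload hunks elsewhere in this patch switch their gro_receive calls to: without the counter, a crafted stack of encapsulations could recurse through the handlers without bound. A userspace model of the guard (names mirror the helpers above, but this is an illustration, not kernel code):

#include <stdio.h>

#define GRO_RECURSION_LIMIT 15

struct cb { unsigned int recursion_counter; unsigned int flush; };

static int gro_recursion_inc_test(struct cb *cb)
{
	return ++cb->recursion_counter == GRO_RECURSION_LIMIT;
}

/* Stands in for call_gro_receive() dispatching to the next handler. */
static void tunnel_gro_receive(struct cb *cb, int nesting)
{
	if (gro_recursion_inc_test(cb)) {
		cb->flush = 1;  /* give up on aggregating this packet */
		return;
	}
	if (nesting > 0)
		tunnel_gro_receive(cb, nesting - 1);  /* inner encap layer */
}

int main(void)
{
	struct cb cb = { 0, 0 };

	tunnel_gro_receive(&cb, 100);  /* maliciously deep encapsulation */
	printf("flush=%u after %u handler calls\n", cb.flush, cb.recursion_counter);
	return 0;
}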
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 9742b92..156b0c1 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -549,7 +549,7 @@ int ip_options_rcv_srr(struct sk_buff *skb);
+ */
+
+ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
+-void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
++void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset);
+ int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
+ struct ipcm_cookie *ipc, bool allow_ipv6);
+ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
+@@ -571,7 +571,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
+
+ static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
+ {
+- ip_cmsg_recv_offset(msg, skb, 0);
++ ip_cmsg_recv_offset(msg, skb, 0, 0);
+ }
+
+ bool icmp_global_allow(void);
+diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
+index d97305d..0a2d270 100644
+--- a/include/net/ip6_route.h
++++ b/include/net/ip6_route.h
+@@ -32,6 +32,7 @@ struct route_info {
+ #define RT6_LOOKUP_F_SRCPREF_TMP 0x00000008
+ #define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010
+ #define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
++#define RT6_LOOKUP_F_IGNORE_LINKSTATE 0x00000040
+
+ /* We do not (yet ?) support IPv6 jumbograms (RFC 2675)
+ * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header
+diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
+index 262f037..5a78be5 100644
+--- a/include/uapi/linux/rtnetlink.h
++++ b/include/uapi/linux/rtnetlink.h
+@@ -350,7 +350,7 @@ struct rtnexthop {
+ #define RTNH_F_OFFLOAD 8 /* offloaded route */
+ #define RTNH_F_LINKDOWN 16 /* carrier-down on nexthop */
+
+-#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN)
++#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD)
+
+ /* Macros to handle hexthops */
+
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index 8de138d..f2531ad 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -664,7 +664,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
+
+ skb_gro_pull(skb, sizeof(*vhdr));
+ skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
+- pp = ptype->callbacks.gro_receive(head, skb);
++ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+
+ out_unlock:
+ rcu_read_unlock();
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index c5fea93..2136e45 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -972,13 +972,12 @@ static void br_multicast_enable(struct bridge_mcast_own_query *query)
+ mod_timer(&query->timer, jiffies);
+ }
+
+-void br_multicast_enable_port(struct net_bridge_port *port)
++static void __br_multicast_enable_port(struct net_bridge_port *port)
+ {
+ struct net_bridge *br = port->br;
+
+- spin_lock(&br->multicast_lock);
+ if (br->multicast_disabled || !netif_running(br->dev))
+- goto out;
++ return;
+
+ br_multicast_enable(&port->ip4_own_query);
+ #if IS_ENABLED(CONFIG_IPV6)
+@@ -987,8 +986,14 @@ void br_multicast_enable_port(struct net_bridge_port *port)
+ if (port->multicast_router == MDB_RTR_TYPE_PERM &&
+ hlist_unhashed(&port->rlist))
+ br_multicast_add_router(br, port);
++}
+
+-out:
++void br_multicast_enable_port(struct net_bridge_port *port)
++{
++ struct net_bridge *br = port->br;
++
++ spin_lock(&br->multicast_lock);
++ __br_multicast_enable_port(port);
+ spin_unlock(&br->multicast_lock);
+ }
+
+@@ -1994,8 +1999,9 @@ static void br_multicast_start_querier(struct net_bridge *br,
+
+ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
+ {
+- int err = 0;
+ struct net_bridge_mdb_htable *mdb;
++ struct net_bridge_port *port;
++ int err = 0;
+
+ spin_lock_bh(&br->multicast_lock);
+ if (br->multicast_disabled == !val)
+@@ -2023,10 +2029,9 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
+ goto rollback;
+ }
+
+- br_multicast_start_querier(br, &br->ip4_own_query);
+-#if IS_ENABLED(CONFIG_IPV6)
+- br_multicast_start_querier(br, &br->ip6_own_query);
+-#endif
++ br_multicast_open(br);
++ list_for_each_entry(port, &br->port_list, list)
++ __br_multicast_enable_port(port);
+
+ unlock:
+ spin_unlock_bh(&br->multicast_lock);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index ea63120..44b3ba4 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3035,6 +3035,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
+ }
+ return head;
+ }
++EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
+
+ static void qdisc_pkt_len_init(struct sk_buff *skb)
+ {
+@@ -4496,6 +4497,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
+ NAPI_GRO_CB(skb)->flush = 0;
+ NAPI_GRO_CB(skb)->free = 0;
+ NAPI_GRO_CB(skb)->encap_mark = 0;
++ NAPI_GRO_CB(skb)->recursion_counter = 0;
+ NAPI_GRO_CB(skb)->is_fou = 0;
+ NAPI_GRO_CB(skb)->is_atomic = 1;
+ NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
+@@ -5500,10 +5502,14 @@ struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
+ {
+ struct netdev_adjacent *lower;
+
+- lower = list_first_or_null_rcu(&dev->all_adj_list.lower,
+- struct netdev_adjacent, list);
++ lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
++
++ if (&lower->list == &dev->all_adj_list.lower)
++ return NULL;
++
++ *iter = &lower->list;
+
+- return lower ? lower->dev : NULL;
++ return lower->dev;
+ }
+ EXPORT_SYMBOL(netdev_all_lower_get_next_rcu);
+
+@@ -5578,6 +5584,7 @@ static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
+
+ static int __netdev_adjacent_dev_insert(struct net_device *dev,
+ struct net_device *adj_dev,
++ u16 ref_nr,
+ struct list_head *dev_list,
+ void *private, bool master)
+ {
+@@ -5587,7 +5594,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
+ adj = __netdev_find_adj(adj_dev, dev_list);
+
+ if (adj) {
+- adj->ref_nr++;
++ adj->ref_nr += ref_nr;
+ return 0;
+ }
+
+@@ -5597,7 +5604,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
+
+ adj->dev = adj_dev;
+ adj->master = master;
+- adj->ref_nr = 1;
++ adj->ref_nr = ref_nr;
+ adj->private = private;
+ dev_hold(adj_dev);
+
+@@ -5636,6 +5643,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
+
+ static void __netdev_adjacent_dev_remove(struct net_device *dev,
+ struct net_device *adj_dev,
++ u16 ref_nr,
+ struct list_head *dev_list)
+ {
+ struct netdev_adjacent *adj;
+@@ -5648,10 +5656,10 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
+ BUG();
+ }
+
+- if (adj->ref_nr > 1) {
+- pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
+- adj->ref_nr-1);
+- adj->ref_nr--;
++ if (adj->ref_nr > ref_nr) {
++ pr_debug("%s to %s ref_nr-%d = %d\n", dev->name, adj_dev->name,
++ ref_nr, adj->ref_nr-ref_nr);
++ adj->ref_nr -= ref_nr;
+ return;
+ }
+
+@@ -5670,21 +5678,22 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
+
+ static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
+ struct net_device *upper_dev,
++ u16 ref_nr,
+ struct list_head *up_list,
+ struct list_head *down_list,
+ void *private, bool master)
+ {
+ int ret;
+
+- ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
+- master);
++ ret = __netdev_adjacent_dev_insert(dev, upper_dev, ref_nr, up_list,
++ private, master);
+ if (ret)
+ return ret;
+
+- ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
+- false);
++ ret = __netdev_adjacent_dev_insert(upper_dev, dev, ref_nr, down_list,
++ private, false);
+ if (ret) {
+- __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
++ __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
+ return ret;
+ }
+
+@@ -5692,9 +5701,10 @@ static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
+ }
+
+ static int __netdev_adjacent_dev_link(struct net_device *dev,
+- struct net_device *upper_dev)
++ struct net_device *upper_dev,
++ u16 ref_nr)
+ {
+- return __netdev_adjacent_dev_link_lists(dev, upper_dev,
++ return __netdev_adjacent_dev_link_lists(dev, upper_dev, ref_nr,
+ &dev->all_adj_list.upper,
+ &upper_dev->all_adj_list.lower,
+ NULL, false);
+@@ -5702,17 +5712,19 @@ static int __netdev_adjacent_dev_link(struct net_device *dev,
+
+ static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
+ struct net_device *upper_dev,
++ u16 ref_nr,
+ struct list_head *up_list,
+ struct list_head *down_list)
+ {
+- __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
+- __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
++ __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
++ __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
+ }
+
+ static void __netdev_adjacent_dev_unlink(struct net_device *dev,
+- struct net_device *upper_dev)
++ struct net_device *upper_dev,
++ u16 ref_nr)
+ {
+- __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
++ __netdev_adjacent_dev_unlink_lists(dev, upper_dev, ref_nr,
+ &dev->all_adj_list.upper,
+ &upper_dev->all_adj_list.lower);
+ }
+@@ -5721,17 +5733,17 @@ static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
+ struct net_device *upper_dev,
+ void *private, bool master)
+ {
+- int ret = __netdev_adjacent_dev_link(dev, upper_dev);
++ int ret = __netdev_adjacent_dev_link(dev, upper_dev, 1);
+
+ if (ret)
+ return ret;
+
+- ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
++ ret = __netdev_adjacent_dev_link_lists(dev, upper_dev, 1,
+ &dev->adj_list.upper,
+ &upper_dev->adj_list.lower,
+ private, master);
+ if (ret) {
+- __netdev_adjacent_dev_unlink(dev, upper_dev);
++ __netdev_adjacent_dev_unlink(dev, upper_dev, 1);
+ return ret;
+ }
+
+@@ -5741,8 +5753,8 @@ static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
+ static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
+ struct net_device *upper_dev)
+ {
+- __netdev_adjacent_dev_unlink(dev, upper_dev);
+- __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
++ __netdev_adjacent_dev_unlink(dev, upper_dev, 1);
++ __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
+ &dev->adj_list.upper,
+ &upper_dev->adj_list.lower);
+ }
+@@ -5795,7 +5807,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
+ list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
+ pr_debug("Interlinking %s with %s, non-neighbour\n",
+ i->dev->name, j->dev->name);
+- ret = __netdev_adjacent_dev_link(i->dev, j->dev);
++ ret = __netdev_adjacent_dev_link(i->dev, j->dev, i->ref_nr);
+ if (ret)
+ goto rollback_mesh;
+ }
+@@ -5805,7 +5817,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
+ list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
+ pr_debug("linking %s's upper device %s with %s\n",
+ upper_dev->name, i->dev->name, dev->name);
+- ret = __netdev_adjacent_dev_link(dev, i->dev);
++ ret = __netdev_adjacent_dev_link(dev, i->dev, i->ref_nr);
+ if (ret)
+ goto rollback_upper_mesh;
+ }
+@@ -5814,7 +5826,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
+ list_for_each_entry(i, &dev->all_adj_list.lower, list) {
+ pr_debug("linking %s's lower device %s with %s\n", dev->name,
+ i->dev->name, upper_dev->name);
+- ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
++ ret = __netdev_adjacent_dev_link(i->dev, upper_dev, i->ref_nr);
+ if (ret)
+ goto rollback_lower_mesh;
+ }
+@@ -5832,7 +5844,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
+ list_for_each_entry(i, &dev->all_adj_list.lower, list) {
+ if (i == to_i)
+ break;
+- __netdev_adjacent_dev_unlink(i->dev, upper_dev);
++ __netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
+ }
+
+ i = NULL;
+@@ -5842,7 +5854,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
+ list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
+ if (i == to_i)
+ break;
+- __netdev_adjacent_dev_unlink(dev, i->dev);
++ __netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
+ }
+
+ i = j = NULL;
+@@ -5854,7 +5866,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
+ list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
+ if (i == to_i && j == to_j)
+ break;
+- __netdev_adjacent_dev_unlink(i->dev, j->dev);
++ __netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
+ }
+ if (i == to_i)
+ break;
+@@ -5934,16 +5946,16 @@ void netdev_upper_dev_unlink(struct net_device *dev,
+ */
+ list_for_each_entry(i, &dev->all_adj_list.lower, list)
+ list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
+- __netdev_adjacent_dev_unlink(i->dev, j->dev);
++ __netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
+
+ /* remove also the devices itself from lower/upper device
+ * list
+ */
+ list_for_each_entry(i, &dev->all_adj_list.lower, list)
+- __netdev_adjacent_dev_unlink(i->dev, upper_dev);
++ __netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
+
+ list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
+- __netdev_adjacent_dev_unlink(dev, i->dev);
++ __netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
+
+ call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
+ &changeupper_info.info);
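The ref_nr plumbing in the net/core/dev.c hunks above fixes unbalanced adjacency accounting: a link created through an intermediate device can represent several paths at once, so unlinking must drop the same number of references it added rather than always one. A toy model (illustrative only; struct adj stands in for struct netdev_adjacent):

#include <stdio.h>

struct adj { int ref_nr; };

static void adj_insert(struct adj *a, int ref_nr) { a->ref_nr += ref_nr; }

static int adj_remove(struct adj *a, int ref_nr)
{
	if (a->ref_nr > ref_nr) {
		a->ref_nr -= ref_nr;  /* still referenced via other paths */
		return 0;
	}
	a->ref_nr = 0;  /* last reference: destroy the adjacency */
	return 1;
}

int main(void)
{
	struct adj a = { 0 };

	adj_insert(&a, 3);  /* e.g. a lower device reachable through 3 paths */
	/* the old code passed 1 here and leaked two references */
	printf("destroyed=%d\n", adj_remove(&a, 3));
	return 0;
}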
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index bbd118b..306b8f0 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -216,8 +216,8 @@
+ #define M_QUEUE_XMIT 2 /* Inject packet into qdisc */
+
+ /* If lock -- protects updating of if_list */
+-#define if_lock(t) spin_lock(&(t->if_lock));
+-#define if_unlock(t) spin_unlock(&(t->if_lock));
++#define if_lock(t) mutex_lock(&(t->if_lock));
++#define if_unlock(t) mutex_unlock(&(t->if_lock));
+
+ /* Used to help with determining the pkts on receive */
+ #define PKTGEN_MAGIC 0xbe9be955
+@@ -423,7 +423,7 @@ struct pktgen_net {
+ };
+
+ struct pktgen_thread {
+- spinlock_t if_lock; /* for list of devices */
++ struct mutex if_lock; /* for list of devices */
+ struct list_head if_list; /* All device here */
+ struct list_head th_list;
+ struct task_struct *tsk;
+@@ -2010,11 +2010,13 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
+ {
+ struct pktgen_thread *t;
+
++ mutex_lock(&pktgen_thread_lock);
++
+ list_for_each_entry(t, &pn->pktgen_threads, th_list) {
+ struct pktgen_dev *pkt_dev;
+
+- rcu_read_lock();
+- list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
++ if_lock(t);
++ list_for_each_entry(pkt_dev, &t->if_list, list) {
+ if (pkt_dev->odev != dev)
+ continue;
+
+@@ -2029,8 +2031,9 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
+ dev->name);
+ break;
+ }
+- rcu_read_unlock();
++ if_unlock(t);
+ }
++ mutex_unlock(&pktgen_thread_lock);
+ }
+
+ static int pktgen_device_event(struct notifier_block *unused,
+@@ -2286,7 +2289,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
+
+ static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
+ {
+- pkt_dev->pkt_overhead = LL_RESERVED_SPACE(pkt_dev->odev);
++ pkt_dev->pkt_overhead = 0;
+ pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
+ pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
+ pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
+@@ -2777,13 +2780,13 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
+ }
+
+ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
+- struct pktgen_dev *pkt_dev,
+- unsigned int extralen)
++ struct pktgen_dev *pkt_dev)
+ {
++ unsigned int extralen = LL_RESERVED_SPACE(dev);
+ struct sk_buff *skb = NULL;
+- unsigned int size = pkt_dev->cur_pkt_size + 64 + extralen +
+- pkt_dev->pkt_overhead;
++ unsigned int size;
+
++ size = pkt_dev->cur_pkt_size + 64 + extralen + pkt_dev->pkt_overhead;
+ if (pkt_dev->flags & F_NODE) {
+ int node = pkt_dev->node >= 0 ? pkt_dev->node : numa_node_id();
+
+@@ -2796,8 +2799,9 @@ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
+ skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
+ }
+
++ /* the caller pre-fetches from skb->data and reserves for the mac hdr */
+ if (likely(skb))
+- skb_reserve(skb, LL_RESERVED_SPACE(dev));
++ skb_reserve(skb, extralen - 16);
+
+ return skb;
+ }
+@@ -2830,16 +2834,14 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
+ mod_cur_headers(pkt_dev);
+ queue_map = pkt_dev->cur_queue_map;
+
+- datalen = (odev->hard_header_len + 16) & ~0xf;
+-
+- skb = pktgen_alloc_skb(odev, pkt_dev, datalen);
++ skb = pktgen_alloc_skb(odev, pkt_dev);
+ if (!skb) {
+ sprintf(pkt_dev->result, "No memory");
+ return NULL;
+ }
+
+ prefetchw(skb->data);
+- skb_reserve(skb, datalen);
++ skb_reserve(skb, 16);
+
+ /* Reserve for ethernet and IP header */
+ eth = (__u8 *) skb_push(skb, 14);
+@@ -2959,7 +2961,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
+ mod_cur_headers(pkt_dev);
+ queue_map = pkt_dev->cur_queue_map;
+
+- skb = pktgen_alloc_skb(odev, pkt_dev, 16);
++ skb = pktgen_alloc_skb(odev, pkt_dev);
+ if (!skb) {
+ sprintf(pkt_dev->result, "No memory");
+ return NULL;
+@@ -3763,7 +3765,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
+ return -ENOMEM;
+ }
+
+- spin_lock_init(&t->if_lock);
++ mutex_init(&t->if_lock);
+ t->cpu = cpu;
+
+ INIT_LIST_HEAD(&t->if_list);
+diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
+index 66dff5e..02acfff 100644
+--- a/net/ethernet/eth.c
++++ b/net/ethernet/eth.c
+@@ -439,7 +439,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
+
+ skb_gro_pull(skb, sizeof(*eh));
+ skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
+- pp = ptype->callbacks.gro_receive(head, skb);
++ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+
+ out_unlock:
+ rcu_read_unlock();
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 55513e6..eebbc0f 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1388,7 +1388,7 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+ skb_gro_pull(skb, sizeof(*iph));
+ skb_set_transport_header(skb, skb_gro_offset(skb));
+
+- pp = ops->callbacks.gro_receive(head, skb);
++ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+
+ out_unlock:
+ rcu_read_unlock();
+diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
+index 321d57f..5351b61 100644
+--- a/net/ipv4/fou.c
++++ b/net/ipv4/fou.c
+@@ -249,7 +249,7 @@ static struct sk_buff **fou_gro_receive(struct sock *sk,
+ if (!ops || !ops->callbacks.gro_receive)
+ goto out_unlock;
+
+- pp = ops->callbacks.gro_receive(head, skb);
++ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+
+ out_unlock:
+ rcu_read_unlock();
+@@ -441,7 +441,7 @@ static struct sk_buff **gue_gro_receive(struct sock *sk,
+ if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
+ goto out_unlock;
+
+- pp = ops->callbacks.gro_receive(head, skb);
++ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+ flush = 0;
+
+ out_unlock:
+diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
+index ecd1e09..6871f59 100644
+--- a/net/ipv4/gre_offload.c
++++ b/net/ipv4/gre_offload.c
+@@ -227,7 +227,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
+ /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
+ skb_gro_postpull_rcsum(skb, greh, grehlen);
+
+- pp = ptype->callbacks.gro_receive(head, skb);
++ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+ flush = 0;
+
+ out_unlock:
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 71a52f4d..11ef96e 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -98,7 +98,7 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
+ }
+
+ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
+- int offset)
++ int tlen, int offset)
+ {
+ __wsum csum = skb->csum;
+
+@@ -106,8 +106,9 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
+ return;
+
+ if (offset != 0)
+- csum = csum_sub(csum, csum_partial(skb_transport_header(skb),
+- offset, 0));
++ csum = csum_sub(csum,
++ csum_partial(skb_transport_header(skb) + tlen,
++ offset, 0));
+
+ put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
+ }
+@@ -153,7 +154,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
+ }
+
+ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
+- int offset)
++ int tlen, int offset)
+ {
+ struct inet_sock *inet = inet_sk(skb->sk);
+ unsigned int flags = inet->cmsg_flags;
+@@ -216,7 +217,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
+ }
+
+ if (flags & IP_CMSG_CHECKSUM)
+- ip_cmsg_recv_checksum(msg, skb, offset);
++ ip_cmsg_recv_checksum(msg, skb, tlen, offset);
+ }
+ EXPORT_SYMBOL(ip_cmsg_recv_offset);
+
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 1cb67de..80bc36b 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -96,11 +96,11 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
+ container_of(table->data, struct net, ipv4.ping_group_range.range);
+ unsigned int seq;
+ do {
+- seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
++ seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
+
+ *low = data[0];
+ *high = data[1];
+- } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
++ } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
+ }
+
+ /* Update system visible IP port range */
+@@ -109,10 +109,10 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
+ kgid_t *data = table->data;
+ struct net *net =
+ container_of(table->data, struct net, ipv4.ping_group_range.range);
+- write_seqlock(&net->ipv4.ip_local_ports.lock);
++ write_seqlock(&net->ipv4.ping_group_range.lock);
+ data[0] = low;
+ data[1] = high;
+- write_sequnlock(&net->ipv4.ip_local_ports.lock);
++ write_sequnlock(&net->ipv4.ping_group_range.lock);
+ }
+
+ /* Validate changes from /proc interface. */
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 5fdcb8d..c0d71e7 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1327,7 +1327,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
+ *addr_len = sizeof(*sin);
+ }
+ if (inet->cmsg_flags)
+- ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr) + off);
++ ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off);
+
+ err = copied;
+ if (flags & MSG_TRUNC)
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 81f253b..6de9f97 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -293,7 +293,7 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
+
+ skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
+ skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
+- pp = udp_sk(sk)->gro_receive(sk, head, skb);
++ pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
+
+ out_unlock:
+ rcu_read_unlock();
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 2f1f5d4..f5432d6 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2995,7 +2995,7 @@ static void init_loopback(struct net_device *dev)
+ * lo device down, release this obsolete dst and
+ * reallocate a new router for ifa.
+ */
+- if (sp_ifa->rt->dst.obsolete > 0) {
++ if (!atomic_read(&sp_ifa->rt->rt6i_ref)) {
+ ip6_rt_put(sp_ifa->rt);
+ sp_ifa->rt = NULL;
+ } else {
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
+index 22e90e5..a09418b 100644
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -243,7 +243,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
+
+ skb_gro_postpull_rcsum(skb, iph, nlen);
+
+- pp = ops->callbacks.gro_receive(head, skb);
++ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+
+ out_unlock:
+ rcu_read_unlock();
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 888543d..41489f3 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -155,6 +155,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
+ hash = HASH(&any, local);
+ for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+ if (ipv6_addr_equal(local, &t->parms.laddr) &&
++ ipv6_addr_any(&t->parms.raddr) &&
+ (t->dev->flags & IFF_UP))
+ return t;
+ }
+@@ -162,6 +163,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
+ hash = HASH(remote, &any);
+ for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+ if (ipv6_addr_equal(remote, &t->parms.raddr) &&
++ ipv6_addr_any(&t->parms.laddr) &&
+ (t->dev->flags & IFF_UP))
+ return t;
+ }
+@@ -1132,6 +1134,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+ if (err)
+ return err;
+
++ skb->protocol = htons(ETH_P_IPV6);
+ skb_push(skb, sizeof(struct ipv6hdr));
+ skb_reset_network_header(skb);
+ ipv6h = ipv6_hdr(skb);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 269218a..23153ac 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -656,7 +656,8 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
+ struct net_device *dev = rt->dst.dev;
+
+ if (dev && !netif_carrier_ok(dev) &&
+- idev->cnf.ignore_routes_with_linkdown)
++ idev->cnf.ignore_routes_with_linkdown &&
++ !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
+ goto out;
+
+ if (rt6_check_expired(rt))
+@@ -1050,6 +1051,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
+ int strict = 0;
+
+ strict |= flags & RT6_LOOKUP_F_IFACE;
++ strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
+ if (net->ipv6.devconf_all->forwarding == 0)
+ strict |= RT6_LOOKUP_F_REACHABLE;
+
+@@ -1783,7 +1785,7 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net,
+ };
+ struct fib6_table *table;
+ struct rt6_info *rt;
+- int flags = RT6_LOOKUP_F_IFACE;
++ int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;
+
+ table = fib6_get_table(net, cfg->fc_table);
+ if (!table)
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 94f4f89..fc67822 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1193,6 +1193,16 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
+ return NULL;
+ }
+
++static void tcp_v6_restore_cb(struct sk_buff *skb)
++{
++ /* We need to move header back to the beginning if xfrm6_policy_check()
++ * and tcp_v6_fill_cb() are going to be called again.
++ * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
++ */
++ memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
++ sizeof(struct inet6_skb_parm));
++}
++
+ /* The socket must have it's spinlock held when we get
+ * here, unless it is a TCP_LISTEN socket.
+ *
+@@ -1322,6 +1332,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
+ np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
+ if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
+ skb_set_owner_r(opt_skb, sk);
++ tcp_v6_restore_cb(opt_skb);
+ opt_skb = xchg(&np->pktoptions, opt_skb);
+ } else {
+ __kfree_skb(opt_skb);
+@@ -1355,15 +1366,6 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
+ TCP_SKB_CB(skb)->sacked = 0;
+ }
+
+-static void tcp_v6_restore_cb(struct sk_buff *skb)
+-{
+- /* We need to move header back to the beginning if xfrm6_policy_check()
+- * and tcp_v6_fill_cb() are going to be called again.
+- */
+- memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+- sizeof(struct inet6_skb_parm));
+-}
+-
+ static int tcp_v6_rcv(struct sk_buff *skb)
+ {
+ const struct tcphdr *th;
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 19ac3a1..c2a8656 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -427,7 +427,8 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+
+ if (is_udp4) {
+ if (inet->cmsg_flags)
+- ip_cmsg_recv(msg, skb);
++ ip_cmsg_recv_offset(msg, skb,
++ sizeof(struct udphdr), off);
+ } else {
+ if (np->rxopt.all)
+ ip6_datagram_recv_specific_ctl(sk, msg, skb);
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 627f898..62bea45 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1832,7 +1832,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ /* Record the max length of recvmsg() calls for future allocations */
+ nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
+ nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
+- 16384);
++ SKB_WITH_OVERHEAD(32768));
+
+ copied = data_skb->len;
+ if (len < copied) {
+@@ -2083,8 +2083,9 @@ static int netlink_dump(struct sock *sk)
+
+ if (alloc_min_size < nlk->max_recvmsg_len) {
+ alloc_size = nlk->max_recvmsg_len;
+- skb = alloc_skb(alloc_size, GFP_KERNEL |
+- __GFP_NOWARN | __GFP_NORETRY);
++ skb = alloc_skb(alloc_size,
++ (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
++ __GFP_NOWARN | __GFP_NORETRY);
+ }
+ if (!skb) {
+ alloc_size = alloc_min_size;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 33a4697..d2238b2 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -250,7 +250,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
+ static int packet_direct_xmit(struct sk_buff *skb)
+ {
+ struct net_device *dev = skb->dev;
+- netdev_features_t features;
++ struct sk_buff *orig_skb = skb;
+ struct netdev_queue *txq;
+ int ret = NETDEV_TX_BUSY;
+
+@@ -258,9 +258,8 @@ static int packet_direct_xmit(struct sk_buff *skb)
+ !netif_carrier_ok(dev)))
+ goto drop;
+
+- features = netif_skb_features(skb);
+- if (skb_needs_linearize(skb, features) &&
+- __skb_linearize(skb))
++ skb = validate_xmit_skb_list(skb, dev);
++ if (skb != orig_skb)
+ goto drop;
+
+ txq = skb_get_tx_queue(dev, skb);
+@@ -280,7 +279,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
+ return ret;
+ drop:
+ atomic_long_inc(&dev->tx_dropped);
+- kfree_skb(skb);
++ kfree_skb_list(skb);
+ return NET_XMIT_DROP;
+ }
+
+@@ -3952,6 +3951,7 @@ static int packet_notifier(struct notifier_block *this,
+ }
+ if (msg == NETDEV_UNREGISTER) {
+ packet_cached_dev_reset(po);
++ fanout_release(sk);
+ po->ifindex = -1;
+ if (po->prot_hook.dev)
+ dev_put(po->prot_hook.dev);
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index d09d068..027ddf4 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -341,22 +341,25 @@ int tcf_register_action(struct tc_action_ops *act,
+ if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
+ return -EINVAL;
+
++ /* We have to register pernet ops before making the action ops visible,
++ * otherwise tcf_action_init_1() could get a partially initialized
++ * netns.
++ */
++ ret = register_pernet_subsys(ops);
++ if (ret)
++ return ret;
++
+ write_lock(&act_mod_lock);
+ list_for_each_entry(a, &act_base, head) {
+ if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
+ write_unlock(&act_mod_lock);
++ unregister_pernet_subsys(ops);
+ return -EEXIST;
+ }
+ }
+ list_add_tail(&act->head, &act_base);
+ write_unlock(&act_mod_lock);
+
+- ret = register_pernet_subsys(ops);
+- if (ret) {
+- tcf_unregister_action(act, ops);
+- return ret;
+- }
+-
+ return 0;
+ }
+ EXPORT_SYMBOL(tcf_register_action);
+@@ -367,8 +370,6 @@ int tcf_unregister_action(struct tc_action_ops *act,
+ struct tc_action_ops *a;
+ int err = -ENOENT;
+
+- unregister_pernet_subsys(ops);
+-
+ write_lock(&act_mod_lock);
+ list_for_each_entry(a, &act_base, head) {
+ if (a == act) {
+@@ -378,6 +379,8 @@ int tcf_unregister_action(struct tc_action_ops *act,
+ }
+ }
+ write_unlock(&act_mod_lock);
++ if (!err)
++ unregister_pernet_subsys(ops);
+ return err;
+ }
+ EXPORT_SYMBOL(tcf_unregister_action);
+diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
+index 691409d..4ffc6c1 100644
+--- a/net/sched/act_vlan.c
++++ b/net/sched/act_vlan.c
+@@ -36,6 +36,12 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
+ bstats_update(&v->tcf_bstats, skb);
+ action = v->tcf_action;
+
++ /* Ensure 'data' points at mac_header prior to calling vlan manipulating
++ * functions.
++ */
++ if (skb_at_tc_ingress(skb))
++ skb_push_rcsum(skb, skb->mac_len);
++
+ switch (v->tcfv_action) {
+ case TCA_VLAN_ACT_POP:
+ err = skb_vlan_pop(skb);
+@@ -57,6 +63,9 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
+ action = TC_ACT_SHOT;
+ v->tcf_qstats.drops++;
+ unlock:
++ if (skb_at_tc_ingress(skb))
++ skb_pull_rcsum(skb, skb->mac_len);
++
+ spin_unlock(&v->tcf_lock);
+ return action;
+ }
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index a7c5645..74bed5e 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -344,7 +344,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
+ if (err == 0) {
+ struct tcf_proto *next = rtnl_dereference(tp->next);
+
+- tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
++ tfilter_notify(net, skb, n, tp,
++ t->tcm_handle, RTM_DELTFILTER);
+ if (tcf_destroy(tp, false))
+ RCU_INIT_POINTER(*back, next);
+ }
+diff --git a/net/sctp/output.c b/net/sctp/output.c
+index 31b7bc3..8192990 100644
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -417,6 +417,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
+ __u8 has_data = 0;
+ int gso = 0;
+ int pktcount = 0;
++ int auth_len = 0;
+ struct dst_entry *dst;
+ unsigned char *auth = NULL; /* pointer to auth in skb data */
+
+@@ -505,7 +506,12 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
+ list_for_each_entry(chunk, &packet->chunk_list, list) {
+ int padded = WORD_ROUND(chunk->skb->len);
+
+- if (pkt_size + padded > tp->pathmtu)
++ if (chunk == packet->auth)
++ auth_len = padded;
++ else if (auth_len + padded + packet->overhead >
++ tp->pathmtu)
++ goto nomem;
++ else if (pkt_size + padded > tp->pathmtu)
+ break;
+ pkt_size += padded;
+ }
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index d88bb2b..920469e 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -3422,6 +3422,12 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
++ /* Report violation if chunk len overflows */
++ ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
++ if (ch_end > skb_tail_pointer(skb))
++ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
++ commands);
++
+ /* Now that we know we at least have a chunk header,
+ * do things that are type appropriate.
+ */
+@@ -3453,12 +3459,6 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
+ }
+ }
+
+- /* Report violation if chunk len overflows */
+- ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+- if (ch_end > skb_tail_pointer(skb))
+- return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+- commands);
+-
+ ch = (sctp_chunkhdr_t *) ch_end;
+ } while (ch_end < skb_tail_pointer(skb));
+
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 8ed2d99..baccbf3 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -4683,7 +4683,7 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
+ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
+ int __user *optlen)
+ {
+- if (len <= 0)
++ if (len == 0)
+ return -EINVAL;
+ if (len > sizeof(struct sctp_event_subscribe))
+ len = sizeof(struct sctp_event_subscribe);
+@@ -6426,6 +6426,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
+ if (get_user(len, optlen))
+ return -EFAULT;
+
++ if (len < 0)
++ return -EINVAL;
++
+ lock_sock(sk);
+
+ switch (optname) {
+diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
+index a5fc9dd..a56c5e6 100644
+--- a/net/switchdev/switchdev.c
++++ b/net/switchdev/switchdev.c
+@@ -774,6 +774,9 @@ int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
+ int err;
+
++ if (!netif_is_bridge_port(dev))
++ return -EOPNOTSUPP;
++
+ err = switchdev_port_attr_get(dev, &attr);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+@@ -929,6 +932,9 @@ int switchdev_port_bridge_setlink(struct net_device *dev,
+ struct nlattr *afspec;
+ int err = 0;
+
++ if (!netif_is_bridge_port(dev))
++ return -EOPNOTSUPP;
++
+ protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
+ IFLA_PROTINFO);
+ if (protinfo) {
+@@ -962,6 +968,9 @@ int switchdev_port_bridge_dellink(struct net_device *dev,
+ {
+ struct nlattr *afspec;
+
++ if (!netif_is_bridge_port(dev))
++ return -EOPNOTSUPP;
++
+ afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
+ IFLA_AF_SPEC);
+ if (afspec)