Diffstat (limited to '4.9.11/1010_linux-4.9.11.patch')
-rw-r--r--  4.9.11/1010_linux-4.9.11.patch  1893
1 file changed, 1893 insertions, 0 deletions
diff --git a/4.9.11/1010_linux-4.9.11.patch b/4.9.11/1010_linux-4.9.11.patch
new file mode 100644
index 0000000..59eb5c7
--- /dev/null
+++ b/4.9.11/1010_linux-4.9.11.patch
@@ -0,0 +1,1893 @@
+diff --git a/Makefile b/Makefile
+index d2fe757..18b0c5a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index ebb4e95..96d80df 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -236,7 +236,8 @@ void fpstate_init(union fpregs_state *state)
+ * it will #GP. Make sure it is replaced after the memset().
+ */
+ if (static_cpu_has(X86_FEATURE_XSAVES))
+- state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;
++ state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
++ xfeatures_mask;
+
+ if (static_cpu_has(X86_FEATURE_FXSR))
+ fpstate_init_fxstate(&state->fxsave);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index f2e8bed..4d3ddc2 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -507,8 +507,11 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
+ return;
+
+ for (ring = 0; ring < priv->rx_ring_num; ring++) {
+- if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
++ if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
++ local_bh_disable();
+ napi_reschedule(&priv->rx_cq[ring]->napi);
++ local_bh_enable();
++ }
+ }
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index 71382df..81d8e3b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -765,7 +765,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
+ int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
+
+ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
+-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
++void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
++ enum mlx5e_traffic_types tt);
+
+ int mlx5e_open_locked(struct net_device *netdev);
+ int mlx5e_close_locked(struct net_device *netdev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index 51c6a57..126cfeb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -975,15 +975,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+
+ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
+ {
+- struct mlx5_core_dev *mdev = priv->mdev;
+ void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
+- int i;
++ struct mlx5_core_dev *mdev = priv->mdev;
++ int ctxlen = MLX5_ST_SZ_BYTES(tirc);
++ int tt;
+
+ MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
+- mlx5e_build_tir_ctx_hash(tirc, priv);
+
+- for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
+- mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
++ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
++ memset(tirc, 0, ctxlen);
++ mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
++ mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
++ }
+ }
+
+ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 5dc3e24..b3067137 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1978,8 +1978,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
+ }
+
+-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
++void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
++ enum mlx5e_traffic_types tt)
+ {
++ void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
++
++#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
++ MLX5_HASH_FIELD_SEL_DST_IP)
++
++#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
++ MLX5_HASH_FIELD_SEL_DST_IP |\
++ MLX5_HASH_FIELD_SEL_L4_SPORT |\
++ MLX5_HASH_FIELD_SEL_L4_DPORT)
++
++#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
++ MLX5_HASH_FIELD_SEL_DST_IP |\
++ MLX5_HASH_FIELD_SEL_IPSEC_SPI)
++
+ MLX5_SET(tirc, tirc, rx_hash_fn,
+ mlx5e_rx_hash_fn(priv->params.rss_hfunc));
+ if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+@@ -1991,6 +2006,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ memcpy(rss_key, priv->params.toeplitz_hash_key, len);
+ }
++
++ switch (tt) {
++ case MLX5E_TT_IPV4_TCP:
++ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++ MLX5_L3_PROT_TYPE_IPV4);
++ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
++ MLX5_L4_PROT_TYPE_TCP);
++ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++ MLX5_HASH_IP_L4PORTS);
++ break;
++
++ case MLX5E_TT_IPV6_TCP:
++ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++ MLX5_L3_PROT_TYPE_IPV6);
++ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
++ MLX5_L4_PROT_TYPE_TCP);
++ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++ MLX5_HASH_IP_L4PORTS);
++ break;
++
++ case MLX5E_TT_IPV4_UDP:
++ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++ MLX5_L3_PROT_TYPE_IPV4);
++ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
++ MLX5_L4_PROT_TYPE_UDP);
++ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++ MLX5_HASH_IP_L4PORTS);
++ break;
++
++ case MLX5E_TT_IPV6_UDP:
++ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++ MLX5_L3_PROT_TYPE_IPV6);
++ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
++ MLX5_L4_PROT_TYPE_UDP);
++ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++ MLX5_HASH_IP_L4PORTS);
++ break;
++
++ case MLX5E_TT_IPV4_IPSEC_AH:
++ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++ MLX5_L3_PROT_TYPE_IPV4);
++ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++ MLX5_HASH_IP_IPSEC_SPI);
++ break;
++
++ case MLX5E_TT_IPV6_IPSEC_AH:
++ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++ MLX5_L3_PROT_TYPE_IPV6);
++ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++ MLX5_HASH_IP_IPSEC_SPI);
++ break;
++
++ case MLX5E_TT_IPV4_IPSEC_ESP:
++ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++ MLX5_L3_PROT_TYPE_IPV4);
++ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++ MLX5_HASH_IP_IPSEC_SPI);
++ break;
++
++ case MLX5E_TT_IPV6_IPSEC_ESP:
++ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++ MLX5_L3_PROT_TYPE_IPV6);
++ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++ MLX5_HASH_IP_IPSEC_SPI);
++ break;
++
++ case MLX5E_TT_IPV4:
++ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++ MLX5_L3_PROT_TYPE_IPV4);
++ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++ MLX5_HASH_IP);
++ break;
++
++ case MLX5E_TT_IPV6:
++ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++ MLX5_L3_PROT_TYPE_IPV6);
++ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++ MLX5_HASH_IP);
++ break;
++ default:
++ WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
++ }
+ }
+
+ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
+@@ -2360,110 +2457,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
+ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
+ enum mlx5e_traffic_types tt)
+ {
+- void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+-
+ MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
+
+-#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
+- MLX5_HASH_FIELD_SEL_DST_IP)
+-
+-#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
+- MLX5_HASH_FIELD_SEL_DST_IP |\
+- MLX5_HASH_FIELD_SEL_L4_SPORT |\
+- MLX5_HASH_FIELD_SEL_L4_DPORT)
+-
+-#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
+- MLX5_HASH_FIELD_SEL_DST_IP |\
+- MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+-
+ mlx5e_build_tir_ctx_lro(tirc, priv);
+
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
+- mlx5e_build_tir_ctx_hash(tirc, priv);
+-
+- switch (tt) {
+- case MLX5E_TT_IPV4_TCP:
+- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+- MLX5_L3_PROT_TYPE_IPV4);
+- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+- MLX5_L4_PROT_TYPE_TCP);
+- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+- MLX5_HASH_IP_L4PORTS);
+- break;
+-
+- case MLX5E_TT_IPV6_TCP:
+- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+- MLX5_L3_PROT_TYPE_IPV6);
+- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+- MLX5_L4_PROT_TYPE_TCP);
+- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+- MLX5_HASH_IP_L4PORTS);
+- break;
+-
+- case MLX5E_TT_IPV4_UDP:
+- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+- MLX5_L3_PROT_TYPE_IPV4);
+- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+- MLX5_L4_PROT_TYPE_UDP);
+- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+- MLX5_HASH_IP_L4PORTS);
+- break;
+-
+- case MLX5E_TT_IPV6_UDP:
+- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+- MLX5_L3_PROT_TYPE_IPV6);
+- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+- MLX5_L4_PROT_TYPE_UDP);
+- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+- MLX5_HASH_IP_L4PORTS);
+- break;
+-
+- case MLX5E_TT_IPV4_IPSEC_AH:
+- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+- MLX5_L3_PROT_TYPE_IPV4);
+- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+- MLX5_HASH_IP_IPSEC_SPI);
+- break;
+-
+- case MLX5E_TT_IPV6_IPSEC_AH:
+- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+- MLX5_L3_PROT_TYPE_IPV6);
+- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+- MLX5_HASH_IP_IPSEC_SPI);
+- break;
+-
+- case MLX5E_TT_IPV4_IPSEC_ESP:
+- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+- MLX5_L3_PROT_TYPE_IPV4);
+- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+- MLX5_HASH_IP_IPSEC_SPI);
+- break;
+-
+- case MLX5E_TT_IPV6_IPSEC_ESP:
+- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+- MLX5_L3_PROT_TYPE_IPV6);
+- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+- MLX5_HASH_IP_IPSEC_SPI);
+- break;
+-
+- case MLX5E_TT_IPV4:
+- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+- MLX5_L3_PROT_TYPE_IPV4);
+- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+- MLX5_HASH_IP);
+- break;
+-
+- case MLX5E_TT_IPV6:
+- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+- MLX5_L3_PROT_TYPE_IPV6);
+- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+- MLX5_HASH_IP);
+- break;
+- default:
+- WARN_ONCE(true,
+- "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
+- }
++ mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+ }
+
+ static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 914e546..7e20e4b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -1110,9 +1110,8 @@ static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
+ return rule;
+ }
+ rule = add_rule_fte(fte, fg, dest);
+- unlock_ref_node(&fte->node);
+ if (IS_ERR(rule))
+- goto unlock_fg;
++ goto unlock_fte;
+ else
+ goto add_rule;
+ }
+@@ -1130,6 +1129,7 @@ static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
+ goto unlock_fg;
+ }
+ tree_init_node(&fte->node, 0, del_fte);
++ nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
+ rule = add_rule_fte(fte, fg, dest);
+ if (IS_ERR(rule)) {
+ kfree(fte);
+@@ -1142,6 +1142,8 @@ static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
+ list_add(&fte->node.list, prev);
+ add_rule:
+ tree_add_node(&rule->node, &fte->node);
++unlock_fte:
++ unlock_ref_node(&fte->node);
+ unlock_fg:
+ unlock_ref_node(&fg->node);
+ return rule;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+index 7df4ff1..7d19029 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+@@ -305,8 +305,12 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
+ {
+ void __iomem *ioaddr = hw->pcsr;
+ u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
++ u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
+ int ret = 0;
+
++ /* Discard masked bits */
++ intr_status &= ~intr_mask;
++
+ /* Not used events (e.g. MMC interrupts) are not handled. */
+ if ((intr_status & GMAC_INT_STATUS_MMCTIS))
+ x->mmc_tx_irq_n++;
+diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
+index 6255973..1b65f0f 100644
+--- a/drivers/net/loopback.c
++++ b/drivers/net/loopback.c
+@@ -164,6 +164,7 @@ static void loopback_setup(struct net_device *dev)
+ {
+ dev->mtu = 64 * 1024;
+ dev->hard_header_len = ETH_HLEN; /* 14 */
++ dev->min_header_len = ETH_HLEN; /* 14 */
+ dev->addr_len = ETH_ALEN; /* 6 */
+ dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
+ dev->flags = IFF_LOOPBACK;
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 6f38daf..adea6f5 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -682,7 +682,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ ssize_t n;
+
+ if (q->flags & IFF_VNET_HDR) {
+- vnet_hdr_len = q->vnet_hdr_sz;
++ vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+
+ err = -EINVAL;
+ if (len < vnet_hdr_len)
+@@ -822,7 +822,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
+
+ if (q->flags & IFF_VNET_HDR) {
+ struct virtio_net_hdr vnet_hdr;
+- vnet_hdr_len = q->vnet_hdr_sz;
++ vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+ if (iov_iter_count(iter) < vnet_hdr_len)
+ return -EINVAL;
+
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 18402d7..b31aca8 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1187,9 +1187,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ }
+
+ if (tun->flags & IFF_VNET_HDR) {
+- if (len < tun->vnet_hdr_sz)
++ int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
++
++ if (len < vnet_hdr_sz)
+ return -EINVAL;
+- len -= tun->vnet_hdr_sz;
++ len -= vnet_hdr_sz;
+
+ n = copy_from_iter(&gso, sizeof(gso), from);
+ if (n != sizeof(gso))
+@@ -1201,7 +1203,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+
+ if (tun16_to_cpu(tun, gso.hdr_len) > len)
+ return -EINVAL;
+- iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
++ iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
+ }
+
+ if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
+@@ -1348,7 +1350,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ vlan_hlen = VLAN_HLEN;
+
+ if (tun->flags & IFF_VNET_HDR)
+- vnet_hdr_sz = tun->vnet_hdr_sz;
++ vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
+
+ total = skb->len + vlan_hlen + vnet_hdr_sz;
+
+diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
+index d9ca05d..4086415 100644
+--- a/drivers/net/usb/catc.c
++++ b/drivers/net/usb/catc.c
+@@ -777,7 +777,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
+ struct net_device *netdev;
+ struct catc *catc;
+ u8 broadcast[ETH_ALEN];
+- int i, pktsz;
++ int pktsz, ret;
+
+ if (usb_set_interface(usbdev,
+ intf->altsetting->desc.bInterfaceNumber, 1)) {
+@@ -812,12 +812,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
+ if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
+ (!catc->rx_urb) || (!catc->irq_urb)) {
+ dev_err(&intf->dev, "No free urbs available.\n");
+- usb_free_urb(catc->ctrl_urb);
+- usb_free_urb(catc->tx_urb);
+- usb_free_urb(catc->rx_urb);
+- usb_free_urb(catc->irq_urb);
+- free_netdev(netdev);
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto fail_free;
+ }
+
+ /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
+@@ -845,15 +841,24 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
+ catc->irq_buf, 2, catc_irq_done, catc, 1);
+
+ if (!catc->is_f5u011) {
++ u32 *buf;
++ int i;
++
+ dev_dbg(dev, "Checking memory size\n");
+
+- i = 0x12345678;
+- catc_write_mem(catc, 0x7a80, &i, 4);
+- i = 0x87654321;
+- catc_write_mem(catc, 0xfa80, &i, 4);
+- catc_read_mem(catc, 0x7a80, &i, 4);
++ buf = kmalloc(4, GFP_KERNEL);
++ if (!buf) {
++ ret = -ENOMEM;
++ goto fail_free;
++ }
++
++ *buf = 0x12345678;
++ catc_write_mem(catc, 0x7a80, buf, 4);
++ *buf = 0x87654321;
++ catc_write_mem(catc, 0xfa80, buf, 4);
++ catc_read_mem(catc, 0x7a80, buf, 4);
+
+- switch (i) {
++ switch (*buf) {
+ case 0x12345678:
+ catc_set_reg(catc, TxBufCount, 8);
+ catc_set_reg(catc, RxBufCount, 32);
+@@ -868,6 +873,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
+ dev_dbg(dev, "32k Memory\n");
+ break;
+ }
++
++ kfree(buf);
+
+ dev_dbg(dev, "Getting MAC from SEEROM.\n");
+
+@@ -914,16 +921,21 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
+ usb_set_intfdata(intf, catc);
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+- if (register_netdev(netdev) != 0) {
+- usb_set_intfdata(intf, NULL);
+- usb_free_urb(catc->ctrl_urb);
+- usb_free_urb(catc->tx_urb);
+- usb_free_urb(catc->rx_urb);
+- usb_free_urb(catc->irq_urb);
+- free_netdev(netdev);
+- return -EIO;
+- }
++ ret = register_netdev(netdev);
++ if (ret)
++ goto fail_clear_intfdata;
++
+ return 0;
++
++fail_clear_intfdata:
++ usb_set_intfdata(intf, NULL);
++fail_free:
++ usb_free_urb(catc->ctrl_urb);
++ usb_free_urb(catc->tx_urb);
++ usb_free_urb(catc->rx_urb);
++ usb_free_urb(catc->irq_urb);
++ free_netdev(netdev);
++ return ret;
+ }
+
+ static void catc_disconnect(struct usb_interface *intf)
+diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
+index 1434e5d..ee40ac2 100644
+--- a/drivers/net/usb/pegasus.c
++++ b/drivers/net/usb/pegasus.c
+@@ -126,40 +126,61 @@ static void async_ctrl_callback(struct urb *urb)
+
+ static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
+ {
++ u8 *buf;
+ int ret;
+
++ buf = kmalloc(size, GFP_NOIO);
++ if (!buf)
++ return -ENOMEM;
++
+ ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0),
+ PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0,
+- indx, data, size, 1000);
++ indx, buf, size, 1000);
+ if (ret < 0)
+ netif_dbg(pegasus, drv, pegasus->net,
+ "%s returned %d\n", __func__, ret);
++ else if (ret <= size)
++ memcpy(data, buf, ret);
++ kfree(buf);
+ return ret;
+ }
+
+-static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
++static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
++ const void *data)
+ {
++ u8 *buf;
+ int ret;
+
++ buf = kmemdup(data, size, GFP_NOIO);
++ if (!buf)
++ return -ENOMEM;
++
+ ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
+ PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0,
+- indx, data, size, 100);
++ indx, buf, size, 100);
+ if (ret < 0)
+ netif_dbg(pegasus, drv, pegasus->net,
+ "%s returned %d\n", __func__, ret);
++ kfree(buf);
+ return ret;
+ }
+
+ static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
+ {
++ u8 *buf;
+ int ret;
+
++ buf = kmemdup(&data, 1, GFP_NOIO);
++ if (!buf)
++ return -ENOMEM;
++
+ ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
+ PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data,
+- indx, &data, 1, 1000);
++ indx, buf, 1, 1000);
+ if (ret < 0)
+ netif_dbg(pegasus, drv, pegasus->net,
+ "%s returned %d\n", __func__, ret);
++ kfree(buf);
+ return ret;
+ }
+
+diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
+index 7c72bfa..dc4f7ea 100644
+--- a/drivers/net/usb/rtl8150.c
++++ b/drivers/net/usb/rtl8150.c
+@@ -155,16 +155,36 @@ static const char driver_name [] = "rtl8150";
+ */
+ static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
+ {
+- return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+- RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
+- indx, 0, data, size, 500);
++ void *buf;
++ int ret;
++
++ buf = kmalloc(size, GFP_NOIO);
++ if (!buf)
++ return -ENOMEM;
++
++ ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
++ RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
++ indx, 0, buf, size, 500);
++ if (ret > 0 && ret <= size)
++ memcpy(data, buf, ret);
++ kfree(buf);
++ return ret;
+ }
+
+-static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
++static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data)
+ {
+- return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+- RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
+- indx, 0, data, size, 500);
++ void *buf;
++ int ret;
++
++ buf = kmemdup(data, size, GFP_NOIO);
++ if (!buf)
++ return -ENOMEM;
++
++ ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
++ RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
++ indx, 0, buf, size, 500);
++ kfree(buf);
++ return ret;
+ }
+
+ static void async_set_reg_cb(struct urb *urb)
+diff --git a/include/linux/can/core.h b/include/linux/can/core.h
+index a087500..df08a41 100644
+--- a/include/linux/can/core.h
++++ b/include/linux/can/core.h
+@@ -45,10 +45,9 @@ struct can_proto {
+ extern int can_proto_register(const struct can_proto *cp);
+ extern void can_proto_unregister(const struct can_proto *cp);
+
+-extern int can_rx_register(struct net_device *dev, canid_t can_id,
+- canid_t mask,
+- void (*func)(struct sk_buff *, void *),
+- void *data, char *ident);
++int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
++ void (*func)(struct sk_buff *, void *),
++ void *data, char *ident, struct sock *sk);
+
+ extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
+ canid_t mask,
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index d83590e..bb9b102 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1508,6 +1508,7 @@ enum netdev_priv_flags {
+ * @mtu: Interface MTU value
+ * @type: Interface hardware type
+ * @hard_header_len: Maximum hardware header length.
++ * @min_header_len: Minimum hardware header length
+ *
+ * @needed_headroom: Extra headroom the hardware may need, but not in all
+ * cases can this be guaranteed
+@@ -1728,6 +1729,7 @@ struct net_device {
+ unsigned int mtu;
+ unsigned short type;
+ unsigned short hard_header_len;
++ unsigned short min_header_len;
+
+ unsigned short needed_headroom;
+ unsigned short needed_tailroom;
+@@ -2783,6 +2785,8 @@ static inline bool dev_validate_header(const struct net_device *dev,
+ {
+ if (likely(len >= dev->hard_header_len))
+ return true;
++ if (len < dev->min_header_len)
++ return false;
+
+ if (capable(CAP_SYS_RAWIO)) {
+ memset(ll_header + len, 0, dev->hard_header_len - len);
+diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
+index 3ebb168..a34b141 100644
+--- a/include/net/cipso_ipv4.h
++++ b/include/net/cipso_ipv4.h
+@@ -309,6 +309,10 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
+ }
+
+ for (opt_iter = 6; opt_iter < opt_len;) {
++ if (opt_iter + 1 == opt_len) {
++ err_offset = opt_iter;
++ goto out;
++ }
+ tag_len = opt[opt_iter + 1];
+ if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) {
+ err_offset = opt_iter + 1;
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index f11ca83..7f15f95 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -871,7 +871,7 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
+ * upper-layer output functions
+ */
+ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+- struct ipv6_txoptions *opt, int tclass);
++ __u32 mark, struct ipv6_txoptions *opt, int tclass);
+
+ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
+
+diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
+index fc7c0db..3f40132 100644
+--- a/include/net/lwtunnel.h
++++ b/include/net/lwtunnel.h
+@@ -176,7 +176,10 @@ static inline int lwtunnel_valid_encap_type(u16 encap_type)
+ }
+ static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len)
+ {
+- return -EOPNOTSUPP;
++ /* return 0 since we are not walking attr looking for
++ * RTA_ENCAP_TYPE attribute on nexthops.
++ */
++ return 0;
+ }
+
+ static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 1108079..5488e4a 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -445,6 +445,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
+ * @func: callback function on filter match
+ * @data: returned parameter for callback function
+ * @ident: string for calling module identification
++ * @sk: socket pointer (might be NULL)
+ *
+ * Description:
+ * Invokes the callback function with the received sk_buff and the given
+@@ -468,7 +469,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
+ */
+ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+ void (*func)(struct sk_buff *, void *), void *data,
+- char *ident)
++ char *ident, struct sock *sk)
+ {
+ struct receiver *r;
+ struct hlist_head *rl;
+@@ -496,6 +497,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+ r->func = func;
+ r->data = data;
+ r->ident = ident;
++ r->sk = sk;
+
+ hlist_add_head_rcu(&r->list, rl);
+ d->entries++;
+@@ -520,8 +522,11 @@ EXPORT_SYMBOL(can_rx_register);
+ static void can_rx_delete_receiver(struct rcu_head *rp)
+ {
+ struct receiver *r = container_of(rp, struct receiver, rcu);
++ struct sock *sk = r->sk;
+
+ kmem_cache_free(rcv_cache, r);
++ if (sk)
++ sock_put(sk);
+ }
+
+ /**
+@@ -596,8 +601,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
+ spin_unlock(&can_rcvlists_lock);
+
+ /* schedule the receiver item for deletion */
+- if (r)
++ if (r) {
++ if (r->sk)
++ sock_hold(r->sk);
+ call_rcu(&r->rcu, can_rx_delete_receiver);
++ }
+ }
+ EXPORT_SYMBOL(can_rx_unregister);
+
+diff --git a/net/can/af_can.h b/net/can/af_can.h
+index fca0fe9..b86f512 100644
+--- a/net/can/af_can.h
++++ b/net/can/af_can.h
+@@ -50,13 +50,14 @@
+
+ struct receiver {
+ struct hlist_node list;
+- struct rcu_head rcu;
+ canid_t can_id;
+ canid_t mask;
+ unsigned long matches;
+ void (*func)(struct sk_buff *, void *);
+ void *data;
+ char *ident;
++ struct sock *sk;
++ struct rcu_head rcu;
+ };
+
+ #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 5e9ed5e..e4f694d 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1225,7 +1225,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ err = can_rx_register(dev, op->can_id,
+ REGMASK(op->can_id),
+ bcm_rx_handler, op,
+- "bcm");
++ "bcm", sk);
+
+ op->rx_reg_dev = dev;
+ dev_put(dev);
+@@ -1234,7 +1234,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ } else
+ err = can_rx_register(NULL, op->can_id,
+ REGMASK(op->can_id),
+- bcm_rx_handler, op, "bcm");
++ bcm_rx_handler, op, "bcm", sk);
+ if (err) {
+ /* this bcm rx op is broken -> remove it */
+ list_del(&op->list);
+diff --git a/net/can/gw.c b/net/can/gw.c
+index 4551687..77c8af4 100644
+--- a/net/can/gw.c
++++ b/net/can/gw.c
+@@ -442,7 +442,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj)
+ {
+ return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
+ gwj->ccgw.filter.can_mask, can_can_gw_rcv,
+- gwj, "gw");
++ gwj, "gw", NULL);
+ }
+
+ static inline void cgw_unregister_filter(struct cgw_job *gwj)
+diff --git a/net/can/raw.c b/net/can/raw.c
+index b075f02..6dc546a 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -190,7 +190,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk,
+ for (i = 0; i < count; i++) {
+ err = can_rx_register(dev, filter[i].can_id,
+ filter[i].can_mask,
+- raw_rcv, sk, "raw");
++ raw_rcv, sk, "raw", sk);
+ if (err) {
+ /* clean up successfully registered filters */
+ while (--i >= 0)
+@@ -211,7 +211,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
+
+ if (err_mask)
+ err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
+- raw_rcv, sk, "raw");
++ raw_rcv, sk, "raw", sk);
+
+ return err;
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index df51c50..60b0a604 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1696,24 +1696,19 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);
+
+ static struct static_key netstamp_needed __read_mostly;
+ #ifdef HAVE_JUMP_LABEL
+-/* We are not allowed to call static_key_slow_dec() from irq context
+- * If net_disable_timestamp() is called from irq context, defer the
+- * static_key_slow_dec() calls.
+- */
+ static atomic_t netstamp_needed_deferred;
+-#endif
+-
+-void net_enable_timestamp(void)
++static void netstamp_clear(struct work_struct *work)
+ {
+-#ifdef HAVE_JUMP_LABEL
+ int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
+
+- if (deferred) {
+- while (--deferred)
+- static_key_slow_dec(&netstamp_needed);
+- return;
+- }
++ while (deferred--)
++ static_key_slow_dec(&netstamp_needed);
++}
++static DECLARE_WORK(netstamp_work, netstamp_clear);
+ #endif
++
++void net_enable_timestamp(void)
++{
+ static_key_slow_inc(&netstamp_needed);
+ }
+ EXPORT_SYMBOL(net_enable_timestamp);
+@@ -1721,12 +1716,12 @@ EXPORT_SYMBOL(net_enable_timestamp);
+ void net_disable_timestamp(void)
+ {
+ #ifdef HAVE_JUMP_LABEL
+- if (in_interrupt()) {
+- atomic_inc(&netstamp_needed_deferred);
+- return;
+- }
+-#endif
++ /* net_disable_timestamp() can be called from non process context */
++ atomic_inc(&netstamp_needed_deferred);
++ schedule_work(&netstamp_work);
++#else
+ static_key_slow_dec(&netstamp_needed);
++#endif
+ }
+ EXPORT_SYMBOL(net_disable_timestamp);
+
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 715e5d1..7506c03 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -227,7 +227,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
+ opt = ireq->ipv6_opt;
+ if (!opt)
+ opt = rcu_dereference(np->opt);
+- err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
++ err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);
+ rcu_read_unlock();
+ err = net_xmit_eval(err);
+ }
+@@ -281,7 +281,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
+ dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
+ if (!IS_ERR(dst)) {
+ skb_dst_set(skb, dst);
+- ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
++ ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0);
+ DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
+ DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
+ return;
+diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
+index da38621..0f99297 100644
+--- a/net/dsa/dsa2.c
++++ b/net/dsa/dsa2.c
+@@ -273,6 +273,7 @@ static int dsa_user_port_apply(struct device_node *port, u32 index,
+ if (err) {
+ dev_warn(ds->dev, "Failed to create slave %d: %d\n",
+ index, err);
++ ds->ports[index].netdev = NULL;
+ return err;
+ }
+
+diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
+index 02acfff..24d7aff 100644
+--- a/net/ethernet/eth.c
++++ b/net/ethernet/eth.c
+@@ -356,6 +356,7 @@ void ether_setup(struct net_device *dev)
+ dev->header_ops = &eth_header_ops;
+ dev->type = ARPHRD_ETHER;
+ dev->hard_header_len = ETH_HLEN;
++ dev->min_header_len = ETH_HLEN;
+ dev->mtu = ETH_DATA_LEN;
+ dev->addr_len = ETH_ALEN;
+ dev->tx_queue_len = 1000; /* Ethernet wants good queues */
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index 72d6f05..ae20616 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -1587,6 +1587,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
+ goto validate_return_locked;
+ }
+
++ if (opt_iter + 1 == opt_len) {
++ err_offset = opt_iter;
++ goto validate_return_locked;
++ }
+ tag_len = tag[1];
+ if (tag_len > (opt_len - opt_iter)) {
+ err_offset = opt_iter + 1;
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 32a08bc..1bc623d 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -1172,6 +1172,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
+ psf->sf_crcount = im->crcount;
+ }
+ in_dev_put(pmc->interface);
++ kfree(pmc);
+ }
+ spin_unlock_bh(&im->lock);
+ }
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 877bdb0..e5c1dbe 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1606,6 +1606,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
+ sk->sk_protocol = ip_hdr(skb)->protocol;
+ sk->sk_bound_dev_if = arg->bound_dev_if;
+ sk->sk_sndbuf = sysctl_wmem_default;
++ sk->sk_mark = fl4.flowi4_mark;
+ err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
+ len, 0, &ipc, &rt, MSG_DONTWAIT);
+ if (unlikely(err)) {
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index f226f408..65336f3 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -1215,7 +1215,14 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
+ pktinfo->ipi_ifindex = 0;
+ pktinfo->ipi_spec_dst.s_addr = 0;
+ }
+- skb_dst_drop(skb);
++ /* We need to keep the dst for __ip_options_echo()
++ * We could restrict the test to opt.ts_needtime || opt.srr,
++ * but the following is good enough as IP options are not often used.
++ */
++ if (unlikely(IPCB(skb)->opt.optlen))
++ skb_dst_force(skb);
++ else
++ skb_dst_drop(skb);
+ }
+
+ int ip_setsockopt(struct sock *sk, int level,
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 96b8e2b..105c074 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -642,6 +642,8 @@ static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
+ {
+ struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
+
++ if (!skb)
++ return 0;
+ pfh->wcheck = csum_partial((char *)&pfh->icmph,
+ sizeof(struct icmphdr), pfh->wcheck);
+ pfh->icmph.checksum = csum_fold(pfh->wcheck);
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 814af89..6a90a0e 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -772,6 +772,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
+ ret = -EAGAIN;
+ break;
+ }
++ /* if __tcp_splice_read() got nothing while we have
++ * an skb in receive queue, we do not want to loop.
++ * This might happen with URG data.
++ */
++ if (!skb_queue_empty(&sk->sk_receive_queue))
++ break;
+ sk_wait_data(sk, &timeo, NULL);
+ if (signal_pending(current)) {
+ ret = sock_intr_errno(timeo);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 896e9df..65d6189 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2436,9 +2436,11 @@ u32 __tcp_select_window(struct sock *sk)
+ int full_space = min_t(int, tp->window_clamp, allowed_space);
+ int window;
+
+- if (mss > full_space)
++ if (unlikely(mss > full_space)) {
+ mss = full_space;
+-
++ if (mss <= 0)
++ return 0;
++ }
+ if (free_space < (full_space >> 1)) {
+ icsk->icsk_ack.quick = 0;
+
+diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
+index 532c3ef..798a095 100644
+--- a/net/ipv6/inet6_connection_sock.c
++++ b/net/ipv6/inet6_connection_sock.c
+@@ -173,7 +173,7 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
+ /* Restore final destination back after routing done */
+ fl6.daddr = sk->sk_v6_daddr;
+
+- res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
++ res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt),
+ np->tclass);
+ rcu_read_unlock();
+ return res;
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index d7d6d3a..0a59220 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -367,35 +367,37 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
+
+
+ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+- u8 type, u8 code, int offset, __be32 info)
++ u8 type, u8 code, int offset, __be32 info)
+ {
+- const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
+- __be16 *p = (__be16 *)(skb->data + offset);
+- int grehlen = offset + 4;
++ const struct gre_base_hdr *greh;
++ const struct ipv6hdr *ipv6h;
++ int grehlen = sizeof(*greh);
+ struct ip6_tnl *t;
++ int key_off = 0;
+ __be16 flags;
++ __be32 key;
+
+- flags = p[0];
+- if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
+- if (flags&(GRE_VERSION|GRE_ROUTING))
+- return;
+- if (flags&GRE_KEY) {
+- grehlen += 4;
+- if (flags&GRE_CSUM)
+- grehlen += 4;
+- }
++ if (!pskb_may_pull(skb, offset + grehlen))
++ return;
++ greh = (const struct gre_base_hdr *)(skb->data + offset);
++ flags = greh->flags;
++ if (flags & (GRE_VERSION | GRE_ROUTING))
++ return;
++ if (flags & GRE_CSUM)
++ grehlen += 4;
++ if (flags & GRE_KEY) {
++ key_off = grehlen + offset;
++ grehlen += 4;
+ }
+
+- /* If only 8 bytes returned, keyed message will be dropped here */
+- if (!pskb_may_pull(skb, grehlen))
++ if (!pskb_may_pull(skb, offset + grehlen))
+ return;
+ ipv6h = (const struct ipv6hdr *)skb->data;
+- p = (__be16 *)(skb->data + offset);
++ greh = (const struct gre_base_hdr *)(skb->data + offset);
++ key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
+
+ t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
+- flags & GRE_KEY ?
+- *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
+- p[1]);
++ key, greh->protocol);
+ if (!t)
+ return;
+
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 59eb4ed..9a87bfb 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -163,7 +163,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ * which are using proper atomic operations or spinlocks.
+ */
+ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+- struct ipv6_txoptions *opt, int tclass)
++ __u32 mark, struct ipv6_txoptions *opt, int tclass)
+ {
+ struct net *net = sock_net(sk);
+ const struct ipv6_pinfo *np = inet6_sk(sk);
+@@ -230,7 +230,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+
+ skb->protocol = htons(ETH_P_IPV6);
+ skb->priority = sk->sk_priority;
+- skb->mark = sk->sk_mark;
++ skb->mark = mark;
+
+ mtu = dst_mtu(dst);
+ if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index f95437f..f6ba452 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -400,18 +400,19 @@ ip6_tnl_dev_uninit(struct net_device *dev)
+
+ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+ {
+- const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
+- __u8 nexthdr = ipv6h->nexthdr;
+- __u16 off = sizeof(*ipv6h);
++ const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
++ unsigned int nhoff = raw - skb->data;
++ unsigned int off = nhoff + sizeof(*ipv6h);
++ u8 next, nexthdr = ipv6h->nexthdr;
+
+ while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
+- __u16 optlen = 0;
+ struct ipv6_opt_hdr *hdr;
+- if (raw + off + sizeof(*hdr) > skb->data &&
+- !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
++ u16 optlen;
++
++ if (!pskb_may_pull(skb, off + sizeof(*hdr)))
+ break;
+
+- hdr = (struct ipv6_opt_hdr *) (raw + off);
++ hdr = (struct ipv6_opt_hdr *)(skb->data + off);
+ if (nexthdr == NEXTHDR_FRAGMENT) {
+ struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
+ if (frag_hdr->frag_off)
+@@ -422,20 +423,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+ } else {
+ optlen = ipv6_optlen(hdr);
+ }
++ /* cache hdr->nexthdr, since pskb_may_pull() might
++ * invalidate hdr
++ */
++ next = hdr->nexthdr;
+ if (nexthdr == NEXTHDR_DEST) {
+- __u16 i = off + 2;
++ u16 i = 2;
++
++ /* Remember : hdr is no longer valid at this point. */
++ if (!pskb_may_pull(skb, off + optlen))
++ break;
++
+ while (1) {
+ struct ipv6_tlv_tnl_enc_lim *tel;
+
+ /* No more room for encapsulation limit */
+- if (i + sizeof (*tel) > off + optlen)
++ if (i + sizeof(*tel) > optlen)
+ break;
+
+- tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
++ tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
+ /* return index of option if found and valid */
+ if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
+ tel->length == 1)
+- return i;
++ return i + off - nhoff;
+ /* else jump to next option */
+ if (tel->type)
+ i += tel->length + 2;
+@@ -443,7 +453,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+ i++;
+ }
+ }
+- nexthdr = hdr->nexthdr;
++ nexthdr = next;
+ off += optlen;
+ }
+ return 0;
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 14a3903..1bdc703 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -81,7 +81,7 @@ static void mld_gq_timer_expire(unsigned long data);
+ static void mld_ifc_timer_expire(unsigned long data);
+ static void mld_ifc_event(struct inet6_dev *idev);
+ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
+-static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
++static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
+ static void mld_clear_delrec(struct inet6_dev *idev);
+ static bool mld_in_v1_mode(const struct inet6_dev *idev);
+ static int sf_setstate(struct ifmcaddr6 *pmc);
+@@ -692,9 +692,9 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
+ dev_mc_del(dev, buf);
+ }
+
+- if (mc->mca_flags & MAF_NOREPORT)
+- goto done;
+ spin_unlock_bh(&mc->mca_lock);
++ if (mc->mca_flags & MAF_NOREPORT)
++ return;
+
+ if (!mc->idev->dead)
+ igmp6_leave_group(mc);
+@@ -702,8 +702,6 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
+ spin_lock_bh(&mc->mca_lock);
+ if (del_timer(&mc->mca_timer))
+ atomic_dec(&mc->mca_refcnt);
+-done:
+- ip6_mc_clear_src(mc);
+ spin_unlock_bh(&mc->mca_lock);
+ }
+
+@@ -748,10 +746,11 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
+ spin_unlock_bh(&idev->mc_lock);
+ }
+
+-static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
++static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
+ {
+ struct ifmcaddr6 *pmc, *pmc_prev;
+- struct ip6_sf_list *psf, *psf_next;
++ struct ip6_sf_list *psf;
++ struct in6_addr *pmca = &im->mca_addr;
+
+ spin_lock_bh(&idev->mc_lock);
+ pmc_prev = NULL;
+@@ -768,14 +767,21 @@ static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
+ }
+ spin_unlock_bh(&idev->mc_lock);
+
++ spin_lock_bh(&im->mca_lock);
+ if (pmc) {
+- for (psf = pmc->mca_tomb; psf; psf = psf_next) {
+- psf_next = psf->sf_next;
+- kfree(psf);
++ im->idev = pmc->idev;
++ im->mca_crcount = idev->mc_qrv;
++ im->mca_sfmode = pmc->mca_sfmode;
++ if (pmc->mca_sfmode == MCAST_INCLUDE) {
++ im->mca_tomb = pmc->mca_tomb;
++ im->mca_sources = pmc->mca_sources;
++ for (psf = im->mca_sources; psf; psf = psf->sf_next)
++ psf->sf_crcount = im->mca_crcount;
+ }
+ in6_dev_put(pmc->idev);
+ kfree(pmc);
+ }
++ spin_unlock_bh(&im->mca_lock);
+ }
+
+ static void mld_clear_delrec(struct inet6_dev *idev)
+@@ -904,7 +910,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
+ mca_get(mc);
+ write_unlock_bh(&idev->lock);
+
+- mld_del_delrec(idev, &mc->mca_addr);
++ mld_del_delrec(idev, mc);
+ igmp6_group_added(mc);
+ ma_put(mc);
+ return 0;
+@@ -927,6 +933,7 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
+ write_unlock_bh(&idev->lock);
+
+ igmp6_group_dropped(ma);
++ ip6_mc_clear_src(ma);
+
+ ma_put(ma);
+ return 0;
+@@ -2501,15 +2508,17 @@ void ipv6_mc_down(struct inet6_dev *idev)
+ /* Withdraw multicast list */
+
+ read_lock_bh(&idev->lock);
+- mld_ifc_stop_timer(idev);
+- mld_gq_stop_timer(idev);
+- mld_dad_stop_timer(idev);
+
+ for (i = idev->mc_list; i; i = i->next)
+ igmp6_group_dropped(i);
+- read_unlock_bh(&idev->lock);
+
+- mld_clear_delrec(idev);
++ /* Should stop timer after group drop. or we will
++ * start timer again in mld_ifc_event()
++ */
++ mld_ifc_stop_timer(idev);
++ mld_gq_stop_timer(idev);
++ mld_dad_stop_timer(idev);
++ read_unlock_bh(&idev->lock);
+ }
+
+ static void ipv6_mc_reset(struct inet6_dev *idev)
+@@ -2531,8 +2540,10 @@ void ipv6_mc_up(struct inet6_dev *idev)
+
+ read_lock_bh(&idev->lock);
+ ipv6_mc_reset(idev);
+- for (i = idev->mc_list; i; i = i->next)
++ for (i = idev->mc_list; i; i = i->next) {
++ mld_del_delrec(idev, i);
+ igmp6_group_added(i);
++ }
+ read_unlock_bh(&idev->lock);
+ }
+
+@@ -2565,6 +2576,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
+
+ /* Deactivate timers */
+ ipv6_mc_down(idev);
++ mld_clear_delrec(idev);
+
+ /* Delete all-nodes address. */
+ /* We cannot call ipv6_dev_mc_dec() directly, our caller in
+@@ -2579,11 +2591,9 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
+ write_lock_bh(&idev->lock);
+ while ((i = idev->mc_list) != NULL) {
+ idev->mc_list = i->next;
+- write_unlock_bh(&idev->lock);
+
+- igmp6_group_dropped(i);
++ write_unlock_bh(&idev->lock);
+ ma_put(i);
+-
+ write_lock_bh(&idev->lock);
+ }
+ write_unlock_bh(&idev->lock);
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index b1cdf80..40d7405 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1390,6 +1390,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
+ err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
+ if (err) {
+ free_percpu(dev->tstats);
++ dev->tstats = NULL;
+ return err;
+ }
+
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index b9f1fee..6673965 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -467,7 +467,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
+ opt = ireq->ipv6_opt;
+ if (!opt)
+ opt = rcu_dereference(np->opt);
+- err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
++ err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
+ rcu_read_unlock();
+ err = net_xmit_eval(err);
+ }
+@@ -837,7 +837,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
+ dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
+ if (!IS_ERR(dst)) {
+ skb_dst_set(buff, dst);
+- ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
++ ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
+ TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+ if (rst)
+ TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
+@@ -987,6 +987,16 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ return 0; /* don't send reset */
+ }
+
++static void tcp_v6_restore_cb(struct sk_buff *skb)
++{
++ /* We need to move header back to the beginning if xfrm6_policy_check()
++ * and tcp_v6_fill_cb() are going to be called again.
++ * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
++ */
++ memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
++ sizeof(struct inet6_skb_parm));
++}
++
+ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct dst_entry *dst,
+@@ -1178,8 +1188,10 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
+ sk_gfp_mask(sk, GFP_ATOMIC));
+ consume_skb(ireq->pktopts);
+ ireq->pktopts = NULL;
+- if (newnp->pktoptions)
++ if (newnp->pktoptions) {
++ tcp_v6_restore_cb(newnp->pktoptions);
+ skb_set_owner_r(newnp->pktoptions, newsk);
++ }
+ }
+ }
+
+@@ -1194,16 +1206,6 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
+ return NULL;
+ }
+
+-static void tcp_v6_restore_cb(struct sk_buff *skb)
+-{
+- /* We need to move header back to the beginning if xfrm6_policy_check()
+- * and tcp_v6_fill_cb() are going to be called again.
+- * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
+- */
+- memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+- sizeof(struct inet6_skb_parm));
+-}
+-
+ /* The socket must have it's spinlock held when we get
+ * here, unless it is a TCP_LISTEN socket.
+ *
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
+index 2599af6..181e755c 100644
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -273,6 +273,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb,
+ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
+ const struct l2tp_nl_cmd_ops *ops);
+ void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
++int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+
+ /* Session reference counts. Incremented when code obtains a reference
+ * to a session.
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 8938b6b..c0f0750 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -11,6 +11,7 @@
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
++#include <asm/ioctls.h>
+ #include <linux/icmp.h>
+ #include <linux/module.h>
+ #include <linux/skbuff.h>
+@@ -560,6 +561,30 @@ static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
+ return err ? err : copied;
+ }
+
++int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
++{
++ struct sk_buff *skb;
++ int amount;
++
++ switch (cmd) {
++ case SIOCOUTQ:
++ amount = sk_wmem_alloc_get(sk);
++ break;
++ case SIOCINQ:
++ spin_lock_bh(&sk->sk_receive_queue.lock);
++ skb = skb_peek(&sk->sk_receive_queue);
++ amount = skb ? skb->len : 0;
++ spin_unlock_bh(&sk->sk_receive_queue.lock);
++ break;
++
++ default:
++ return -ENOIOCTLCMD;
++ }
++
++ return put_user(amount, (int __user *)arg);
++}
++EXPORT_SYMBOL(l2tp_ioctl);
++
+ static struct proto l2tp_ip_prot = {
+ .name = "L2TP/IP",
+ .owner = THIS_MODULE,
+@@ -568,7 +593,7 @@ static struct proto l2tp_ip_prot = {
+ .bind = l2tp_ip_bind,
+ .connect = l2tp_ip_connect,
+ .disconnect = l2tp_ip_disconnect,
+- .ioctl = udp_ioctl,
++ .ioctl = l2tp_ioctl,
+ .destroy = l2tp_ip_destroy_sock,
+ .setsockopt = ip_setsockopt,
+ .getsockopt = ip_getsockopt,
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index aa821cb..1a65c9a 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -729,7 +729,7 @@ static struct proto l2tp_ip6_prot = {
+ .bind = l2tp_ip6_bind,
+ .connect = l2tp_ip6_connect,
+ .disconnect = l2tp_ip6_disconnect,
+- .ioctl = udp_ioctl,
++ .ioctl = l2tp_ioctl,
+ .destroy = l2tp_ip6_destroy_sock,
+ .setsockopt = ipv6_setsockopt,
+ .getsockopt = ipv6_getsockopt,
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 94e4a59..458722b 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2813,7 +2813,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ struct virtio_net_hdr vnet_hdr = { 0 };
+ int offset = 0;
+ struct packet_sock *po = pkt_sk(sk);
+- int hlen, tlen;
++ int hlen, tlen, linear;
+ int extra_len = 0;
+
+ /*
+@@ -2874,8 +2874,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ err = -ENOBUFS;
+ hlen = LL_RESERVED_SPACE(dev);
+ tlen = dev->needed_tailroom;
+- skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
+- __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len),
++ linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
++ linear = max(linear, min_t(int, len, dev->hard_header_len));
++ skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (skb == NULL)
+ goto out_unlock;
+diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
+index f935429..b12bc2a 100644
+--- a/net/sched/cls_matchall.c
++++ b/net/sched/cls_matchall.c
+@@ -16,16 +16,11 @@
+ #include <net/sch_generic.h>
+ #include <net/pkt_cls.h>
+
+-struct cls_mall_filter {
++struct cls_mall_head {
+ struct tcf_exts exts;
+ struct tcf_result res;
+ u32 handle;
+- struct rcu_head rcu;
+ u32 flags;
+-};
+-
+-struct cls_mall_head {
+- struct cls_mall_filter *filter;
+ struct rcu_head rcu;
+ };
+
+@@ -33,38 +28,29 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res)
+ {
+ struct cls_mall_head *head = rcu_dereference_bh(tp->root);
+- struct cls_mall_filter *f = head->filter;
+
+- if (tc_skip_sw(f->flags))
++ if (tc_skip_sw(head->flags))
+ return -1;
+
+- return tcf_exts_exec(skb, &f->exts, res);
++ return tcf_exts_exec(skb, &head->exts, res);
+ }
+
+ static int mall_init(struct tcf_proto *tp)
+ {
+- struct cls_mall_head *head;
+-
+- head = kzalloc(sizeof(*head), GFP_KERNEL);
+- if (!head)
+- return -ENOBUFS;
+-
+- rcu_assign_pointer(tp->root, head);
+-
+ return 0;
+ }
+
+-static void mall_destroy_filter(struct rcu_head *head)
++static void mall_destroy_rcu(struct rcu_head *rcu)
+ {
+- struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu);
++ struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
++ rcu);
+
+- tcf_exts_destroy(&f->exts);
+-
+- kfree(f);
++ tcf_exts_destroy(&head->exts);
++ kfree(head);
+ }
+
+ static int mall_replace_hw_filter(struct tcf_proto *tp,
+- struct cls_mall_filter *f,
++ struct cls_mall_head *head,
+ unsigned long cookie)
+ {
+ struct net_device *dev = tp->q->dev_queue->dev;
+@@ -74,7 +60,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
+ offload.type = TC_SETUP_MATCHALL;
+ offload.cls_mall = &mall_offload;
+ offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
+- offload.cls_mall->exts = &f->exts;
++ offload.cls_mall->exts = &head->exts;
+ offload.cls_mall->cookie = cookie;
+
+ return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
+@@ -82,7 +68,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
+ }
+
+ static void mall_destroy_hw_filter(struct tcf_proto *tp,
+- struct cls_mall_filter *f,
++ struct cls_mall_head *head,
+ unsigned long cookie)
+ {
+ struct net_device *dev = tp->q->dev_queue->dev;
+@@ -103,29 +89,20 @@ static bool mall_destroy(struct tcf_proto *tp, bool force)
+ {
+ struct cls_mall_head *head = rtnl_dereference(tp->root);
+ struct net_device *dev = tp->q->dev_queue->dev;
+- struct cls_mall_filter *f = head->filter;
+
+- if (!force && f)
+- return false;
++ if (!head)
++ return true;
+
+- if (f) {
+- if (tc_should_offload(dev, tp, f->flags))
+- mall_destroy_hw_filter(tp, f, (unsigned long) f);
++ if (tc_should_offload(dev, tp, head->flags))
++ mall_destroy_hw_filter(tp, head, (unsigned long) head);
+
+- call_rcu(&f->rcu, mall_destroy_filter);
+- }
+- kfree_rcu(head, rcu);
++ call_rcu(&head->rcu, mall_destroy_rcu);
+ return true;
+ }
+
+ static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
+ {
+- struct cls_mall_head *head = rtnl_dereference(tp->root);
+- struct cls_mall_filter *f = head->filter;
+-
+- if (f && f->handle == handle)
+- return (unsigned long) f;
+- return 0;
++ return 0UL;
+ }
+
+ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
+@@ -134,7 +111,7 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
+ };
+
+ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
+- struct cls_mall_filter *f,
++ struct cls_mall_head *head,
+ unsigned long base, struct nlattr **tb,
+ struct nlattr *est, bool ovr)
+ {
+@@ -147,11 +124,11 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
+ return err;
+
+ if (tb[TCA_MATCHALL_CLASSID]) {
+- f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
+- tcf_bind_filter(tp, &f->res, base);
++ head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
++ tcf_bind_filter(tp, &head->res, base);
+ }
+
+- tcf_exts_change(tp, &f->exts, &e);
++ tcf_exts_change(tp, &head->exts, &e);
+
+ return 0;
+ }
+@@ -162,21 +139,17 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
+ unsigned long *arg, bool ovr)
+ {
+ struct cls_mall_head *head = rtnl_dereference(tp->root);
+- struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
+ struct net_device *dev = tp->q->dev_queue->dev;
+- struct cls_mall_filter *f;
+ struct nlattr *tb[TCA_MATCHALL_MAX + 1];
++ struct cls_mall_head *new;
+ u32 flags = 0;
+ int err;
+
+ if (!tca[TCA_OPTIONS])
+ return -EINVAL;
+
+- if (head->filter)
+- return -EBUSY;
+-
+- if (fold)
+- return -EINVAL;
++ if (head)
++ return -EEXIST;
+
+ err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
+ tca[TCA_OPTIONS], mall_policy);
+@@ -189,23 +162,23 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
+ return -EINVAL;
+ }
+
+- f = kzalloc(sizeof(*f), GFP_KERNEL);
+- if (!f)
++ new = kzalloc(sizeof(*new), GFP_KERNEL);
++ if (!new)
+ return -ENOBUFS;
+
+- tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0);
++ tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
+
+ if (!handle)
+ handle = 1;
+- f->handle = handle;
+- f->flags = flags;
++ new->handle = handle;
++ new->flags = flags;
+
+- err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
++ err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
+ if (err)
+ goto errout;
+
+ if (tc_should_offload(dev, tp, flags)) {
+- err = mall_replace_hw_filter(tp, f, (unsigned long) f);
++ err = mall_replace_hw_filter(tp, new, (unsigned long) new);
+ if (err) {
+ if (tc_skip_sw(flags))
+ goto errout;
+@@ -214,39 +187,29 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
+ }
+ }
+
+- *arg = (unsigned long) f;
+- rcu_assign_pointer(head->filter, f);
+-
++ *arg = (unsigned long) head;
++ rcu_assign_pointer(tp->root, new);
++ if (head)
++ call_rcu(&head->rcu, mall_destroy_rcu);
+ return 0;
+
+ errout:
+- kfree(f);
++ kfree(new);
+ return err;
+ }
+
+ static int mall_delete(struct tcf_proto *tp, unsigned long arg)
+ {
+- struct cls_mall_head *head = rtnl_dereference(tp->root);
+- struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
+- struct net_device *dev = tp->q->dev_queue->dev;
+-
+- if (tc_should_offload(dev, tp, f->flags))
+- mall_destroy_hw_filter(tp, f, (unsigned long) f);
+-
+- RCU_INIT_POINTER(head->filter, NULL);
+- tcf_unbind_filter(tp, &f->res);
+- call_rcu(&f->rcu, mall_destroy_filter);
+- return 0;
++ return -EOPNOTSUPP;
+ }
+
+ static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+ {
+ struct cls_mall_head *head = rtnl_dereference(tp->root);
+- struct cls_mall_filter *f = head->filter;
+
+ if (arg->count < arg->skip)
+ goto skip;
+- if (arg->fn(tp, (unsigned long) f, arg) < 0)
++ if (arg->fn(tp, (unsigned long) head, arg) < 0)
+ arg->stop = 1;
+ skip:
+ arg->count++;
+@@ -255,28 +218,28 @@ static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+ static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
+ struct sk_buff *skb, struct tcmsg *t)
+ {
+- struct cls_mall_filter *f = (struct cls_mall_filter *) fh;
++ struct cls_mall_head *head = (struct cls_mall_head *) fh;
+ struct nlattr *nest;
+
+- if (!f)
++ if (!head)
+ return skb->len;
+
+- t->tcm_handle = f->handle;
++ t->tcm_handle = head->handle;
+
+ nest = nla_nest_start(skb, TCA_OPTIONS);
+ if (!nest)
+ goto nla_put_failure;
+
+- if (f->res.classid &&
+- nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid))
++ if (head->res.classid &&
++ nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
+ goto nla_put_failure;
+
+- if (tcf_exts_dump(skb, &f->exts))
++ if (tcf_exts_dump(skb, &head->exts))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest);
+
+- if (tcf_exts_dump_stats(skb, &f->exts) < 0)
++ if (tcf_exts_dump_stats(skb, &head->exts) < 0)
+ goto nla_put_failure;
+
+ return skb->len;
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 176af30..6a2532d 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -222,7 +222,8 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
+ SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
+
+ rcu_read_lock();
+- res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass);
++ res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt),
++ np->tclass);
+ rcu_read_unlock();
+ return res;
+ }
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index ca12aa3..6cbe5bd 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -7427,7 +7427,8 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+ */
+ release_sock(sk);
+ current_timeo = schedule_timeout(current_timeo);
+- BUG_ON(sk != asoc->base.sk);
++ if (sk != asoc->base.sk)
++ goto do_error;
+ lock_sock(sk);
+
+ *timeo_p = current_timeo;