diff --git a/modules/linux/vmxnet/vmxnet.c b/modules/linux/vmxnet/vmxnet.c
index a6f5740..3c75bb2 100644
--- a/modules/linux/vmxnet/vmxnet.c
+++ b/modules/linux/vmxnet/vmxnet.c
@@ -989,7 +989,7 @@ vmxnet_probe_device(struct pci_dev *pdev, // IN: vmxnet PCI device
.ndo_start_xmit = &vmxnet_start_tx,
.ndo_stop = &vmxnet_close,
.ndo_get_stats = &vmxnet_get_stats,
- .ndo_set_multicast_list = &vmxnet_set_multicast_list,
+ .ndo_set_rx_mode = &vmxnet_set_multicast_list,
.ndo_change_mtu = &vmxnet_change_mtu,
# ifdef VMW_HAVE_POLL_CONTROLLER
.ndo_poll_controller = vmxnet_netpoll,
@@ -2033,21 +2033,23 @@ vmxnet_map_pkt(struct sk_buff *skb,
offset -= skb_headlen(skb);
for ( ; nextFrag < skb_shinfo(skb)->nr_frags; nextFrag++){
+ int fragSize;
frag = &skb_shinfo(skb)->frags[nextFrag];
+ fragSize = skb_frag_size(frag);
// skip those frags that are completely copied
- if (offset >= frag->size){
- offset -= frag->size;
+ if (offset >= fragSize){
+ offset -= fragSize;
} else {
// map the part of the frag that is not copied
dma = pci_map_page(lp->pdev,
- frag->page,
+ frag->page.p,
frag->page_offset + offset,
- frag->size - offset,
+ fragSize - offset,
PCI_DMA_TODEVICE);
- VMXNET_FILL_SG(xre->sg.sg[nextSg], dma, frag->size - offset);
+ VMXNET_FILL_SG(xre->sg.sg[nextSg], dma, fragSize - offset);
VMXNET_LOG("vmxnet_map_tx: txRing[%u].sg[%d] -> frag[%d]+%u (%uB)\n",
- dd->txDriverNext, nextSg, nextFrag, offset, frag->size - offset);
+ dd->txDriverNext, nextSg, nextFrag, offset, fragSize - offset);
nextSg++;
nextFrag++;
@@ -2058,11 +2060,14 @@ vmxnet_map_pkt(struct sk_buff *skb,
// map the remaining frags, we might need to use additional tx entries
for ( ; nextFrag < skb_shinfo(skb)->nr_frags; nextFrag++) {
+ int fragSize;
frag = &skb_shinfo(skb)->frags[nextFrag];
+ fragSize = skb_frag_size(frag);
+
dma = pci_map_page(lp->pdev,
- frag->page,
+ frag->page.p,
frag->page_offset,
- frag->size,
+ fragSize,
PCI_DMA_TODEVICE);
if (nextSg == VMXNET2_SG_DEFAULT_LENGTH) {
@@ -2091,9 +2096,9 @@ vmxnet_map_pkt(struct sk_buff *skb,
nextSg = 0;
}
- VMXNET_FILL_SG(xre->sg.sg[nextSg], dma, frag->size);
+ VMXNET_FILL_SG(xre->sg.sg[nextSg], dma, fragSize);
VMXNET_LOG("vmxnet_map_tx: txRing[%u].sg[%d] -> frag[%d] (%uB)\n",
- dd->txDriverNext, nextSg, nextFrag, frag->size);
+ dd->txDriverNext, nextSg, nextFrag, fragSize);
nextSg++;
}
@@ -2548,7 +2553,7 @@ vmxnet_rx_frags(Vmxnet_Private *lp, struct sk_buff *skb)
if (UNLIKELY(newPage == NULL)) {
skb_shinfo(skb)->nr_frags = numFrags;
skb->len += skb->data_len;
- skb->truesize += skb->data_len;
+ skb->truesize += PAGE_SIZE;
compat_dev_kfree_skb(skb, FREE_WRITE);
@@ -2558,10 +2563,12 @@ vmxnet_rx_frags(Vmxnet_Private *lp, struct sk_buff *skb)
}
pci_unmap_page(pdev, rre2->paddr, PAGE_SIZE, PCI_DMA_FROMDEVICE);
- skb_shinfo(skb)->frags[numFrags].page = lp->rxPages[dd->rxDriverNext2];
+ __skb_frag_set_page(&skb_shinfo(skb)->frags[numFrags],
+ lp->rxPages[dd->rxDriverNext2]);
skb_shinfo(skb)->frags[numFrags].page_offset = 0;
skb_shinfo(skb)->frags[numFrags].size = rre2->actualLength;
skb->data_len += rre2->actualLength;
+ skb->truesize += PAGE_SIZE;
numFrags++;
/* refill the buffer */
@@ -2579,7 +2586,7 @@ vmxnet_rx_frags(Vmxnet_Private *lp, struct sk_buff *skb)
VMXNET_ASSERT(numFrags > 0);
skb_shinfo(skb)->nr_frags = numFrags;
skb->len += skb->data_len;
- skb->truesize += skb->data_len;
+ skb->truesize += PAGE_SIZE;
VMXNET_LOG("vmxnet_rx: %dB from rxRing[%d](%dB)+rxRing2[%d, %d)(%dB)\n",
skb->len, dd->rxDriverNext, skb_headlen(skb),
firstFrag, dd->rxDriverNext2, skb->data_len);
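
For reference, the patch above adapts the vmxnet driver to the networking API changes that landed around Linux 3.2: ndo_set_multicast_list was replaced by ndo_set_rx_mode, skb_frag_t stopped exposing page and size as plain fields (hence skb_frag_size() and frag->page.p), and the receive path now charges a full PAGE_SIZE per fragment to skb->truesize instead of only data_len. If the module also had to keep building against older kernels, version-guarded accessors are one way to avoid scattering #if checks through vmxnet_map_pkt(); the helpers below are only a sketch with hypothetical names (vmxnet_frag_size, vmxnet_frag_page) and are not part of the patch itself.

#include <linux/version.h>
#include <linux/skbuff.h>

/* Hypothetical compat helpers (sketch only, not part of the patch above). */
static inline unsigned int vmxnet_frag_size(const skb_frag_t *frag)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
   return skb_frag_size(frag);     /* accessor introduced in 3.2 */
#else
   return frag->size;              /* plain field on older kernels */
#endif
}

static inline struct page *vmxnet_frag_page(const skb_frag_t *frag)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
   return skb_frag_page(frag);     /* wraps frag->page.p on 3.2+ */
#else
   return frag->page;
#endif
}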