author	Eric Dumazet <eric.dumazet@gmail.com>	2011-10-18 23:00:24 +0200
committer	David S. Miller <davem@davemloft.net>	2011-10-19 09:10:46 +0200
commit	9e903e085262ffbf1fc44a17ac06058aca03524a (patch)
tree	4acefc97ba38c1733474d25c0b2053b56af97db1 /drivers/net
parent	xfrm6: Don't call icmpv6_send on local error (diff)
net: add skb frag size accessors
To ease skb->truesize sanitization, it's better to be able to localize all references to skb frag sizes. Define accessors: skb_frag_size() to fetch a fragment's size, and skb_frag_size_{set|add|sub}() to manipulate it.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
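The accessors themselves are defined in include/linux/skbuff.h, so they do not appear in this drivers/net diffstat. As a minimal sketch, assuming skb_frag_t still exposes a 'size' member at this point in the tree, they are thin inline wrappers:

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	/* fetch the fragment length through one well-known helper */
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

Funnelling every read and update through these helpers is what later lets skb->truesize accounting be audited, and the field's representation changed, without touching each driver again.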
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c  6
-rw-r--r--  drivers/net/ethernet/3com/typhoon.c  6
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c  8
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.c  8
-rw-r--r--  drivers/net/ethernet/alteon/acenic.c  10
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c  2
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c  6
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c  12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c  12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c  14
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c  8
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c  6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/sge.c  10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c  12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c  26
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c  26
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c  12
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c  18
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c  4
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c  2
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c  6
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c  6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c  6
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c  2
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c  4
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c  4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  4
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  6
-rw-r--r--  drivers/net/ethernet/jme.c  4
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c  9
-rw-r--r--  drivers/net/ethernet/marvell/skge.c  8
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c  16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c  14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c  12
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c  2
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c  14
-rw-r--r--  drivers/net/ethernet/natsemi/ns83820.c  4
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c  12
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c  12
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c  18
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c  8
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c  6
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c  6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c  6
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c  6
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c  4
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c  4
-rw-r--r--  drivers/net/ethernet/sfc/rx.c  2
-rw-r--r--  drivers/net/ethernet/sfc/tx.c  8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c  4
-rw-r--r--  drivers/net/ethernet/sun/cassini.c  8
-rw-r--r--  drivers/net/ethernet/sun/niu.c  6
-rw-r--r--  drivers/net/ethernet/sun/sungem.c  4
-rw-r--r--  drivers/net/ethernet/sun/sunhme.c  4
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c  6
-rw-r--r--  drivers/net/ethernet/tile/tilepro.c  2
-rw-r--r--  drivers/net/ethernet/tundra/tsi108_eth.c  6
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c  6
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c  4
-rw-r--r--  drivers/net/virtio_net.c  8
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c  12
-rw-r--r--  drivers/net/xen-netback/netback.c  4
-rw-r--r--  drivers/net/xen-netfront.c  4
63 files changed, 251 insertions, 248 deletions
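Across the drivers below the conversion is mechanical: direct frag->size reads become skb_frag_size(frag), and direct stores or arithmetic become skb_frag_size_set/add/sub(). A condensed, hypothetical TX-mapping helper (not taken from any single driver in this diff) shows the shape the call sites converge on:

/* Hypothetical example: map every fragment of an skb for transmit,
 * reading lengths through skb_frag_size() instead of frag->size. */
static int example_map_tx_frags(struct device *dev, struct sk_buff *skb,
				dma_addr_t *addrs)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		unsigned int len = skb_frag_size(frag);	/* was: frag->size */

		addrs[i] = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addrs[i]))
			return -ENOMEM;
	}
	return 0;
}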
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 9ca45dcba755..b42c06baba89 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2182,12 +2182,12 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
cpu_to_le32(pci_map_single(
VORTEX_PCI(vp),
(void *)skb_frag_address(frag),
- frag->size, PCI_DMA_TODEVICE));
+ skb_frag_size(frag), PCI_DMA_TODEVICE));
if (i == skb_shinfo(skb)->nr_frags-1)
- vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
+ vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
else
- vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size);
+ vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag));
}
}
#else
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 11f8858c786d..20ea07508ac7 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -810,15 +810,15 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
txd->frag.addrHi = 0;
first_txd->numDesc++;
- for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *frag_addr;
txd = (struct tx_desc *) (txRing->ringBase +
txRing->lastWrite);
typhoon_inc_tx_index(&txRing->lastWrite, 1);
- len = frag->size;
+ len = skb_frag_size(frag);
frag_addr = skb_frag_address(frag);
skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
PCI_DMA_TODEVICE);
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index d6b015598569..6d9f6911000f 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1256,12 +1256,12 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
np->tx_info[entry].mapping =
pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
} else {
- skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
- status |= this_frag->size;
+ const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
+ status |= skb_frag_size(this_frag);
np->tx_info[entry].mapping =
pci_map_single(np->pci_dev,
skb_frag_address(this_frag),
- this_frag->size,
+ skb_frag_size(this_frag),
PCI_DMA_TODEVICE);
}
@@ -1378,7 +1378,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
pci_unmap_single(np->pci_dev,
np->tx_info[entry].mapping,
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_TODEVICE);
np->dirty_tx++;
entry++;
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 6715bf54f04e..442fefa4f2ca 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -198,7 +198,7 @@ static void greth_clean_rings(struct greth_private *greth)
dma_unmap_page(greth->dev,
greth_read_bd(&tx_bdp->addr),
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
greth->tx_last = NEXT_TX(greth->tx_last);
@@ -517,7 +517,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
status = GRETH_BD_EN;
if (skb->ip_summed == CHECKSUM_PARTIAL)
status |= GRETH_TXBD_CSALL;
- status |= frag->size & GRETH_BD_LEN;
+ status |= skb_frag_size(frag) & GRETH_BD_LEN;
/* Wrap around descriptor ring */
if (curr_tx == GRETH_TXBD_NUM_MASK)
@@ -531,7 +531,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
greth_write_bd(&bdp->stat, status);
- dma_addr = skb_frag_dma_map(greth->dev, frag, 0, frag->size,
+ dma_addr = skb_frag_dma_map(greth->dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
@@ -713,7 +713,7 @@ static void greth_clean_tx_gbit(struct net_device *dev)
dma_unmap_page(greth->dev,
greth_read_bd(&bdp->addr),
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
greth->tx_last = NEXT_TX(greth->tx_last);
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index b1a4e8204437..f872748ab4e6 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -2478,18 +2478,18 @@ restart:
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
struct tx_ring_info *info;
- len += frag->size;
+ len += skb_frag_size(frag);
info = ap->skb->tx_skbuff + idx;
desc = ap->tx_ring + idx;
mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0,
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
- flagsize = (frag->size << 16);
+ flagsize = skb_frag_size(frag) << 16;
if (skb->ip_summed == CHECKSUM_PARTIAL)
flagsize |= BD_FLG_TCP_UDP_SUM;
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
@@ -2508,7 +2508,7 @@ restart:
info->skb = NULL;
}
dma_unmap_addr_set(info, mapping, mapping);
- dma_unmap_len_set(info, maplen, frag->size);
+ dma_unmap_len_set(info, maplen, skb_frag_size(frag));
ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
}
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 12a0b30319db..02c7ed8d9eca 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2179,7 +2179,7 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
- buffer_info->length = frag->size;
+ buffer_info->length = skb_frag_size(frag);
buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
frag, 0,
buffer_info->length,
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 97c45a4b855a..95483bcac1d0 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1593,7 +1593,7 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
u16 proto_hdr_len = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- fg_size = skb_shinfo(skb)->frags[i].size;
+ fg_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT);
}
@@ -1744,12 +1744,12 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
u16 i;
u16 seg_num;
frag = &skb_shinfo(skb)->frags[f];
- buf_len = frag->size;
+ buf_len = skb_frag_size(frag);
seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
for (i = 0; i < seg_num; i++) {
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 7381a49fefb4..0405261efb5c 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2267,11 +2267,11 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
u16 i, nseg;
frag = &skb_shinfo(skb)->frags[f];
- buf_len = frag->size;
+ buf_len = skb_frag_size(frag);
nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
ATL1_MAX_TX_BUF_LEN;
@@ -2356,7 +2356,6 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
int count = 1;
int ret_val;
struct tx_packet_desc *ptpd;
- u16 frag_size;
u16 vlan_tag;
unsigned int nr_frags = 0;
unsigned int mss = 0;
@@ -2372,10 +2371,9 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++) {
- frag_size = skb_shinfo(skb)->frags[f].size;
- if (frag_size)
- count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) /
- ATL1_MAX_TX_BUF_LEN;
+ unsigned int f_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
+ count += (f_size + ATL1_MAX_TX_BUF_LEN - 1) /
+ ATL1_MAX_TX_BUF_LEN;
}
mss = skb_shinfo(skb)->gso_size;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 6ff7636e73a2..965c7235804d 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -2871,7 +2871,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
dma_unmap_addr(
&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
mapping),
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_TODEVICE);
}
@@ -3049,7 +3049,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
} else {
skb_frag_t *frag =
&skb_shinfo(skb)->frags[i - 1];
- frag->size -= tail;
+ skb_frag_size_sub(frag, tail);
skb->data_len -= tail;
}
return 0;
@@ -5395,7 +5395,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(tx_buf, mapping),
- skb_shinfo(skb)->frags[k].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[k]),
PCI_DMA_TODEVICE);
}
dev_kfree_skb(skb);
@@ -6530,13 +6530,13 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_buf->is_gso = skb_is_gso(skb);
for (i = 0; i < last_frag; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
prod = NEXT_TX_BD(prod);
ring_prod = TX_RING_IDX(prod);
txbd = &txr->tx_desc_ring[ring_prod];
- len = frag->size;
+ len = skb_frag_size(frag);
mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
DMA_TO_DEVICE);
if (dma_mapping_error(&bp->pdev->dev, mapping))
@@ -6594,7 +6594,7 @@ dma_error:
ring_prod = TX_RING_IDX(prod);
tx_buf = &txr->tx_buf_ring[ring_prod];
dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_TODEVICE);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e575e89c7d46..dd8ee56396b2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2363,7 +2363,7 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
/* Calculate the first sum - it's special */
for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
wnd_sum +=
- skb_shinfo(skb)->frags[frag_idx].size;
+ skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
/* If there was data on linear skb data - check it */
if (first_bd_sz > 0) {
@@ -2379,14 +2379,14 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
check all windows */
for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
wnd_sum +=
- skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
+ skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
if (unlikely(wnd_sum < lso_mss)) {
to_copy = 1;
break;
}
wnd_sum -=
- skb_shinfo(skb)->frags[wnd_idx].size;
+ skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
}
} else {
/* in non-LSO too fragmented packet should always
@@ -2796,8 +2796,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, frag->size,
- DMA_TO_DEVICE);
+ mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
@@ -2821,8 +2821,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
- tx_data_bd->nbytes = cpu_to_le16(frag->size);
- le16_add_cpu(&pkt_size, frag->size);
+ tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
+ le16_add_cpu(&pkt_size, skb_frag_size(frag));
nbd++;
DP(NETIF_MSG_TX_QUEUED,
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index fe712f955110..b89027c61937 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -5356,7 +5356,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
pci_unmap_page(tp->pdev,
dma_unmap_addr(ri, mapping),
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_TODEVICE);
while (ri->fragmented) {
@@ -6510,14 +6510,14 @@ static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
}
for (i = 0; i < last; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
entry = NEXT_TX(entry);
txb = &tnapi->tx_buffers[entry];
pci_unmap_page(tnapi->tp->pdev,
dma_unmap_addr(txb, mapping),
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
while (txb->fragmented) {
txb->fragmented = false;
@@ -6777,7 +6777,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i <= last; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
+ len = skb_frag_size(frag);
mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
len, DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 2f4ced66612a..5d7872ecff52 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -116,7 +116,7 @@ bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
for (j = 0; j < frag; j++) {
dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
- skb_shinfo(skb)->frags[j].size, DMA_TO_DEVICE);
+ skb_frag_size(&skb_shinfo(skb)->frags[j]), DMA_TO_DEVICE);
dma_unmap_addr_set(&array[index], dma_addr, 0);
BNA_QE_INDX_ADD(index, 1, depth);
}
@@ -2741,8 +2741,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
wis_used = 1;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
- u16 size = frag->size;
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ u16 size = skb_frag_size(frag);
if (unlikely(size == 0)) {
unmap_prod = unmap_q->producer_index;
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 0a511c4a0472..f9b602300040 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1135,8 +1135,8 @@ static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
len -= SGE_TX_DESC_MAX_PLEN;
}
for (i = 0; nfrags--; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ len = skb_frag_size(frag);
while (len > SGE_TX_DESC_MAX_PLEN) {
count++;
len -= SGE_TX_DESC_MAX_PLEN;
@@ -1278,9 +1278,9 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
}
mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0,
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
desc_mapping = mapping;
- desc_len = frag->size;
+ desc_len = skb_frag_size(frag);
pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
&desc_mapping, &desc_len,
@@ -1290,7 +1290,7 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
nfrags == 0);
ce->skb = NULL;
dma_unmap_addr_set(ce, dma_addr, mapping);
- dma_unmap_len_set(ce, dma_len, frag->size);
+ dma_unmap_len_set(ce, dma_len, skb_frag_size(frag));
}
ce->skb = skb;
wmb();
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 2f46b37e5d16..cfb60e1f51da 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -254,7 +254,7 @@ static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
while (frag_idx < nfrags && curflit < WR_FLITS) {
pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
- skb_shinfo(skb)->frags[frag_idx].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
PCI_DMA_TODEVICE);
j ^= 1;
if (j == 0) {
@@ -977,11 +977,11 @@ static inline unsigned int make_sgl(const struct sk_buff *skb,
nfrags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < nfrags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- mapping = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size,
+ mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
- sgp->len[j] = cpu_to_be32(frag->size);
+ sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
sgp->addr[j] = cpu_to_be64(mapping);
j ^= 1;
if (j == 0)
@@ -1544,7 +1544,7 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
si = skb_shinfo(skb);
for (i = 0; i < si->nr_frags; i++)
- pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
+ pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
PCI_DMA_TODEVICE);
}
@@ -2118,7 +2118,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
rx_frag += nr_frags;
__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
rx_frag->page_offset = sd->pg_chunk.offset + offset;
- rx_frag->size = len;
+ skb_frag_size_set(rx_frag, len);
skb->len += len;
skb->data_len += len;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 56adf448b9fe..14f31d3a18d7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -215,8 +215,8 @@ static int map_skb(struct device *dev, const struct sk_buff *skb,
end = &si->frags[si->nr_frags];
for (fp = si->frags; fp < end; fp++) {
- *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
- DMA_TO_DEVICE);
+ *++addr = dma_map_page(dev, fp->page, fp->page_offset,
+ skb_frag_size(fp), DMA_TO_DEVICE);
if (dma_mapping_error(dev, *addr))
goto unwind;
}
@@ -224,7 +224,7 @@ static int map_skb(struct device *dev, const struct sk_buff *skb,
unwind:
while (fp-- > si->frags)
- dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
+ dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
@@ -243,7 +243,7 @@ static void unmap_skb(struct device *dev, const struct sk_buff *skb,
si = skb_shinfo(skb);
end = &si->frags[si->nr_frags];
for (fp = si->frags; fp < end; fp++)
- dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE);
+ dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}
/**
@@ -717,7 +717,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
sgl->addr0 = cpu_to_be64(addr[0] + start);
nfrags++;
} else {
- sgl->len0 = htonl(si->frags[0].size);
+ sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
sgl->addr0 = cpu_to_be64(addr[1]);
}
@@ -732,13 +732,13 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
- to->len[0] = cpu_to_be32(si->frags[i].size);
- to->len[1] = cpu_to_be32(si->frags[++i].size);
+ to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
+ to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
to->addr[0] = cpu_to_be64(addr[i]);
to->addr[1] = cpu_to_be64(addr[++i]);
}
if (nfrags) {
- to->len[0] = cpu_to_be32(si->frags[i].size);
+ to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
to->len[1] = cpu_to_be32(0);
to->addr[0] = cpu_to_be64(addr[i + 1]);
}
@@ -1417,7 +1417,7 @@ static inline void copy_frags(struct skb_shared_info *ssi,
/* usually there's just one frag */
ssi->frags[0].page = gl->frags[0].page;
ssi->frags[0].page_offset = gl->frags[0].page_offset + offset;
- ssi->frags[0].size = gl->frags[0].size - offset;
+ skb_frag_size_set(&ssi->frags[0], skb_frag_size(&gl->frags[0]) - offset);
ssi->nr_frags = gl->nfrags;
n = gl->nfrags - 1;
if (n)
@@ -1718,8 +1718,8 @@ static int process_responses(struct sge_rspq *q, int budget)
bufsz = get_buf_size(rsd);
fp->page = rsd->page;
fp->page_offset = q->offset;
- fp->size = min(bufsz, len);
- len -= fp->size;
+ skb_frag_size_set(fp, min(bufsz, len));
+ len -= skb_frag_size(fp);
if (!len)
break;
unmap_rx_buf(q->adap, &rxq->fl);
@@ -1731,7 +1731,7 @@ static int process_responses(struct sge_rspq *q, int budget)
*/
dma_sync_single_for_cpu(q->adap->pdev_dev,
get_buf_addr(rsd),
- fp->size, DMA_FROM_DEVICE);
+ skb_frag_size(fp), DMA_FROM_DEVICE);
si.va = page_address(si.frags[0].page) +
si.frags[0].page_offset;
@@ -1740,7 +1740,7 @@ static int process_responses(struct sge_rspq *q, int budget)
si.nfrags = frags + 1;
ret = q->handler(q, q->cur_desc, &si);
if (likely(ret == 0))
- q->offset += ALIGN(fp->size, FL_ALIGN);
+ q->offset += ALIGN(skb_frag_size(fp), FL_ALIGN);
else
restore_rx_bufs(&si, &rxq->fl, frags);
} else if (likely(rsp_type == RSP_TYPE_CPL)) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index cffb328c46c3..c2d456d90c00 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -296,8 +296,8 @@ static int map_skb(struct device *dev, const struct sk_buff *skb,
si = skb_shinfo(skb);
end = &si->frags[si->nr_frags];
for (fp = si->frags; fp < end; fp++) {
- *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
- DMA_TO_DEVICE);
+ *++addr = dma_map_page(dev, fp->page, fp->page_offset,
+ skb_frag_size(fp), DMA_TO_DEVICE);
if (dma_mapping_error(dev, *addr))
goto unwind;
}
@@ -305,7 +305,7 @@ static int map_skb(struct device *dev, const struct sk_buff *skb,
unwind:
while (fp-- > si->frags)
- dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
+ dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
@@ -899,7 +899,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
sgl->addr0 = cpu_to_be64(addr[0] + start);
nfrags++;
} else {
- sgl->len0 = htonl(si->frags[0].size);
+ sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
sgl->addr0 = cpu_to_be64(addr[1]);
}
@@ -915,13 +915,13 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
- to->len[0] = cpu_to_be32(si->frags[i].size);
- to->len[1] = cpu_to_be32(si->frags[++i].size);
+ to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
+ to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
to->addr[0] = cpu_to_be64(addr[i]);
to->addr[1] = cpu_to_be64(addr[++i]);
}
if (nfrags) {
- to->len[0] = cpu_to_be32(si->frags[i].size);
+ to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
to->len[1] = cpu_to_be32(0);
to->addr[0] = cpu_to_be64(addr[i + 1]);
}
@@ -1399,7 +1399,7 @@ struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
ssi = skb_shinfo(skb);
ssi->frags[0].page = gl->frags[0].page;
ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
- ssi->frags[0].size = gl->frags[0].size - pull_len;
+ skb_frag_size_set(&ssi->frags[0], skb_frag_size(&gl->frags[0]) - pull_len);
if (gl->nfrags > 1)
memcpy(&ssi->frags[1], &gl->frags[1],
(gl->nfrags-1) * sizeof(skb_frag_t));
@@ -1451,7 +1451,7 @@ static inline void copy_frags(struct skb_shared_info *si,
/* usually there's just one frag */
si->frags[0].page = gl->frags[0].page;
si->frags[0].page_offset = gl->frags[0].page_offset + offset;
- si->frags[0].size = gl->frags[0].size - offset;
+ skb_frag_size_set(&si->frags[0], skb_frag_size(&gl->frags[0]) - offset);
si->nr_frags = gl->nfrags;
n = gl->nfrags - 1;
@@ -1702,8 +1702,8 @@ int process_responses(struct sge_rspq *rspq, int budget)
bufsz = get_buf_size(sdesc);
fp->page = sdesc->page;
fp->page_offset = rspq->offset;
- fp->size = min(bufsz, len);
- len -= fp->size;
+ skb_frag_size_set(fp, min(bufsz, len));
+ len -= skb_frag_size(fp);
if (!len)
break;
unmap_rx_buf(rspq->adapter, &rxq->fl);
@@ -1717,7 +1717,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
*/
dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
get_buf_addr(sdesc),
- fp->size, DMA_FROM_DEVICE);
+ skb_frag_size(fp), DMA_FROM_DEVICE);
gl.va = (page_address(gl.frags[0].page) +
gl.frags[0].page_offset);
prefetch(gl.va);
@@ -1728,7 +1728,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
*/
ret = rspq->handler(rspq, rspq->cur_desc, &gl);
if (likely(ret == 0))
- rspq->offset += ALIGN(fp->size, FL_ALIGN);
+ rspq->offset += ALIGN(skb_frag_size(fp), FL_ALIGN);
else
restore_rx_bufs(&gl, &rxq->fl, frag);
} else if (likely(rsp_type == RSP_TYPE_CPL)) {
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 1bc908f595de..c3786fda11db 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -599,16 +599,16 @@ static inline void enic_queue_wq_skb_cont(struct enic *enic,
struct vnic_wq *wq, struct sk_buff *skb,
unsigned int len_left, int loopback)
{
- skb_frag_t *frag;
+ const skb_frag_t *frag;
/* Queue additional data fragments */
for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
- len_left -= frag->size;
+ len_left -= skb_frag_size(frag);
enic_queue_wq_desc_cont(wq, skb,
skb_frag_dma_map(&enic->pdev->dev,
- frag, 0, frag->size,
+ frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE),
- frag->size,
+ skb_frag_size(frag),
(len_left == 0), /* EOP? */
loopback);
}
@@ -717,8 +717,8 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
* for additional data fragments
*/
for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
- len_left -= frag->size;
- frag_len_left = frag->size;
+ len_left -= skb_frag_size(frag);
+ frag_len_left = skb_frag_size(frag);
offset = 0;
while (frag_len_left) {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 679b8041e43a..706fc5989939 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -636,17 +636,17 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *frag =
+ const struct skb_frag_struct *frag =
&skb_shinfo(skb)->frags[i];
busaddr = skb_frag_dma_map(dev, frag, 0,
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
if (dma_mapping_error(dev, busaddr))
goto dma_err;
wrb = queue_head_node(txq);
- wrb_fill(wrb, busaddr, frag->size);
+ wrb_fill(wrb, busaddr, skb_frag_size(frag));
be_dws_cpu_to_le(wrb, sizeof(*wrb));
queue_head_inc(txq);
- copied += frag->size;
+ copied += skb_frag_size(frag);
}
if (dummy_wrb) {
@@ -1069,7 +1069,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
skb_frag_set_page(skb, 0, page_info->page);
skb_shinfo(skb)->frags[0].page_offset =
page_info->page_offset + hdr_len;
- skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
skb->data_len = curr_frag_len - hdr_len;
skb->truesize += rx_frag_size;
skb->tail += hdr_len;
@@ -1095,13 +1095,13 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
skb_frag_set_page(skb, j, page_info->page);
skb_shinfo(skb)->frags[j].page_offset =
page_info->page_offset;
- skb_shinfo(skb)->frags[j].size = 0;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
skb_shinfo(skb)->nr_frags++;
} else {
put_page(page_info->page);
}
- skb_shinfo(skb)->frags[j].size += curr_frag_len;
+ skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
skb->len += curr_frag_len;
skb->data_len += curr_frag_len;
skb->truesize += rx_frag_size;
@@ -1176,11 +1176,11 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
skb_frag_set_page(skb, j, page_info->page);
skb_shinfo(skb)->frags[j].page_offset =
page_info->page_offset;
- skb_shinfo(skb)->frags[j].size = 0;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
} else {
put_page(page_info->page);
}
- skb_shinfo(skb)->frags[j].size += curr_frag_len;
+ skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
skb->truesize += rx_frag_size;
remaining -= curr_frag_len;
index_inc(&rxcp->rxq_idx, rxq->len);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index adb462d0b8d3..0d4d4f68d4ed 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1676,7 +1676,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
/* copy sg1entry data */
sg1entry->l_key = lkey;
- sg1entry->len = frag->size;
+ sg1entry->len = skb_frag_size(frag);
sg1entry->vaddr =
ehea_map_vaddr(skb_frag_address(frag));
swqe->descriptors++;
@@ -1689,7 +1689,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
sgentry = &sg_list[i - sg1entry_contains_frag_data];
sgentry->l_key = lkey;
- sgentry->len = frag->size;
+ sgentry->len = skb_frag_size(frag);
sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
swqe->descriptors++;
}
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 6b3a033d9de5..ed79b2d3ad3e 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1453,7 +1453,7 @@ static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
/* skb fragments */
for (i = 0; i < nr_frags; ++i) {
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
+ len = skb_frag_size(frag);
if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
goto undo_frame;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 4da972eaabb4..b1cd41b9c61c 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1014,15 +1014,15 @@ retry_bounce:
/* Map the frags */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
goto map_failed_frags;
- descs[i+1].fields.flags_len = desc_flags | frag->size;
+ descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
descs[i+1].fields.address = dma_addr;
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 7b54d7246150..cf480b554622 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2894,10 +2894,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
+ len = skb_frag_size(frag);
offset = 0;
while (len) {
@@ -3183,7 +3183,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
+ count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
max_txd_pwr);
if (adapter->pcix_82544)
count += nr_frags;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 035ce73c388e..680312710a78 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4673,10 +4673,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
+ len = skb_frag_size(frag);
offset = 0;
while (len) {
@@ -4943,7 +4943,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
+ count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
max_txd_pwr);
if (adapter->hw.mac.tx_pkt_filtering)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 837adbbce772..f9b818267de8 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -4268,7 +4268,7 @@ static void igb_tx_map(struct igb_ring *tx_ring,
i = 0;
}
- size = frag->size;
+ size = skb_frag_size(frag);
data_len -= size;
dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 23cc40f22d6f..1bd9abddcc59 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2045,7 +2045,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
count++;
i++;
@@ -2053,7 +2053,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
i = 0;
frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
+ len = skb_frag_size(frag);
buffer_info = &tx_ring->buffer_info[i];
BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 88558b1aac07..e21148f8b160 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1383,10 +1383,10 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
+ len = skb_frag_size(frag);
offset = 0;
while (len) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 8075d11b4cde..09b8e88b2999 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6545,9 +6545,9 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
frag = &skb_shinfo(skb)->frags[f];
#ifdef IXGBE_FCOE
- size = min_t(unsigned int, data_len, frag->size);
+ size = min_t(unsigned int, data_len, skb_frag_size(frag));
#else
- size = frag->size;
+ size = skb_frag_size(frag);
#endif
data_len -= size;
f++;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4930c4605493..5e92cc2079bd 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2912,10 +2912,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[f];
- len = min((unsigned int)frag->size, total);
+ len = min((unsigned int)skb_frag_size(frag), total);
offset = 0;
while (len) {
@@ -3096,7 +3096,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
count += TXD_USE_COUNT(skb_headlen(skb));
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));
if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
adapter->tx_busy++;
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 48a0a23f342f..7a0c746f2749 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1920,7 +1920,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
int i, nr_frags = skb_shinfo(skb)->nr_frags;
int mask = jme->tx_ring_mask;
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
u32 len;
for (i = 0 ; i < nr_frags ; ++i) {
@@ -1930,7 +1930,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
skb_frag_page(frag),
- frag->page_offset, frag->size, hidma);
+ frag->page_offset, skb_frag_size(frag), hidma);
}
len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index f6821aa5ffbf..194a03113802 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -713,8 +713,9 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
int frag;
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
- skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
- if (fragp->size <= 8 && fragp->page_offset & 7)
+ const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
+
+ if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7)
return 1;
}
@@ -751,10 +752,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
}
desc->l4i_chk = 0;
- desc->byte_cnt = this_frag->size;
+ desc->byte_cnt = skb_frag_size(this_frag);
desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
this_frag, 0,
- this_frag->size,
+ skb_frag_size(this_frag),
DMA_TO_DEVICE);
}
}
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 297730359b79..c7b60839ac99 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2770,10 +2770,10 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
control |= BMU_STFWD;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
e = e->next;
e->skb = skb;
@@ -2783,9 +2783,9 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
tf->dma_lo = map;
tf->dma_hi = (u64) map >> 32;
dma_unmap_addr_set(e, mapaddr, map);
- dma_unmap_len_set(e, maplen, frag->size);
+ dma_unmap_len_set(e, maplen, skb_frag_size(frag));
- tf->control = BMU_OWN | BMU_SW | control | frag->size;
+ tf->control = BMU_OWN | BMU_SW | control | skb_frag_size(frag);
}
tf->control |= BMU_EOF | BMU_IRQ_EOF;
}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 92634907bf8d..7b083c438a14 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1225,10 +1225,10 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
dma_unmap_len_set(re, data_size, size);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0,
- frag->size,
+ skb_frag_size(frag),
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, re->frag_addr[i]))
@@ -1239,7 +1239,7 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
map_page_error:
while (--i >= 0) {
pci_unmap_page(pdev, re->frag_addr[i],
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_FROMDEVICE);
}
@@ -1263,7 +1263,7 @@ static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
pci_unmap_page(pdev, re->frag_addr[i],
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_FROMDEVICE);
}
@@ -1936,7 +1936,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
if (dma_mapping_error(&hw->pdev->dev, mapping))
goto mapping_unwind;
@@ -1952,11 +1952,11 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
re = sky2->tx_ring + slot;
re->flags = TX_MAP_PAGE;
dma_unmap_addr_set(re, mapaddr, mapping);
- dma_unmap_len_set(re, maplen, frag->size);
+ dma_unmap_len_set(re, maplen, skb_frag_size(frag));
le = get_tx_le(sky2, &slot);
le->addr = cpu_to_le32(lower_32_bits(mapping));
- le->length = cpu_to_le16(frag->size);
+ le->length = cpu_to_le16(skb_frag_size(frag));
le->ctrl = ctrl;
le->opcode = OP_BUFFER | HW_OWNER;
}
@@ -2484,7 +2484,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
} else {
size = min(length, (unsigned) PAGE_SIZE);
- frag->size = size;
+ skb_frag_size_set(frag, size);
skb->data_len += size;
skb->truesize += PAGE_SIZE;
skb->len += size;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 37cc9e5c56be..46a0df9afc3c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -135,7 +135,7 @@ static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
/* Set size and memtype fields */
for (i = 0; i < priv->num_frags; i++) {
- skb_frags[i].size = priv->frag_info[i].frag_size;
+ skb_frag_size_set(&skb_frags[i], priv->frag_info[i].frag_size);
rx_desc->data[i].byte_count =
cpu_to_be32(priv->frag_info[i].frag_size);
rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
@@ -194,7 +194,7 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
dma = be64_to_cpu(rx_desc->data[nr].addr);
en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
- pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+ pci_unmap_single(mdev->pdev, dma, skb_frag_size(&skb_frags[nr]),
PCI_DMA_FROMDEVICE);
put_page(skb_frags[nr].page);
}
@@ -421,7 +421,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
/* Save page reference in skb */
skb_frags_rx[nr].page = skb_frags[nr].page;
- skb_frags_rx[nr].size = skb_frags[nr].size;
+ skb_frag_size_set(&skb_frags_rx[nr], skb_frag_size(&skb_frags[nr]));
skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
dma = be64_to_cpu(rx_desc->data[nr].addr);
@@ -430,13 +430,13 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
goto fail;
/* Unmap buffer */
- pci_unmap_single(mdev->pdev, dma, skb_frags_rx[nr].size,
+ pci_unmap_single(mdev->pdev, dma, skb_frag_size(&skb_frags_rx[nr]),
PCI_DMA_FROMDEVICE);
}
/* Adjust size of last fragment to match actual length */
if (nr > 0)
- skb_frags_rx[nr - 1].size = length -
- priv->frag_info[nr - 1].frag_prefix_size;
+ skb_frag_size_set(&skb_frags_rx[nr - 1],
+ length - priv->frag_info[nr - 1].frag_prefix_size);
return nr;
fail:
@@ -506,7 +506,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;
/* Adjust size of first fragment */
- skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
+ skb_frag_size_sub(&skb_shinfo(skb)->frags[0], HEADER_COPY_SIZE);
skb->data_len = length - HEADER_COPY_SIZE;
}
return skb;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 6e03de034ac7..2a192c2f207d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -226,7 +226,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
frag = &skb_shinfo(skb)->frags[i];
pci_unmap_page(mdev->pdev,
(dma_addr_t) be64_to_cpu(data[i].addr),
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
}
}
/* Stamp the freed descriptor */
@@ -256,7 +256,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
frag = &skb_shinfo(skb)->frags[i];
pci_unmap_page(mdev->pdev,
(dma_addr_t) be64_to_cpu(data->addr),
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
++data;
}
}
@@ -550,7 +550,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
if (skb_shinfo(skb)->nr_frags)
memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
- skb_shinfo(skb)->frags[0].size);
+ skb_frag_size(&skb_shinfo(skb)->frags[0]));
} else {
inl->byte_count = cpu_to_be32(1 << 31 | spc);
@@ -570,7 +570,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
skb_headlen(skb) - spc);
if (skb_shinfo(skb)->nr_frags)
memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
- fragptr, skb_shinfo(skb)->frags[0].size);
+ fragptr, skb_frag_size(&skb_shinfo(skb)->frags[0]));
}
wmb();
@@ -757,11 +757,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
frag = &skb_shinfo(skb)->frags[i];
dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
data->addr = cpu_to_be64(dma);
data->lkey = cpu_to_be32(mdev->mr.key);
wmb();
- data->byte_count = cpu_to_be32(frag->size);
+ data->byte_count = cpu_to_be32(skb_frag_size(frag));
--data;
}
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 710c4aead146..7ece990381c8 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4700,7 +4700,7 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev)
++hw->tx_int_cnt;
dma_buf = DMA_BUFFER(desc);
- dma_buf->len = this_frag->size;
+ dma_buf->len = skb_frag_size(this_frag);
dma_buf->dma = pci_map_single(
hw_priv->pdev,
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 26637279cd67..c970a48436dc 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1216,7 +1216,7 @@ myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
skb_frags = skb_shinfo(skb)->frags;
while (len > 0) {
memcpy(skb_frags, rx_frags, sizeof(*skb_frags));
- len -= rx_frags->size;
+ len -= skb_frag_size(rx_frags);
skb_frags++;
rx_frags++;
skb_shinfo(skb)->nr_frags++;
@@ -1228,7 +1228,7 @@ myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
* manually */
skb_copy_to_linear_data(skb, va, hlen);
skb_shinfo(skb)->frags[0].page_offset += hlen;
- skb_shinfo(skb)->frags[0].size -= hlen;
+ skb_frag_size_sub(&skb_shinfo(skb)->frags[0], hlen);
skb->data_len -= hlen;
skb->tail += hlen;
skb_pull(skb, MXGEFW_PAD);
@@ -1345,9 +1345,9 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
__skb_frag_set_page(&rx_frags[i], rx->info[idx].page);
rx_frags[i].page_offset = rx->info[idx].page_offset;
if (remainder < MYRI10GE_ALLOC_SIZE)
- rx_frags[i].size = remainder;
+ skb_frag_size_set(&rx_frags[i], remainder);
else
- rx_frags[i].size = MYRI10GE_ALLOC_SIZE;
+ skb_frag_size_set(&rx_frags[i], MYRI10GE_ALLOC_SIZE);
rx->cnt++;
idx = rx->cnt & rx->mask;
remainder -= MYRI10GE_ALLOC_SIZE;
@@ -1355,7 +1355,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
if (lro_enabled) {
rx_frags[0].page_offset += MXGEFW_PAD;
- rx_frags[0].size -= MXGEFW_PAD;
+ skb_frag_size_sub(&rx_frags[0], MXGEFW_PAD);
len -= MXGEFW_PAD;
lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags,
/* opaque, will come back in get_frag_header */
@@ -1382,7 +1382,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
/* Attach the pages to the skb, and trim off any padding */
myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen);
- if (skb_shinfo(skb)->frags[0].size <= 0) {
+ if (skb_frag_size(&skb_shinfo(skb)->frags[0]) <= 0) {
skb_frag_unref(skb, 0);
skb_shinfo(skb)->nr_frags = 0;
}
@@ -2926,7 +2926,7 @@ again:
idx = (count + tx->req) & tx->mask;
frag = &skb_shinfo(skb)->frags[frag_idx];
frag_idx++;
- len = frag->size;
+ len = skb_frag_size(frag);
bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len,
DMA_TO_DEVICE);
dma_unmap_addr_set(&tx->info[idx], bus, bus);
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 73616b911327..2b8f64ddfb55 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1161,11 +1161,11 @@ again:
break;
buf = skb_frag_dma_map(&dev->pci_dev->dev, frag, 0,
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
dprintk("frag: buf=%08Lx page=%08lx offset=%08lx\n",
(long long)buf, (long) page_to_pfn(frag->page),
frag->page_offset);
- len = frag->size;
+ len = skb_frag_size(frag);
frag++;
nr_frags--;
}
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index bdd3e6a330cd..c27fb3dda9f4 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2350,12 +2350,12 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
if (frg_cnt) {
txds++;
for (j = 0; j < frg_cnt; j++, txds++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
if (!txds->Buffer_Pointer)
break;
pci_unmap_page(nic->pdev,
(dma_addr_t)txds->Buffer_Pointer,
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
}
}
memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
@@ -4185,16 +4185,16 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
frg_cnt = skb_shinfo(skb)->nr_frags;
/* For fragmented SKB. */
for (i = 0; i < frg_cnt; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
/* A '0' length fragment will be ignored */
- if (!frag->size)
+ if (!skb_frag_size(frag))
continue;
txdp++;
txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
frag, 0,
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
- txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
+ txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
if (offload_type == SKB_GSO_UDP)
txdp->Control_1 |= TXD_UFO_EN;
}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index a66f8fc0401e..671e166b5af1 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -585,7 +585,7 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
for (j = 0; j < frg_cnt; j++) {
pci_unmap_page(fifo->pdev,
txd_priv->dma_buffers[i++],
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
frag += 1;
}
@@ -920,11 +920,11 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
frag = &skb_shinfo(skb)->frags[0];
for (i = 0; i < frg_cnt; i++) {
/* ignore 0 length fragment */
- if (!frag->size)
+ if (!skb_frag_size(frag))
continue;
dma_pointer = (u64)skb_frag_dma_map(&fifo->pdev->dev, frag,
- 0, frag->size,
+ 0, skb_frag_size(frag),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer)))
@@ -936,7 +936,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
txdl_priv->dma_buffers[j] = dma_pointer;
vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
- frag->size);
+ skb_frag_size(frag));
frag += 1;
}
@@ -979,7 +979,7 @@ _exit1:
for (; j < i; j++) {
pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
frag += 1;
}
@@ -1050,7 +1050,7 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
for (j = 0; j < frg_cnt; j++) {
pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
frag += 1;
}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index d7763ab841d8..1e37eb98c4e2 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2099,8 +2099,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
- entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
- ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+ u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ entries += (size >> NV_TX2_TSO_MAX_SHIFT) +
+ ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
}
spin_lock_irqsave(&np->lock, flags);
@@ -2138,8 +2140,8 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* setup the fragments */
for (i = 0; i < fragments; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- u32 size = frag->size;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 size = skb_frag_size(frag);
offset = 0;
do {
@@ -2211,8 +2213,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
- entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
- ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+ u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ entries += (size >> NV_TX2_TSO_MAX_SHIFT) +
+ ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
}
spin_lock_irqsave(&np->lock, flags);
@@ -2253,7 +2257,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
/* setup the fragments */
for (i = 0; i < fragments; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- u32 size = frag->size;
+ u32 size = skb_frag_size(frag);
offset = 0;
do {
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index c6f005684677..49b549ff2c78 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -300,9 +300,9 @@ static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE);
for (f = 0; f < nfrags; f++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
- pci_unmap_page(pdev, dmas[f+1], frag->size, PCI_DMA_TODEVICE);
+ pci_unmap_page(pdev, dmas[f+1], skb_frag_size(frag), PCI_DMA_TODEVICE);
}
dev_kfree_skb_irq(skb);
@@ -1506,8 +1506,8 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
map[i + 1] = skb_frag_dma_map(&mac->dma_pdev->dev, frag, 0,
- frag->size, DMA_TO_DEVICE);
- map_size[i+1] = frag->size;
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ map_size[i+1] = skb_frag_size(frag);
if (dma_mapping_error(&mac->dma_pdev->dev, map[i + 1])) {
nfrags = i;
goto out_err_nolock;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index e2ba78be1c2a..8cf3173ba488 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1905,13 +1905,13 @@ netxen_map_tx_skb(struct pci_dev *pdev,
frag = &skb_shinfo(skb)->frags[i];
nf = &pbuf->frag_array[i+1];
- map = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size,
+ map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, map))
goto unwind;
nf->dma = map;
- nf->length = frag->size;
+ nf->length = skb_frag_size(frag);
}
return 0;
@@ -1962,7 +1962,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
frag = &skb_shinfo(skb)->frags[i];
- delta += frag->size;
+ delta += skb_frag_size(frag);
}
if (!__pskb_pull_tail(skb, delta))
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 46f9b6499f9b..a4bdff438a5e 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2388,7 +2388,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
seg++;
}
- map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, frag->size,
+ map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
err = dma_mapping_error(&qdev->pdev->dev, map);
@@ -2401,9 +2401,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
- oal_entry->len = cpu_to_le32(frag->size);
+ oal_entry->len = cpu_to_le32(skb_frag_size(frag));
dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
- dma_unmap_len_set(&tx_cb->map[seg], maplen, frag->size);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag));
}
/* Terminate the last segment. */
oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index eac19e7d2761..106503f118f6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2135,13 +2135,13 @@ qlcnic_map_tx_skb(struct pci_dev *pdev,
frag = &skb_shinfo(skb)->frags[i];
nf = &pbuf->frag_array[i+1];
- map = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size,
+ map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, map))
goto unwind;
nf->dma = map;
- nf->length = frag->size;
+ nf->length = skb_frag_size(frag);
}
return 0;
@@ -2221,7 +2221,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
- delta += skb_shinfo(skb)->frags[i].size;
+ delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (!__pskb_pull_tail(skb, delta))
goto drop_packet;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index f2d9bb78ec7f..c92afcd912e2 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1431,7 +1431,7 @@ static int ql_map_send(struct ql_adapter *qdev,
map_idx++;
}
- map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, frag->size,
+ map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
err = dma_mapping_error(&qdev->pdev->dev, map);
@@ -1443,10 +1443,10 @@ static int ql_map_send(struct ql_adapter *qdev,
}
tbd->addr = cpu_to_le64(map);
- tbd->len = cpu_to_le32(frag->size);
+ tbd->len = cpu_to_le32(skb_frag_size(frag));
dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
- frag->size);
+ skb_frag_size(frag));
}
/* Save the number of segments we've mapped. */
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 5dcd5be03f31..ee5da9293ce0 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -777,12 +777,12 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
entry = NEXT_TX(entry);
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
- skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+ const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
u32 len;
u32 ctrl;
dma_addr_t mapping;
- len = this_frag->size;
+ len = skb_frag_size(this_frag);
mapping = dma_map_single(&cp->pdev->dev,
skb_frag_address(this_frag),
len, PCI_DMA_TODEVICE);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 2ce60709a455..aa39e771175c 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5413,7 +5413,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
entry = tp->cur_tx;
for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
- skb_frag_t *frag = info->frags + cur_frag;
+ const skb_frag_t *frag = info->frags + cur_frag;
dma_addr_t mapping;
u32 status, len;
void *addr;
@@ -5421,7 +5421,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
entry = (entry + 1) % NUM_TX_DESC;
txd = tp->TxDescArray + entry;
- len = frag->size;
+ len = skb_frag_size(frag);
addr = skb_frag_address(frag);
mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(d, mapping))) {
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 91a6b7123539..adbda182f159 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -481,7 +481,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
skb_frag_set_page(skb, 0, page);
skb_shinfo(skb)->frags[0].page_offset =
efx_rx_buf_offset(efx, rx_buf);
- skb_shinfo(skb)->frags[0].size = rx_buf->len;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx_buf->len);
skb_shinfo(skb)->nr_frags = 1;
skb->len = rx_buf->len;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 3964a62dde8b..df88c5430f95 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -238,7 +238,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
if (i >= skb_shinfo(skb)->nr_frags)
break;
fragment = &skb_shinfo(skb)->frags[i];
- len = fragment->size;
+ len = skb_frag_size(fragment);
i++;
/* Map for DMA */
unmap_single = false;
@@ -926,11 +926,11 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
skb_frag_t *frag)
{
st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
st->unmap_single = false;
- st->unmap_len = frag->size;
- st->in_len = frag->size;
+ st->unmap_len = skb_frag_size(frag);
+ st->in_len = skb_frag_size(frag);
st->dma_addr = st->unmap_addr;
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c0ee6b6b0198..87a6b2e59e04 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1106,8 +1106,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
}
for (i = 0; i < nfrags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- int len = frag->size;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ int len = skb_frag_size(frag);
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index d9460d81a137..fd40988c19a6 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -2051,7 +2051,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
__skb_frag_set_page(frag, page->buffer);
__skb_frag_ref(frag);
frag->page_offset = off;
- frag->size = hlen - swivel;
+ skb_frag_size_set(frag, hlen - swivel);
/* any more data? */
if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
@@ -2075,7 +2075,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
__skb_frag_set_page(frag, page->buffer);
__skb_frag_ref(frag);
frag->page_offset = 0;
- frag->size = hlen;
+ skb_frag_size_set(frag, hlen);
RX_USED_ADD(page, hlen + cp->crc_size);
}
@@ -2826,9 +2826,9 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
entry = TX_DESC_NEXT(ring, entry);
for (frag = 0; frag < nr_frags; frag++) {
- skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
+ const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
- len = fragp->size;
+ len = skb_frag_size(fragp);
mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 23740e848ac9..73c708107a37 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3594,7 +3594,7 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
tb = &rp->tx_buffs[idx];
BUG_ON(tb->skb != NULL);
np->ops->unmap_page(np->device, tb->mapping,
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
DMA_TO_DEVICE);
idx = NEXT_TX(rp, idx);
}
@@ -6727,9 +6727,9 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
+ len = skb_frag_size(frag);
mapping = np->ops->map_page(np->device, skb_frag_page(frag),
frag->page_offset, len,
DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 6b62a73227c2..ceab215bb4a3 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -1065,12 +1065,12 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
entry = NEXT_TX(entry);
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
- skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+ const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
u32 len;
dma_addr_t mapping;
u64 this_ctrl;
- len = this_frag->size;
+ len = skb_frag_size(this_frag);
mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag,
0, len, DMA_TO_DEVICE);
this_ctrl = ctrl;
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 869d47be54b4..c517dac02ae1 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2305,10 +2305,10 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
entry = NEXT_TX(entry);
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
- skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+ const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
u32 len, mapping, this_txflags;
- len = this_frag->size;
+ len = skb_frag_size(this_frag);
mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
0, len, DMA_TO_DEVICE);
this_txflags = tx_flags;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index c77e3bf4750a..3a90af6d111c 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1493,12 +1493,12 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
bdx_tx_db_inc_wptr(db);
for (i = 0; i < nr_frags; i++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[i];
- db->wptr->len = frag->size;
+ db->wptr->len = skb_frag_size(frag);
db->wptr->addr.dma = skb_frag_dma_map(&priv->pdev->dev, frag,
- 0, frag->size,
+ 0, skb_frag_size(frag),
DMA_TO_DEVICE);
pbl++;
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 1e2af96fc29c..78e3fb226cce 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -1713,7 +1713,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
frags[n].cpa_lo = cpa;
frags[n].cpa_hi = cpa >> 32;
- frags[n].length = f->size;
+ frags[n].length = skb_frag_size(f);
frags[n].hash_for_home = hash_for_home;
n++;
}
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index a03996cf88ed..a8df7eca0956 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -709,13 +709,13 @@ static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
data->txring[tx].len = skb_headlen(skb);
misc |= TSI108_TX_SOF;
} else {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
data->txring[tx].buf0 = skb_frag_dma_map(NULL, frag,
0,
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
- data->txring[tx].len = frag->size;
+ data->txring[tx].len = skb_frag_size(frag);
}
if (i == frags - 1)
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index b47bce1a2e2a..4535d7cc848e 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2554,16 +2554,16 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
/* Handle fragments */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev,
frag, 0,
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
td_ptr->td_buf[i + 1].pa_high = 0;
- td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
+ td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag));
}
tdinfo->nskb_dma = i + 1;
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 66e3c36c3733..85ba4d9ac170 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -716,8 +716,8 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
cur_p->phys = dma_map_single(ndev->dev.parent,
skb_frag_address(frag),
- frag->size, DMA_TO_DEVICE);
- cur_p->len = frag->size;
+                                            skb_frag_size(frag), DMA_TO_DEVICE);
+               cur_p->len = skb_frag_size(frag);
cur_p->app0 = 0;
frag++;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b8225f3b31d1..0d4841bed0f9 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -147,14 +147,14 @@ static void set_skb_frag(struct sk_buff *skb, struct page *page,
skb_frag_t *f;
f = &skb_shinfo(skb)->frags[i];
- f->size = min((unsigned)PAGE_SIZE - offset, *len);
+ skb_frag_size_set(f, min((unsigned)PAGE_SIZE - offset, *len));
f->page_offset = offset;
__skb_frag_set_page(f, page);
- skb->data_len += f->size;
- skb->len += f->size;
+ skb->data_len += skb_frag_size(f);
+ skb->len += skb_frag_size(f);
skb_shinfo(skb)->nr_frags++;
- *len -= f->size;
+ *len -= skb_frag_size(f);
}
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 902f284fd054..b771ebac0f01 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -656,8 +656,8 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
__skb_frag_set_page(frag, rbi->page);
frag->page_offset = 0;
- frag->size = rcd->len;
- skb->data_len += frag->size;
+ skb_frag_size_set(frag, rcd->len);
+ skb->data_len += rcd->len;
skb->truesize += PAGE_SIZE;
skb_shinfo(skb)->nr_frags++;
}
@@ -745,21 +745,21 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_PAGE;
tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
- 0, frag->size,
+ 0, skb_frag_size(frag),
DMA_TO_DEVICE);
- tbi->len = frag->size;
+ tbi->len = skb_frag_size(frag);
gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
- gdesc->dword[2] = cpu_to_le32(dw2 | frag->size);
+ gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag));
gdesc->dword[3] = 0;
dev_dbg(&adapter->netdev->dev,
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 8d70b44fcd8a..d5508957200e 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -334,7 +334,7 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
count++;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- unsigned long size = skb_shinfo(skb)->frags[i].size;
+ unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
unsigned long bytes;
while (size > 0) {
BUG_ON(copy_off > MAX_BUFFER_OFFSET);
@@ -526,7 +526,7 @@ static int netbk_gop_skb(struct sk_buff *skb,
for (i = 0; i < nr_frags; i++) {
netbk_gop_frag_copy(vif, skb, npo,
skb_frag_page(&skb_shinfo(skb)->frags[i]),
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb_shinfo(skb)->frags[i].page_offset,
&head);
}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 6e5d4c09e5d7..226faab23603 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -467,7 +467,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
tx->gref = np->grant_tx_ref[id] = ref;
tx->offset = frag->page_offset;
- tx->size = frag->size;
+ tx->size = skb_frag_size(frag);
tx->flags = 0;
}
@@ -965,7 +965,7 @@ err:
if (rx->status > len) {
skb_shinfo(skb)->frags[0].page_offset =
rx->offset + len;
- skb_shinfo(skb)->frags[0].size = rx->status - len;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len);
skb->data_len = rx->status - len;
} else {
__skb_fill_page_desc(skb, 0, NULL, 0, 0);