Commit 5b057c6b authored by Herbert Xu, committed by David S. Miller

[NET]: Avoid allocating skb in skb_pad


First of all it is unnecessary to allocate a new skb in skb_pad since
the existing one is not shared.  More importantly, our hard_start_xmit
interface does not allow a new skb to be allocated since that breaks
requeueing.

This patch uses pskb_expand_head to expand the existing skb and linearize
it if needed.  Actually, someone should sift through every instance of
skb_pad on a non-linear skb as they do not fit the reasons why this was
originally created.

Incidentally, this fixes a minor bug when the skb is cloned (tcpdump,
TCP, etc.).  As it is, skb_pad will simply write over a cloned skb.  Because
of the position of the write it is unlikely to cause problems, but it's still
best if we don't do it.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5fa21d82
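
The hunks shown on this page cover only the driver call sites; the core helper changes are not included here. For orientation, a minimal sketch of what the commit message describes for skb_pad/skb_padto might look like the following. This is an approximation written against the current skbuff helper signatures, not the verbatim committed code; the exact tailroom arithmetic, likely()/unlikely() annotations, and comments differ in the real patch.

#include <linux/skbuff.h>
#include <linux/string.h>

/*
 * Sketch only: skb_pad() fixes up the skb it is given instead of
 * allocating a replacement.  Returns 0 on success; on failure the skb
 * is freed and a negative errno is returned, so the caller must treat
 * the packet as gone.
 */
int skb_pad(struct sk_buff *skb, int pad)
{
	int ntail, err;

	/* Writable skb with enough tailroom: just zero the padding. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	/* Cloned or short on tailroom: reallocate the head in place. */
	ntail = skb->data_len + pad - skb_tailroom(skb);
	if (ntail < 0)
		ntail = 0;
	err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
	if (err)
		goto free_skb;

	/* Pull any paged data into the linear area before padding. */
	err = skb_linearize(skb);
	if (err)
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}

/* Callers now check an error code instead of reassigning the skb. */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;

	if (size >= len)
		return 0;
	return skb_pad(skb, len - size);
}

On failure the skb has already been freed, which is why every call site below changes from "skb = skb_padto(...); if (skb == NULL)" to "if (skb_padto(...))" and simply drops the packet and returns.
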
@@ -1031,8 +1031,7 @@ static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
 		return 1;
 	}
-	skb = skb_padto(skb, ETH_ZLEN);
-	if (skb == NULL) {
+	if (skb_padto(skb, ETH_ZLEN)) {
 		netif_wake_queue(dev);
 		return 0;
 	}

@@ -1070,8 +1070,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		skb->len, (unsigned int)skb->data));
 	if (skb->len < ETH_ZLEN) {
-		skb = skb_padto(skb, ETH_ZLEN);
-		if (skb == NULL)
+		if (skb_padto(skb, ETH_ZLEN))
 			return 0;
 		length = ETH_ZLEN;
 	}

@@ -573,8 +573,7 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
 	if (len < ETH_ZLEN) {
 		len = ETH_ZLEN;
-		skb = skb_padto(skb, ETH_ZLEN);
-		if (skb == NULL)
+		if (skb_padto(skb, ETH_ZLEN))
 			return 0;
 	}

@@ -607,8 +607,7 @@ static int ariadne_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* FIXME: is the 79C960 new enough to do its own padding right ? */
 	if (skb->len < ETH_ZLEN)
 	{
-		skb = skb_padto(skb, ETH_ZLEN);
-		if (skb == NULL)
+		if (skb_padto(skb, ETH_ZLEN))
 			return 0;
 		len = ETH_ZLEN;
 	}

@@ -700,8 +700,7 @@ ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
 	}
 	if (skb->len < ETH_ZLEN) {
-		skb = skb_padto(skb, ETH_ZLEN);
-		if (skb == NULL)
+		if (skb_padto(skb, ETH_ZLEN))
 			goto out;
 	}

@@ -518,8 +518,7 @@ ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
 	length = (length + 1) & ~1;
 	if (length != skb->len) {
-		skb = skb_padto(skb, length);
-		if (skb == NULL)
+		if (skb_padto(skb, length))
 			goto out;
 	}

@@ -804,8 +804,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
 	++len;
 	if (len > skb->len) {
-		skb = skb_padto(skb, len);
-		if (skb == NULL)
+		if (skb_padto(skb, len))
 			return 0;
 	}

@@ -2915,8 +2915,7 @@ static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	static int ring;
-	skb = skb_padto(skb, cp->min_frame_size);
-	if (!skb)
+	if (skb_padto(skb, cp->min_frame_size))
 		return 0;
 	/* XXX: we need some higher-level QoS hooks to steer packets to

@@ -885,8 +885,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	len = skblen;
 	if (len < ETH_ZLEN) {
-		skb = skb_padto(skb, ETH_ZLEN);
-		if (skb == NULL)
+		if (skb_padto(skb, ETH_ZLEN))
			return 0;
 		len = ETH_ZLEN;
 	}

@@ -938,11 +938,8 @@ static int depca_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (skb->len < 1)
 		goto out;
-	if (skb->len < ETH_ZLEN) {
-		skb = skb_padto(skb, ETH_ZLEN);
-		if (skb == NULL)
-			goto out;
-	}
+	if (skb_padto(skb, ETH_ZLEN))
+		goto out;
 	netif_stop_queue(dev);

@@ -1154,8 +1154,7 @@ static int eepro_send_packet(struct sk_buff *skb, struct net_device *dev)
 		printk(KERN_DEBUG "%s: entering eepro_send_packet routine.\n", dev->name);
 	if (length < ETH_ZLEN) {
-		skb = skb_padto(skb, ETH_ZLEN);
-		if (skb == NULL)
+		if (skb_padto(skb, ETH_ZLEN))
 			return 0;
 		length = ETH_ZLEN;
 	}

@@ -677,8 +677,7 @@ static int eexp_xmit(struct sk_buff *buf, struct net_device *dev)
 #endif
 	if (buf->len < ETH_ZLEN) {
-		buf = skb_padto(buf, ETH_ZLEN);
-		if (buf == NULL)
+		if (skb_padto(buf, ETH_ZLEN))
 			return 0;
 		length = ETH_ZLEN;
 	}

@@ -1027,11 +1027,8 @@ static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u32 ctrl_word;
 	unsigned long flags;
-	if (skb->len < ETH_ZLEN) {
-		skb = skb_padto(skb, ETH_ZLEN);
-		if (skb == NULL)
-			return 0;
-	}
+	if (skb_padto(skb, ETH_ZLEN))
+		return 0;
 	/* Caution: the write order is important here, set the field with the
 	   "ownership" bit last. */

@@ -1064,8 +1064,7 @@ static int eth16i_tx(struct sk_buff *skb, struct net_device *dev)
 	unsigned long flags;
 	if (length < ETH_ZLEN) {
-		skb = skb_padto(skb, ETH_ZLEN);
-		if (skb == NULL)
+		if (skb_padto(skb, ETH_ZLEN))
 			return 0;
 		length = ETH_ZLEN;
 	}

@@ -1487,11 +1487,8 @@ static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev)
 	if (skb->len <= 0)
 		return 0;
-	if (skb->len < ETH_ZLEN && lp->chip == HP100_CHIPID_SHASTA) {
-		skb = skb_padto(skb, ETH_ZLEN);
-		if (skb == NULL)
-			return 0;
-	}
+	if (lp->chip == HP100_CHIPID_SHASTA && skb_padto(skb, ETH_ZLEN))
+		return 0;
 	/* Get Tx ring tail pointer */
 	if (lp->txrtail->next == lp->txrhead) {

@@ -968,8 +968,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* The old LANCE chips doesn't automatically pad buffers to min. size. */
 	if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
 		if (skb->len < ETH_ZLEN) {
-			skb = skb_padto(skb, ETH_ZLEN);
-			if (skb == NULL)
+			if (skb_padto(skb, ETH_ZLEN))
 				goto out;
 			lp->tx_ring[entry].length = -ETH_ZLEN;
 		}

@@ -1083,8 +1083,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		skb->len, skb->data));
 	if (length < ETH_ZLEN) {
-		skb = skb_padto(skb, ETH_ZLEN);
-		if (skb == NULL)
+		if (skb_padto(skb, ETH_ZLEN))
 			return 0;
 		length = ETH_ZLEN;
 	}

@@ -877,8 +877,7 @@ static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) {
 	length = skb->len;
 	if (length < ETH_ZLEN) {
-		skb = skb_padto(skb, ETH_ZLEN);
-		if (skb == NULL)
+		if (skb_padto(skb, ETH_ZLEN))
 			return 0;
 		length = ETH_ZLEN;
 	}

@@ -1939,8 +1939,7 @@ again:
 	/* pad frames to at least ETH_ZLEN bytes */
 	if (unlikely(skb->len < ETH_ZLEN)) {
-		skb = skb_padto(skb, ETH_ZLEN);
-		if (skb == NULL) {
+		if (skb_padto(skb, ETH_ZLEN)) {
 			/* The packet is gone, so we must
 			 * return 0 */
 			mgp->stats.tx_dropped += 1;

@@ -831,8 +831,7 @@ static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (length < ETH_ZLEN)
 	{
-		skb = skb_padto(skb, ETH_ZLEN);
-		if (skb == NULL)
+		if (skb_padto(skb, ETH_ZLEN))
 			return 0;
 		length = ETH_ZLEN;
 	}