Commit 09075ef0 authored by Linus Torvalds

Merge commit master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6 of HEAD

* HEAD:
  [AX.25]: Use kzalloc
  [ATM] net/atm/clip.c: fix PROC_FS=n compile
  [PKT_SCHED]: act_api: Fix module leak while flushing actions
  [NET]: Fix IPv4/DECnet routing rule dumping
  [NET] gso: Fix up GSO packets with broken checksums
  [NET] gso: Add skb_is_gso
  [IRDA]: fix drivers/net/irda/ali-ircc.c:ali_ircc_init()
  [ATM]: fix possible recursive locking in skb_migrate()
  [ATM]: Typo in drivers/atm/Kconfig...
  [TG3]: add amd8131 to "write reorder" chipsets
  [NET]: Fix network device interface printk message priority
parents c87fed15 1b30dd35
......@@ -398,7 +398,7 @@ config ATM_FORE200E_USE_TASKLET
default n
help
This defers work to be done by the interrupt handler to a
-tasklet instead of hanlding everything at interrupt time. This
+tasklet instead of handling everything at interrupt time. This
may improve the responsive of the host.
config ATM_FORE200E_TX_RETRY
......
......@@ -1639,7 +1639,7 @@ bnx2_tx_int(struct bnx2 *bp)
skb = tx_buf->skb;
#ifdef BCM_TSO
/* partial BD completions possible with TSO packets */
-if (skb_shinfo(skb)->gso_size) {
+if (skb_is_gso(skb)) {
u16 last_idx, last_ring_idx;
last_idx = sw_cons +
......
......@@ -1417,7 +1417,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct cpl_tx_pkt *cpl;
#ifdef NETIF_F_TSO
-if (skb_shinfo(skb)->gso_size) {
+if (skb_is_gso(skb)) {
int eth_type;
struct cpl_tx_pkt_lso *hdr;
......
......@@ -2524,7 +2524,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
int err;
-if (skb_shinfo(skb)->gso_size) {
+if (skb_is_gso(skb)) {
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err)
......@@ -2649,7 +2649,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
* tso gets written back prematurely before the data is fully
* DMA'd to the controller */
if (!skb->data_len && tx_ring->last_tx_tso &&
-!skb_shinfo(skb)->gso_size) {
+!skb_is_gso(skb)) {
tx_ring->last_tx_tso = 0;
size -= 4;
}
......@@ -2937,8 +2937,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
#ifdef NETIF_F_TSO
/* Controller Erratum workaround */
-if (!skb->data_len && tx_ring->last_tx_tso &&
-!skb_shinfo(skb)->gso_size)
+if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
count++;
#endif
......
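Taken together, the three e1000 hunks touch a single controller-erratum workaround rather than three separate changes: when a frame with no paged data follows a TSO frame, its last 4 bytes are split off into an extra descriptor so the hardware does not write the descriptor back before the data has finished DMAing. A rough sketch of how the two visible pieces cooperate; this is hedged and simplified, since the surrounding driver code is elided above:

/* Illustrative sketch only; simplified from the pattern in the hunks above. */

/* In e1000_xmit_frame(): reserve room for the descriptor the split will need. */
if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
	count++;			/* one extra descriptor for the 4-byte tail */

/* In e1000_tx_map(): shorten the linear buffer; the tail is handled separately. */
if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) {
	tx_ring->last_tx_tso = 0;	/* only the first frame after TSO is affected */
	size -= 4;			/* remaining 4 bytes become their own buffer */
}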
......@@ -1495,7 +1495,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
np->tx_skbuff[nr] = skb;
#ifdef NETIF_F_TSO
-if (skb_shinfo(skb)->gso_size)
+if (skb_is_gso(skb))
tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
else
#endif
......
......@@ -146,7 +146,7 @@ static int __init ali_ircc_init(void)
{
ali_chip_t *chip;
chipio_t info;
-int ret = -ENODEV;
+int ret;
int cfg, cfg_base;
int reg, revision;
int i = 0;
......@@ -160,6 +160,7 @@ static int __init ali_ircc_init(void)
return ret;
}
+ret = -ENODEV;
/* Probe for all the ALi chipsets we know about */
for (chip= chips; chip->name; chip++, i++)
......
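The two ali-ircc hunks fix an error-code bug in the module init path: ret used to be initialised to -ENODEV once at the top, but the same variable also carries the return value of an earlier registration call (the "return ret;" on its failure path is visible above), so after a successful registration it holds 0. If the probe loop then found no chip, the function could report success. The fix re-arms ret = -ENODEV just before the loop. A minimal, hedged sketch of the corrected pattern; the registration and probe helpers are illustrative names, not the exact ali-ircc code:

/* Illustrative init pattern; not the literal ali_ircc_init() body. */
static int __init example_init(void)
{
	ali_chip_t *chip;
	int ret;

	ret = example_register_driver();	/* illustrative registration step */
	if (ret)
		return ret;			/* propagate the registration error */

	/* Re-arm the error code: a successful registration left ret == 0,
	 * and that must not leak out if no chip is found below. */
	ret = -ENODEV;

	for (chip = chips; chip->name; chip++)
		if (example_probe_chip(chip) == 0)
			ret = 0;		/* at least one chipset detected */

	return ret;
}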
......@@ -1173,7 +1173,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
uint16_t ipcse, tucse, mss;
int err;
-if(likely(skb_shinfo(skb)->gso_size)) {
+if (likely(skb_is_gso(skb))) {
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err)
......
......@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
#ifdef LOOPBACK_TSO
-if (skb_shinfo(skb)->gso_size) {
+if (skb_is_gso(skb)) {
BUG_ON(skb->protocol != htons(ETH_P_IP));
BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
......
......@@ -2116,7 +2116,7 @@ abort_linearize:
}
idx = (idx + 1) & tx->mask;
} while (idx != last_idx);
-if (skb_shinfo(skb)->gso_size) {
+if (skb_is_gso(skb)) {
printk(KERN_ERR
"myri10ge: %s: TSO but wanted to linearize?!?!?\n",
mgp->dev->name);
......
......@@ -1159,7 +1159,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
count = sizeof(dma_addr_t) / sizeof(u32);
count += skb_shinfo(skb)->nr_frags * count;
-if (skb_shinfo(skb)->gso_size)
+if (skb_is_gso(skb))
++count;
if (skb->ip_summed == CHECKSUM_HW)
......
......@@ -10078,6 +10078,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
static struct pci_device_id write_reorder_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
+PCI_DEVICE_ID_AMD_8131_BRIDGE) },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_8385_0) },
{ },
......
......@@ -805,7 +805,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
* If problems develop with TSO, check this first.
*/
numDesc = skb_shinfo(skb)->nr_frags + 1;
-if(skb_tso_size(skb))
+if (skb_is_gso(skb))
numDesc++;
/* When checking for free space in the ring, we need to also
......@@ -845,7 +845,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
TYPHOON_TX_PF_VLAN_TAG_SHIFT);
}
-if(skb_tso_size(skb)) {
+if (skb_is_gso(skb)) {
first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
first_txd->numDesc++;
......
......@@ -4457,7 +4457,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
queue = card->qdio.out_qs
[qeth_get_priority_queue(card, skb, ipv, cast_type)];
-if (skb_shinfo(skb)->gso_size)
+if (skb_is_gso(skb))
large_send = card->options.large_send;
/*are we able to do TSO ? If so ,prepare and send it from here */
......
......@@ -549,6 +549,7 @@ struct packet_type {
struct net_device *);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
int features);
+int (*gso_send_check)(struct sk_buff *skb);
void *af_packet_priv;
struct list_head list;
};
......@@ -1001,13 +1002,14 @@ static inline int net_gso_ok(int features, int gso_type)
static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
-return net_gso_ok(features, skb_shinfo(skb)->gso_size ?
-skb_shinfo(skb)->gso_type : 0);
+return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}
static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
-return !skb_gso_ok(skb, dev->features);
+return skb_is_gso(skb) &&
+(!skb_gso_ok(skb, dev->features) ||
+unlikely(skb->ip_summed != CHECKSUM_HW));
}
#endif /* __KERNEL__ */
......
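The netdevice.h hunk is the core of "[NET] gso: Fix up GSO packets with broken checksums": skb_gso_ok() now only checks the GSO type against the device features, while netif_needs_gso() asks the full question of whether software segmentation is required, namely "is this a GSO skb that the device either cannot segment, or whose checksum is not left in the CHECKSUM_HW state?". A hedged sketch of how a transmit path would use the predicate; only netif_needs_gso() comes from the hunk, the other names are illustrative stand-ins for what net/core/dev.c does:

/* Illustrative transmit-path use of the predicate above. */
static int xmit_one(struct net_device *dev, struct sk_buff *skb)
{
	if (netif_needs_gso(dev, skb)) {
		/* GSO skb the hardware cannot take as-is: segment it in
		 * software (which also fixes up the checksums) and send the
		 * resulting list of ordinary skbs instead. */
		return software_gso_xmit(dev, skb);	/* illustrative name */
	}

	/* Non-GSO frames, and GSO frames the device can handle, go straight
	 * to the driver. */
	return dev->hard_start_xmit(skb, dev);
}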
......@@ -1455,5 +1455,10 @@ static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif
+static inline int skb_is_gso(const struct sk_buff *skb)
+{
+return skb_shinfo(skb)->gso_size;
+}
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
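The new skb_is_gso() helper simply reports whether gso_size is non-zero, which is exactly the test every driver hunk above used to open-code. A minimal before/after illustration; the descriptor counter is a made-up variable used only to show the call sites:

/* Before: each driver spelled out how "GSO-ness" is encoded. */
if (skb_shinfo(skb)->gso_size)
	extra_descriptors++;

/* After: one readable predicate, with the gso_size detail kept in skbuff.h. */
if (skb_is_gso(skb))
	extra_descriptors++;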
......@@ -36,6 +36,7 @@
struct net_protocol {
int (*handler)(struct sk_buff *skb);
void (*err_handler)(struct sk_buff *skb, u32 info);
+int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
int features);
int no_policy;
......@@ -51,6 +52,7 @@ struct inet6_protocol
int type, int code, int offset,
__u32 info);
+int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
int features);
......
......@@ -1086,6 +1086,7 @@ extern struct request_sock_ops tcp_request_sock_ops;
extern int tcp_v4_destroy_sock(struct sock *sk);
+extern int tcp_v4_gso_send_check(struct sk_buff *skb);
extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_PROC_FS
......
......@@ -962,7 +962,6 @@ static struct file_operations arp_seq_fops = {
static int __init atm_clip_init(void)
{
-struct proc_dir_entry *p;
neigh_table_init_no_netlink(&clip_tbl);
clip_tbl_hook = &clip_tbl;
......@@ -972,9 +971,15 @@ static int __init atm_clip_init(void)
setup_timer(&idle_timer, idle_timer_check, 0);
-p = create_proc_entry("arp", S_IRUGO, atm_proc_root);
-if (p)
-p->proc_fops = &arp_seq_fops;
+#ifdef CONFIG_PROC_FS
+{
+struct proc_dir_entry *p;
+p = create_proc_entry("arp", S_IRUGO, atm_proc_root);
+if (p)
+p->proc_fops = &arp_seq_fops;
+}
+#endif
return 0;
}
......
......@@ -25,22 +25,27 @@
/*
* skb_migrate appends the list at "from" to "to", emptying "from" in the
* process. skb_migrate is atomic with respect to all other skb operations on
* "from" and "to". Note that it locks both lists at the same time, so beware
* of potential deadlocks.
* "from" and "to". Note that it locks both lists at the same time, so to deal
* with the lock ordering, the locks are taken in address order.
*
* This function should live in skbuff.c or skbuff.h.
*/
-void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
+void skb_migrate(struct sk_buff_head *from, struct sk_buff_head *to)
{
unsigned long flags;
struct sk_buff *skb_from = (struct sk_buff *) from;
struct sk_buff *skb_to = (struct sk_buff *) to;
struct sk_buff *prev;
-spin_lock_irqsave(&from->lock,flags);
-spin_lock(&to->lock);
+if ((unsigned long) from < (unsigned long) to) {
+spin_lock_irqsave(&from->lock, flags);
+spin_lock_nested(&to->lock, SINGLE_DEPTH_NESTING);
+} else {
+spin_lock_irqsave(&to->lock, flags);
+spin_lock_nested(&from->lock, SINGLE_DEPTH_NESTING);
+}
prev = from->prev;
from->next->prev = to->prev;
prev->next = skb_to;
......@@ -51,7 +56,7 @@ void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
from->prev = skb_from;
from->next = skb_from;
from->qlen = 0;
-spin_unlock_irqrestore(&from->lock,flags);
+spin_unlock_irqrestore(&from->lock, flags);
}
......
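The skb_migrate() change is the classic cure for an ABBA deadlock (and for the lockdep "possible recursive locking" report named in the summary): two callers can pass the same pair of queues in opposite orders, so instead of always locking "from" before "to", the lock at the lower address is always taken first and the second acquisition is annotated with spin_lock_nested(). A generic, hedged sketch of the idiom outside the ATM code:

/* Generic address-ordered double locking; a sketch, not the ATM helper. */
static void lock_pair(spinlock_t *a, spinlock_t *b, unsigned long *flags)
{
	if (a < b) {
		spin_lock_irqsave(a, *flags);
		spin_lock_nested(b, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irqsave(b, *flags);
		spin_lock_nested(a, SINGLE_DEPTH_NESTING);
	}
	/* Every CPU now acquires the two locks in the same (address) order,
	 * so concurrent callers with swapped arguments can no longer deadlock. */
}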
......@@ -486,10 +486,9 @@ ax25_cb *ax25_create_cb(void)
{
ax25_cb *ax25;
-if ((ax25 = kmalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
+if ((ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
return NULL;
-memset(ax25, 0x00, sizeof(*ax25));
atomic_set(&ax25->refcount, 1);
skb_queue_head_init(&ax25->write_queue);
......
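The AX.25 hunk is a routine kzalloc conversion: kzalloc(size, flags) allocates and zeroes in one call, so the separate memset() of the freshly allocated control block can go away. The equivalence, shown side by side:

/* Old style: allocate, then zero by hand. */
ax25 = kmalloc(sizeof(*ax25), GFP_ATOMIC);
if (ax25)
	memset(ax25, 0x00, sizeof(*ax25));

/* New style: kzalloc() returns already-zeroed memory. */
ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC);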