author		Christoph Hellwig <hch@lst.de>	2005-07-06 00:03:46 +0200
committer	David S. Miller <davem@davemloft.net>	2005-07-06 00:03:46 +0200
commit		bc971dee6ece1fd0d431948924becd9c50e7b778 (patch)
tree		e1f500970d3397adc14c4a286b81f8375f333af0
parent		[IPV4]: More broken memory allocation fixes for fib_trie (diff)
download	linux-bc971dee6ece1fd0d431948924becd9c50e7b778.tar.xz
		linux-bc971dee6ece1fd0d431948924becd9c50e7b778.zip
[SHAPER]: Switch to spinlocks.
Dave, you were right and the sleeping locks in shaper were broken.
Markus Kanet noticed this and also tested the patch below that
switches locking to spinlocks.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/shaper.c		42
-rw-r--r--	include/linux/if_shaper.h	 2
2 files changed, 17 insertions, 27 deletions
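
Background for the change: the old code serialized the transmit, timer and flush paths with a semaphore (down_trylock() in the transmit and timer paths, down() in the flush path). Semaphores are sleeping locks, and shaper_timer() runs in softirq context where sleeping is not allowed, which is why the timer had to fall back to re-arming itself whenever the trylock failed. A spinlock is the appropriate primitive there. Below is a minimal illustrative sketch of the pattern the patch adopts, using hypothetical names (example_shaper, example_timer); it is not the driver code itself.

#include <linux/spinlock.h>

/* Hypothetical stand-in for struct shaper, for illustration only. */
struct example_shaper {
	spinlock_t lock;		/* was: struct semaphore sem */
};

/* Timer callback: runs in softirq context, where sleeping is forbidden,
 * so the lock is taken unconditionally instead of trylock-and-retry. */
static void example_timer(unsigned long data)
{
	struct example_shaper *sh = (struct example_shaper *)data;

	spin_lock(&sh->lock);
	/* ... clock queued packets out, as shaper_kick() does ... */
	spin_unlock(&sh->lock);
}
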
diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
index 20edeb345792..3ad0b6751f6f 100644
--- a/drivers/net/shaper.c
+++ b/drivers/net/shaper.c
@@ -135,10 +135,8 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct shaper *shaper = dev->priv;
struct sk_buff *ptr;
-
- if (down_trylock(&shaper->sem))
- return -1;
-
+
+ spin_lock(&shaper->lock);
ptr=shaper->sendq.prev;
/*
@@ -232,7 +230,7 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
shaper->stats.collisions++;
}
shaper_kick(shaper);
- up(&shaper->sem);
+ spin_unlock(&shaper->lock);
return 0;
}
@@ -271,11 +269,9 @@ static void shaper_timer(unsigned long data)
{
struct shaper *shaper = (struct shaper *)data;
- if (!down_trylock(&shaper->sem)) {
- shaper_kick(shaper);
- up(&shaper->sem);
- } else
- mod_timer(&shaper->timer, jiffies);
+ spin_lock(&shaper->lock);
+ shaper_kick(shaper);
+ spin_unlock(&shaper->lock);
}
/*
@@ -332,21 +328,6 @@ static void shaper_kick(struct shaper *shaper)
/*
- * Flush the shaper queues on a closedown
- */
-
-static void shaper_flush(struct shaper *shaper)
-{
- struct sk_buff *skb;
-
- down(&shaper->sem);
- while((skb=skb_dequeue(&shaper->sendq))!=NULL)
- dev_kfree_skb(skb);
- shaper_kick(shaper);
- up(&shaper->sem);
-}
-
-/*
* Bring the interface up. We just disallow this until a
* bind.
*/
@@ -375,7 +356,15 @@ static int shaper_open(struct net_device *dev)
static int shaper_close(struct net_device *dev)
{
struct shaper *shaper=dev->priv;
- shaper_flush(shaper);
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
+ dev_kfree_skb(skb);
+
+ spin_lock_bh(&shaper->lock);
+ shaper_kick(shaper);
+ spin_unlock_bh(&shaper->lock);
+
del_timer_sync(&shaper->timer);
return 0;
}
@@ -576,6 +565,7 @@ static void shaper_init_priv(struct net_device *dev)
init_timer(&sh->timer);
sh->timer.function=shaper_timer;
sh->timer.data=(unsigned long)sh;
+ spin_lock_init(&sh->lock);
}
/*
diff --git a/include/linux/if_shaper.h b/include/linux/if_shaper.h
index 004e6f09a6e2..68c896a36a34 100644
--- a/include/linux/if_shaper.h
+++ b/include/linux/if_shaper.h
@@ -23,7 +23,7 @@ struct shaper
__u32 shapeclock;
unsigned long recovery; /* Time we can next clock a packet out on
an empty queue */
- struct semaphore sem;
+ spinlock_t lock;
struct net_device_stats stats;
struct net_device *dev;
int (*hard_start_xmit) (struct sk_buff *skb,
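
One detail of the converted locking worth spelling out: the timer callback may take the plain spin_lock(), but process-context code that races with it (shaper_close() above) uses the _bh variants so the timer softirq cannot run on the same CPU while the lock is held. A rough sketch of that usage, again with hypothetical names (example_priv, example_close) rather than the driver code:

#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/skbuff.h>

/* Hypothetical private structure mirroring the fields touched above. */
struct example_priv {
	spinlock_t lock;
	struct sk_buff_head sendq;
	struct timer_list timer;
};

static int example_close(struct example_priv *p)
{
	struct sk_buff *skb;

	/* skb_dequeue() takes the queue's own internal lock, so the
	 * device lock is not needed just to drain the queue. */
	while ((skb = skb_dequeue(&p->sendq)) != NULL)
		dev_kfree_skb(skb);

	/* _bh variant: keep the timer softirq off this CPU while the
	 * lock is held, since this runs in process context. */
	spin_lock_bh(&p->lock);
	/* ... final kick of anything still scheduled ... */
	spin_unlock_bh(&p->lock);

	/* Wait for a concurrently running timer callback to finish. */
	del_timer_sync(&p->timer);
	return 0;
}
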