From 9e29db0e3645cafa980e68a9c717a761448389e1 Mon Sep 17 00:00:00 2001
From: Chris Mason
Date: Thu, 15 Apr 2010 16:38:14 -0400
Subject: RDS: Use a generation counter to avoid rds_send_xmit loop

rds_send_xmit is required to loop around after it releases the lock
because someone else could have done a trylock, found someone working on
the list and backed off.

But, once we drop our lock, it is possible that someone else does come
in and make progress on the list.  We should detect this and not loop
around if another process is actually working on the list.

This patch adds a generation counter that is bumped every time we
get the lock and do some send work.  If the retry notices someone else
has bumped the generation counter, it does not need to loop around and
continue working.

Signed-off-by: Chris Mason
Signed-off-by: Andy Grover
---
 net/rds/send.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/net/rds/send.c b/net/rds/send.c
index de5693cdcefb..663fd60b40cf 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -112,6 +112,7 @@ int rds_send_xmit(struct rds_connection *conn)
 	unsigned int tmp;
 	struct scatterlist *sg;
 	int ret = 0;
+	int gen = 0;
 	LIST_HEAD(to_be_dropped);
 
 restart:
@@ -134,6 +135,8 @@ restart:
 	if (conn->c_trans->xmit_prepare)
 		conn->c_trans->xmit_prepare(conn);
 
+	gen = atomic_inc_return(&conn->c_send_generation);
+
 	/*
 	 * spin trying to push headers and data down the connection until
 	 * the connection doesn't make forward progress.
@@ -359,13 +362,13 @@ restart:
 	if (ret == 0) {
 		/* A simple bit test would be way faster than taking the
 		 * spin lock */
-		spin_lock_irqsave(&conn->c_lock, flags);
+		smp_mb();
 		if (!list_empty(&conn->c_send_queue)) {
 			rds_stats_inc(s_send_lock_queue_raced);
-			spin_unlock_irqrestore(&conn->c_lock, flags);
-			goto restart;
+			if (gen == atomic_read(&conn->c_send_generation)) {
+				goto restart;
+			}
 		}
-		spin_unlock_irqrestore(&conn->c_lock, flags);
 	}
 out:
 	return ret;
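
The retry logic the generation counter closes over can be sketched outside
the kernel as a small userspace analogue using C11 atomics.  This is only an
illustration of the pattern, not the RDS code: the names worker(),
send_generation, queue_nonempty and pump_queue() are invented for the
example.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int send_generation;   /* analogue of conn->c_send_generation */
	static atomic_bool queue_nonempty;   /* analogue of !list_empty(&conn->c_send_queue) */

	/* Pretend to drain the queue; returns 0 once we stop making progress. */
	static int pump_queue(void)
	{
		atomic_store(&queue_nonempty, false);
		return 0;
	}

	static void worker(void)
	{
		int gen;

	restart:
		/* Bump the generation so concurrent retries can see we got in.
		 * fetch_add returns the old value, so +1 mirrors the kernel's
		 * atomic_inc_return(). */
		gen = atomic_fetch_add(&send_generation, 1) + 1;

		if (pump_queue() == 0) {
			/* Messages may have been queued after we stopped... */
			if (atomic_load(&queue_nonempty)) {
				/* ...but only loop around if nobody else bumped
				 * the generation, i.e. no other worker is already
				 * making progress on the queue. */
				if (gen == atomic_load(&send_generation))
					goto restart;
			}
		}
	}

	int main(void)
	{
		atomic_store(&queue_nonempty, true);
		worker();
		printf("done\n");
		return 0;
	}

The point of the pattern is that a retry is only needed to cover the window
where a concurrent sender backed off; if the generation has moved on, some
other sender is demonstrably inside the send path and will pick up the
queued work, so this caller can return without looping.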