author	Ron Mercer <ron.mercer@qlogic.com>	2007-02-26 20:06:41 +0100
committer	Jeff Garzik <jeff@garzik.org>	2007-02-27 10:21:44 +0100
commit	63b66d12de57d8455615d9f619e18824137ed547 (patch)
tree	2230d079a80369c6e3587f63042250399ee70852 /drivers/net/qla3xxx.c
parent	qla3xxx: Check return code from pci_map_single() in ql_release_to_lrg_buf_fre... (diff)
qla3xxx: bugfix tx reset after stress conditions.
To reproduce the problem: run an intensive network application such as
'netperf', then switch to a different console. After waiting a couple of
seconds, you will see that a tx reset has occurred.

Reason: interrupts were re-enabled even while the poll routine was still
running.

Solution: we now enable interrupts only after we are ready to give up
the poll routine.

Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
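The shape of the fix is easiest to see with the interrupt handler and the
poll routine side by side. Below is a condensed sketch of the patched logic,
distilled from the diff that follows (locking and error paths elided; it is
not a compilable unit on its own, and all names come from the patch itself):

	/* ISR: touch the interrupt mask only when a poll is really
	 * scheduled, so interrupts can never be re-enabled while
	 * ql_poll() is still running.
	 */
	if (likely(netif_rx_schedule_prep(ndev))) {
		ql_disable_interrupts(qdev);
		__netif_rx_schedule(ndev);
	}

	/* ql_poll(): when all work is done, tell the chip how far the
	 * response queue has been consumed, and only then hand the
	 * interrupt back to the hardware.
	 */
	if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
		netif_rx_complete(ndev);
		ql_write_common_reg(qdev,	/* under hw_lock in the patch */
				    &port_regs->CommonRegs.rspQConsumerIndex,
				    qdev->rsp_consumer_index);
		ql_enable_interrupts(qdev);
		return 0;
	}

Note that the old ISR disabled interrupts unconditionally and re-enabled
them when netif_rx_schedule_prep() failed; the patched version changes the
mask only on the path that actually schedules a poll.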
Diffstat (limited to 'drivers/net/qla3xxx.c')
-rwxr-xr-x	drivers/net/qla3xxx.c	69
1 file changed, 41 insertions(+), 28 deletions(-)
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 7a2f01a22cbf..5bf446f7be15 100755
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -1921,10 +1921,11 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 	struct net_rsp_iocb *net_rsp;
 	struct net_device *ndev = qdev->ndev;
 	unsigned long hw_flags;
+	int work_done = 0;
 
 	/* While there are entries in the completion queue. */
 	while ((cpu_to_le32(*(qdev->prsp_producer_index)) !=
-	       qdev->rsp_consumer_index) && (*rx_cleaned < work_to_do)) {
+	       qdev->rsp_consumer_index) && (work_done < work_to_do)) {
 
 		net_rsp = qdev->rsp_current;
 		switch (net_rsp->opcode) {
@@ -1975,37 +1976,41 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 		} else {
 			qdev->rsp_current++;
 		}
+
+		work_done = *tx_cleaned + *rx_cleaned;
 	}
 
-	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	if(work_done) {
+		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 
-	ql_update_lrg_bufq_prod_index(qdev);
+		ql_update_lrg_bufq_prod_index(qdev);
 
-	if (qdev->small_buf_release_cnt >= 16) {
-		while (qdev->small_buf_release_cnt >= 16) {
-			qdev->small_buf_q_producer_index++;
+		if (qdev->small_buf_release_cnt >= 16) {
+			while (qdev->small_buf_release_cnt >= 16) {
+				qdev->small_buf_q_producer_index++;
 
-			if (qdev->small_buf_q_producer_index ==
-			    NUM_SBUFQ_ENTRIES)
-				qdev->small_buf_q_producer_index = 0;
-			qdev->small_buf_release_cnt -= 8;
-		}
+				if (qdev->small_buf_q_producer_index ==
+				    NUM_SBUFQ_ENTRIES)
+					qdev->small_buf_q_producer_index = 0;
+				qdev->small_buf_release_cnt -= 8;
+			}
 
-		ql_write_common_reg(qdev,
-				    &port_regs->CommonRegs.
-				    rxSmallQProducerIndex,
-				    qdev->small_buf_q_producer_index);
-	}
+			wmb();
+			ql_write_common_reg(qdev,
+					    &port_regs->CommonRegs.
+					    rxSmallQProducerIndex,
+					    qdev->small_buf_q_producer_index);
 
-	ql_write_common_reg(qdev,
-			    &port_regs->CommonRegs.rspQConsumerIndex,
-			    qdev->rsp_consumer_index);
-	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+		}
 
-	if (unlikely(netif_queue_stopped(qdev->ndev))) {
-		if (netif_queue_stopped(qdev->ndev) &&
-		    (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4)))
-			netif_wake_queue(qdev->ndev);
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+		if (unlikely(netif_queue_stopped(qdev->ndev))) {
+			if (netif_queue_stopped(qdev->ndev) &&
+			    (atomic_read(&qdev->tx_count) >
+			     (NUM_REQ_Q_ENTRIES / 4)))
+				netif_wake_queue(qdev->ndev);
+		}
 	}
 
 	return *tx_cleaned + *rx_cleaned;
@@ -2016,6 +2021,8 @@ static int ql_poll(struct net_device *ndev, int *budget)
 	struct ql3_adapter *qdev = netdev_priv(ndev);
 	int work_to_do = min(*budget, ndev->quota);
 	int rx_cleaned = 0, tx_cleaned = 0;
+	unsigned long hw_flags;
+	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
 
 	if (!netif_carrier_ok(ndev))
 		goto quit_polling;
@@ -2027,6 +2034,13 @@ static int ql_poll(struct net_device *ndev, int *budget)
 	if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
 quit_polling:
 		netif_rx_complete(ndev);
+
+		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+		ql_write_common_reg(qdev,
+				    &port_regs->CommonRegs.rspQConsumerIndex,
+				    qdev->rsp_consumer_index);
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
 		ql_enable_interrupts(qdev);
 		return 0;
 	}
@@ -2079,11 +2093,10 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
 		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
 		spin_unlock(&qdev->adapter_lock);
 	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
-		ql_disable_interrupts(qdev);
-		if (likely(netif_rx_schedule_prep(ndev)))
+		if (likely(netif_rx_schedule_prep(ndev))) {
+			ql_disable_interrupts(qdev);
 			__netif_rx_schedule(ndev);
-		else
-			ql_enable_interrupts(qdev);
+		}
 	} else {
 		return IRQ_NONE;
 	}