author     Felix Fietkau <nbd@openwrt.org>  2011-03-11 21:38:19 +0100
committer  John W. Linville <linville@tuxdriver.com>  2011-03-14 19:46:58 +0100
commit     86271e460a66003dc1f4cbfd845adafb790b7587 (patch)
tree       aac8bd62df92a0a8975d0d1604cd9bb62a7c5f5a /drivers
parent     ath9k: fix stopping tx dma on reset (diff)
ath9k: fix the .flush driver op implementation
This patch simplifies the flush op and reuses ath_drain_all_txq for flushing out pending frames if necessary. It also uses a global timeout of 200 ms instead of the per-queue 60 ms timeout.

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
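For quick reference, the flush op as it reads after this patch, reconstructed from the main.c hunk below (a sketch for orientation only; the diff is authoritative):

static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
{
	struct ath_softc *sc = hw->priv;
	int timeout = 200; /* ms; reduced to 1 ms when mac80211 asks us to drop */
	int i, j;

	ath9k_ps_wakeup(sc);
	mutex_lock(&sc->mutex);

	cancel_delayed_work_sync(&sc->tx_complete_work);

	if (drop)
		timeout = 1;

	/* Poll all active tx queues until they drain or the global timeout expires. */
	for (j = 0; j < timeout; j++) {
		int npend = 0;

		if (j)
			usleep_range(1000, 2000);

		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (!ATH_TXQ_SETUP(sc, i))
				continue;

			npend += ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
		}

		if (!npend)
			goto out;
	}

	/* Frames are still pending: drain all queues, falling back to a chip reset. */
	if (!ath_drain_all_txq(sc, false))
		ath_reset(sc, false);

out:
	ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
	mutex_unlock(&sc->mutex);
	ath9k_ps_restore(sc);
}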
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h   1
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c    56
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c    28
3 files changed, 34 insertions(+), 51 deletions(-)
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index c718ab512a97..099bd4183ad0 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -189,7 +189,6 @@ struct ath_txq {
u32 axq_ampdu_depth;
bool stopped;
bool axq_tx_inprogress;
- bool txq_flush_inprogress;
struct list_head axq_acq;
struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
struct list_head txq_fifo_pending;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 2e228aada1a9..115f162c617a 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2128,56 +2128,42 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
{
-#define ATH_FLUSH_TIMEOUT 60 /* ms */
struct ath_softc *sc = hw->priv;
- struct ath_txq *txq = NULL;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- int i, j, npend = 0;
+ int timeout = 200; /* ms */
+ int i, j;
+ ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
cancel_delayed_work_sync(&sc->tx_complete_work);
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
- if (!ATH_TXQ_SETUP(sc, i))
- continue;
- txq = &sc->tx.txq[i];
+ if (drop)
+ timeout = 1;
- if (!drop) {
- for (j = 0; j < ATH_FLUSH_TIMEOUT; j++) {
- if (!ath9k_has_pending_frames(sc, txq))
- break;
- usleep_range(1000, 2000);
- }
- }
+ for (j = 0; j < timeout; j++) {
+ int npend = 0;
+
+ if (j)
+ usleep_range(1000, 2000);
- if (drop || ath9k_has_pending_frames(sc, txq)) {
- ath_dbg(common, ATH_DBG_QUEUE, "Drop frames from hw queue:%d\n",
- txq->axq_qnum);
- spin_lock_bh(&txq->axq_lock);
- txq->txq_flush_inprogress = true;
- spin_unlock_bh(&txq->axq_lock);
-
- ath9k_ps_wakeup(sc);
- ath9k_hw_stoptxdma(ah, txq->axq_qnum);
- npend = ath9k_hw_numtxpending(ah, txq->axq_qnum);
- ath9k_ps_restore(sc);
- if (npend)
- break;
-
- ath_draintxq(sc, txq, false);
- txq->txq_flush_inprogress = false;
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
+
+ npend += ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
}
+
+ if (!npend)
+ goto out;
}
- if (npend) {
+ if (!ath_drain_all_txq(sc, false))
ath_reset(sc, false);
- txq->txq_flush_inprogress = false;
- }
+out:
ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
}
struct ieee80211_ops ath9k_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index bb1d29e90eb1..f977f804c68a 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2012,8 +2012,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
spin_lock_bh(&txq->axq_lock);
if (list_empty(&txq->axq_q)) {
txq->axq_link = NULL;
- if (sc->sc_flags & SC_OP_TXAGGR &&
- !txq->txq_flush_inprogress)
+ if (sc->sc_flags & SC_OP_TXAGGR)
ath_txq_schedule(sc, txq);
spin_unlock_bh(&txq->axq_lock);
break;
@@ -2094,7 +2093,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
spin_lock_bh(&txq->axq_lock);
- if (sc->sc_flags & SC_OP_TXAGGR && !txq->txq_flush_inprogress)
+ if (sc->sc_flags & SC_OP_TXAGGR)
ath_txq_schedule(sc, txq);
spin_unlock_bh(&txq->axq_lock);
}
@@ -2265,18 +2264,17 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
spin_lock_bh(&txq->axq_lock);
- if (!txq->txq_flush_inprogress) {
- if (!list_empty(&txq->txq_fifo_pending)) {
- INIT_LIST_HEAD(&bf_head);
- bf = list_first_entry(&txq->txq_fifo_pending,
- struct ath_buf, list);
- list_cut_position(&bf_head,
- &txq->txq_fifo_pending,
- &bf->bf_lastbf->list);
- ath_tx_txqaddbuf(sc, txq, &bf_head);
- } else if (sc->sc_flags & SC_OP_TXAGGR)
- ath_txq_schedule(sc, txq);
- }
+ if (!list_empty(&txq->txq_fifo_pending)) {
+ INIT_LIST_HEAD(&bf_head);
+ bf = list_first_entry(&txq->txq_fifo_pending,
+ struct ath_buf, list);
+ list_cut_position(&bf_head,
+ &txq->txq_fifo_pending,
+ &bf->bf_lastbf->list);
+ ath_tx_txqaddbuf(sc, txq, &bf_head);
+ } else if (sc->sc_flags & SC_OP_TXAGGR)
+ ath_txq_schedule(sc, txq);
+
spin_unlock_bh(&txq->axq_lock);
}
}