Diffstat (limited to 'drivers/net/wireless/wl12xx/main.c')
-rw-r--r--  drivers/net/wireless/wl12xx/main.c  127
1 file changed, 79 insertions(+), 48 deletions(-)
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index ba34ac3a440d..f408c5a84cc9 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -374,7 +374,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -635,16 +635,39 @@ static void wl1271_fw_status(struct wl1271 *wl,
(s64)le32_to_cpu(status->fw_localtime);
}
-#define WL1271_IRQ_MAX_LOOPS 10
+static void wl1271_flush_deferred_work(struct wl1271 *wl)
+{
+ struct sk_buff *skb;
+
+ /* Pass all received frames to the network stack */
+ while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
+ ieee80211_rx_ni(wl->hw, skb);
+
+ /* Return sent skbs to the network stack */
+ while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
+ ieee80211_tx_status(wl->hw, skb);
+}
+
+static void wl1271_netstack_work(struct work_struct *work)
+{
+ struct wl1271 *wl =
+ container_of(work, struct wl1271, netstack_work);
+
+ do {
+ wl1271_flush_deferred_work(wl);
+ } while (skb_queue_len(&wl->deferred_rx_queue));
+}
-static void wl1271_irq_work(struct work_struct *work)
+#define WL1271_IRQ_MAX_LOOPS 256
+
+irqreturn_t wl1271_irq(int irq, void *cookie)
{
int ret;
u32 intr;
int loopcount = WL1271_IRQ_MAX_LOOPS;
- unsigned long flags;
- struct wl1271 *wl =
- container_of(work, struct wl1271, irq_work);
+ struct wl1271 *wl = (struct wl1271 *)cookie;
+ bool done = false;
+ unsigned int defer_count;
mutex_lock(&wl->mutex);
@@ -653,26 +676,27 @@ static void wl1271_irq_work(struct work_struct *work)
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- ret = wl1271_ps_elp_wakeup(wl, true);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- spin_lock_irqsave(&wl->wl_lock, flags);
- while (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags) && loopcount) {
- clear_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- loopcount--;
+ while (!done && loopcount--) {
+ /*
+ * In order to avoid a race with the hardirq, clear the flag
+ * before acknowledging the chip. Since the mutex is held,
+ * wl1271_ps_elp_wakeup cannot be called concurrently.
+ */
+ clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+ smp_mb__after_clear_bit();
wl1271_fw_status(wl, wl->fw_status);
intr = le32_to_cpu(wl->fw_status->common.intr);
+ intr &= WL1271_INTR_MASK;
if (!intr) {
- wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
- spin_lock_irqsave(&wl->wl_lock, flags);
+ done = true;
continue;
}
- intr &= WL1271_INTR_MASK;
-
if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
wl1271_error("watchdog interrupt received! "
"starting recovery.");
@@ -682,7 +706,7 @@ static void wl1271_irq_work(struct work_struct *work)
goto out;
}
- if (intr & WL1271_ACX_INTR_DATA) {
+ if (likely(intr & WL1271_ACX_INTR_DATA)) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
wl1271_rx(wl, &wl->fw_status->common);
@@ -701,6 +725,12 @@ static void wl1271_irq_work(struct work_struct *work)
if (wl->fw_status->common.tx_results_counter !=
(wl->tx_results_count & 0xff))
wl1271_tx_complete(wl);
+
+ /* Make sure the deferred queues don't get too long */
+ defer_count = skb_queue_len(&wl->deferred_tx_queue) +
+ skb_queue_len(&wl->deferred_rx_queue);
+ if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
+ wl1271_flush_deferred_work(wl);
}
if (intr & WL1271_ACX_INTR_EVENT_A) {
@@ -719,21 +749,16 @@ static void wl1271_irq_work(struct work_struct *work)
if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
-
- spin_lock_irqsave(&wl->wl_lock, flags);
}
- if (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags))
- ieee80211_queue_work(wl->hw, &wl->irq_work);
- else
- clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
-
wl1271_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
+
+ return IRQ_HANDLED;
}
+EXPORT_SYMBOL_GPL(wl1271_irq);
static int wl1271_fetch_firmware(struct wl1271 *wl)
{
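The exported wl1271_irq() above is the thread half of a threaded interrupt: the bus glue (sdio.c/spi.c, not part of this file's diff) is expected to register it with request_threaded_irq(). A minimal sketch of that registration, assuming wl->irq holds the platform IRQ number and that the hardirq half merely sets WL1271_FLAG_IRQ_RUNNING before waking the thread; the real glue code and IRQF flags may differ:

#include <linux/interrupt.h>

/* Sketch only: the actual registration lives in the bus-specific glue. */
static irqreturn_t wl1271_hardirq(int irq, void *cookie)
{
	struct wl1271 *wl = cookie;

	/* Mark the IRQ as running; wl1271_irq() clears this bit before
	 * acknowledging the chip so a new interrupt is not missed. */
	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);

	return IRQ_WAKE_THREAD;
}

static int wl1271_register_irq(struct wl1271 *wl)
{
	return request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
				    IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
				    "wl1271", wl);
}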
@@ -974,7 +999,6 @@ int wl1271_plt_start(struct wl1271 *wl)
goto out;
irq_disable:
- wl1271_disable_interrupts(wl);
mutex_unlock(&wl->mutex);
/* Unlocking the mutex in the middle of handling is
inherently unsafe. In this case we deem it safe to do,
@@ -983,7 +1007,9 @@ irq_disable:
work function will not do anything.) Also, any other
possible concurrent operations will fail due to the
current state, hence the wl1271 struct should be safe. */
- cancel_work_sync(&wl->irq_work);
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
+ cancel_work_sync(&wl->netstack_work);
mutex_lock(&wl->mutex);
power_off:
wl1271_power_off(wl);
@@ -1010,14 +1036,15 @@ int __wl1271_plt_stop(struct wl1271 *wl)
goto out;
}
- wl1271_disable_interrupts(wl);
wl1271_power_off(wl);
wl->state = WL1271_STATE_OFF;
wl->rx_counter = 0;
mutex_unlock(&wl->mutex);
- cancel_work_sync(&wl->irq_work);
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
+ cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->recovery_work);
mutex_lock(&wl->mutex);
out:
@@ -1169,7 +1196,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
break;
irq_disable:
- wl1271_disable_interrupts(wl);
mutex_unlock(&wl->mutex);
/* Unlocking the mutex in the middle of handling is
inherently unsafe. In this case we deem it safe to do,
@@ -1178,7 +1204,9 @@ irq_disable:
work function will not do anything.) Also, any other
possible concurrent operations will fail due to the
current state, hence the wl1271 struct should be safe. */
- cancel_work_sync(&wl->irq_work);
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
+ cancel_work_sync(&wl->netstack_work);
mutex_lock(&wl->mutex);
power_off:
wl1271_power_off(wl);
@@ -1244,12 +1272,12 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
wl->state = WL1271_STATE_OFF;
- wl1271_disable_interrupts(wl);
-
mutex_unlock(&wl->mutex);
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
cancel_delayed_work_sync(&wl->scan_complete_work);
- cancel_work_sync(&wl->irq_work);
+ cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->tx_work);
cancel_delayed_work_sync(&wl->pspoll_work);
cancel_delayed_work_sync(&wl->elp_work);
@@ -1525,7 +1553,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -1681,7 +1709,7 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -1910,7 +1938,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
goto out_unlock;
}
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_unlock;
@@ -2013,7 +2041,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -2039,7 +2067,7 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -2067,7 +2095,7 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -2546,7 +2574,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -2601,7 +2629,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
conf_tid->apsd_conf[0] = 0;
conf_tid->apsd_conf[1] = 0;
} else {
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -2647,7 +2675,7 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw)
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -2736,7 +2764,7 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_free_sta;
@@ -2779,7 +2807,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -2812,7 +2840,7 @@ int wl1271_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto out;
}
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -3176,7 +3204,7 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
if (wl->state == WL1271_STATE_OFF)
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -3376,9 +3404,12 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
for (j = 0; j < AP_MAX_LINKS; j++)
skb_queue_head_init(&wl->links[j].tx_queue[i]);
+ skb_queue_head_init(&wl->deferred_rx_queue);
+ skb_queue_head_init(&wl->deferred_tx_queue);
+
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
- INIT_WORK(&wl->irq_work, wl1271_irq_work);
+ INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
INIT_WORK(&wl->tx_work, wl1271_tx_work);
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
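The new code also relies on fields and a constant declared outside main.c. The matching wl12xx.h changes are not shown in this diff; purely as an illustration of what the driver struct is assumed to carry (member names taken from the code above, the queue limit value is a placeholder):

#include <linux/skbuff.h>
#include <linux/workqueue.h>

/* Flush threshold checked in wl1271_irq(); the value here is a placeholder. */
#define WL1271_DEFERRED_QUEUE_LIMIT	10

struct wl1271 {
	/* ... existing members (hw, mutex, flags, fw_status, ...) ... */

	/* skbs collected in the IRQ thread and passed to mac80211 later,
	 * either when the limit above is hit or from netstack_work */
	struct sk_buff_head deferred_rx_queue;
	struct sk_buff_head deferred_tx_queue;

	/* replaces the old irq_work; runs wl1271_netstack_work() */
	struct work_struct netstack_work;
};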