author    Lorenzo Bianconi <lorenzo@kernel.org>  2023-01-10 10:31:26 +0100
committer Jakub Kicinski <kuba@kernel.org>       2023-01-12 06:14:41 +0100
commit    d4f12a8271fb724b68af6fe8ef54f0f2e63da74d (patch)
tree      db7c8359cebd3892ef268628c111413e57823dba /drivers
parent    devlink: keep the instance mutex alive until references are gone (diff)
net: ethernet: mtk_wed: get rid of queue lock for rx queue
The queue spinlock is currently held in the mtk_wed_wo_queue_rx_clean and
mtk_wed_wo_queue_refill routines for the MTK Wireless Ethernet Dispatcher
MCU rx queue. mtk_wed_wo_queue_refill() runs during initialization and in
the rx tasklet, while mtk_wed_wo_queue_rx_clean() runs in
mtk_wed_wo_hw_deinit() during the hw de-init phase, after the rx tasklet
has been disabled. Since mtk_wed_wo_queue_rx_clean() and
mtk_wed_wo_queue_refill() can't run concurrently, get rid of the spinlock
for the MCU rx queue.

Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://lore.kernel.org/r/36ec3b729542ea60898471d890796f745479ba32.1673342990.git.lorenzo@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
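[Editor's note] The serialization argument above can be illustrated with a
small userspace model. This is only an illustrative sketch, not the driver
code: the struct, the helpers queue_refill() and queue_rx_clean(), and main()
are hypothetical stand-ins for mtk_wed_wo_queue_refill() and
mtk_wed_wo_queue_rx_clean(), showing why the two paths never overlap.

    /*
     * Minimal sketch of the lifecycle that makes the rx-queue spinlock
     * unnecessary. All names are hypothetical stand-ins; the real code
     * allocates, DMA-maps, unmaps and frees buffers where noted.
     */
    #include <stdio.h>

    struct wo_queue {
            int queued;     /* buffers currently posted to the queue */
            int n_desc;     /* ring size */
    };

    /* Models refill: called from the init path and from the rx tasklet. */
    static int queue_refill(struct wo_queue *q)
    {
            int n_buf = 0;

            while (q->queued < q->n_desc) { /* alloc + DMA-map in real code */
                    q->queued++;
                    n_buf++;
            }
            return n_buf;
    }

    /* Models rx clean: called only from hw de-init, after the rx tasklet
     * has been disabled, so it can never race queue_refill(). */
    static void queue_rx_clean(struct wo_queue *q)
    {
            while (q->queued > 0)           /* unmap + free in real code */
                    q->queued--;
    }

    int main(void)
    {
            struct wo_queue q = { .queued = 0, .n_desc = 4 };

            queue_refill(&q);       /* init path, before the tasklet runs */
            queue_refill(&q);       /* rx tasklet, the only runtime caller */

            /* de-init: the tasklet is disabled before this point, so the
             * clean path is strictly ordered after the last refill */
            queue_rx_clean(&q);

            printf("queued after clean: %d\n", q.queued);
            return 0;
    }

Because the refill and clean paths are strictly ordered by the tasklet
lifecycle, the lock protects nothing and only adds cost on the hot refill
path, which is what the patch removes.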
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed_wo.c  4
1 file changed, 0 insertions, 4 deletions
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
index a0a39643caf7..d32b86499896 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -138,7 +138,6 @@ mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
 	enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 	int n_buf = 0;
 
-	spin_lock_bh(&q->lock);
 	while (q->queued < q->n_desc) {
 		struct mtk_wed_wo_queue_entry *entry;
 		dma_addr_t addr;
@@ -172,7 +171,6 @@ mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
 		q->queued++;
 		n_buf++;
 	}
-	spin_unlock_bh(&q->lock);
 
 	return n_buf;
 }
@@ -316,7 +314,6 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 {
 	struct page *page;
 
-	spin_lock_bh(&q->lock);
 	for (;;) {
 		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
 
@@ -325,7 +322,6 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 		skb_free_frag(buf);
 	}
-	spin_unlock_bh(&q->lock);
 
 	if (!q->cache.va)
 		return;