| author | Bo Jiao <Bo.Jiao@mediatek.com> | 2023-10-20 12:30:57 +0200 |
|---|---|---|
| committer | Felix Fietkau <nbd@nbd.name> | 2023-12-07 18:50:20 +0100 |
| commit | 950d0abb5cd94f2b0710c5c42ac4398c91a7ff22 (patch) | |
| tree | 5e7fa426479d889e35314e313b01358318f1d27e /drivers/net/wireless/mediatek/mt76/dma.c | |
| parent | wifi: mt76: mt7996: use u16 for val field in mt7996_mcu_set_rro signature (diff) | |
wifi: mt76: mt7996: add wed rx support
Similar to MT7915, enable the Wireless Ethernet Dispatcher (WED) for MT7996
to offload traffic received from the WLAN NIC and transmitted on the LAN one.
Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Diffstat (limited to 'drivers/net/wireless/mediatek/mt76/dma.c')
-rw-r--r-- | drivers/net/wireless/mediatek/mt76/dma.c | 149 |
1 file changed, 111 insertions(+), 38 deletions(-)
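
Most of the new branches in the dma.c hunks below key off per-queue WED/RRO helpers (mt76_queue_is_wed_rro(), mt76_queue_is_wed_rro_ind(), mt76_queue_is_wed_rro_data()) whose definitions live in mt76.h rather than in this file. As a reading aid only, here is a minimal sketch of the idea; the flag bit is hypothetical, the real MT_QFLAG_* layout differs, and the _ind/_data variants additionally check a ring-type field encoded in the same flags word.

```c
/* Illustrative sketch only, not part of this patch.  The real flag layout
 * and predicates are defined in mt76.h; the bit position below is made up.
 */
#define MT_QFLAG_WED_RRO	BIT(6)	/* hypothetical bit */

static inline bool mt76_queue_is_wed_rro(struct mt76_queue *q)
{
	/* RRO rings are owned by the WED block, so dma.c must skip the
	 * normal descriptor handling and page-pool refill for them
	 */
	return q->flags & MT_QFLAG_WED_RRO;
}
```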
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 12615b49c0d7..7c76afdaef68 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -189,7 +189,10 @@ static void
 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
 {
         Q_WRITE(q, desc_base, q->desc_dma);
-        Q_WRITE(q, ring_size, q->ndesc);
+        if (q->flags & MT_QFLAG_WED_RRO_EN)
+                Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
+        else
+                Q_WRITE(q, ring_size, q->ndesc);
         q->head = Q_READ(q, dma_idx);
         q->tail = q->head;
 }
@@ -198,14 +201,16 @@ static void
 __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
                        bool reset_idx)
 {
-        int i;
-
         if (!q || !q->ndesc)
                 return;

-        /* clear descriptors */
-        for (i = 0; i < q->ndesc; i++)
-                q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+        if (!mt76_queue_is_wed_rro_ind(q)) {
+                int i;
+
+                /* clear descriptors */
+                for (i = 0; i < q->ndesc; i++)
+                        q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+        }

         if (reset_idx) {
                 Q_WRITE(q, cpu_idx, 0);
@@ -224,13 +229,22 @@ static int
 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
                     struct mt76_queue_buf *buf, void *data)
 {
-        struct mt76_desc *desc = &q->desc[q->head];
         struct mt76_queue_entry *entry = &q->entry[q->head];
         struct mt76_txwi_cache *txwi = NULL;
+        struct mt76_desc *desc;
         u32 buf1 = 0, ctrl;
         int idx = q->head;
         int rx_token;

+        if (mt76_queue_is_wed_rro_ind(q)) {
+                struct mt76_wed_rro_desc *rro_desc;
+
+                rro_desc = (struct mt76_wed_rro_desc *)q->desc;
+                data = &rro_desc[q->head];
+                goto done;
+        }
+
+        desc = &q->desc[q->head];
         ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);

         if (mt76_queue_is_wed_rx(q)) {
@@ -253,6 +267,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
         WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
         WRITE_ONCE(desc->info, 0);

+done:
         entry->dma_addr[0] = buf->addr;
         entry->dma_len[0] = buf->len;
         entry->txwi = txwi;
@@ -401,19 +416,26 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 {
         struct mt76_queue_entry *e = &q->entry[idx];
         struct mt76_desc *desc = &q->desc[idx];
-        void *buf;
+        u32 ctrl, desc_info, buf1;
+        void *buf = e->buf;
+
+        if (mt76_queue_is_wed_rro_ind(q))
+                goto done;

+        ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
         if (len) {
-                u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
                 *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
                 *more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
         }

+        desc_info = le32_to_cpu(desc->info);
         if (info)
-                *info = le32_to_cpu(desc->info);
+                *info = desc_info;
+
+        buf1 = le32_to_cpu(desc->buf1);
+        mt76_dma_should_drop_buf(drop, ctrl, buf1, desc_info);

         if (mt76_queue_is_wed_rx(q)) {
-                u32 buf1 = le32_to_cpu(desc->buf1);
                 u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
                 struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);

@@ -429,23 +451,16 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
                 t->ptr = NULL;

                 mt76_put_rxwi(dev, t);
-
-                if (drop) {
-                        u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
-
-                        *drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
-                                           MT_DMA_CTL_DROP));
-
+                if (drop)
                         *drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
-                }
         } else {
-                buf = e->buf;
-                e->buf = NULL;
                 dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
                                 SKB_WITH_OVERHEAD(q->buf_size),
                                 page_pool_get_dma_dir(q->page_pool));
         }

+done:
+        e->buf = NULL;
         return buf;
 }

@@ -459,11 +474,16 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
         if (!q->queued)
                 return NULL;

-        if (flush)
-                q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
-        else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+        if (mt76_queue_is_wed_rro_data(q))
                 return NULL;

+        if (!mt76_queue_is_wed_rro_ind(q)) {
+                if (flush)
+                        q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+                else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+                        return NULL;
+        }
+
         q->tail = (q->tail + 1) % q->ndesc;
         q->queued--;

@@ -615,11 +635,14 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
         spin_lock_bh(&q->lock);

         while (q->queued < q->ndesc - 1) {
+                struct mt76_queue_buf qbuf = {};
                 enum dma_data_direction dir;
-                struct mt76_queue_buf qbuf;
                 dma_addr_t addr;
                 int offset;
-                void *buf;
+                void *buf = NULL;
+
+                if (mt76_queue_is_wed_rro_ind(q))
+                        goto done;

                 buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
                 if (!buf)
@@ -630,6 +653,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
                 dma_sync_single_for_device(dev->dma_dev, addr, len, dir);

                 qbuf.addr = addr + q->buf_offset;
+done:
                 qbuf.len = len - q->buf_offset;
                 qbuf.skip_unmap = false;
                 if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
@@ -639,7 +663,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
                 frames++;
         }

-        if (frames)
+        if (frames || mt76_queue_is_wed_rx(q))
                 mt76_dma_kick_queue(dev, q);

         spin_unlock_bh(&q->lock);
@@ -650,8 +674,8 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 {
 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
-        int ret, type, ring;
-        u8 flags;
+        int ret = 0, type, ring;
+        u16 flags;

         if (!q || !q->ndesc)
                 return -EINVAL;
@@ -678,7 +702,6 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
                 q->flags = 0;
                 mt76_dma_queue_reset(dev, q);
                 mt76_dma_rx_fill(dev, q, false);
-                q->flags = flags;

                 ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
                 if (!ret)
@@ -690,9 +713,31 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
                 if (!ret)
                         q->wed_regs = q->wed->rx_ring[ring].reg_base;
                 break;
+        case MT76_WED_RRO_Q_DATA:
+                q->flags &= ~MT_QFLAG_WED;
+                __mt76_dma_queue_reset(dev, q, false);
+                mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
+                q->head = q->ndesc - 1;
+                q->queued = q->head;
+                break;
+        case MT76_WED_RRO_Q_MSDU_PG:
+                q->flags &= ~MT_QFLAG_WED;
+                __mt76_dma_queue_reset(dev, q, false);
+                mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
+                q->head = q->ndesc - 1;
+                q->queued = q->head;
+                break;
+        case MT76_WED_RRO_Q_IND:
+                q->flags &= ~MT_QFLAG_WED;
+                mt76_dma_queue_reset(dev, q);
+                mt76_dma_rx_fill(dev, q, false);
+                mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
+                break;
         default:
                 ret = -EINVAL;
+                break;
         }
+        q->flags = flags;

         return ret;
 #else
@@ -716,11 +761,26 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
         q->buf_size = bufsize;
         q->hw_idx = idx;

-        size = q->ndesc * sizeof(struct mt76_desc);
-        q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
+        size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc)
+                                            : sizeof(struct mt76_desc);
+        q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size,
+                                      &q->desc_dma, GFP_KERNEL);
         if (!q->desc)
                 return -ENOMEM;

+        if (mt76_queue_is_wed_rro_ind(q)) {
+                struct mt76_wed_rro_desc *rro_desc;
+                int i;
+
+                rro_desc = (struct mt76_wed_rro_desc *)q->desc;
+                for (i = 0; i < q->ndesc; i++) {
+                        struct mt76_wed_rro_ind *cmd;
+
+                        cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
+                        cmd->magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1;
+                }
+        }
+
         size = q->ndesc * sizeof(*q->entry);
         q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
         if (!q->entry)
@@ -734,8 +794,13 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
         if (ret)
                 return ret;

-        if (!mt76_queue_is_wed_tx_free(q))
-                mt76_dma_queue_reset(dev, q);
+        if (mtk_wed_device_active(&dev->mmio.wed)) {
+                if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) ||
+                    mt76_queue_is_wed_tx_free(q))
+                        return 0;
+        }
+
+        mt76_dma_queue_reset(dev, q);

         return 0;
 }
@@ -757,7 +822,8 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
                 if (!buf)
                         break;

-                mt76_put_page_pool_buf(buf, false);
+                if (!mt76_queue_is_wed_rro(q))
+                        mt76_put_page_pool_buf(buf, false);
         } while (1);

         spin_lock_bh(&q->lock);
@@ -773,13 +839,16 @@ static void
 mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
 {
         struct mt76_queue *q = &dev->q_rx[qid];
-        int i;

         if (!q->ndesc)
                 return;

-        for (i = 0; i < q->ndesc; i++)
-                q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+        if (!mt76_queue_is_wed_rro_ind(q)) {
+                int i;
+
+                for (i = 0; i < q->ndesc; i++)
+                        q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+        }

         mt76_dma_rx_cleanup(dev, q);

@@ -991,6 +1060,10 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
         mt76_for_each_q_rx(dev, i) {
                 struct mt76_queue *q = &dev->q_rx[i];

+                if (mtk_wed_device_active(&dev->mmio.wed) &&
+                    mt76_queue_is_wed_rro(q))
+                        continue;
+
                 netif_napi_del(&dev->napi[i]);
                 mt76_dma_rx_cleanup(dev, q);

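
One detail worth calling out: the allocation hunk pre-seeds every slot of the RRO indication ring with magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1, a value the driver does not expect on the first pass over the ring. Below is a rough, hypothetical sketch of how a consumer could use that counter to spot commands freshly written by the WED hardware; the helper and the per-wrap expected value are illustrative and not code from this patch.

```c
/* Illustrative helper, not from this patch: an indication command is
 * treated as valid only once its magic counter matches the value the
 * driver expects for the current wrap of the ring.
 */
static bool rro_ind_cmd_ready(struct mt76_wed_rro_ind *cmd, u8 expected_magic)
{
	if (cmd->magic_cnt != expected_magic)
		return false;

	/* read the command body only after the magic counter matched */
	dma_rmb();
	return true;
}
```

On this assumption, the expected value would advance by one, modulo MT_DMA_WED_IND_CMD_CNT, each time the ring wraps, so slots initialised to MT_DMA_WED_IND_CMD_CNT - 1 read as "not ready" until the hardware writes them for the first time.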