author     Maxime Ripard <maxime@cerno.tech>  2021-05-11 13:35:52 +0200
committer  Maxime Ripard <maxime@cerno.tech>  2021-05-11 13:35:52 +0200
commit     c55b44c9386f3ee1b08752638559f19deaf6040d (patch)
tree       c843a21f45180387fcd9eb2625cc9d1f166a3156 /drivers/net/ethernet/mediatek/mtk_eth_soc.c
parent     MAINTAINERS: Update my e-mail (diff)
parent     Linux 5.13-rc1 (diff)
Merge drm/drm-fixes into drm-misc-fixes
Start this new release's drm-misc-fixes branch.

Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Diffstat (limited to 'drivers/net/ethernet/mediatek/mtk_eth_soc.c')
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c  315
1 file changed, 210 insertions(+), 105 deletions(-)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 01d3ee4b5829..ed4eacef17ce 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -19,6 +19,8 @@
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
+#include <linux/jhash.h>
+#include <net/dsa.h>
#include "mtk_eth_soc.h"
@@ -85,7 +87,7 @@ static int mtk_mdio_busy_wait(struct mtk_eth *eth)
return 0;
if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
break;
- usleep_range(10, 20);
+ cond_resched();
}
dev_err(eth->dev, "mdio: MDIO timeout\n");
@@ -776,13 +778,18 @@ static inline int mtk_max_buf_size(int frag_size)
return buf_size;
}
-static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
+static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
struct mtk_rx_dma *dma_rxd)
{
- rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
+ if (!(rxd->rxd2 & RX_DMA_DONE))
+ return false;
+
+ rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
+
+ return true;
}
/* the qdma core needs scratch memory to be setup */
@@ -857,7 +864,8 @@ static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
}
-static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
+ bool napi)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
@@ -889,8 +897,12 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
tx_buf->flags = 0;
if (tx_buf->skb &&
- (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
- dev_kfree_skb_any(tx_buf->skb);
+ (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
+ if (napi)
+ napi_consume_skb(tx_buf->skb, napi);
+ else
+ dev_kfree_skb_any(tx_buf->skb);
+ }
tx_buf->skb = NULL;
}
@@ -1068,7 +1080,7 @@ err_dma:
tx_buf = mtk_desc_to_tx_buf(ring, itxd);
/* unmap dma */
- mtk_tx_unmap(eth, tx_buf);
+ mtk_tx_unmap(eth, tx_buf, false);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
@@ -1125,17 +1137,6 @@ static void mtk_wake_queue(struct mtk_eth *eth)
}
}
-static void mtk_stop_queue(struct mtk_eth *eth)
-{
- int i;
-
- for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!eth->netdev[i])
- continue;
- netif_stop_queue(eth->netdev[i]);
- }
-}
-
static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
@@ -1156,7 +1157,7 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_num = mtk_cal_txd_req(skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
- mtk_stop_queue(eth);
+ netif_stop_queue(dev);
netif_err(eth, tx_queued, dev,
"Tx Ring full when queue awake!\n");
spin_unlock(&eth->page_lock);
@@ -1182,7 +1183,7 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
- mtk_stop_queue(eth);
+ netif_stop_queue(dev);
spin_unlock(&eth->page_lock);
@@ -1238,17 +1239,19 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_eth *eth)
{
+ struct dim_sample dim_sample = {};
struct mtk_rx_ring *ring;
int idx;
struct sk_buff *skb;
u8 *data, *new_data;
struct mtk_rx_dma *rxd, trxd;
- int done = 0;
+ int done = 0, bytes = 0;
while (done < budget) {
struct net_device *netdev;
unsigned int pktlen;
dma_addr_t dma_addr;
+ u32 hash;
int mac;
ring = mtk_get_rx_ring(eth);
@@ -1259,18 +1262,16 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
rxd = &ring->dma[idx];
data = ring->data[idx];
- mtk_rx_get_desc(&trxd, rxd);
- if (!(trxd.rxd2 & RX_DMA_DONE))
+ if (!mtk_rx_get_desc(&trxd, rxd))
break;
/* find out which mac the packet come from. values start at 1 */
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) ||
+ (trxd.rxd4 & RX_DMA_SPECIAL_TAG))
mac = 0;
- } else {
- mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
- RX_DMA_FPORT_MASK;
- mac--;
- }
+ else
+ mac = ((trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
+ RX_DMA_FPORT_MASK) - 1;
if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
!eth->netdev[mac]))
@@ -1298,17 +1299,18 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto release_desc;
}
+ dma_unmap_single(eth->dev, trxd.rxd1,
+ ring->buf_size, DMA_FROM_DEVICE);
+
/* receive data */
skb = build_skb(data, ring->frag_size);
if (unlikely(!skb)) {
- skb_free_frag(new_data);
+ skb_free_frag(data);
netdev->stats.rx_dropped++;
- goto release_desc;
+ goto skip_rx;
}
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
- dma_unmap_single(eth->dev, trxd.rxd1,
- ring->buf_size, DMA_FROM_DEVICE);
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
skb_put(skb, pktlen);
@@ -1317,14 +1319,22 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
else
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
+ bytes += pktlen;
+
+ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
+ if (hash != MTK_RXD4_FOE_ENTRY) {
+ hash = jhash_1word(hash, 0);
+ skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+ }
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
- RX_DMA_VID(trxd.rxd3))
+ (trxd.rxd2 & RX_DMA_VTAG))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
RX_DMA_VID(trxd.rxd3));
skb_record_rx_queue(skb, 0);
napi_gro_receive(napi, skb);
+skip_rx:
ring->data[idx] = new_data;
rxd->rxd1 = (unsigned int)dma_addr;
@@ -1348,6 +1358,12 @@ rx_done:
mtk_update_rx_cpu_idx(eth);
}
+ eth->rx_packets += done;
+ eth->rx_bytes += bytes;
+ dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
+ &dim_sample);
+ net_dim(&eth->rx_dim, dim_sample);
+
return done;
}
@@ -1360,7 +1376,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
struct mtk_tx_buf *tx_buf;
u32 cpu, dma;
- cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
+ cpu = ring->last_free_ptr;
dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
desc = mtk_qdma_phys_to_virt(ring, cpu);
@@ -1386,7 +1402,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
done[mac]++;
budget--;
}
- mtk_tx_unmap(eth, tx_buf);
+ mtk_tx_unmap(eth, tx_buf, true);
ring->last_free = desc;
atomic_inc(&ring->free_count);
@@ -1394,6 +1410,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
cpu = next_cpu;
}
+ ring->last_free_ptr = cpu;
mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
return budget;
@@ -1423,7 +1440,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
budget--;
}
- mtk_tx_unmap(eth, tx_buf);
+ mtk_tx_unmap(eth, tx_buf, true);
desc = &ring->dma[cpu];
ring->last_free = desc;
@@ -1440,6 +1457,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct dim_sample dim_sample = {};
unsigned int done[MTK_MAX_DEVS];
unsigned int bytes[MTK_MAX_DEVS];
int total = 0, i;
@@ -1457,8 +1475,14 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
continue;
netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
total += done[i];
+ eth->tx_packets += done[i];
+ eth->tx_bytes += bytes[i];
}
+ dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
+ &dim_sample);
+ net_dim(&eth->tx_dim, dim_sample);
+
if (mtk_queue_stopped(eth) &&
(atomic_read(&ring->free_count) > ring->thresh))
mtk_wake_queue(eth);
@@ -1480,7 +1504,6 @@ static void mtk_handle_status_irq(struct mtk_eth *eth)
static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
- u32 status, mask;
int tx_done = 0;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
@@ -1489,22 +1512,20 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
tx_done = mtk_poll_tx(eth, budget);
if (unlikely(netif_msg_intr(eth))) {
- status = mtk_r32(eth, eth->tx_int_status_reg);
- mask = mtk_r32(eth, eth->tx_int_mask_reg);
dev_info(eth->dev,
- "done tx %d, intr 0x%08x/0x%x\n",
- tx_done, status, mask);
+ "done tx %d, intr 0x%08x/0x%x\n", tx_done,
+ mtk_r32(eth, eth->tx_int_status_reg),
+ mtk_r32(eth, eth->tx_int_mask_reg));
}
if (tx_done == budget)
return budget;
- status = mtk_r32(eth, eth->tx_int_status_reg);
- if (status & MTK_TX_DONE_INT)
+ if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
return budget;
- napi_complete(napi);
- mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+ if (napi_complete_done(napi, tx_done))
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
return tx_done;
}
@@ -1512,35 +1533,33 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
- u32 status, mask;
- int rx_done = 0;
- int remain_budget = budget;
+ int rx_done_total = 0;
mtk_handle_status_irq(eth);
-poll_again:
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
- rx_done = mtk_poll_rx(napi, remain_budget, eth);
+ do {
+ int rx_done;
- if (unlikely(netif_msg_intr(eth))) {
- status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
- mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
- dev_info(eth->dev,
- "done rx %d, intr 0x%08x/0x%x\n",
- rx_done, status, mask);
- }
- if (rx_done == remain_budget)
- return budget;
+ mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
+ rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
+ rx_done_total += rx_done;
- status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
- if (status & MTK_RX_DONE_INT) {
- remain_budget -= rx_done;
- goto poll_again;
- }
- napi_complete(napi);
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ if (unlikely(netif_msg_intr(eth))) {
+ dev_info(eth->dev,
+ "done rx %d, intr 0x%08x/0x%x\n", rx_done,
+ mtk_r32(eth, MTK_PDMA_INT_STATUS),
+ mtk_r32(eth, MTK_PDMA_INT_MASK));
+ }
- return rx_done + budget - remain_budget;
+ if (rx_done_total == budget)
+ return budget;
+
+ } while (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT);
+
+ if (napi_complete_done(napi, rx_done_total))
+ mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+
+ return rx_done_total;
}
static int mtk_tx_alloc(struct mtk_eth *eth)
@@ -1587,6 +1606,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
ring->next_free = &ring->dma[0];
ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+ ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
ring->thresh = MAX_SKB_FRAGS;
/* make sure that all changes to the dma ring are flushed before we
@@ -1600,9 +1620,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
mtk_w32(eth,
ring->phys + ((MTK_DMA_SIZE - 1) * sz),
MTK_QTX_CRX_PTR);
- mtk_w32(eth,
- ring->phys + ((MTK_DMA_SIZE - 1) * sz),
- MTK_QTX_DRX_PTR);
+ mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
MTK_QTX_CFG(0));
} else {
@@ -1625,7 +1643,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
if (ring->buf) {
for (i = 0; i < MTK_DMA_SIZE; i++)
- mtk_tx_unmap(eth, &ring->buf[i]);
+ mtk_tx_unmap(eth, &ring->buf[i], false);
kfree(ring->buf);
ring->buf = NULL;
}
@@ -2015,25 +2033,22 @@ static int mtk_set_features(struct net_device *dev, netdev_features_t features)
/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
- unsigned long t_start = jiffies;
+ unsigned int reg;
+ int ret;
+ u32 val;
- while (1) {
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
- (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
- return 0;
- } else {
- if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
- (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
- return 0;
- }
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ reg = MTK_QDMA_GLO_CFG;
+ else
+ reg = MTK_PDMA_GLO_CFG;
- if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
- break;
- }
+ ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
+ !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
+ 5, MTK_DMA_BUSY_TIMEOUT_US);
+ if (ret)
+ dev_err(eth->dev, "DMA init timeout\n");
- dev_err(eth->dev, "DMA init timeout\n");
- return -1;
+ return ret;
}
static int mtk_dma_init(struct mtk_eth *eth)
@@ -2133,6 +2148,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
+ eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
__napi_schedule(&eth->rx_napi);
mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
@@ -2145,6 +2161,7 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
+ eth->tx_events++;
if (likely(napi_schedule_prep(&eth->tx_napi))) {
__napi_schedule(&eth->tx_napi);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
@@ -2197,7 +2214,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
mtk_w32(eth,
MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
- MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
+ MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
MTK_RX_BT_32DWORDS,
MTK_QDMA_GLO_CFG);
@@ -2233,6 +2250,9 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
val |= config;
+ if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
+ val |= MTK_GDMA_SPECIAL_TAG;
+
mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
}
/* Reset and enable PSE */
@@ -2255,12 +2275,17 @@ static int mtk_open(struct net_device *dev)
/* we run 2 netdevs on the same dma ring so we only bring it up once */
if (!refcount_read(&eth->dma_refcnt)) {
- int err = mtk_start_dma(eth);
+ u32 gdm_config = MTK_GDMA_TO_PDMA;
+ int err;
+ err = mtk_start_dma(eth);
if (err)
return err;
- mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
+ if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
+ gdm_config = MTK_GDMA_TO_PPE;
+
+ mtk_gdm_config(eth, gdm_config);
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
@@ -2321,12 +2346,18 @@ static int mtk_stop(struct net_device *dev)
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
+ cancel_work_sync(&eth->rx_dim.work);
+ cancel_work_sync(&eth->tx_dim.work);
+
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
mtk_dma_free(eth);
+ if (eth->soc->offload_version)
+ mtk_ppe_stop(&eth->ppe);
+
return 0;
}
@@ -2370,6 +2401,64 @@ err_disable_clks:
return ret;
}
+static void mtk_dim_rx(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
+ struct dim_cq_moder cur_profile;
+ u32 val, cur;
+
+ cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
+ dim->profile_ix);
+ spin_lock_bh(&eth->dim_lock);
+
+ val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
+ val &= MTK_PDMA_DELAY_TX_MASK;
+ val |= MTK_PDMA_DELAY_RX_EN;
+
+ cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
+ val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
+
+ cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
+ val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
+
+ mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
+ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+
+ spin_unlock_bh(&eth->dim_lock);
+
+ dim->state = DIM_START_MEASURE;
+}
+
+static void mtk_dim_tx(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
+ struct dim_cq_moder cur_profile;
+ u32 val, cur;
+
+ cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
+ dim->profile_ix);
+ spin_lock_bh(&eth->dim_lock);
+
+ val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
+ val &= MTK_PDMA_DELAY_RX_MASK;
+ val |= MTK_PDMA_DELAY_TX_EN;
+
+ cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
+ val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
+
+ cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
+ val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
+
+ mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
+ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+
+ spin_unlock_bh(&eth->dim_lock);
+
+ dim->state = DIM_START_MEASURE;
+}
+
static int mtk_hw_init(struct mtk_eth *eth)
{
int i, val, ret;
@@ -2391,9 +2480,6 @@ static int mtk_hw_init(struct mtk_eth *eth)
goto err_disable_pm;
}
- /* enable interrupt delay for RX */
- mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
-
/* disable delay and normal interrupt */
mtk_tx_irq_disable(eth, ~0);
mtk_rx_irq_disable(eth, ~0);
@@ -2432,11 +2518,11 @@ static int mtk_hw_init(struct mtk_eth *eth)
/* Enable RX VLan Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
- /* enable interrupt delay for RX */
- mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
+ /* set interrupt delays based on current Net DIM sample */
+ mtk_dim_rx(&eth->rx_dim.work);
+ mtk_dim_tx(&eth->tx_dim.work);
/* disable delay and normal interrupt */
- mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
mtk_tx_irq_disable(eth, ~0);
mtk_rx_irq_disable(eth, ~0);
@@ -2473,14 +2559,11 @@ static int __init mtk_init(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- const char *mac_addr;
-
- mac_addr = of_get_mac_address(mac->of_node);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(dev->dev_addr, mac_addr);
+ int ret;
- /* If the mac address is invalid, use random mac address */
- if (!is_valid_ether_addr(dev->dev_addr)) {
+ ret = of_get_mac_address(mac->of_node, dev->dev_addr);
+ if (ret) {
+ /* If the mac address is invalid, use random mac address */
eth_hw_addr_random(dev);
dev_err(eth->dev, "generated random MAC address %pM\n",
dev->dev_addr);
@@ -2832,6 +2915,7 @@ static const struct net_device_ops mtk_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mtk_poll_controller,
#endif
+ .ndo_setup_tc = mtk_eth_setup_tc,
};
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
@@ -2973,6 +3057,13 @@ static int mtk_probe(struct platform_device *pdev)
spin_lock_init(&eth->page_lock);
spin_lock_init(&eth->tx_irq_lock);
spin_lock_init(&eth->rx_irq_lock);
+ spin_lock_init(&eth->dim_lock);
+
+ eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
+
+ eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
@@ -3088,6 +3179,17 @@ static int mtk_probe(struct platform_device *pdev)
goto err_free_dev;
}
+ if (eth->soc->offload_version) {
+ err = mtk_ppe_init(&eth->ppe, eth->dev,
+ eth->base + MTK_ETH_PPE_BASE, 2);
+ if (err)
+ goto err_free_dev;
+
+ err = mtk_eth_offload_init(eth);
+ if (err)
+ goto err_free_dev;
+ }
+
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
@@ -3162,6 +3264,7 @@ static const struct mtk_soc_data mt7621_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
+ .offload_version = 2,
};
static const struct mtk_soc_data mt7622_data = {
@@ -3170,6 +3273,7 @@ static const struct mtk_soc_data mt7622_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
+ .offload_version = 2,
};
static const struct mtk_soc_data mt7623_data = {
@@ -3177,6 +3281,7 @@ static const struct mtk_soc_data mt7623_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
+ .offload_version = 2,
};
static const struct mtk_soc_data mt7629_data = {