Diffstat (limited to 'drivers/net/ethernet/mscc')
-rw-r--r-- | drivers/net/ethernet/mscc/Kconfig | 1
-rw-r--r-- | drivers/net/ethernet/mscc/Makefile | 4
-rw-r--r-- | drivers/net/ethernet/mscc/ocelot.c | 728
-rw-r--r-- | drivers/net/ethernet/mscc/ocelot.h | 24
-rw-r--r-- | drivers/net/ethernet/mscc/ocelot_devlink.c | 885
-rw-r--r-- | drivers/net/ethernet/mscc/ocelot_flower.c | 7
-rw-r--r-- | drivers/net/ethernet/mscc/ocelot_io.c | 8
-rw-r--r-- | drivers/net/ethernet/mscc/ocelot_mrp.c | 175
-rw-r--r-- | drivers/net/ethernet/mscc/ocelot_net.c | 602
-rw-r--r-- | drivers/net/ethernet/mscc/ocelot_vcap.c | 19
-rw-r--r-- | drivers/net/ethernet/mscc/ocelot_vcap.h | 295
-rw-r--r-- | drivers/net/ethernet/mscc/ocelot_vsc7514.c | 331
12 files changed, 2224 insertions, 855 deletions
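For orientation while reading the diff below: one of the larger additions to ocelot.c is a set of exported helpers for register-based CPU frame extraction and injection (ocelot_xtr_poll_frame(), ocelot_can_inject(), ocelot_port_inject_frame() and ocelot_drain_cpu_queue()). The following is a minimal sketch of how a driver front-end might wire these helpers into a transmit routine and an extraction interrupt handler. Only the helper names and signatures are taken from the patch; the my_*() names, the choice of extraction/injection group 0, rew_op 0 and the surrounding glue are illustrative assumptions, not code from this commit.

/* Hypothetical glue around the helpers exported by this patch. The
 * my_*() functions and the choice of group 0 and rew_op 0 are
 * assumptions for illustration only, not part of the commit.
 */
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <soc/mscc/ocelot.h>

#define MY_XTR_GRP	0	/* CPU extraction group (assumed) */
#define MY_INJ_GRP	0	/* CPU injection group (assumed) */

/* Transmit path: a real driver would derive ocelot/port from
 * netdev_priv(dev) in its ndo_start_xmit; they are passed explicitly
 * here to keep the sketch self-contained.
 */
static netdev_tx_t my_port_xmit(struct sk_buff *skb, struct net_device *dev,
				struct ocelot *ocelot, int port)
{
	/* Check injection FIFO readiness before writing any IFH words */
	if (!ocelot_can_inject(ocelot, MY_INJ_GRP))
		return NETDEV_TX_BUSY;

	/* rew_op 0: no rewriter (PTP) command requested for this frame */
	ocelot_port_inject_frame(ocelot, port, MY_INJ_GRP, 0, skb);

	/* The injection helper copies the frame into the injection
	 * registers, so the skb can be released by the caller.
	 */
	consume_skb(skb);

	return NETDEV_TX_OK;
}

/* Extraction interrupt: pull complete frames out of the CPU queue and
 * hand them to the stack. On an extraction error, drain the group so
 * the FIFO does not stay stuck on a partial frame.
 */
static irqreturn_t my_xtr_irq_handler(int irq, void *arg)
{
	struct ocelot *ocelot = arg;

	while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(MY_XTR_GRP)) {
		struct sk_buff *skb;
		int err;

		err = ocelot_xtr_poll_frame(ocelot, MY_XTR_GRP, &skb);
		if (err) {
			if (err == -EIO)
				ocelot_drain_cpu_queue(ocelot, MY_XTR_GRP);
			break;
		}

		netif_rx(skb);
	}

	return IRQ_HANDLED;
}

The busy-return path in the sketch relies on ocelot_can_inject() checking both QS_INJ_STATUS_FIFO_RDY and QS_INJ_STATUS_WMARK_REACHED for the injection group before any data is written, as can be seen in the ocelot.c hunks below.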
diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig index ee7bb7e24e8e..c0ede0ca7115 100644 --- a/drivers/net/ethernet/mscc/Kconfig +++ b/drivers/net/ethernet/mscc/Kconfig @@ -14,6 +14,7 @@ if NET_VENDOR_MICROSEMI # Users should depend on NET_SWITCHDEV, HAS_IOMEM config MSCC_OCELOT_SWITCH_LIB select REGMAP_MMIO + select PACKING select PHYLIB tristate help diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile index 58f94c3d80f9..722c27694b21 100644 --- a/drivers/net/ethernet/mscc/Makefile +++ b/drivers/net/ethernet/mscc/Makefile @@ -6,7 +6,9 @@ mscc_ocelot_switch_lib-y := \ ocelot_police.o \ ocelot_vcap.o \ ocelot_flower.o \ - ocelot_ptp.o + ocelot_ptp.o \ + ocelot_devlink.o +mscc_ocelot_switch_lib-$(CONFIG_BRIDGE_MRP) += ocelot_mrp.o obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o mscc_ocelot-y := \ ocelot_vsc7514.o \ diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index ff87a0bc089c..46e5c9136bac 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -4,6 +4,7 @@ * * Copyright (c) 2017 Microsemi Corporation */ +#include <linux/dsa/ocelot.h> #include <linux/if_bridge.h> #include <soc/mscc/ocelot_vcap.h> #include "ocelot.h" @@ -221,25 +222,20 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, } int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, - bool vlan_aware, struct switchdev_trans *trans) + bool vlan_aware) { + struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1]; struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_vcap_filter *filter; u32 val; - if (switchdev_trans_ph_prepare(trans)) { - struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1]; - struct ocelot_vcap_filter *filter; - - list_for_each_entry(filter, &block->rules, list) { - if (filter->ingress_port_mask & BIT(port) && - filter->action.vid_replace_ena) { - dev_err(ocelot->dev, - "Cannot change VLAN state with vlan modify rules active\n"); - return -EBUSY; - } + list_for_each_entry(filter, &block->rules, list) { + if (filter->ingress_port_mask & BIT(port) && + filter->action.vid_replace_ena) { + dev_err(ocelot->dev, + "Cannot change VLAN state with vlan modify rules active\n"); + return -EBUSY; } - - return 0; } ocelot_port->vlan_aware = vlan_aware; @@ -375,6 +371,60 @@ static void ocelot_vlan_init(struct ocelot *ocelot) } } +static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port) +{ + return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port); +} + +int ocelot_port_flush(struct ocelot *ocelot, int port) +{ + int err, val; + + /* Disable dequeuing from the egress queues */ + ocelot_rmw_rix(ocelot, QSYS_PORT_MODE_DEQUEUE_DIS, + QSYS_PORT_MODE_DEQUEUE_DIS, + QSYS_PORT_MODE, port); + + /* Disable flow control */ + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0); + + /* Disable priority flow control */ + ocelot_fields_write(ocelot, port, + QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, 0); + + /* Wait at least the time it takes to receive a frame of maximum length + * at the port. + * Worst-case delays for 10 kilobyte jumbo frames are: + * 8 ms on a 10M port + * 800 μs on a 100M port + * 80 μs on a 1G port + * 32 μs on a 2.5G port + */ + usleep_range(8000, 10000); + + /* Disable half duplex backpressure. */ + ocelot_rmw_rix(ocelot, 0, SYS_FRONT_PORT_MODE_HDX_MODE, + SYS_FRONT_PORT_MODE, port); + + /* Flush the queues associated with the port. 
*/ + ocelot_rmw_gix(ocelot, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG_FLUSH_ENA, + REW_PORT_CFG, port); + + /* Enable dequeuing from the egress queues. */ + ocelot_rmw_rix(ocelot, 0, QSYS_PORT_MODE_DEQUEUE_DIS, QSYS_PORT_MODE, + port); + + /* Wait until flushing is complete. */ + err = read_poll_timeout(ocelot_read_eq_avail, val, !val, + 100, 2000000, false, ocelot, port); + + /* Clear flushing again. */ + ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port); + + return err; +} +EXPORT_SYMBOL(ocelot_port_flush); + void ocelot_adjust_link(struct ocelot *ocelot, int port, struct phy_device *phydev) { @@ -579,6 +629,229 @@ void ocelot_get_txtstamp(struct ocelot *ocelot) } EXPORT_SYMBOL(ocelot_get_txtstamp); +static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh, + u32 *rval) +{ + u32 bytes_valid, val; + + val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + if (val == XTR_NOT_READY) { + if (ifh) + return -EIO; + + do { + val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + } while (val == XTR_NOT_READY); + } + + switch (val) { + case XTR_ABORT: + return -EIO; + case XTR_EOF_0: + case XTR_EOF_1: + case XTR_EOF_2: + case XTR_EOF_3: + case XTR_PRUNED: + bytes_valid = XTR_VALID_BYTES(val); + val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + if (val == XTR_ESCAPE) + *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + else + *rval = val; + + return bytes_valid; + case XTR_ESCAPE: + *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + + return 4; + default: + *rval = val; + + return 4; + } +} + +static int ocelot_xtr_poll_xfh(struct ocelot *ocelot, int grp, u32 *xfh) +{ + int i, err = 0; + + for (i = 0; i < OCELOT_TAG_LEN / 4; i++) { + err = ocelot_rx_frame_word(ocelot, grp, true, &xfh[i]); + if (err != 4) + return (err < 0) ? err : -EIO; + } + + return 0; +} + +int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb) +{ + struct skb_shared_hwtstamps *shhwtstamps; + u64 tod_in_ns, full_ts_in_ns, cpuq; + u64 timestamp, src_port, len; + u32 xfh[OCELOT_TAG_LEN / 4]; + struct net_device *dev; + struct timespec64 ts; + struct sk_buff *skb; + int sz, buf_len; + u32 val, *buf; + int err; + + err = ocelot_xtr_poll_xfh(ocelot, grp, xfh); + if (err) + return err; + + ocelot_xfh_get_src_port(xfh, &src_port); + ocelot_xfh_get_len(xfh, &len); + ocelot_xfh_get_rew_val(xfh, ×tamp); + ocelot_xfh_get_cpuq(xfh, &cpuq); + + if (WARN_ON(src_port >= ocelot->num_phys_ports)) + return -EINVAL; + + dev = ocelot->ops->port_to_netdev(ocelot, src_port); + if (!dev) + return -EINVAL; + + skb = netdev_alloc_skb(dev, len); + if (unlikely(!skb)) { + netdev_err(dev, "Unable to allocate sk_buff\n"); + return -ENOMEM; + } + + buf_len = len - ETH_FCS_LEN; + buf = (u32 *)skb_put(skb, buf_len); + + len = 0; + do { + sz = ocelot_rx_frame_word(ocelot, grp, false, &val); + if (sz < 0) { + err = sz; + goto out_free_skb; + } + *buf++ = val; + len += sz; + } while (len < buf_len); + + /* Read the FCS */ + sz = ocelot_rx_frame_word(ocelot, grp, false, &val); + if (sz < 0) { + err = sz; + goto out_free_skb; + } + + /* Update the statistics if part of the FCS was read before */ + len -= ETH_FCS_LEN - sz; + + if (unlikely(dev->features & NETIF_F_RXFCS)) { + buf = (u32 *)skb_put(skb, ETH_FCS_LEN); + *buf = val; + } + + if (ocelot->ptp) { + ocelot_ptp_gettime64(&ocelot->ptp_info, &ts); + + tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec); + if ((tod_in_ns & 0xffffffff) < timestamp) + full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) | + timestamp; + else + full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) | + 
timestamp; + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamps->hwtstamp = full_ts_in_ns; + } + + /* Everything we see on an interface that is in the HW bridge + * has already been forwarded. + */ + if (ocelot->bridge_mask & BIT(src_port)) + skb->offload_fwd_mark = 1; + + skb->protocol = eth_type_trans(skb, dev); + +#if IS_ENABLED(CONFIG_BRIDGE_MRP) + if (skb->protocol == cpu_to_be16(ETH_P_MRP) && + cpuq & BIT(OCELOT_MRP_CPUQ)) + skb->offload_fwd_mark = 0; +#endif + + *nskb = skb; + + return 0; + +out_free_skb: + kfree_skb(skb); + return err; +} +EXPORT_SYMBOL(ocelot_xtr_poll_frame); + +bool ocelot_can_inject(struct ocelot *ocelot, int grp) +{ + u32 val = ocelot_read(ocelot, QS_INJ_STATUS); + + if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp)))) + return false; + if (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp))) + return false; + + return true; +} +EXPORT_SYMBOL(ocelot_can_inject); + +void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp, + u32 rew_op, struct sk_buff *skb) +{ + u32 ifh[OCELOT_TAG_LEN / 4] = {0}; + unsigned int i, count, last; + + ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | + QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp); + + ocelot_ifh_set_bypass(ifh, 1); + ocelot_ifh_set_dest(ifh, BIT_ULL(port)); + ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C); + ocelot_ifh_set_vid(ifh, skb_vlan_tag_get(skb)); + ocelot_ifh_set_rew_op(ifh, rew_op); + + for (i = 0; i < OCELOT_TAG_LEN / 4; i++) + ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp); + + count = DIV_ROUND_UP(skb->len, 4); + last = skb->len % 4; + for (i = 0; i < count; i++) + ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp); + + /* Add padding */ + while (i < (OCELOT_BUFFER_CELL_SZ / 4)) { + ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp); + i++; + } + + /* Indicate EOF and valid bytes in last word */ + ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | + QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 
0 : last) | + QS_INJ_CTRL_EOF, + QS_INJ_CTRL, grp); + + /* Add dummy CRC */ + ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp); + skb_tx_timestamp(skb); + + skb->dev->stats.tx_packets++; + skb->dev->stats.tx_bytes += skb->len; +} +EXPORT_SYMBOL(ocelot_port_inject_frame); + +void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp) +{ + while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) + ocelot_read_rix(ocelot, QS_XTR_RD, grp); +} +EXPORT_SYMBOL(ocelot_drain_cpu_queue); + int ocelot_fdb_add(struct ocelot *ocelot, int port, const unsigned char *addr, u16 vid) { @@ -894,10 +1167,103 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port, } EXPORT_SYMBOL(ocelot_get_ts_info); +static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond, + bool only_active_ports) +{ + u32 mask = 0; + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (!ocelot_port) + continue; + + if (ocelot_port->bond == bond) { + if (only_active_ports && !ocelot_port->lag_tx_active) + continue; + + mask |= BIT(port); + } + } + + return mask; +} + +static u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot) +{ + u32 mask = 0; + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (!ocelot_port) + continue; + + if (ocelot_port->is_dsa_8021q_cpu) + mask |= BIT(port); + } + + return mask; +} + +void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot) +{ + unsigned long cpu_fwd_mask; + int port; + + /* If a DSA tag_8021q CPU exists, it needs to be included in the + * regular forwarding path of the front ports regardless of whether + * those are bridged or standalone. + * If DSA tag_8021q is not used, this returns 0, which is fine because + * the hardware-based CPU port module can be a destination for packets + * even if it isn't part of PGID_SRC. + */ + cpu_fwd_mask = ocelot_get_dsa_8021q_cpu_mask(ocelot); + + /* Apply FWD mask. The loop is needed to add/remove the current port as + * a source for the other ports. + */ + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + unsigned long mask; + + if (!ocelot_port) { + /* Unused ports can't send anywhere */ + mask = 0; + } else if (ocelot_port->is_dsa_8021q_cpu) { + /* The DSA tag_8021q CPU ports need to be able to + * forward packets to all other ports except for + * themselves + */ + mask = GENMASK(ocelot->num_phys_ports - 1, 0); + mask &= ~cpu_fwd_mask; + } else if (ocelot->bridge_fwd_mask & BIT(port)) { + struct net_device *bond = ocelot_port->bond; + + mask = ocelot->bridge_fwd_mask & ~BIT(port); + if (bond) { + mask &= ~ocelot_get_bond_mask(ocelot, bond, + false); + } + } else { + /* Standalone ports forward only to DSA tag_8021q CPU + * ports (if those exist), or to the hardware CPU port + * module otherwise. 
+ */ + mask = cpu_fwd_mask; + } + + ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port); + } +} +EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask); + void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; u32 port_cfg; - int p, i; if (!(BIT(port) & ocelot->bridge_mask)) return; @@ -909,7 +1275,8 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state) ocelot->bridge_fwd_mask |= BIT(port); fallthrough; case BR_STATE_LEARNING: - port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA; + if (ocelot_port->learn_ena) + port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA; break; default: @@ -920,32 +1287,7 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state) ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG, port); - /* Apply FWD mask. The loop is needed to add/remove the current port as - * a source for the other ports. - */ - for (p = 0; p < ocelot->num_phys_ports; p++) { - if (ocelot->bridge_fwd_mask & BIT(p)) { - unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(p); - - for (i = 0; i < ocelot->num_phys_ports; i++) { - unsigned long bond_mask = ocelot->lags[i]; - - if (!bond_mask) - continue; - - if (bond_mask & BIT(p)) { - mask &= ~bond_mask; - break; - } - } - - ocelot_write_rix(ocelot, mask, - ANA_PGID_PGID, PGID_SRC + p); - } else { - ocelot_write_rix(ocelot, 0, - ANA_PGID_PGID, PGID_SRC + p); - } - } + ocelot_apply_bridge_fwd_mask(ocelot); } EXPORT_SYMBOL(ocelot_bridge_stp_state_set); @@ -1192,7 +1534,6 @@ int ocelot_port_bridge_leave(struct ocelot *ocelot, int port, struct net_device *bridge) { struct ocelot_vlan pvid = {0}, native_vlan = {0}; - struct switchdev_trans trans; int ret; ocelot->bridge_mask &= ~BIT(port); @@ -1200,13 +1541,7 @@ int ocelot_port_bridge_leave(struct ocelot *ocelot, int port, if (!ocelot->bridge_mask) ocelot->hw_bridge_dev = NULL; - trans.ph_prepare = true; - ret = ocelot_port_vlan_filtering(ocelot, port, false, &trans); - if (ret) - return ret; - - trans.ph_prepare = false; - ret = ocelot_port_vlan_filtering(ocelot, port, false, &trans); + ret = ocelot_port_vlan_filtering(ocelot, port, false); if (ret) return ret; @@ -1219,6 +1554,7 @@ EXPORT_SYMBOL(ocelot_port_bridge_leave); static void ocelot_set_aggr_pgids(struct ocelot *ocelot) { + unsigned long visited = GENMASK(ocelot->num_phys_ports - 1, 0); int i, port, lag; /* Reset destination and aggregation PGIDS */ @@ -1229,22 +1565,40 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot) ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0), ANA_PGID_PGID, i); - /* Now, set PGIDs for each LAG */ + /* The visited ports bitmask holds the list of ports offloading any + * bonding interface. Initially we mark all these ports as unvisited, + * then every time we visit a port in this bitmask, we know that it is + * the lowest numbered port, i.e. the one whose logical ID == physical + * port ID == LAG ID. So we mark as visited all further ports in the + * bitmask that are offloading the same bonding interface. This way, + * we set up the aggregation PGIDs only once per bonding interface. 
+ */ + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (!ocelot_port || !ocelot_port->bond) + continue; + + visited &= ~BIT(port); + } + + /* Now, set PGIDs for each active LAG */ for (lag = 0; lag < ocelot->num_phys_ports; lag++) { + struct net_device *bond = ocelot->ports[lag]->bond; + int num_active_ports = 0; unsigned long bond_mask; - int aggr_count = 0; u8 aggr_idx[16]; - bond_mask = ocelot->lags[lag]; - if (!bond_mask) + if (!bond || (visited & BIT(lag))) continue; + bond_mask = ocelot_get_bond_mask(ocelot, bond, true); + for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) { // Destination mask ocelot_write_rix(ocelot, bond_mask, ANA_PGID_PGID, port); - aggr_idx[aggr_count] = port; - aggr_count++; + aggr_idx[num_active_ports++] = port; } for_each_aggr_pgid(ocelot, i) { @@ -1252,63 +1606,74 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot) ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i); ac &= ~bond_mask; - ac |= BIT(aggr_idx[i % aggr_count]); + /* Don't do division by zero if there was no active + * port. Just make all aggregation codes zero. + */ + if (num_active_ports) + ac |= BIT(aggr_idx[i % num_active_ports]); ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i); } - } -} - -static void ocelot_setup_lag(struct ocelot *ocelot, int lag) -{ - unsigned long bond_mask = ocelot->lags[lag]; - unsigned int p; - for_each_set_bit(p, &bond_mask, ocelot->num_phys_ports) { - u32 port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p); + /* Mark all ports in the same LAG as visited to avoid applying + * the same config again. + */ + for (port = lag; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; - port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M; + if (!ocelot_port) + continue; - /* Use lag port as logical port for port i */ - ocelot_write_gix(ocelot, port_cfg | - ANA_PORT_PORT_CFG_PORTID_VAL(lag), - ANA_PORT_PORT_CFG, p); + if (ocelot_port->bond == bond) + visited |= BIT(port); + } } } -int ocelot_port_lag_join(struct ocelot *ocelot, int port, - struct net_device *bond) +/* When offloading a bonding interface, the switch ports configured under the + * same bond must have the same logical port ID, equal to the physical port ID + * of the lowest numbered physical port in that bond. Otherwise, in standalone/ + * bridged mode, each port has a logical port ID equal to its physical port ID. 
+ */ +static void ocelot_setup_logical_port_ids(struct ocelot *ocelot) { - struct net_device *ndev; - u32 bond_mask = 0; - int lag, lp; + int port; - rcu_read_lock(); - for_each_netdev_in_bond_rcu(bond, ndev) { - struct ocelot_port_private *priv = netdev_priv(ndev); + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct net_device *bond; - bond_mask |= BIT(priv->chip_port); - } - rcu_read_unlock(); + if (!ocelot_port) + continue; - lp = __ffs(bond_mask); + bond = ocelot_port->bond; + if (bond) { + int lag = __ffs(ocelot_get_bond_mask(ocelot, bond, + false)); - /* If the new port is the lowest one, use it as the logical port from - * now on - */ - if (port == lp) { - lag = port; - ocelot->lags[port] = bond_mask; - bond_mask &= ~BIT(port); - if (bond_mask) { - lp = __ffs(bond_mask); - ocelot->lags[lp] = 0; + ocelot_rmw_gix(ocelot, + ANA_PORT_PORT_CFG_PORTID_VAL(lag), + ANA_PORT_PORT_CFG_PORTID_VAL_M, + ANA_PORT_PORT_CFG, port); + } else { + ocelot_rmw_gix(ocelot, + ANA_PORT_PORT_CFG_PORTID_VAL(port), + ANA_PORT_PORT_CFG_PORTID_VAL_M, + ANA_PORT_PORT_CFG, port); } - } else { - lag = lp; - ocelot->lags[lp] |= BIT(port); } +} + +int ocelot_port_lag_join(struct ocelot *ocelot, int port, + struct net_device *bond, + struct netdev_lag_upper_info *info) +{ + if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) + return -EOPNOTSUPP; - ocelot_setup_lag(ocelot, lag); + ocelot->ports[port]->bond = bond; + + ocelot_setup_logical_port_ids(ocelot); + ocelot_apply_bridge_fwd_mask(ocelot); ocelot_set_aggr_pgids(ocelot); return 0; @@ -1318,33 +1683,24 @@ EXPORT_SYMBOL(ocelot_port_lag_join); void ocelot_port_lag_leave(struct ocelot *ocelot, int port, struct net_device *bond) { - u32 port_cfg; - int i; - - /* Remove port from any lag */ - for (i = 0; i < ocelot->num_phys_ports; i++) - ocelot->lags[i] &= ~BIT(port); + ocelot->ports[port]->bond = NULL; - /* if it was the logical port of the lag, move the lag config to the - * next port - */ - if (ocelot->lags[port]) { - int n = __ffs(ocelot->lags[port]); + ocelot_setup_logical_port_ids(ocelot); + ocelot_apply_bridge_fwd_mask(ocelot); + ocelot_set_aggr_pgids(ocelot); +} +EXPORT_SYMBOL(ocelot_port_lag_leave); - ocelot->lags[n] = ocelot->lags[port]; - ocelot->lags[port] = 0; +void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; - ocelot_setup_lag(ocelot, n); - } - - port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, port); - port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M; - ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(port), - ANA_PORT_PORT_CFG, port); + ocelot_port->lag_tx_active = lag_tx_active; + /* Rebalance the LAGs */ ocelot_set_aggr_pgids(ocelot); } -EXPORT_SYMBOL(ocelot_port_lag_leave); +EXPORT_SYMBOL(ocelot_port_lag_change); /* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu. * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG. 
@@ -1362,9 +1718,9 @@ void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu) if (port == ocelot->npi) { maxlen += OCELOT_TAG_LEN; - if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_SHORT) + if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT) maxlen += OCELOT_SHORT_PREFIX_LEN; - else if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_LONG) + else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG) maxlen += OCELOT_LONG_PREFIX_LEN; } @@ -1379,7 +1735,7 @@ void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu) pause_stop); /* Tail dropping watermarks */ - atop_tot = (ocelot->shared_queue_sz - 9 * maxlen) / + atop_tot = (ocelot->packet_buffer_size - 9 * maxlen) / OCELOT_BUFFER_CELL_SZ; atop = (9 * maxlen) / OCELOT_BUFFER_CELL_SZ; ocelot_write_rix(ocelot, ocelot->ops->wm_enc(atop), SYS_ATOP, port); @@ -1394,9 +1750,9 @@ int ocelot_get_max_mtu(struct ocelot *ocelot, int port) if (port == ocelot->npi) { max_mtu -= OCELOT_TAG_LEN; - if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_SHORT) + if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT) max_mtu -= OCELOT_SHORT_PREFIX_LEN; - else if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_LONG) + else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG) max_mtu -= OCELOT_LONG_PREFIX_LEN; } @@ -1404,6 +1760,86 @@ int ocelot_get_max_mtu(struct ocelot *ocelot, int port) } EXPORT_SYMBOL(ocelot_get_max_mtu); +static void ocelot_port_set_learning(struct ocelot *ocelot, int port, + bool enabled) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + u32 val = 0; + + if (enabled) + val = ANA_PORT_PORT_CFG_LEARN_ENA; + + ocelot_rmw_gix(ocelot, val, ANA_PORT_PORT_CFG_LEARN_ENA, + ANA_PORT_PORT_CFG, port); + + ocelot_port->learn_ena = enabled; +} + +static void ocelot_port_set_ucast_flood(struct ocelot *ocelot, int port, + bool enabled) +{ + u32 val = 0; + + if (enabled) + val = BIT(port); + + ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_UC); +} + +static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port, + bool enabled) +{ + u32 val = 0; + + if (enabled) + val = BIT(port); + + ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC); +} + +static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port, + bool enabled) +{ + u32 val = 0; + + if (enabled) + val = BIT(port); + + ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_BC); +} + +int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port, + struct switchdev_brport_flags flags) +{ + if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | + BR_BCAST_FLOOD)) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL(ocelot_port_pre_bridge_flags); + +void ocelot_port_bridge_flags(struct ocelot *ocelot, int port, + struct switchdev_brport_flags flags) +{ + if (flags.mask & BR_LEARNING) + ocelot_port_set_learning(ocelot, port, + !!(flags.val & BR_LEARNING)); + + if (flags.mask & BR_FLOOD) + ocelot_port_set_ucast_flood(ocelot, port, + !!(flags.val & BR_FLOOD)); + + if (flags.mask & BR_MCAST_FLOOD) + ocelot_port_set_mcast_flood(ocelot, port, + !!(flags.val & BR_MCAST_FLOOD)); + + if (flags.mask & BR_BCAST_FLOOD) + ocelot_port_set_bcast_flood(ocelot, port, + !!(flags.val & BR_BCAST_FLOOD)); +} +EXPORT_SYMBOL(ocelot_port_bridge_flags); + void ocelot_init_port(struct ocelot *ocelot, int port) { struct ocelot_port *ocelot_port = ocelot->ports[port]; @@ -1453,6 +1889,9 @@ void ocelot_init_port(struct ocelot *ocelot, int port) REW_PORT_VLAN_CFG_PORT_TPID_M, REW_PORT_VLAN_CFG, port); + /* Disable source address learning for standalone mode 
*/ + ocelot_port_set_learning(ocelot, port, false); + /* Enable vcap lookups */ ocelot_vcap_enable(ocelot, port); } @@ -1481,9 +1920,9 @@ static void ocelot_cpu_port_init(struct ocelot *ocelot) ocelot_fields_write(ocelot, cpu, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); /* CPU port Injection/Extraction configuration */ ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_XTR_HDR, - ocelot->xtr_prefix); + OCELOT_TAG_PREFIX_NONE); ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_INJ_HDR, - ocelot->inj_prefix); + OCELOT_TAG_PREFIX_NONE); /* Configure the CPU port to be VLAN aware */ ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) | @@ -1492,6 +1931,21 @@ static void ocelot_cpu_port_init(struct ocelot *ocelot) ANA_PORT_VLAN_CFG, cpu); } +static void ocelot_detect_features(struct ocelot *ocelot) +{ + int mmgt, eq_ctrl; + + /* For Ocelot, Felix, Seville, Serval etc, SYS:MMGT:MMGT:FREECNT holds + * the number of 240-byte free memory words (aka 4-cell chunks) and not + * 192 bytes as the documentation incorrectly says. + */ + mmgt = ocelot_read(ocelot, SYS_MMGT); + ocelot->packet_buffer_size = 240 * SYS_MMGT_FREECNT(mmgt); + + eq_ctrl = ocelot_read(ocelot, QSYS_EQ_CTRL); + ocelot->num_frame_refs = QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(eq_ctrl); +} + int ocelot_init(struct ocelot *ocelot) { char queue_name[32]; @@ -1506,11 +1960,6 @@ int ocelot_init(struct ocelot *ocelot) } } - ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports, - sizeof(u32), GFP_KERNEL); - if (!ocelot->lags) - return -ENOMEM; - ocelot->stats = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports * ocelot->num_stats, sizeof(u64), GFP_KERNEL); @@ -1534,6 +1983,7 @@ int ocelot_init(struct ocelot *ocelot) INIT_LIST_HEAD(&ocelot->multicast); INIT_LIST_HEAD(&ocelot->pgids); + ocelot_detect_features(ocelot); ocelot_mact_init(ocelot); ocelot_vlan_init(ocelot); ocelot_vcap_init(ocelot); @@ -1553,7 +2003,10 @@ int ocelot_init(struct ocelot *ocelot) ocelot_write(ocelot, ANA_AGGR_CFG_AC_SMAC_ENA | ANA_AGGR_CFG_AC_DMAC_ENA | ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA | - ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, ANA_AGGR_CFG); + ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA | + ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA | + ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, + ANA_AGGR_CFG); /* Set MAC age time to default value. The entry is aged after * 2*AGE_PERIOD @@ -1572,7 +2025,7 @@ int ocelot_init(struct ocelot *ocelot) /* Setup flooding PGIDs */ for (i = 0; i < ocelot->num_flooding_pgids; i++) ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) | - ANA_FLOODING_FLD_BROADCAST(PGID_MC) | + ANA_FLOODING_FLD_BROADCAST(PGID_BC) | ANA_FLOODING_FLD_UNICAST(PGID_UC), ANA_FLOODING, i); ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) | @@ -1593,15 +2046,18 @@ int ocelot_init(struct ocelot *ocelot) ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port); } - /* Allow broadcast MAC frames. */ for_each_nonreserved_multicast_dest_pgid(ocelot, i) { u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0)); ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i); } - ocelot_write_rix(ocelot, - ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)), - ANA_PGID_PGID, PGID_MC); + /* Allow broadcast and unknown L2 multicast to the CPU. 
*/ + ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), + ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), + ANA_PGID_PGID, PGID_MC); + ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), + ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), + ANA_PGID_PGID, PGID_BC); ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4); ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6); diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 291d39d49c4e..db6b1a4c3926 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -32,15 +32,6 @@ #define OCELOT_PTP_QUEUE_SZ 128 -struct frame_info { - u32 len; - u16 port; - u16 vid; - u8 tag_type; - u16 rew_op; - u32 timestamp; /* rew_val */ -}; - struct ocelot_port_tc { bool block_shared; unsigned long offload_cnt; @@ -109,10 +100,6 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port, unsigned int vid, enum macaccess_entry_type type); int ocelot_mact_forget(struct ocelot *ocelot, const unsigned char mac[ETH_ALEN], unsigned int vid); -int ocelot_port_lag_join(struct ocelot *ocelot, int port, - struct net_device *bond); -void ocelot_port_lag_leave(struct ocelot *ocelot, int port, - struct net_device *bond); struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port); int ocelot_netdev_to_port(struct net_device *dev); @@ -121,13 +108,16 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, struct phy_device *phy); - -void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu, - enum ocelot_tag_prefix injection, - enum ocelot_tag_prefix extraction); +void ocelot_release_port(struct ocelot_port *ocelot_port); +int ocelot_devlink_init(struct ocelot *ocelot); +void ocelot_devlink_teardown(struct ocelot *ocelot); +int ocelot_port_devlink_init(struct ocelot *ocelot, int port, + enum devlink_port_flavour flavour); +void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port); extern struct notifier_block ocelot_netdevice_nb; extern struct notifier_block ocelot_switchdev_nb; extern struct notifier_block ocelot_switchdev_blocking_nb; +extern const struct devlink_ops ocelot_devlink_ops; #endif diff --git a/drivers/net/ethernet/mscc/ocelot_devlink.c b/drivers/net/ethernet/mscc/ocelot_devlink.c new file mode 100644 index 000000000000..edafbd37d12c --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_devlink.c @@ -0,0 +1,885 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Copyright 2020-2021 NXP Semiconductors + */ +#include <net/devlink.h> +#include "ocelot.h" + +/* The queue system tracks four resource consumptions: + * Resource 0: Memory tracked per source port + * Resource 1: Frame references tracked per source port + * Resource 2: Memory tracked per destination port + * Resource 3: Frame references tracked per destination port + */ +#define OCELOT_RESOURCE_SZ 256 +#define OCELOT_NUM_RESOURCES 4 + +#define BUF_xxxx_I (0 * OCELOT_RESOURCE_SZ) +#define REF_xxxx_I (1 * OCELOT_RESOURCE_SZ) +#define BUF_xxxx_E (2 * OCELOT_RESOURCE_SZ) +#define REF_xxxx_E (3 * OCELOT_RESOURCE_SZ) + +/* For each resource type there are 4 types of watermarks: + * Q_RSRV: reservation per QoS class per port + * PRIO_SHR: sharing watermark per QoS class across all ports + * P_RSRV: reservation per port + * COL_SHR: sharing watermark per color (drop precedence) across all ports + */ +#define xxx_Q_RSRV_x 0 +#define xxx_PRIO_SHR_x 216 +#define xxx_P_RSRV_x 224 
+#define xxx_COL_SHR_x 254 + +/* Reservation Watermarks + * ---------------------- + * + * For setting up the reserved areas, egress watermarks exist per port and per + * QoS class for both ingress and egress. + */ + +/* Amount of packet buffer + * | per QoS class + * | | reserved + * | | | per egress port + * | | | | + * V V v v + * BUF_Q_RSRV_E + */ +#define BUF_Q_RSRV_E(port, prio) \ + (BUF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio)) + +/* Amount of packet buffer + * | for all port's traffic classes + * | | reserved + * | | | per egress port + * | | | | + * V V v v + * BUF_P_RSRV_E + */ +#define BUF_P_RSRV_E(port) \ + (BUF_xxxx_E + xxx_P_RSRV_x + (port)) + +/* Amount of packet buffer + * | per QoS class + * | | reserved + * | | | per ingress port + * | | | | + * V V v v + * BUF_Q_RSRV_I + */ +#define BUF_Q_RSRV_I(port, prio) \ + (BUF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio)) + +/* Amount of packet buffer + * | for all port's traffic classes + * | | reserved + * | | | per ingress port + * | | | | + * V V v v + * BUF_P_RSRV_I + */ +#define BUF_P_RSRV_I(port) \ + (BUF_xxxx_I + xxx_P_RSRV_x + (port)) + +/* Amount of frame references + * | per QoS class + * | | reserved + * | | | per egress port + * | | | | + * V V v v + * REF_Q_RSRV_E + */ +#define REF_Q_RSRV_E(port, prio) \ + (REF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio)) + +/* Amount of frame references + * | for all port's traffic classes + * | | reserved + * | | | per egress port + * | | | | + * V V v v + * REF_P_RSRV_E + */ +#define REF_P_RSRV_E(port) \ + (REF_xxxx_E + xxx_P_RSRV_x + (port)) + +/* Amount of frame references + * | per QoS class + * | | reserved + * | | | per ingress port + * | | | | + * V V v v + * REF_Q_RSRV_I + */ +#define REF_Q_RSRV_I(port, prio) \ + (REF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio)) + +/* Amount of frame references + * | for all port's traffic classes + * | | reserved + * | | | per ingress port + * | | | | + * V V v v + * REF_P_RSRV_I + */ +#define REF_P_RSRV_I(port) \ + (REF_xxxx_I + xxx_P_RSRV_x + (port)) + +/* Sharing Watermarks + * ------------------ + * + * The shared memory area is shared between all ports. 
+ */ + +/* Amount of buffer + * | per QoS class + * | | from the shared memory area + * | | | for egress traffic + * | | | | + * V V v v + * BUF_PRIO_SHR_E + */ +#define BUF_PRIO_SHR_E(prio) \ + (BUF_xxxx_E + xxx_PRIO_SHR_x + (prio)) + +/* Amount of buffer + * | per color (drop precedence level) + * | | from the shared memory area + * | | | for egress traffic + * | | | | + * V V v v + * BUF_COL_SHR_E + */ +#define BUF_COL_SHR_E(dp) \ + (BUF_xxxx_E + xxx_COL_SHR_x + (1 - (dp))) + +/* Amount of buffer + * | per QoS class + * | | from the shared memory area + * | | | for ingress traffic + * | | | | + * V V v v + * BUF_PRIO_SHR_I + */ +#define BUF_PRIO_SHR_I(prio) \ + (BUF_xxxx_I + xxx_PRIO_SHR_x + (prio)) + +/* Amount of buffer + * | per color (drop precedence level) + * | | from the shared memory area + * | | | for ingress traffic + * | | | | + * V V v v + * BUF_COL_SHR_I + */ +#define BUF_COL_SHR_I(dp) \ + (BUF_xxxx_I + xxx_COL_SHR_x + (1 - (dp))) + +/* Amount of frame references + * | per QoS class + * | | from the shared area + * | | | for egress traffic + * | | | | + * V V v v + * REF_PRIO_SHR_E + */ +#define REF_PRIO_SHR_E(prio) \ + (REF_xxxx_E + xxx_PRIO_SHR_x + (prio)) + +/* Amount of frame references + * | per color (drop precedence level) + * | | from the shared area + * | | | for egress traffic + * | | | | + * V V v v + * REF_COL_SHR_E + */ +#define REF_COL_SHR_E(dp) \ + (REF_xxxx_E + xxx_COL_SHR_x + (1 - (dp))) + +/* Amount of frame references + * | per QoS class + * | | from the shared area + * | | | for ingress traffic + * | | | | + * V V v v + * REF_PRIO_SHR_I + */ +#define REF_PRIO_SHR_I(prio) \ + (REF_xxxx_I + xxx_PRIO_SHR_x + (prio)) + +/* Amount of frame references + * | per color (drop precedence level) + * | | from the shared area + * | | | for ingress traffic + * | | | | + * V V v v + * REF_COL_SHR_I + */ +#define REF_COL_SHR_I(dp) \ + (REF_xxxx_I + xxx_COL_SHR_x + (1 - (dp))) + +static u32 ocelot_wm_read(struct ocelot *ocelot, int index) +{ + int wm = ocelot_read_gix(ocelot, QSYS_RES_CFG, index); + + return ocelot->ops->wm_dec(wm); +} + +static void ocelot_wm_write(struct ocelot *ocelot, int index, u32 val) +{ + u32 wm = ocelot->ops->wm_enc(val); + + ocelot_write_gix(ocelot, wm, QSYS_RES_CFG, index); +} + +static void ocelot_wm_status(struct ocelot *ocelot, int index, u32 *inuse, + u32 *maxuse) +{ + int res_stat = ocelot_read_gix(ocelot, QSYS_RES_STAT, index); + + return ocelot->ops->wm_stat(res_stat, inuse, maxuse); +} + +/* The hardware comes out of reset with strange defaults: the sum of all + * reservations for frame memory is larger than the total buffer size. + * One has to wonder how can the reservation watermarks still guarantee + * anything under congestion. + * Bring some sense into the hardware by changing the defaults to disable all + * reservations and rely only on the sharing watermark for frames with drop + * precedence 0. The user can still explicitly request reservations per port + * and per port-tc through devlink-sb. 
+ */ +static void ocelot_disable_reservation_watermarks(struct ocelot *ocelot, + int port) +{ + int prio; + + for (prio = 0; prio < OCELOT_NUM_TC; prio++) { + ocelot_wm_write(ocelot, BUF_Q_RSRV_I(port, prio), 0); + ocelot_wm_write(ocelot, BUF_Q_RSRV_E(port, prio), 0); + ocelot_wm_write(ocelot, REF_Q_RSRV_I(port, prio), 0); + ocelot_wm_write(ocelot, REF_Q_RSRV_E(port, prio), 0); + } + + ocelot_wm_write(ocelot, BUF_P_RSRV_I(port), 0); + ocelot_wm_write(ocelot, BUF_P_RSRV_E(port), 0); + ocelot_wm_write(ocelot, REF_P_RSRV_I(port), 0); + ocelot_wm_write(ocelot, REF_P_RSRV_E(port), 0); +} + +/* We want the sharing watermarks to consume all nonreserved resources, for + * efficient resource utilization (a single traffic flow should be able to use + * up the entire buffer space and frame resources as long as there's no + * interference). + * The switch has 10 sharing watermarks per lookup: 8 per traffic class and 2 + * per color (drop precedence). + * The trouble with configuring these sharing watermarks is that: + * (1) There's a risk that we overcommit the resources if we configure + * (a) all 8 per-TC sharing watermarks to the max + * (b) all 2 per-color sharing watermarks to the max + * (2) There's a risk that we undercommit the resources if we configure + * (a) all 8 per-TC sharing watermarks to "max / 8" + * (b) all 2 per-color sharing watermarks to "max / 2" + * So for Linux, let's just disable the sharing watermarks per traffic class + * (setting them to 0 will make them always exceeded), and rely only on the + * sharing watermark for drop priority 0. So frames with drop priority set to 1 + * by QoS classification or policing will still be allowed, but only as long as + * the port and port-TC reservations are not exceeded. + */ +static void ocelot_disable_tc_sharing_watermarks(struct ocelot *ocelot) +{ + int prio; + + for (prio = 0; prio < OCELOT_NUM_TC; prio++) { + ocelot_wm_write(ocelot, BUF_PRIO_SHR_I(prio), 0); + ocelot_wm_write(ocelot, BUF_PRIO_SHR_E(prio), 0); + ocelot_wm_write(ocelot, REF_PRIO_SHR_I(prio), 0); + ocelot_wm_write(ocelot, REF_PRIO_SHR_E(prio), 0); + } +} + +static void ocelot_get_buf_rsrv(struct ocelot *ocelot, u32 *buf_rsrv_i, + u32 *buf_rsrv_e) +{ + int port, prio; + + *buf_rsrv_i = 0; + *buf_rsrv_e = 0; + + for (port = 0; port <= ocelot->num_phys_ports; port++) { + for (prio = 0; prio < OCELOT_NUM_TC; prio++) { + *buf_rsrv_i += ocelot_wm_read(ocelot, + BUF_Q_RSRV_I(port, prio)); + *buf_rsrv_e += ocelot_wm_read(ocelot, + BUF_Q_RSRV_E(port, prio)); + } + + *buf_rsrv_i += ocelot_wm_read(ocelot, BUF_P_RSRV_I(port)); + *buf_rsrv_e += ocelot_wm_read(ocelot, BUF_P_RSRV_E(port)); + } + + *buf_rsrv_i *= OCELOT_BUFFER_CELL_SZ; + *buf_rsrv_e *= OCELOT_BUFFER_CELL_SZ; +} + +static void ocelot_get_ref_rsrv(struct ocelot *ocelot, u32 *ref_rsrv_i, + u32 *ref_rsrv_e) +{ + int port, prio; + + *ref_rsrv_i = 0; + *ref_rsrv_e = 0; + + for (port = 0; port <= ocelot->num_phys_ports; port++) { + for (prio = 0; prio < OCELOT_NUM_TC; prio++) { + *ref_rsrv_i += ocelot_wm_read(ocelot, + REF_Q_RSRV_I(port, prio)); + *ref_rsrv_e += ocelot_wm_read(ocelot, + REF_Q_RSRV_E(port, prio)); + } + + *ref_rsrv_i += ocelot_wm_read(ocelot, REF_P_RSRV_I(port)); + *ref_rsrv_e += ocelot_wm_read(ocelot, REF_P_RSRV_E(port)); + } +} + +/* Calculate all reservations, then set up the sharing watermark for DP=0 to + * consume the remaining resources up to the pool's configured size. 
+ */ +static void ocelot_setup_sharing_watermarks(struct ocelot *ocelot) +{ + u32 buf_rsrv_i, buf_rsrv_e; + u32 ref_rsrv_i, ref_rsrv_e; + u32 buf_shr_i, buf_shr_e; + u32 ref_shr_i, ref_shr_e; + + ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e); + ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e); + + buf_shr_i = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] - + buf_rsrv_i; + buf_shr_e = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] - + buf_rsrv_e; + ref_shr_i = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] - + ref_rsrv_i; + ref_shr_e = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] - + ref_rsrv_e; + + buf_shr_i /= OCELOT_BUFFER_CELL_SZ; + buf_shr_e /= OCELOT_BUFFER_CELL_SZ; + + ocelot_wm_write(ocelot, BUF_COL_SHR_I(0), buf_shr_i); + ocelot_wm_write(ocelot, BUF_COL_SHR_E(0), buf_shr_e); + ocelot_wm_write(ocelot, REF_COL_SHR_E(0), ref_shr_e); + ocelot_wm_write(ocelot, REF_COL_SHR_I(0), ref_shr_i); + ocelot_wm_write(ocelot, BUF_COL_SHR_I(1), 0); + ocelot_wm_write(ocelot, BUF_COL_SHR_E(1), 0); + ocelot_wm_write(ocelot, REF_COL_SHR_E(1), 0); + ocelot_wm_write(ocelot, REF_COL_SHR_I(1), 0); +} + +/* Ensure that all reservations can be enforced */ +static int ocelot_watermark_validate(struct ocelot *ocelot, + struct netlink_ext_ack *extack) +{ + u32 buf_rsrv_i, buf_rsrv_e; + u32 ref_rsrv_i, ref_rsrv_e; + + ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e); + ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e); + + if (buf_rsrv_i > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING]) { + NL_SET_ERR_MSG_MOD(extack, + "Ingress frame reservations exceed pool size"); + return -ERANGE; + } + if (buf_rsrv_e > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR]) { + NL_SET_ERR_MSG_MOD(extack, + "Egress frame reservations exceed pool size"); + return -ERANGE; + } + if (ref_rsrv_i > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING]) { + NL_SET_ERR_MSG_MOD(extack, + "Ingress reference reservations exceed pool size"); + return -ERANGE; + } + if (ref_rsrv_e > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR]) { + NL_SET_ERR_MSG_MOD(extack, + "Egress reference reservations exceed pool size"); + return -ERANGE; + } + + return 0; +} + +/* The hardware works like this: + * + * Frame forwarding decision taken + * | + * v + * +--------------------+--------------------+--------------------+ + * | | | | + * v v v v + * Ingress memory Egress memory Ingress frame Egress frame + * check check reference check reference check + * | | | | + * v v v v + * BUF_Q_RSRV_I ok BUF_Q_RSRV_E ok REF_Q_RSRV_I ok REF_Q_RSRV_E ok + *(src port, prio) -+ (dst port, prio) -+ (src port, prio) -+ (dst port, prio) -+ + * | | | | | | | | + * |exceeded | |exceeded | |exceeded | |exceeded | + * v | v | v | v | + * BUF_P_RSRV_I ok| BUF_P_RSRV_E ok| REF_P_RSRV_I ok| REF_P_RSRV_E ok| + * (src port) ----+ (dst port) ----+ (src port) ----+ (dst port) -----+ + * | | | | | | | | + * |exceeded | |exceeded | |exceeded | |exceeded | + * v | v | v | v | + * BUF_PRIO_SHR_I ok| BUF_PRIO_SHR_E ok| REF_PRIO_SHR_I ok| REF_PRIO_SHR_E ok| + * (prio) ------+ (prio) ------+ (prio) ------+ (prio) -------+ + * | | | | | | | | + * |exceeded | |exceeded | |exceeded | |exceeded | + * v | v | v | v | + * BUF_COL_SHR_I ok| BUF_COL_SHR_E ok| REF_COL_SHR_I ok| REF_COL_SHR_E ok| + * (dp) -------+ (dp) -------+ (dp) -------+ (dp) --------+ + * | | | | | | | | + * |exceeded | |exceeded | |exceeded | |exceeded | + * v v v v v v v v + * fail success fail success fail success fail success + * | | | | | | | | + * v v v v v v v v + 
* +-----+----+ +-----+----+ +-----+----+ +-----+-----+ + * | | | | + * +-------> OR <-------+ +-------> OR <-------+ + * | | + * v v + * +----------------> AND <-----------------+ + * | + * v + * FIFO drop / accept + * + * We are modeling each of the 4 parallel lookups as a devlink-sb pool. + * At least one (ingress or egress) memory pool and one (ingress or egress) + * frame reference pool need to have resources for frame acceptance to succeed. + * + * The following watermarks are controlled explicitly through devlink-sb: + * BUF_Q_RSRV_I, BUF_Q_RSRV_E, REF_Q_RSRV_I, REF_Q_RSRV_E + * BUF_P_RSRV_I, BUF_P_RSRV_E, REF_P_RSRV_I, REF_P_RSRV_E + * The following watermarks are controlled implicitly through devlink-sb: + * BUF_COL_SHR_I, BUF_COL_SHR_E, REF_COL_SHR_I, REF_COL_SHR_E + * The following watermarks are unused and disabled: + * BUF_PRIO_SHR_I, BUF_PRIO_SHR_E, REF_PRIO_SHR_I, REF_PRIO_SHR_E + * + * This function overrides the hardware defaults with more sane ones (no + * reservations by default, let sharing use all resources) and disables the + * unused watermarks. + */ +static void ocelot_watermark_init(struct ocelot *ocelot) +{ + int all_tcs = GENMASK(OCELOT_NUM_TC - 1, 0); + int port; + + ocelot_write(ocelot, all_tcs, QSYS_RES_QOS_MODE); + + for (port = 0; port <= ocelot->num_phys_ports; port++) + ocelot_disable_reservation_watermarks(ocelot, port); + + ocelot_disable_tc_sharing_watermarks(ocelot); + ocelot_setup_sharing_watermarks(ocelot); +} + +/* Pool size and type are fixed up at runtime. Keeping this structure to + * look up the cell size multipliers. + */ +static const struct devlink_sb_pool_info ocelot_sb_pool[] = { + [OCELOT_SB_BUF] = { + .cell_size = OCELOT_BUFFER_CELL_SZ, + .threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC, + }, + [OCELOT_SB_REF] = { + .cell_size = 1, + .threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC, + }, +}; + +/* Returns the pool size configured through ocelot_sb_pool_set */ +int ocelot_sb_pool_get(struct ocelot *ocelot, unsigned int sb_index, + u16 pool_index, + struct devlink_sb_pool_info *pool_info) +{ + if (sb_index >= OCELOT_SB_NUM) + return -ENODEV; + if (pool_index >= OCELOT_SB_POOL_NUM) + return -ENODEV; + + *pool_info = ocelot_sb_pool[sb_index]; + pool_info->size = ocelot->pool_size[sb_index][pool_index]; + if (pool_index) + pool_info->pool_type = DEVLINK_SB_POOL_TYPE_INGRESS; + else + pool_info->pool_type = DEVLINK_SB_POOL_TYPE_EGRESS; + + return 0; +} +EXPORT_SYMBOL(ocelot_sb_pool_get); + +/* The pool size received here configures the total amount of resources used on + * ingress (or on egress, depending upon the pool index). The pool size, minus + * the values for the port and port-tc reservations, is written into the + * COL_SHR(dp=0) sharing watermark. 
+ */ +int ocelot_sb_pool_set(struct ocelot *ocelot, unsigned int sb_index, + u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type, + struct netlink_ext_ack *extack) +{ + u32 old_pool_size; + int err; + + if (sb_index >= OCELOT_SB_NUM) { + NL_SET_ERR_MSG_MOD(extack, + "Invalid sb, use 0 for buffers and 1 for frame references"); + return -ENODEV; + } + if (pool_index >= OCELOT_SB_POOL_NUM) { + NL_SET_ERR_MSG_MOD(extack, + "Invalid pool, use 0 for ingress and 1 for egress"); + return -ENODEV; + } + if (threshold_type != DEVLINK_SB_THRESHOLD_TYPE_STATIC) { + NL_SET_ERR_MSG_MOD(extack, + "Only static threshold supported"); + return -EOPNOTSUPP; + } + + old_pool_size = ocelot->pool_size[sb_index][pool_index]; + ocelot->pool_size[sb_index][pool_index] = size; + + err = ocelot_watermark_validate(ocelot, extack); + if (err) { + ocelot->pool_size[sb_index][pool_index] = old_pool_size; + return err; + } + + ocelot_setup_sharing_watermarks(ocelot); + + return 0; +} +EXPORT_SYMBOL(ocelot_sb_pool_set); + +/* This retrieves the configuration made with ocelot_sb_port_pool_set */ +int ocelot_sb_port_pool_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold) +{ + int wm_index; + + switch (sb_index) { + case OCELOT_SB_BUF: + if (pool_index == OCELOT_SB_POOL_ING) + wm_index = BUF_P_RSRV_I(port); + else + wm_index = BUF_P_RSRV_E(port); + break; + case OCELOT_SB_REF: + if (pool_index == OCELOT_SB_POOL_ING) + wm_index = REF_P_RSRV_I(port); + else + wm_index = REF_P_RSRV_E(port); + break; + default: + return -ENODEV; + } + + *p_threshold = ocelot_wm_read(ocelot, wm_index); + *p_threshold *= ocelot_sb_pool[sb_index].cell_size; + + return 0; +} +EXPORT_SYMBOL(ocelot_sb_port_pool_get); + +/* This configures the P_RSRV per-port reserved resource watermark */ +int ocelot_sb_port_pool_set(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 pool_index, + u32 threshold, struct netlink_ext_ack *extack) +{ + int wm_index, err; + u32 old_thr; + + switch (sb_index) { + case OCELOT_SB_BUF: + if (pool_index == OCELOT_SB_POOL_ING) + wm_index = BUF_P_RSRV_I(port); + else + wm_index = BUF_P_RSRV_E(port); + break; + case OCELOT_SB_REF: + if (pool_index == OCELOT_SB_POOL_ING) + wm_index = REF_P_RSRV_I(port); + else + wm_index = REF_P_RSRV_E(port); + break; + default: + NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer"); + return -ENODEV; + } + + threshold /= ocelot_sb_pool[sb_index].cell_size; + + old_thr = ocelot_wm_read(ocelot, wm_index); + ocelot_wm_write(ocelot, wm_index, threshold); + + err = ocelot_watermark_validate(ocelot, extack); + if (err) { + ocelot_wm_write(ocelot, wm_index, old_thr); + return err; + } + + ocelot_setup_sharing_watermarks(ocelot); + + return 0; +} +EXPORT_SYMBOL(ocelot_sb_port_pool_set); + +/* This retrieves the configuration done by ocelot_sb_tc_pool_bind_set */ +int ocelot_sb_tc_pool_bind_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold) +{ + int wm_index; + + switch (sb_index) { + case OCELOT_SB_BUF: + if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS) + wm_index = BUF_Q_RSRV_I(port, tc_index); + else + wm_index = BUF_Q_RSRV_E(port, tc_index); + break; + case OCELOT_SB_REF: + if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS) + wm_index = REF_Q_RSRV_I(port, tc_index); + else + wm_index = REF_Q_RSRV_E(port, tc_index); + break; + default: + return -ENODEV; + } + + *p_threshold = ocelot_wm_read(ocelot, wm_index); + *p_threshold *= 
ocelot_sb_pool[sb_index].cell_size; + + if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS) + *p_pool_index = 0; + else + *p_pool_index = 1; + + return 0; +} +EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_get); + +/* This configures the Q_RSRV per-port-tc reserved resource watermark */ +int ocelot_sb_tc_pool_bind_set(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold, + struct netlink_ext_ack *extack) +{ + int wm_index, err; + u32 old_thr; + + /* Paranoid check? */ + if (pool_index == OCELOT_SB_POOL_ING && + pool_type != DEVLINK_SB_POOL_TYPE_INGRESS) + return -EINVAL; + if (pool_index == OCELOT_SB_POOL_EGR && + pool_type != DEVLINK_SB_POOL_TYPE_EGRESS) + return -EINVAL; + + switch (sb_index) { + case OCELOT_SB_BUF: + if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS) + wm_index = BUF_Q_RSRV_I(port, tc_index); + else + wm_index = BUF_Q_RSRV_E(port, tc_index); + break; + case OCELOT_SB_REF: + if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS) + wm_index = REF_Q_RSRV_I(port, tc_index); + else + wm_index = REF_Q_RSRV_E(port, tc_index); + break; + default: + NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer"); + return -ENODEV; + } + + threshold /= ocelot_sb_pool[sb_index].cell_size; + + old_thr = ocelot_wm_read(ocelot, wm_index); + ocelot_wm_write(ocelot, wm_index, threshold); + err = ocelot_watermark_validate(ocelot, extack); + if (err) { + ocelot_wm_write(ocelot, wm_index, old_thr); + return err; + } + + ocelot_setup_sharing_watermarks(ocelot); + + return 0; +} +EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_set); + +/* The hardware does not support atomic snapshots, we'll read out the + * occupancy registers individually and have this as just a stub. + */ +int ocelot_sb_occ_snapshot(struct ocelot *ocelot, unsigned int sb_index) +{ + return 0; +} +EXPORT_SYMBOL(ocelot_sb_occ_snapshot); + +/* The watermark occupancy registers are cleared upon read, + * so let's read them. 
+ */ +int ocelot_sb_occ_max_clear(struct ocelot *ocelot, unsigned int sb_index) +{ + u32 inuse, maxuse; + int port, prio; + + switch (sb_index) { + case OCELOT_SB_BUF: + for (port = 0; port <= ocelot->num_phys_ports; port++) { + for (prio = 0; prio < OCELOT_NUM_TC; prio++) { + ocelot_wm_status(ocelot, BUF_Q_RSRV_I(port, prio), + &inuse, &maxuse); + ocelot_wm_status(ocelot, BUF_Q_RSRV_E(port, prio), + &inuse, &maxuse); + } + ocelot_wm_status(ocelot, BUF_P_RSRV_I(port), + &inuse, &maxuse); + ocelot_wm_status(ocelot, BUF_P_RSRV_E(port), + &inuse, &maxuse); + } + break; + case OCELOT_SB_REF: + for (port = 0; port <= ocelot->num_phys_ports; port++) { + for (prio = 0; prio < OCELOT_NUM_TC; prio++) { + ocelot_wm_status(ocelot, REF_Q_RSRV_I(port, prio), + &inuse, &maxuse); + ocelot_wm_status(ocelot, REF_Q_RSRV_E(port, prio), + &inuse, &maxuse); + } + ocelot_wm_status(ocelot, REF_P_RSRV_I(port), + &inuse, &maxuse); + ocelot_wm_status(ocelot, REF_P_RSRV_E(port), + &inuse, &maxuse); + } + break; + default: + return -ENODEV; + } + + return 0; +} +EXPORT_SYMBOL(ocelot_sb_occ_max_clear); + +/* This retrieves the watermark occupancy for per-port P_RSRV watermarks */ +int ocelot_sb_occ_port_pool_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 pool_index, + u32 *p_cur, u32 *p_max) +{ + int wm_index; + + switch (sb_index) { + case OCELOT_SB_BUF: + if (pool_index == OCELOT_SB_POOL_ING) + wm_index = BUF_P_RSRV_I(port); + else + wm_index = BUF_P_RSRV_E(port); + break; + case OCELOT_SB_REF: + if (pool_index == OCELOT_SB_POOL_ING) + wm_index = REF_P_RSRV_I(port); + else + wm_index = REF_P_RSRV_E(port); + break; + default: + return -ENODEV; + } + + ocelot_wm_status(ocelot, wm_index, p_cur, p_max); + *p_cur *= ocelot_sb_pool[sb_index].cell_size; + *p_max *= ocelot_sb_pool[sb_index].cell_size; + + return 0; +} +EXPORT_SYMBOL(ocelot_sb_occ_port_pool_get); + +/* This retrieves the watermark occupancy for per-port-tc Q_RSRV watermarks */ +int ocelot_sb_occ_tc_port_bind_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max) +{ + int wm_index; + + switch (sb_index) { + case OCELOT_SB_BUF: + if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS) + wm_index = BUF_Q_RSRV_I(port, tc_index); + else + wm_index = BUF_Q_RSRV_E(port, tc_index); + break; + case OCELOT_SB_REF: + if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS) + wm_index = REF_Q_RSRV_I(port, tc_index); + else + wm_index = REF_Q_RSRV_E(port, tc_index); + break; + default: + return -ENODEV; + } + + ocelot_wm_status(ocelot, wm_index, p_cur, p_max); + *p_cur *= ocelot_sb_pool[sb_index].cell_size; + *p_max *= ocelot_sb_pool[sb_index].cell_size; + + return 0; +} +EXPORT_SYMBOL(ocelot_sb_occ_tc_port_bind_get); + +int ocelot_devlink_sb_register(struct ocelot *ocelot) +{ + int err; + + err = devlink_sb_register(ocelot->devlink, OCELOT_SB_BUF, + ocelot->packet_buffer_size, 1, 1, + OCELOT_NUM_TC, OCELOT_NUM_TC); + if (err) + return err; + + err = devlink_sb_register(ocelot->devlink, OCELOT_SB_REF, + ocelot->num_frame_refs, 1, 1, + OCELOT_NUM_TC, OCELOT_NUM_TC); + if (err) { + devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF); + return err; + } + + ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] = ocelot->packet_buffer_size; + ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] = ocelot->packet_buffer_size; + ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] = ocelot->num_frame_refs; + ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] = ocelot->num_frame_refs; + + 
ocelot_watermark_init(ocelot); + + return 0; +} +EXPORT_SYMBOL(ocelot_devlink_sb_register); + +void ocelot_devlink_sb_unregister(struct ocelot *ocelot) +{ + devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF); + devlink_sb_unregister(ocelot->devlink, OCELOT_SB_REF); +} +EXPORT_SYMBOL(ocelot_devlink_sb_unregister); diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 729495a1a77e..c3ac026f6aea 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -622,7 +622,8 @@ static int ocelot_flower_parse(struct ocelot *ocelot, int port, bool ingress, int ret; filter->prio = f->common.prio; - filter->id = f->cookie; + filter->id.cookie = f->cookie; + filter->id.tc_offload = true; ret = ocelot_flower_parse_action(ocelot, port, ingress, f, filter); if (ret) @@ -717,7 +718,7 @@ int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port, block = &ocelot->block[block_id]; - filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie); + filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie, true); if (!filter) return 0; @@ -741,7 +742,7 @@ int ocelot_cls_flower_stats(struct ocelot *ocelot, int port, block = &ocelot->block[block_id]; - filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie); + filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie, true); if (!filter || filter->type == OCELOT_VCAP_FILTER_DUMMY) return 0; diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c index 0acb45948418..ea4e83410fe4 100644 --- a/drivers/net/ethernet/mscc/ocelot_io.c +++ b/drivers/net/ethernet/mscc/ocelot_io.c @@ -71,6 +71,14 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg) } EXPORT_SYMBOL(ocelot_port_writel); +void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg) +{ + u32 cur = ocelot_port_readl(port, reg); + + ocelot_port_writel(port, (cur & (~mask)) | val, reg); +} +EXPORT_SYMBOL(ocelot_port_rmwl); + u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target, u32 reg, u32 offset) { diff --git a/drivers/net/ethernet/mscc/ocelot_mrp.c b/drivers/net/ethernet/mscc/ocelot_mrp.c new file mode 100644 index 000000000000..683da320bfd8 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_mrp.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Microsemi Ocelot Switch driver + * + * This contains glue logic between the switchdev driver operations and the + * mscc_ocelot_switch_lib. 
+ * + * Copyright (c) 2017, 2019 Microsemi Corporation + * Copyright 2020-2021 NXP Semiconductors + */ + +#include <linux/if_bridge.h> +#include <linux/mrp_bridge.h> +#include <soc/mscc/ocelot_vcap.h> +#include <uapi/linux/mrp_bridge.h> +#include "ocelot.h" +#include "ocelot_vcap.h" + +static int ocelot_mrp_del_vcap(struct ocelot *ocelot, int port) +{ + struct ocelot_vcap_block *block_vcap_is2; + struct ocelot_vcap_filter *filter; + + block_vcap_is2 = &ocelot->block[VCAP_IS2]; + filter = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, port, + false); + if (!filter) + return 0; + + return ocelot_vcap_filter_del(ocelot, filter); +} + +int ocelot_mrp_add(struct ocelot *ocelot, int port, + const struct switchdev_obj_mrp *mrp) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_port_private *priv; + struct net_device *dev; + + if (!ocelot_port) + return -EOPNOTSUPP; + + priv = container_of(ocelot_port, struct ocelot_port_private, port); + dev = priv->dev; + + if (mrp->p_port != dev && mrp->s_port != dev) + return 0; + + if (ocelot->mrp_ring_id != 0 && + ocelot->mrp_s_port && + ocelot->mrp_p_port) + return -EINVAL; + + if (mrp->p_port == dev) + ocelot->mrp_p_port = dev; + + if (mrp->s_port == dev) + ocelot->mrp_s_port = dev; + + ocelot->mrp_ring_id = mrp->ring_id; + + return 0; +} +EXPORT_SYMBOL(ocelot_mrp_add); + +int ocelot_mrp_del(struct ocelot *ocelot, int port, + const struct switchdev_obj_mrp *mrp) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_port_private *priv; + struct net_device *dev; + + if (!ocelot_port) + return -EOPNOTSUPP; + + priv = container_of(ocelot_port, struct ocelot_port_private, port); + dev = priv->dev; + + if (ocelot->mrp_p_port != dev && ocelot->mrp_s_port != dev) + return 0; + + if (ocelot->mrp_ring_id == 0 && + !ocelot->mrp_s_port && + !ocelot->mrp_p_port) + return -EINVAL; + + if (ocelot_mrp_del_vcap(ocelot, priv->chip_port)) + return -EINVAL; + + if (ocelot->mrp_p_port == dev) + ocelot->mrp_p_port = NULL; + + if (ocelot->mrp_s_port == dev) + ocelot->mrp_s_port = NULL; + + ocelot->mrp_ring_id = 0; + + return 0; +} +EXPORT_SYMBOL(ocelot_mrp_del); + +int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port, + const struct switchdev_obj_ring_role_mrp *mrp) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_vcap_filter *filter; + struct ocelot_port_private *priv; + struct net_device *dev; + int err; + + if (!ocelot_port) + return -EOPNOTSUPP; + + priv = container_of(ocelot_port, struct ocelot_port_private, port); + dev = priv->dev; + + if (ocelot->mrp_ring_id != mrp->ring_id) + return -EINVAL; + + if (!mrp->sw_backup) + return -EOPNOTSUPP; + + if (ocelot->mrp_p_port != dev && ocelot->mrp_s_port != dev) + return 0; + + filter = kzalloc(sizeof(*filter), GFP_ATOMIC); + if (!filter) + return -ENOMEM; + + filter->key_type = OCELOT_VCAP_KEY_ETYPE; + filter->prio = 1; + filter->id.cookie = priv->chip_port; + filter->id.tc_offload = false; + filter->block_id = VCAP_IS2; + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + filter->ingress_port_mask = BIT(priv->chip_port); + *(__be16 *)filter->key.etype.etype.value = htons(ETH_P_MRP); + *(__be16 *)filter->key.etype.etype.mask = htons(0xffff); + filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; + filter->action.port_mask = 0x0; + filter->action.cpu_copy_ena = true; + filter->action.cpu_qu_num = OCELOT_MRP_CPUQ; + + err = ocelot_vcap_filter_add(ocelot, filter, NULL); + if (err) + kfree(filter); + + return err; +} 
+EXPORT_SYMBOL(ocelot_mrp_add_ring_role); + +int ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port, + const struct switchdev_obj_ring_role_mrp *mrp) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_port_private *priv; + struct net_device *dev; + + if (!ocelot_port) + return -EOPNOTSUPP; + + priv = container_of(ocelot_port, struct ocelot_port_private, port); + dev = priv->dev; + + if (ocelot->mrp_ring_id != mrp->ring_id) + return -EINVAL; + + if (!mrp->sw_backup) + return -EOPNOTSUPP; + + if (ocelot->mrp_p_port != dev && ocelot->mrp_s_port != dev) + return 0; + + return ocelot_mrp_del_vcap(ocelot, priv->chip_port); +} +EXPORT_SYMBOL(ocelot_mrp_del_ring_role); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 42230f92ca9c..12cb6867a2d0 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -1,13 +1,191 @@ // SPDX-License-Identifier: (GPL-2.0 OR MIT) /* Microsemi Ocelot Switch driver * + * This contains glue logic between the switchdev driver operations and the + * mscc_ocelot_switch_lib. + * * Copyright (c) 2017, 2019 Microsemi Corporation + * Copyright 2020-2021 NXP Semiconductors */ #include <linux/if_bridge.h> +#include <net/pkt_cls.h> #include "ocelot.h" #include "ocelot_vcap.h" +static struct ocelot *devlink_port_to_ocelot(struct devlink_port *dlp) +{ + return devlink_priv(dlp->devlink); +} + +static int devlink_port_to_port(struct devlink_port *dlp) +{ + struct ocelot *ocelot = devlink_port_to_ocelot(dlp); + + return dlp - ocelot->devlink_ports; +} + +static int ocelot_devlink_sb_pool_get(struct devlink *dl, + unsigned int sb_index, u16 pool_index, + struct devlink_sb_pool_info *pool_info) +{ + struct ocelot *ocelot = devlink_priv(dl); + + return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info); +} + +static int ocelot_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index, + u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = devlink_priv(dl); + + return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size, + threshold_type, extack); +} + +static int ocelot_devlink_sb_port_pool_get(struct devlink_port *dlp, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold) +{ + struct ocelot *ocelot = devlink_port_to_ocelot(dlp); + int port = devlink_port_to_port(dlp); + + return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index, + p_threshold); +} + +static int ocelot_devlink_sb_port_pool_set(struct devlink_port *dlp, + unsigned int sb_index, u16 pool_index, + u32 threshold, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = devlink_port_to_ocelot(dlp); + int port = devlink_port_to_port(dlp); + + return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index, + threshold, extack); +} + +static int +ocelot_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold) +{ + struct ocelot *ocelot = devlink_port_to_ocelot(dlp); + int port = devlink_port_to_port(dlp); + + return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index, + pool_type, p_pool_index, + p_threshold); +} + +static int +ocelot_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = 
devlink_port_to_ocelot(dlp); + int port = devlink_port_to_port(dlp); + + return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index, + pool_type, pool_index, threshold, + extack); +} + +static int ocelot_devlink_sb_occ_snapshot(struct devlink *dl, + unsigned int sb_index) +{ + struct ocelot *ocelot = devlink_priv(dl); + + return ocelot_sb_occ_snapshot(ocelot, sb_index); +} + +static int ocelot_devlink_sb_occ_max_clear(struct devlink *dl, + unsigned int sb_index) +{ + struct ocelot *ocelot = devlink_priv(dl); + + return ocelot_sb_occ_max_clear(ocelot, sb_index); +} + +static int ocelot_devlink_sb_occ_port_pool_get(struct devlink_port *dlp, + unsigned int sb_index, + u16 pool_index, u32 *p_cur, + u32 *p_max) +{ + struct ocelot *ocelot = devlink_port_to_ocelot(dlp); + int port = devlink_port_to_port(dlp); + + return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index, + p_cur, p_max); +} + +static int +ocelot_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max) +{ + struct ocelot *ocelot = devlink_port_to_ocelot(dlp); + int port = devlink_port_to_port(dlp); + + return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, + tc_index, pool_type, + p_cur, p_max); +} + +const struct devlink_ops ocelot_devlink_ops = { + .sb_pool_get = ocelot_devlink_sb_pool_get, + .sb_pool_set = ocelot_devlink_sb_pool_set, + .sb_port_pool_get = ocelot_devlink_sb_port_pool_get, + .sb_port_pool_set = ocelot_devlink_sb_port_pool_set, + .sb_tc_pool_bind_get = ocelot_devlink_sb_tc_pool_bind_get, + .sb_tc_pool_bind_set = ocelot_devlink_sb_tc_pool_bind_set, + .sb_occ_snapshot = ocelot_devlink_sb_occ_snapshot, + .sb_occ_max_clear = ocelot_devlink_sb_occ_max_clear, + .sb_occ_port_pool_get = ocelot_devlink_sb_occ_port_pool_get, + .sb_occ_tc_port_bind_get = ocelot_devlink_sb_occ_tc_port_bind_get, +}; + +int ocelot_port_devlink_init(struct ocelot *ocelot, int port, + enum devlink_port_flavour flavour) +{ + struct devlink_port *dlp = &ocelot->devlink_ports[port]; + int id_len = sizeof(ocelot->base_mac); + struct devlink *dl = ocelot->devlink; + struct devlink_port_attrs attrs = {}; + + memcpy(attrs.switch_id.id, &ocelot->base_mac, id_len); + attrs.switch_id.id_len = id_len; + attrs.phys.port_number = port; + attrs.flavour = flavour; + + devlink_port_attrs_set(dlp, &attrs); + + return devlink_port_register(dl, dlp, port); +} + +void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port) +{ + struct devlink_port *dlp = &ocelot->devlink_ports[port]; + + devlink_port_unregister(dlp); +} + +static struct devlink_port *ocelot_get_devlink_port(struct net_device *dev) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + return &ocelot->devlink_ports[port]; +} + int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv, struct flow_cls_offload *f, bool ingress) @@ -310,53 +488,20 @@ static int ocelot_port_stop(struct net_device *dev) return 0; } -/* Generate the IFH for frame injection - * - * The IFH is a 128bit-value - * bit 127: bypass the analyzer processing - * bit 56-67: destination mask - * bit 28-29: pop_cnt: 3 disables all rewriting of the frame - * bit 20-27: cpu extraction queue mask - * bit 16: tag type 0: C-tag, 1: S-tag - * bit 0-11: VID - */ -static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info) -{ - ifh[0] = IFH_INJ_BYPASS | ((0x1ff & info->rew_op) << 21); - ifh[1] = (0xf00 & info->port) >> 
8; - ifh[2] = (0xff & info->port) << 24; - ifh[3] = (info->tag_type << 16) | info->vid; - - return 0; -} - -static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) { struct ocelot_port_private *priv = netdev_priv(dev); - struct skb_shared_info *shinfo = skb_shinfo(skb); struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; - u32 val, ifh[OCELOT_TAG_LEN / 4]; - struct frame_info info = {}; - u8 grp = 0; /* Send everything on CPU group 0 */ - unsigned int i, count, last; int port = priv->chip_port; + u32 rew_op = 0; - val = ocelot_read(ocelot, QS_INJ_STATUS); - if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) || - (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp)))) + if (!ocelot_can_inject(ocelot, 0)) return NETDEV_TX_BUSY; - ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | - QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp); - - info.port = BIT(port); - info.tag_type = IFH_TAG_TYPE_C; - info.vid = skb_vlan_tag_get(skb); - /* Check if timestamping is needed */ - if (ocelot->ptp && (shinfo->tx_flags & SKBTX_HW_TSTAMP)) { - info.rew_op = ocelot_port->ptp_cmd; + if (ocelot->ptp && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + rew_op = ocelot_port->ptp_cmd; if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { struct sk_buff *clone; @@ -369,45 +514,11 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) ocelot_port_add_txtstamp_skb(ocelot, port, clone); - info.rew_op |= clone->cb[0] << 3; + rew_op |= clone->cb[0] << 3; } } - if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) { - info.rew_op = ocelot_port->ptp_cmd; - if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) - info.rew_op |= skb->cb[0] << 3; - } - - ocelot_gen_ifh(ifh, &info); - - for (i = 0; i < OCELOT_TAG_LEN / 4; i++) - ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]), - QS_INJ_WR, grp); - - count = (skb->len + 3) / 4; - last = skb->len % 4; - for (i = 0; i < count; i++) - ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp); - - /* Add padding */ - while (i < (OCELOT_BUFFER_CELL_SZ / 4)) { - ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp); - i++; - } - - /* Indicate EOF and valid bytes in last word */ - ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | - QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 
0 : last) | - QS_INJ_CTRL_EOF, - QS_INJ_CTRL, grp); - - /* Add dummy CRC */ - ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp); - skb_tx_timestamp(skb); - - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; + ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb); kfree_skb(skb); @@ -457,7 +568,7 @@ static void ocelot_mact_work(struct work_struct *work) break; default: break; - }; + } kfree(w); } @@ -525,20 +636,6 @@ static void ocelot_set_rx_mode(struct net_device *dev) __dev_mc_sync(dev, ocelot_mc_sync, ocelot_mc_unsync); } -static int ocelot_port_get_phys_port_name(struct net_device *dev, - char *buf, size_t len) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - int port = priv->chip_port; - int ret; - - ret = snprintf(buf, len, "p%d", port); - if (ret >= len) - return -EINVAL; - - return 0; -} - static int ocelot_port_set_mac_address(struct net_device *dev, void *p) { struct ocelot_port_private *priv = netdev_priv(dev); @@ -689,18 +786,6 @@ static int ocelot_set_features(struct net_device *dev, return 0; } -static int ocelot_get_port_parent_id(struct net_device *dev, - struct netdev_phys_item_id *ppid) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - - ppid->id_len = sizeof(ocelot->base_mac); - memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len); - - return 0; -} - static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct ocelot_port_private *priv = netdev_priv(dev); @@ -727,7 +812,6 @@ static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_stop = ocelot_port_stop, .ndo_start_xmit = ocelot_port_xmit, .ndo_set_rx_mode = ocelot_set_rx_mode, - .ndo_get_phys_port_name = ocelot_port_get_phys_port_name, .ndo_set_mac_address = ocelot_port_set_mac_address, .ndo_get_stats64 = ocelot_get_stats64, .ndo_fdb_add = ocelot_port_fdb_add, @@ -736,9 +820,9 @@ static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid, .ndo_set_features = ocelot_set_features, - .ndo_get_port_parent_id = ocelot_get_port_parent_id, .ndo_setup_tc = ocelot_setup_tc, .ndo_do_ioctl = ocelot_ioctl, + .ndo_get_devlink_port = ocelot_get_devlink_port, }; struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port) @@ -825,12 +909,8 @@ static const struct ethtool_ops ocelot_ethtool_ops = { }; static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port, - struct switchdev_trans *trans, u8 state) { - if (switchdev_trans_ph_prepare(trans)) - return; - ocelot_bridge_stp_state_set(ocelot, port, state); } @@ -859,7 +939,7 @@ static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc) static int ocelot_port_attr_set(struct net_device *dev, const struct switchdev_attr *attr, - struct switchdev_trans *trans) + struct netlink_ext_ack *extack) { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot *ocelot = priv->port.ocelot; @@ -868,19 +948,24 @@ static int ocelot_port_attr_set(struct net_device *dev, switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_STP_STATE: - ocelot_port_attr_stp_state_set(ocelot, port, trans, - attr->u.stp_state); + ocelot_port_attr_stp_state_set(ocelot, port, attr->u.stp_state); break; case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time); break; case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: - ocelot_port_vlan_filtering(ocelot, port, - attr->u.vlan_filtering, trans); + ocelot_port_vlan_filtering(ocelot, 
port, attr->u.vlan_filtering); break; case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled); break; + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + err = ocelot_port_pre_bridge_flags(ocelot, port, + attr->u.brport_flags); + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + ocelot_port_bridge_flags(ocelot, port, attr->u.brport_flags); + break; default: err = -EOPNOTSUPP; break; @@ -890,73 +975,89 @@ static int ocelot_port_attr_set(struct net_device *dev, } static int ocelot_port_obj_add_vlan(struct net_device *dev, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) + const struct switchdev_obj_port_vlan *vlan) { + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; int ret; - u16 vid; - - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { - bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; - bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; - - if (switchdev_trans_ph_prepare(trans)) - ret = ocelot_vlan_vid_prepare(dev, vid, pvid, - untagged); - else - ret = ocelot_vlan_vid_add(dev, vid, pvid, untagged); - if (ret) - return ret; - } - return 0; + ret = ocelot_vlan_vid_prepare(dev, vlan->vid, pvid, untagged); + if (ret) + return ret; + + return ocelot_vlan_vid_add(dev, vlan->vid, pvid, untagged); } -static int ocelot_port_vlan_del_vlan(struct net_device *dev, - const struct switchdev_obj_port_vlan *vlan) +static int ocelot_port_obj_add_mdb(struct net_device *dev, + const struct switchdev_obj_port_mdb *mdb) { - int ret; - u16 vid; + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { - ret = ocelot_vlan_vid_del(dev, vid); + return ocelot_port_mdb_add(ocelot, port, mdb); +} - if (ret) - return ret; - } +static int ocelot_port_obj_del_mdb(struct net_device *dev, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; - return 0; + return ocelot_port_mdb_del(ocelot, port, mdb); } -static int ocelot_port_obj_add_mdb(struct net_device *dev, - const struct switchdev_obj_port_mdb *mdb, - struct switchdev_trans *trans) +static int ocelot_port_obj_mrp_add(struct net_device *dev, + const struct switchdev_obj_mrp *mrp) { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; int port = priv->chip_port; - if (switchdev_trans_ph_prepare(trans)) - return 0; + return ocelot_mrp_add(ocelot, port, mrp); +} - return ocelot_port_mdb_add(ocelot, port, mdb); +static int ocelot_port_obj_mrp_del(struct net_device *dev, + const struct switchdev_obj_mrp *mrp) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; + + return ocelot_mrp_del(ocelot, port, mrp); } -static int ocelot_port_obj_del_mdb(struct net_device *dev, - const struct switchdev_obj_port_mdb *mdb) +static int +ocelot_port_obj_mrp_add_ring_role(struct net_device *dev, + const struct switchdev_obj_ring_role_mrp *mrp) { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot 
= ocelot_port->ocelot; int port = priv->chip_port; - return ocelot_port_mdb_del(ocelot, port, mdb); + return ocelot_mrp_add_ring_role(ocelot, port, mrp); +} + +static int +ocelot_port_obj_mrp_del_ring_role(struct net_device *dev, + const struct switchdev_obj_ring_role_mrp *mrp) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; + + return ocelot_mrp_del_ring_role(ocelot, port, mrp); } static int ocelot_port_obj_add(struct net_device *dev, const struct switchdev_obj *obj, - struct switchdev_trans *trans, struct netlink_ext_ack *extack) { int ret = 0; @@ -964,12 +1065,17 @@ static int ocelot_port_obj_add(struct net_device *dev, switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: ret = ocelot_port_obj_add_vlan(dev, - SWITCHDEV_OBJ_PORT_VLAN(obj), - trans); + SWITCHDEV_OBJ_PORT_VLAN(obj)); break; case SWITCHDEV_OBJ_ID_PORT_MDB: - ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj), - trans); + ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj)); + break; + case SWITCHDEV_OBJ_ID_MRP: + ret = ocelot_port_obj_mrp_add(dev, SWITCHDEV_OBJ_MRP(obj)); + break; + case SWITCHDEV_OBJ_ID_RING_ROLE_MRP: + ret = ocelot_port_obj_mrp_add_ring_role(dev, + SWITCHDEV_OBJ_RING_ROLE_MRP(obj)); break; default: return -EOPNOTSUPP; @@ -985,12 +1091,19 @@ static int ocelot_port_obj_del(struct net_device *dev, switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: - ret = ocelot_port_vlan_del_vlan(dev, - SWITCHDEV_OBJ_PORT_VLAN(obj)); + ret = ocelot_vlan_vid_del(dev, + SWITCHDEV_OBJ_PORT_VLAN(obj)->vid); break; case SWITCHDEV_OBJ_ID_PORT_MDB: ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj)); break; + case SWITCHDEV_OBJ_ID_MRP: + ret = ocelot_port_obj_mrp_del(dev, SWITCHDEV_OBJ_MRP(obj)); + break; + case SWITCHDEV_OBJ_ID_RING_ROLE_MRP: + ret = ocelot_port_obj_mrp_del_ring_role(dev, + SWITCHDEV_OBJ_RING_ROLE_MRP(obj)); + break; default: return -EOPNOTSUPP; } @@ -998,9 +1111,42 @@ static int ocelot_port_obj_del(struct net_device *dev, return ret; } -static int ocelot_netdevice_port_event(struct net_device *dev, - unsigned long event, - struct netdev_notifier_changeupper_info *info) +static int ocelot_netdevice_bridge_join(struct ocelot *ocelot, int port, + struct net_device *bridge) +{ + struct switchdev_brport_flags flags; + int err; + + flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; + flags.val = flags.mask; + + err = ocelot_port_bridge_join(ocelot, port, bridge); + if (err) + return err; + + ocelot_port_bridge_flags(ocelot, port, flags); + + return 0; +} + +static int ocelot_netdevice_bridge_leave(struct ocelot *ocelot, int port, + struct net_device *bridge) +{ + struct switchdev_brport_flags flags; + int err; + + flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; + flags.val = flags.mask & ~BR_LEARNING; + + err = ocelot_port_bridge_leave(ocelot, port, bridge); + + ocelot_port_bridge_flags(ocelot, port, flags); + + return err; +} + +static int ocelot_netdevice_changeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; @@ -1008,71 +1154,103 @@ static int ocelot_netdevice_port_event(struct net_device *dev, int port = priv->chip_port; int err = 0; - switch (event) { - case NETDEV_CHANGEUPPER: - if (netif_is_bridge_master(info->upper_dev)) { - if (info->linking) { - err = 
ocelot_port_bridge_join(ocelot, port, - info->upper_dev); - } else { - err = ocelot_port_bridge_leave(ocelot, port, - info->upper_dev); - } - } - if (netif_is_lag_master(info->upper_dev)) { - if (info->linking) - err = ocelot_port_lag_join(ocelot, port, + if (netif_is_bridge_master(info->upper_dev)) { + if (info->linking) { + err = ocelot_netdevice_bridge_join(ocelot, port, info->upper_dev); - else - ocelot_port_lag_leave(ocelot, port, - info->upper_dev); + } else { + err = ocelot_netdevice_bridge_leave(ocelot, port, + info->upper_dev); + } + } + if (netif_is_lag_master(info->upper_dev)) { + if (info->linking) { + err = ocelot_port_lag_join(ocelot, port, + info->upper_dev, + info->upper_info); + if (err == -EOPNOTSUPP) { + NL_SET_ERR_MSG_MOD(info->info.extack, + "Offloading not supported"); + err = 0; + } + } else { + ocelot_port_lag_leave(ocelot, port, + info->upper_dev); } - break; - default: - break; } - return err; + return notifier_from_errno(err); +} + +static int +ocelot_netdevice_lag_changeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct net_device *lower; + struct list_head *iter; + int err = NOTIFY_DONE; + + netdev_for_each_lower_dev(dev, lower, iter) { + err = ocelot_netdevice_changeupper(lower, info); + if (err) + return notifier_from_errno(err); + } + + return NOTIFY_DONE; +} + +static int +ocelot_netdevice_changelowerstate(struct net_device *dev, + struct netdev_lag_lower_state_info *info) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + bool is_active = info->link_up && info->tx_enabled; + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; + + if (!ocelot_port->bond) + return NOTIFY_DONE; + + if (ocelot_port->lag_tx_active == is_active) + return NOTIFY_DONE; + + ocelot_port_lag_change(ocelot, port, is_active); + + return NOTIFY_OK; } static int ocelot_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct netdev_notifier_changeupper_info *info = ptr; struct net_device *dev = netdev_notifier_info_to_dev(ptr); - int ret = 0; - if (event == NETDEV_PRECHANGEUPPER && - ocelot_netdevice_dev_check(dev) && - netif_is_lag_master(info->upper_dev)) { - struct netdev_lag_upper_info *lag_upper_info = info->upper_info; - struct netlink_ext_ack *extack; + switch (event) { + case NETDEV_CHANGEUPPER: { + struct netdev_notifier_changeupper_info *info = ptr; - if (lag_upper_info && - lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { - extack = netdev_notifier_info_to_extack(&info->info); - NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); + if (ocelot_netdevice_dev_check(dev)) + return ocelot_netdevice_changeupper(dev, info); - ret = -EINVAL; - goto notify; - } + if (netif_is_lag_master(dev)) + return ocelot_netdevice_lag_changeupper(dev, info); + + break; } + case NETDEV_CHANGELOWERSTATE: { + struct netdev_notifier_changelowerstate_info *info = ptr; - if (netif_is_lag_master(dev)) { - struct net_device *slave; - struct list_head *iter; + if (!ocelot_netdevice_dev_check(dev)) + break; - netdev_for_each_lower_dev(dev, slave, iter) { - ret = ocelot_netdevice_port_event(slave, event, info); - if (ret) - goto notify; - } - } else { - ret = ocelot_netdevice_port_event(dev, event, info); + return ocelot_netdevice_changelowerstate(dev, + info->lower_state_info); + } + default: + break; } -notify: - return notifier_from_errno(ret); + return NOTIFY_DONE; } struct notifier_block ocelot_netdevice_nb __read_mostly = { @@ 
-1171,7 +1349,19 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, if (err) { dev_err(ocelot->dev, "register_netdev failed\n"); free_netdev(dev); + ocelot->ports[port] = NULL; + return err; } - return err; + return 0; +} + +void ocelot_release_port(struct ocelot_port *ocelot_port) +{ + struct ocelot_port_private *priv = container_of(ocelot_port, + struct ocelot_port_private, + port); + + unregister_netdev(priv->dev); + free_netdev(priv->dev); } diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index d8c778ee6f1b..37a232911395 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -959,6 +959,12 @@ static void ocelot_vcap_filter_add_to_block(struct ocelot *ocelot, list_add(&filter->list, pos->prev); } +static bool ocelot_vcap_filter_equal(const struct ocelot_vcap_filter *a, + const struct ocelot_vcap_filter *b) +{ + return !memcmp(&a->id, &b->id, sizeof(struct ocelot_vcap_id)); +} + static int ocelot_vcap_block_get_filter_index(struct ocelot_vcap_block *block, struct ocelot_vcap_filter *filter) { @@ -966,7 +972,7 @@ static int ocelot_vcap_block_get_filter_index(struct ocelot_vcap_block *block, int index = 0; list_for_each_entry(tmp, &block->rules, list) { - if (filter->id == tmp->id) + if (ocelot_vcap_filter_equal(filter, tmp)) return index; index++; } @@ -991,16 +997,19 @@ ocelot_vcap_block_find_filter_by_index(struct ocelot_vcap_block *block, } struct ocelot_vcap_filter * -ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int id) +ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int cookie, + bool tc_offload) { struct ocelot_vcap_filter *filter; list_for_each_entry(filter, &block->rules, list) - if (filter->id == id) + if (filter->id.tc_offload == tc_offload && + filter->id.cookie == cookie) return filter; return NULL; } +EXPORT_SYMBOL(ocelot_vcap_block_find_filter_by_id); /* If @on=false, then SNAP, ARP, IP and OAM frames will not match on keys based * on destination and source MAC addresses, but only on higher-level protocol @@ -1150,6 +1159,7 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot, vcap_entry_set(ocelot, index, filter); return 0; } +EXPORT_SYMBOL(ocelot_vcap_filter_add); static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot, struct ocelot_vcap_block *block, @@ -1160,7 +1170,7 @@ static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot, list_for_each_safe(pos, q, &block->rules) { tmp = list_entry(pos, struct ocelot_vcap_filter, list); - if (tmp->id == filter->id) { + if (ocelot_vcap_filter_equal(filter, tmp)) { if (tmp->block_id == VCAP_IS2 && tmp->action.police_ena) ocelot_vcap_policer_del(ocelot, block, @@ -1204,6 +1214,7 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot, return 0; } +EXPORT_SYMBOL(ocelot_vcap_filter_del); int ocelot_vcap_filter_stats_update(struct ocelot *ocelot, struct ocelot_vcap_filter *filter) diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.h b/drivers/net/ethernet/mscc/ocelot_vcap.h index 82fd10581a14..523611ccc48f 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.h +++ b/drivers/net/ethernet/mscc/ocelot_vcap.h @@ -7,304 +7,13 @@ #define _MSCC_OCELOT_VCAP_H_ #include "ocelot.h" -#include "ocelot_police.h" -#include <net/sch_generic.h> -#include <net/pkt_cls.h> +#include <soc/mscc/ocelot_vcap.h> +#include <net/flow_offload.h> #define OCELOT_POLICER_DISCARD 0x17f -struct ocelot_ipv4 { - u8 addr[4]; -}; - -enum ocelot_vcap_bit { - OCELOT_VCAP_BIT_ANY, - 
OCELOT_VCAP_BIT_0, - OCELOT_VCAP_BIT_1 -}; - -struct ocelot_vcap_u8 { - u8 value[1]; - u8 mask[1]; -}; - -struct ocelot_vcap_u16 { - u8 value[2]; - u8 mask[2]; -}; - -struct ocelot_vcap_u24 { - u8 value[3]; - u8 mask[3]; -}; - -struct ocelot_vcap_u32 { - u8 value[4]; - u8 mask[4]; -}; - -struct ocelot_vcap_u40 { - u8 value[5]; - u8 mask[5]; -}; - -struct ocelot_vcap_u48 { - u8 value[6]; - u8 mask[6]; -}; - -struct ocelot_vcap_u64 { - u8 value[8]; - u8 mask[8]; -}; - -struct ocelot_vcap_u128 { - u8 value[16]; - u8 mask[16]; -}; - -struct ocelot_vcap_vid { - u16 value; - u16 mask; -}; - -struct ocelot_vcap_ipv4 { - struct ocelot_ipv4 value; - struct ocelot_ipv4 mask; -}; - -struct ocelot_vcap_udp_tcp { - u16 value; - u16 mask; -}; - -struct ocelot_vcap_port { - u8 value; - u8 mask; -}; - -enum ocelot_vcap_key_type { - OCELOT_VCAP_KEY_ANY, - OCELOT_VCAP_KEY_ETYPE, - OCELOT_VCAP_KEY_LLC, - OCELOT_VCAP_KEY_SNAP, - OCELOT_VCAP_KEY_ARP, - OCELOT_VCAP_KEY_IPV4, - OCELOT_VCAP_KEY_IPV6 -}; - -struct ocelot_vcap_key_vlan { - struct ocelot_vcap_vid vid; /* VLAN ID (12 bit) */ - struct ocelot_vcap_u8 pcp; /* PCP (3 bit) */ - enum ocelot_vcap_bit dei; /* DEI */ - enum ocelot_vcap_bit tagged; /* Tagged/untagged frame */ -}; - -struct ocelot_vcap_key_etype { - struct ocelot_vcap_u48 dmac; - struct ocelot_vcap_u48 smac; - struct ocelot_vcap_u16 etype; - struct ocelot_vcap_u16 data; /* MAC data */ -}; - -struct ocelot_vcap_key_llc { - struct ocelot_vcap_u48 dmac; - struct ocelot_vcap_u48 smac; - - /* LLC header: DSAP at byte 0, SSAP at byte 1, Control at byte 2 */ - struct ocelot_vcap_u32 llc; -}; - -struct ocelot_vcap_key_snap { - struct ocelot_vcap_u48 dmac; - struct ocelot_vcap_u48 smac; - - /* SNAP header: Organization Code at byte 0, Type at byte 3 */ - struct ocelot_vcap_u40 snap; -}; - -struct ocelot_vcap_key_arp { - struct ocelot_vcap_u48 smac; - enum ocelot_vcap_bit arp; /* Opcode ARP/RARP */ - enum ocelot_vcap_bit req; /* Opcode request/reply */ - enum ocelot_vcap_bit unknown; /* Opcode unknown */ - enum ocelot_vcap_bit smac_match; /* Sender MAC matches SMAC */ - enum ocelot_vcap_bit dmac_match; /* Target MAC matches DMAC */ - - /**< Protocol addr. 
length 4, hardware length 6 */ - enum ocelot_vcap_bit length; - - enum ocelot_vcap_bit ip; /* Protocol address type IP */ - enum ocelot_vcap_bit ethernet; /* Hardware address type Ethernet */ - struct ocelot_vcap_ipv4 sip; /* Sender IP address */ - struct ocelot_vcap_ipv4 dip; /* Target IP address */ -}; - -struct ocelot_vcap_key_ipv4 { - enum ocelot_vcap_bit ttl; /* TTL zero */ - enum ocelot_vcap_bit fragment; /* Fragment */ - enum ocelot_vcap_bit options; /* Header options */ - struct ocelot_vcap_u8 ds; - struct ocelot_vcap_u8 proto; /* Protocol */ - struct ocelot_vcap_ipv4 sip; /* Source IP address */ - struct ocelot_vcap_ipv4 dip; /* Destination IP address */ - struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */ - struct ocelot_vcap_udp_tcp sport; /* UDP/TCP: Source port */ - struct ocelot_vcap_udp_tcp dport; /* UDP/TCP: Destination port */ - enum ocelot_vcap_bit tcp_fin; - enum ocelot_vcap_bit tcp_syn; - enum ocelot_vcap_bit tcp_rst; - enum ocelot_vcap_bit tcp_psh; - enum ocelot_vcap_bit tcp_ack; - enum ocelot_vcap_bit tcp_urg; - enum ocelot_vcap_bit sip_eq_dip; /* SIP equals DIP */ - enum ocelot_vcap_bit sport_eq_dport; /* SPORT equals DPORT */ - enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */ -}; - -struct ocelot_vcap_key_ipv6 { - struct ocelot_vcap_u8 proto; /* IPv6 protocol */ - struct ocelot_vcap_u128 sip; /* IPv6 source (byte 0-7 ignored) */ - struct ocelot_vcap_u128 dip; /* IPv6 destination (byte 0-7 ignored) */ - enum ocelot_vcap_bit ttl; /* TTL zero */ - struct ocelot_vcap_u8 ds; - struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */ - struct ocelot_vcap_udp_tcp sport; - struct ocelot_vcap_udp_tcp dport; - enum ocelot_vcap_bit tcp_fin; - enum ocelot_vcap_bit tcp_syn; - enum ocelot_vcap_bit tcp_rst; - enum ocelot_vcap_bit tcp_psh; - enum ocelot_vcap_bit tcp_ack; - enum ocelot_vcap_bit tcp_urg; - enum ocelot_vcap_bit sip_eq_dip; /* SIP equals DIP */ - enum ocelot_vcap_bit sport_eq_dport; /* SPORT equals DPORT */ - enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */ -}; - -enum ocelot_mask_mode { - OCELOT_MASK_MODE_NONE, - OCELOT_MASK_MODE_PERMIT_DENY, - OCELOT_MASK_MODE_POLICY, - OCELOT_MASK_MODE_REDIRECT, -}; - -enum ocelot_es0_tag { - OCELOT_NO_ES0_TAG, - OCELOT_ES0_TAG, - OCELOT_FORCE_PORT_TAG, - OCELOT_FORCE_UNTAG, -}; - -enum ocelot_tag_tpid_sel { - OCELOT_TAG_TPID_SEL_8021Q, - OCELOT_TAG_TPID_SEL_8021AD, -}; - -struct ocelot_vcap_action { - union { - /* VCAP ES0 */ - struct { - enum ocelot_es0_tag push_outer_tag; - enum ocelot_es0_tag push_inner_tag; - enum ocelot_tag_tpid_sel tag_a_tpid_sel; - int tag_a_vid_sel; - int tag_a_pcp_sel; - u16 vid_a_val; - u8 pcp_a_val; - u8 dei_a_val; - enum ocelot_tag_tpid_sel tag_b_tpid_sel; - int tag_b_vid_sel; - int tag_b_pcp_sel; - u16 vid_b_val; - u8 pcp_b_val; - u8 dei_b_val; - }; - - /* VCAP IS1 */ - struct { - bool vid_replace_ena; - u16 vid; - bool vlan_pop_cnt_ena; - int vlan_pop_cnt; - bool pcp_dei_ena; - u8 pcp; - u8 dei; - bool qos_ena; - u8 qos_val; - u8 pag_override_mask; - u8 pag_val; - }; - - /* VCAP IS2 */ - struct { - bool cpu_copy_ena; - u8 cpu_qu_num; - enum ocelot_mask_mode mask_mode; - unsigned long port_mask; - bool police_ena; - struct ocelot_policer pol; - u32 pol_ix; - }; - }; -}; - -struct ocelot_vcap_stats { - u64 bytes; - u64 pkts; - u64 used; -}; - -enum ocelot_vcap_filter_type { - OCELOT_VCAP_FILTER_DUMMY, - OCELOT_VCAP_FILTER_PAG, - OCELOT_VCAP_FILTER_OFFLOAD, -}; - -struct ocelot_vcap_filter { - struct list_head list; - - enum ocelot_vcap_filter_type type; - int 
block_id; - int goto_target; - int lookup; - u8 pag; - u16 prio; - u32 id; - - struct ocelot_vcap_action action; - struct ocelot_vcap_stats stats; - /* For VCAP IS1 and IS2 */ - unsigned long ingress_port_mask; - /* For VCAP ES0 */ - struct ocelot_vcap_port ingress_port; - struct ocelot_vcap_port egress_port; - - enum ocelot_vcap_bit dmac_mc; - enum ocelot_vcap_bit dmac_bc; - struct ocelot_vcap_key_vlan vlan; - - enum ocelot_vcap_key_type key_type; - union { - /* OCELOT_VCAP_KEY_ANY: No specific fields */ - struct ocelot_vcap_key_etype etype; - struct ocelot_vcap_key_llc llc; - struct ocelot_vcap_key_snap snap; - struct ocelot_vcap_key_arp arp; - struct ocelot_vcap_key_ipv4 ipv4; - struct ocelot_vcap_key_ipv6 ipv6; - } key; -}; - -int ocelot_vcap_filter_add(struct ocelot *ocelot, - struct ocelot_vcap_filter *rule, - struct netlink_ext_ack *extack); -int ocelot_vcap_filter_del(struct ocelot *ocelot, - struct ocelot_vcap_filter *rule); int ocelot_vcap_filter_stats_update(struct ocelot *ocelot, struct ocelot_vcap_filter *rule); -struct ocelot_vcap_filter * -ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int id); void ocelot_detect_vcap_constants(struct ocelot *ocelot); int ocelot_vcap_init(struct ocelot *ocelot); diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 9cf2bc5f4289..4bd7e9d9ec61 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -4,6 +4,7 @@ * * Copyright (c) 2017 Microsemi Corporation */ +#include <linux/dsa/ocelot.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of_net.h> @@ -18,8 +19,6 @@ #include <soc/mscc/ocelot_hsio.h> #include "ocelot.h" -#define IFH_EXTRACT_BITFIELD64(x, o, w) (((x) >> (o)) & GENMASK_ULL((w) - 1, 0)) - static const u32 ocelot_ana_regmap[] = { REG(ANA_ADVLEARN, 0x009000), REG(ANA_VLANMASK, 0x009004), @@ -517,7 +516,6 @@ static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops) ocelot->map = ocelot_regmap; ocelot->stats_layout = ocelot_stats_layout; ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout); - ocelot->shared_queue_sz = 224 * 1024; ocelot->num_mact_rows = 1024; ocelot->ops = ops; @@ -533,181 +531,28 @@ static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops) return 0; } -static int ocelot_parse_ifh(u32 *_ifh, struct frame_info *info) -{ - u8 llen, wlen; - u64 ifh[2]; - - ifh[0] = be64_to_cpu(((__force __be64 *)_ifh)[0]); - ifh[1] = be64_to_cpu(((__force __be64 *)_ifh)[1]); - - wlen = IFH_EXTRACT_BITFIELD64(ifh[0], 7, 8); - llen = IFH_EXTRACT_BITFIELD64(ifh[0], 15, 6); - - info->len = OCELOT_BUFFER_CELL_SZ * wlen + llen - 80; - - info->timestamp = IFH_EXTRACT_BITFIELD64(ifh[0], 21, 32); - - info->port = IFH_EXTRACT_BITFIELD64(ifh[1], 43, 4); - - info->tag_type = IFH_EXTRACT_BITFIELD64(ifh[1], 16, 1); - info->vid = IFH_EXTRACT_BITFIELD64(ifh[1], 0, 12); - - return 0; -} - -static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh, - u32 *rval) -{ - u32 val; - u32 bytes_valid; - - val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); - if (val == XTR_NOT_READY) { - if (ifh) - return -EIO; - - do { - val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); - } while (val == XTR_NOT_READY); - } - - switch (val) { - case XTR_ABORT: - return -EIO; - case XTR_EOF_0: - case XTR_EOF_1: - case XTR_EOF_2: - case XTR_EOF_3: - case XTR_PRUNED: - bytes_valid = XTR_VALID_BYTES(val); - val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); - if (val == XTR_ESCAPE) - *rval = 
ocelot_read_rix(ocelot, QS_XTR_RD, grp); - else - *rval = val; - - return bytes_valid; - case XTR_ESCAPE: - *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp); - - return 4; - default: - *rval = val; - - return 4; - } -} - static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) { struct ocelot *ocelot = arg; - int i = 0, grp = 0; - int err = 0; - - if (!(ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))) - return IRQ_NONE; + int grp = 0, err; - do { - struct skb_shared_hwtstamps *shhwtstamps; - struct ocelot_port_private *priv; - struct ocelot_port *ocelot_port; - u64 tod_in_ns, full_ts_in_ns; - struct frame_info info = {}; - struct net_device *dev; - u32 ifh[4], val, *buf; - struct timespec64 ts; - int sz, len, buf_len; + while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) { struct sk_buff *skb; - for (i = 0; i < OCELOT_TAG_LEN / 4; i++) { - err = ocelot_rx_frame_word(ocelot, grp, true, &ifh[i]); - if (err != 4) - break; - } - - if (err != 4) - break; - - /* At this point the IFH was read correctly, so it is safe to - * presume that there is no error. The err needs to be reset - * otherwise a frame could come in CPU queue between the while - * condition and the check for error later on. And in that case - * the new frame is just removed and not processed. - */ - err = 0; - - ocelot_parse_ifh(ifh, &info); - - ocelot_port = ocelot->ports[info.port]; - priv = container_of(ocelot_port, struct ocelot_port_private, - port); - dev = priv->dev; - - skb = netdev_alloc_skb(dev, info.len); - - if (unlikely(!skb)) { - netdev_err(dev, "Unable to allocate sk_buff\n"); - err = -ENOMEM; - break; - } - buf_len = info.len - ETH_FCS_LEN; - buf = (u32 *)skb_put(skb, buf_len); - - len = 0; - do { - sz = ocelot_rx_frame_word(ocelot, grp, false, &val); - *buf++ = val; - len += sz; - } while (len < buf_len); - - /* Read the FCS */ - sz = ocelot_rx_frame_word(ocelot, grp, false, &val); - /* Update the statistics if part of the FCS was read before */ - len -= ETH_FCS_LEN - sz; - - if (unlikely(dev->features & NETIF_F_RXFCS)) { - buf = (u32 *)skb_put(skb, ETH_FCS_LEN); - *buf = val; - } - - if (sz < 0) { - err = sz; - break; - } - - if (ocelot->ptp) { - ocelot_ptp_gettime64(&ocelot->ptp_info, &ts); - - tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec); - if ((tod_in_ns & 0xffffffff) < info.timestamp) - full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) | - info.timestamp; - else - full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) | - info.timestamp; - - shhwtstamps = skb_hwtstamps(skb); - memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); - shhwtstamps->hwtstamp = full_ts_in_ns; - } + err = ocelot_xtr_poll_frame(ocelot, grp, &skb); + if (err) + goto out; - /* Everything we see on an interface that is in the HW bridge - * has already been forwarded. 
- */ - if (ocelot->bridge_mask & BIT(info.port)) - skb->offload_fwd_mark = 1; + skb->dev->stats.rx_bytes += skb->len; + skb->dev->stats.rx_packets++; - skb->protocol = eth_type_trans(skb, dev); if (!skb_defer_rx_timestamp(skb)) netif_rx(skb); - dev->stats.rx_bytes += len; - dev->stats.rx_packets++; - } while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)); + } - if (err) - while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) - ocelot_read_rix(ocelot, QS_XTR_RD, grp); +out: + if (err < 0) + ocelot_drain_cpu_queue(ocelot, 0); return IRQ_HANDLED; } @@ -764,9 +609,25 @@ static u16 ocelot_wm_enc(u16 value) return value; } +static u16 ocelot_wm_dec(u16 wm) +{ + if (wm & BIT(8)) + return (wm & GENMASK(7, 0)) * 16; + + return wm; +} + +static void ocelot_wm_stat(u32 val, u32 *inuse, u32 *maxuse) +{ + *inuse = (val & GENMASK(23, 12)) >> 12; + *maxuse = val & GENMASK(11, 0); +} + static const struct ocelot_ops ocelot_ops = { .reset = ocelot_reset, .wm_enc = ocelot_wm_enc, + .wm_dec = ocelot_wm_dec, + .wm_stat = ocelot_wm_stat, .port_to_netdev = ocelot_port_to_netdev, .netdev_to_port = ocelot_netdev_to_port, }; @@ -1036,12 +897,19 @@ static struct ptp_clock_info ocelot_ptp_clock_info = { .enable = ocelot_ptp_enable, }; +static void mscc_ocelot_teardown_devlink_ports(struct ocelot *ocelot) +{ + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) + ocelot_port_devlink_teardown(ocelot, port); +} + static void mscc_ocelot_release_ports(struct ocelot *ocelot) { int port; for (port = 0; port < ocelot->num_phys_ports; port++) { - struct ocelot_port_private *priv; struct ocelot_port *ocelot_port; ocelot_port = ocelot->ports[port]; @@ -1049,12 +917,7 @@ static void mscc_ocelot_release_ports(struct ocelot *ocelot) continue; ocelot_deinit_port(ocelot, port); - - priv = container_of(ocelot_port, struct ocelot_port_private, - port); - - unregister_netdev(priv->dev); - free_netdev(priv->dev); + ocelot_release_port(ocelot_port); } } @@ -1062,36 +925,55 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev, struct device_node *ports) { struct ocelot *ocelot = platform_get_drvdata(pdev); + u32 devlink_ports_registered = 0; struct device_node *portnp; - int err; + int port, err; + u32 reg; ocelot->ports = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports, sizeof(struct ocelot_port *), GFP_KERNEL); if (!ocelot->ports) return -ENOMEM; + ocelot->devlink_ports = devm_kcalloc(ocelot->dev, + ocelot->num_phys_ports, + sizeof(*ocelot->devlink_ports), + GFP_KERNEL); + if (!ocelot->devlink_ports) + return -ENOMEM; + for_each_available_child_of_node(ports, portnp) { struct ocelot_port_private *priv; struct ocelot_port *ocelot_port; struct device_node *phy_node; + struct devlink_port *dlp; phy_interface_t phy_mode; struct phy_device *phy; struct regmap *target; struct resource *res; struct phy *serdes; char res_name[8]; - u32 port; - if (of_property_read_u32(portnp, "reg", &port)) + if (of_property_read_u32(portnp, "reg", ®)) + continue; + + port = reg; + if (port < 0 || port >= ocelot->num_phys_ports) { + dev_err(ocelot->dev, + "invalid port number: %d >= %d\n", port, + ocelot->num_phys_ports); continue; + } snprintf(res_name, sizeof(res_name), "port%d", port); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name); target = ocelot_regmap_init(ocelot, res); - if (IS_ERR(target)) - continue; + if (IS_ERR(target)) { + err = PTR_ERR(target); + goto out_teardown; + } phy_node = of_parse_phandle(portnp, "phy-handle", 0); if (!phy_node) @@ -1102,15 +984,25 @@ static int 
mscc_ocelot_init_ports(struct platform_device *pdev, if (!phy) continue; + err = ocelot_port_devlink_init(ocelot, port, + DEVLINK_PORT_FLAVOUR_PHYSICAL); + if (err) { + of_node_put(portnp); + goto out_teardown; + } + devlink_ports_registered |= BIT(port); + err = ocelot_probe_port(ocelot, port, target, phy); if (err) { of_node_put(portnp); - return err; + goto out_teardown; } ocelot_port = ocelot->ports[port]; priv = container_of(ocelot_port, struct ocelot_port_private, port); + dlp = &ocelot->devlink_ports[port]; + devlink_port_type_eth_set(dlp, priv->dev); of_get_phy_mode(portnp, &phy_mode); @@ -1135,7 +1027,8 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev, "invalid phy mode for port%d, (Q)SGMII only\n", port); of_node_put(portnp); - return -EINVAL; + err = -EINVAL; + goto out_teardown; } serdes = devm_of_phy_get(ocelot->dev, portnp, NULL); @@ -1149,13 +1042,36 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev, port); of_node_put(portnp); - return err; + goto out_teardown; } priv->serdes = serdes; } + /* Initialize unused devlink ports at the end */ + for (port = 0; port < ocelot->num_phys_ports; port++) { + if (devlink_ports_registered & BIT(port)) + continue; + + err = ocelot_port_devlink_init(ocelot, port, + DEVLINK_PORT_FLAVOUR_UNUSED); + if (err) + goto out_teardown; + + devlink_ports_registered |= BIT(port); + } + return 0; + +out_teardown: + /* Unregister the network interfaces */ + mscc_ocelot_release_ports(ocelot); + /* Tear down devlink ports for the registered network interfaces */ + for (port = 0; port < ocelot->num_phys_ports; port++) { + if (devlink_ports_registered & BIT(port)) + ocelot_port_devlink_teardown(ocelot, port); + } + return err; } static int mscc_ocelot_probe(struct platform_device *pdev) @@ -1163,6 +1079,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; int err, irq_xtr, irq_ptp_rdy; struct device_node *ports; + struct devlink *devlink; struct ocelot *ocelot; struct regmap *hsio; unsigned int i; @@ -1186,10 +1103,12 @@ static int mscc_ocelot_probe(struct platform_device *pdev) if (!np && !pdev->dev.platform_data) return -ENODEV; - ocelot = devm_kzalloc(&pdev->dev, sizeof(*ocelot), GFP_KERNEL); - if (!ocelot) + devlink = devlink_alloc(&ocelot_devlink_ops, sizeof(*ocelot)); + if (!devlink) return -ENOMEM; + ocelot = devlink_priv(devlink); + ocelot->devlink = priv_to_devlink(ocelot); platform_set_drvdata(pdev, ocelot); ocelot->dev = &pdev->dev; @@ -1206,7 +1125,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev) ocelot->targets[io_target[i].id] = NULL; continue; } - return PTR_ERR(target); + err = PTR_ERR(target); + goto out_free_devlink; } ocelot->targets[io_target[i].id] = target; @@ -1215,24 +1135,27 @@ static int mscc_ocelot_probe(struct platform_device *pdev) hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio"); if (IS_ERR(hsio)) { dev_err(&pdev->dev, "missing hsio syscon\n"); - return PTR_ERR(hsio); + err = PTR_ERR(hsio); + goto out_free_devlink; } ocelot->targets[HSIO] = hsio; err = ocelot_chip_init(ocelot, &ocelot_ops); if (err) - return err; + goto out_free_devlink; irq_xtr = platform_get_irq_byname(pdev, "xtr"); - if (irq_xtr < 0) - return -ENODEV; + if (irq_xtr < 0) { + err = irq_xtr; + goto out_free_devlink; + } err = devm_request_threaded_irq(&pdev->dev, irq_xtr, NULL, ocelot_xtr_irq_handler, IRQF_ONESHOT, "frame extraction", ocelot); if (err) - return err; + goto out_free_devlink; irq_ptp_rdy = platform_get_irq_byname(pdev, "ptp_rdy"); 
if (irq_ptp_rdy > 0 && ocelot->targets[PTP]) { @@ -1241,7 +1164,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) IRQF_ONESHOT, "ptp ready", ocelot); if (err) - return err; + goto out_free_devlink; /* Both the PTP interrupt and the PTP bank are available */ ocelot->ptp = 1; @@ -1250,25 +1173,32 @@ static int mscc_ocelot_probe(struct platform_device *pdev) ports = of_get_child_by_name(np, "ethernet-ports"); if (!ports) { dev_err(ocelot->dev, "no ethernet-ports child node found\n"); - return -ENODEV; + err = -ENODEV; + goto out_free_devlink; } ocelot->num_phys_ports = of_get_child_count(ports); ocelot->num_flooding_pgids = 1; ocelot->vcap = vsc7514_vcap_props; - ocelot->inj_prefix = OCELOT_TAG_PREFIX_NONE; - ocelot->xtr_prefix = OCELOT_TAG_PREFIX_NONE; ocelot->npi = -1; err = ocelot_init(ocelot); if (err) goto out_put_ports; - err = mscc_ocelot_init_ports(pdev, ports); + err = devlink_register(devlink, ocelot->dev); if (err) goto out_ocelot_deinit; + err = mscc_ocelot_init_ports(pdev, ports); + if (err) + goto out_ocelot_devlink_unregister; + + err = ocelot_devlink_sb_register(ocelot); + if (err) + goto out_ocelot_release_ports; + if (ocelot->ptp) { err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info); if (err) { @@ -1288,10 +1218,17 @@ static int mscc_ocelot_probe(struct platform_device *pdev) return 0; +out_ocelot_release_ports: + mscc_ocelot_release_ports(ocelot); + mscc_ocelot_teardown_devlink_ports(ocelot); +out_ocelot_devlink_unregister: + devlink_unregister(devlink); out_ocelot_deinit: ocelot_deinit(ocelot); out_put_ports: of_node_put(ports); +out_free_devlink: + devlink_free(devlink); return err; } @@ -1300,11 +1237,15 @@ static int mscc_ocelot_remove(struct platform_device *pdev) struct ocelot *ocelot = platform_get_drvdata(pdev); ocelot_deinit_timestamp(ocelot); + ocelot_devlink_sb_unregister(ocelot); mscc_ocelot_release_ports(ocelot); + mscc_ocelot_teardown_devlink_ports(ocelot); + devlink_unregister(ocelot->devlink); ocelot_deinit(ocelot); unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb); unregister_switchdev_notifier(&ocelot_switchdev_nb); unregister_netdevice_notifier(&ocelot_netdevice_nb); + devlink_free(ocelot->devlink); return 0; } |
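For readers integrating against the new devlink plumbing, here is a minimal sketch (not part of the patch) of the setup order that mscc_ocelot_probe() above establishes: allocate the devlink instance first so that struct ocelot lives in its private area, register the devlink ports before the shared-buffer objects, and tear everything down in reverse. The helper name example_ocelot_devlink_setup() is hypothetical, error unwinding is abbreviated, and details such as ocelot->num_phys_ports and the register targets are assumed to have been set up the same way the real probe function does.

#include <net/devlink.h>
#include "ocelot.h"

/* Hypothetical helper, modeled on the ordering in mscc_ocelot_probe() above. */
static int example_ocelot_devlink_setup(struct device *dev)
{
	struct devlink *dl;
	struct ocelot *ocelot;
	int port, err;

	/* struct ocelot is now the devlink private data. */
	dl = devlink_alloc(&ocelot_devlink_ops, sizeof(*ocelot));
	if (!dl)
		return -ENOMEM;

	ocelot = devlink_priv(dl);
	ocelot->devlink = priv_to_devlink(ocelot);
	ocelot->dev = dev;

	/* ocelot->num_phys_ports, register maps, etc. are assumed to be
	 * initialized at this point, as in the real probe function.
	 */
	err = ocelot_init(ocelot);
	if (err)
		goto out_free;

	err = devlink_register(dl, dev);
	if (err)
		goto out_deinit;

	/* One devlink_port per front port, before the netdevs exist. */
	for (port = 0; port < ocelot->num_phys_ports; port++) {
		err = ocelot_port_devlink_init(ocelot, port,
					       DEVLINK_PORT_FLAVOUR_PHYSICAL);
		if (err)
			goto out_teardown;
	}

	/* Shared buffers (OCELOT_SB_BUF, OCELOT_SB_REF) come last. */
	err = ocelot_devlink_sb_register(ocelot);
	if (err)
		goto out_teardown;

	return 0;

out_teardown:
	while (port-- > 0)
		ocelot_port_devlink_teardown(ocelot, port);
	devlink_unregister(dl);
out_deinit:
	ocelot_deinit(ocelot);
out_free:
	devlink_free(dl);
	return err;
}

A related convention introduced by this patch: a VCAP filter is now identified by the pair (id.cookie, id.tc_offload) rather than a bare u32. tc-flower offloads store the flow cookie with tc_offload set to true, while internal users such as the MRP code reuse the chip port number as the cookie with tc_offload set to false, which is why ocelot_vcap_block_find_filter_by_id() gained the extra boolean argument.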