Diffstat (limited to 'net/dsa')
-rw-r--r-- | net/dsa/dsa.c         |   6
-rw-r--r-- | net/dsa/dsa2.c        |   2
-rw-r--r-- | net/dsa/dsa_priv.h    |  17
-rw-r--r-- | net/dsa/master.c      |  21
-rw-r--r-- | net/dsa/port.c        |  71
-rw-r--r-- | net/dsa/slave.c       | 437
-rw-r--r-- | net/dsa/switch.c      |  37
-rw-r--r-- | net/dsa/tag_8021q.c   |  43
-rw-r--r-- | net/dsa/tag_brcm.c    |  25
-rw-r--r-- | net/dsa/tag_ocelot.c  |   3
-rw-r--r-- | net/dsa/tag_sja1105.c |  19
11 files changed, 553 insertions(+), 128 deletions(-)
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 17281fec710c..ee2610c4d46a 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -88,13 +88,9 @@ const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol) { struct dsa_tag_driver *dsa_tag_driver; const struct dsa_device_ops *ops; - char module_name[128]; bool found = false; - snprintf(module_name, 127, "%s%d", DSA_TAG_DRIVER_ALIAS, - tag_protocol); - - request_module(module_name); + request_module("%s%d", DSA_TAG_DRIVER_ALIAS, tag_protocol); mutex_lock(&dsa_tag_drivers_lock); list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) { diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index e7c30b472034..9a271a58a41d 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -18,8 +18,8 @@ #include "dsa_priv.h" -static LIST_HEAD(dsa_tree_list); static DEFINE_MUTEX(dsa2_mutex); +LIST_HEAD(dsa_tree_list); static const struct devlink_ops dsa_devlink_ops = { }; diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index a7662e7a691d..904cc7c9b882 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -22,6 +22,7 @@ enum { DSA_NOTIFIER_MDB_DEL, DSA_NOTIFIER_VLAN_ADD, DSA_NOTIFIER_VLAN_DEL, + DSA_NOTIFIER_MTU, }; /* DSA_NOTIFIER_AGEING_TIME */ @@ -61,6 +62,14 @@ struct dsa_notifier_vlan_info { int port; }; +/* DSA_NOTIFIER_MTU */ +struct dsa_notifier_mtu_info { + bool propagate_upstream; + int sw_index; + int port; + int mtu; +}; + struct dsa_slave_priv { /* Copy of CPU port xmit for faster access in slave transmit hot path */ struct sk_buff * (*xmit)(struct sk_buff *skb, @@ -117,7 +126,9 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev, /* port.c */ int dsa_port_set_state(struct dsa_port *dp, u8 state, struct switchdev_trans *trans); +int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy); int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy); +void dsa_port_disable_rt(struct dsa_port *dp); void dsa_port_disable(struct dsa_port *dp); int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br); void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br); @@ -125,6 +136,8 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering, struct switchdev_trans *trans); int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock, struct switchdev_trans *trans); +int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu, + bool propagate_upstream); int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr, u16 vid); int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr, @@ -181,4 +194,8 @@ dsa_slave_to_master(const struct net_device *dev) /* switch.c */ int dsa_switch_register_notifier(struct dsa_switch *ds); void dsa_switch_unregister_notifier(struct dsa_switch *ds); + +/* dsa2.c */ +extern struct list_head dsa_tree_list; + #endif diff --git a/net/dsa/master.c b/net/dsa/master.c index bd44bde272f4..b5c535af63a3 100644 --- a/net/dsa/master.c +++ b/net/dsa/master.c @@ -314,20 +314,6 @@ static const struct attribute_group dsa_group = { .attrs = dsa_slave_attrs, }; -static void dsa_master_set_mtu(struct net_device *dev, struct dsa_port *cpu_dp) -{ - unsigned int mtu = ETH_DATA_LEN + cpu_dp->tag_ops->overhead; - int err; - - rtnl_lock(); - if (mtu <= dev->max_mtu) { - err = dev_set_mtu(dev, mtu); - if (err) - netdev_dbg(dev, "Unable to set MTU to include for DSA overheads\n"); - } - rtnl_unlock(); -} - static void dsa_master_reset_mtu(struct net_device *dev) { int err; @@ -344,7 +330,12 @@ int dsa_master_setup(struct net_device *dev, struct 
dsa_port *cpu_dp) { int ret; - dsa_master_set_mtu(dev, cpu_dp); + rtnl_lock(); + ret = dev_set_mtu(dev, ETH_DATA_LEN + cpu_dp->tag_ops->overhead); + rtnl_unlock(); + if (ret) + netdev_warn(dev, "error %d setting MTU to include DSA overhead\n", + ret); /* If we use a tagging format that doesn't have an ethertype * field, make sure that all packets from this point on get diff --git a/net/dsa/port.c b/net/dsa/port.c index 774facb8d547..231b2d494f1c 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -63,7 +63,7 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state) pr_err("DSA: failed to set STP state %u (%d)\n", state, err); } -int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) +int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy) { struct dsa_switch *ds = dp->ds; int port = dp->index; @@ -78,14 +78,31 @@ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) if (!dp->bridge_dev) dsa_port_set_state_now(dp, BR_STATE_FORWARDING); + if (dp->pl) + phylink_start(dp->pl); + return 0; } -void dsa_port_disable(struct dsa_port *dp) +int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) +{ + int err; + + rtnl_lock(); + err = dsa_port_enable_rt(dp, phy); + rtnl_unlock(); + + return err; +} + +void dsa_port_disable_rt(struct dsa_port *dp) { struct dsa_switch *ds = dp->ds; int port = dp->index; + if (dp->pl) + phylink_stop(dp->pl); + if (!dp->bridge_dev) dsa_port_set_state_now(dp, BR_STATE_DISABLED); @@ -93,6 +110,13 @@ void dsa_port_disable(struct dsa_port *dp) ds->ops->port_disable(ds, port); } +void dsa_port_disable(struct dsa_port *dp) +{ + rtnl_lock(); + dsa_port_disable_rt(dp); + rtnl_unlock(); +} + int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br) { struct dsa_notifier_bridge_info info = { @@ -273,6 +297,19 @@ int dsa_port_mrouter(struct dsa_port *dp, bool mrouter, return ds->ops->port_egress_floods(ds, port, true, mrouter); } +int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu, + bool propagate_upstream) +{ + struct dsa_notifier_mtu_info info = { + .sw_index = dp->ds->index, + .propagate_upstream = propagate_upstream, + .port = dp->index, + .mtu = new_mtu, + }; + + return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info); +} + int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr, u16 vid) { @@ -433,6 +470,7 @@ static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config, { struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); struct dsa_switch *ds = dp->ds; + int err; /* Only called for inband modes */ if (!ds->ops->phylink_mac_link_state) { @@ -440,8 +478,12 @@ static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config, return; } - if (ds->ops->phylink_mac_link_state(ds, dp->index, state) < 0) + err = ds->ops->phylink_mac_link_state(ds, dp->index, state); + if (err < 0) { + dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n", + dp->index, err); state->link = 0; + } } static void dsa_port_phylink_mac_config(struct phylink_config *config, @@ -489,9 +531,11 @@ static void dsa_port_phylink_mac_link_down(struct phylink_config *config, } static void dsa_port_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, phy_interface_t interface, - struct phy_device *phydev) + int speed, int duplex, + bool tx_pause, bool rx_pause) { struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); struct dsa_switch *ds = dp->ds; @@ -502,7 +546,8 @@ static void dsa_port_phylink_mac_link_up(struct 
phylink_config *config, return; } - ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev); + ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev, + speed, duplex, tx_pause, rx_pause); } const struct phylink_mac_ops dsa_port_phylink_mac_ops = { @@ -614,10 +659,6 @@ static int dsa_port_phylink_register(struct dsa_port *dp) goto err_phy_connect; } - rtnl_lock(); - phylink_start(dp->pl); - rtnl_unlock(); - return 0; err_phy_connect: @@ -628,9 +669,14 @@ err_phy_connect: int dsa_port_link_register_of(struct dsa_port *dp) { struct dsa_switch *ds = dp->ds; + struct device_node *phy_np; - if (!ds->ops->adjust_link) - return dsa_port_phylink_register(dp); + if (!ds->ops->adjust_link) { + phy_np = of_parse_phandle(dp->dn, "phy-handle", 0); + if (of_phy_is_fixed_link(dp->dn) || phy_np) + return dsa_port_phylink_register(dp); + return 0; + } dev_warn(ds->dev, "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n"); @@ -645,11 +691,12 @@ void dsa_port_link_unregister_of(struct dsa_port *dp) { struct dsa_switch *ds = dp->ds; - if (!ds->ops->adjust_link) { + if (!ds->ops->adjust_link && dp->pl) { rtnl_lock(); phylink_disconnect_phy(dp->pl); rtnl_unlock(); phylink_destroy(dp->pl); + dp->pl = NULL; return; } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 088c886e609e..5390ff541658 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -88,12 +88,10 @@ static int dsa_slave_open(struct net_device *dev) goto clear_allmulti; } - err = dsa_port_enable(dp, dev->phydev); + err = dsa_port_enable_rt(dp, dev->phydev); if (err) goto clear_promisc; - phylink_start(dp->pl); - return 0; clear_promisc: @@ -114,9 +112,7 @@ static int dsa_slave_close(struct net_device *dev) struct net_device *master = dsa_slave_to_master(dev); struct dsa_port *dp = dsa_slave_to_port(dev); - phylink_stop(dp->pl); - - dsa_port_disable(dp); + dsa_port_disable_rt(dp); dev_mc_unsync(master, dev); dev_uc_unsync(master, dev); @@ -846,59 +842,137 @@ dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie) return NULL; } -static int dsa_slave_add_cls_matchall(struct net_device *dev, - struct tc_cls_matchall_offload *cls, - bool ingress) +static int +dsa_slave_add_cls_matchall_mirred(struct net_device *dev, + struct tc_cls_matchall_offload *cls, + bool ingress) { struct dsa_port *dp = dsa_slave_to_port(dev); struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_mall_mirror_tc_entry *mirror; struct dsa_mall_tc_entry *mall_tc_entry; - __be16 protocol = cls->common.protocol; struct dsa_switch *ds = dp->ds; struct flow_action_entry *act; struct dsa_port *to_dp; - int err = -EOPNOTSUPP; + int err; + + act = &cls->rule->action.entries[0]; if (!ds->ops->port_mirror_add) - return err; + return -EOPNOTSUPP; - if (!flow_offload_has_one_action(&cls->rule->action)) - return err; + if (!act->dev) + return -EINVAL; + + if (!flow_action_basic_hw_stats_check(&cls->rule->action, + cls->common.extack)) + return -EOPNOTSUPP; act = &cls->rule->action.entries[0]; - if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) { - struct dsa_mall_mirror_tc_entry *mirror; + if (!dsa_slave_dev_check(act->dev)) + return -EOPNOTSUPP; - if (!act->dev) - return -EINVAL; + mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); + if (!mall_tc_entry) + return -ENOMEM; - if (!dsa_slave_dev_check(act->dev)) - return -EOPNOTSUPP; + mall_tc_entry->cookie = cls->cookie; + mall_tc_entry->type = DSA_PORT_MALL_MIRROR; + mirror = &mall_tc_entry->mirror; - mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), 
GFP_KERNEL); - if (!mall_tc_entry) - return -ENOMEM; + to_dp = dsa_slave_to_port(act->dev); - mall_tc_entry->cookie = cls->cookie; - mall_tc_entry->type = DSA_PORT_MALL_MIRROR; - mirror = &mall_tc_entry->mirror; + mirror->to_local_port = to_dp->index; + mirror->ingress = ingress; - to_dp = dsa_slave_to_port(act->dev); + err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress); + if (err) { + kfree(mall_tc_entry); + return err; + } - mirror->to_local_port = to_dp->index; - mirror->ingress = ingress; + list_add_tail(&mall_tc_entry->list, &p->mall_tc_list); - err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress); - if (err) { - kfree(mall_tc_entry); - return err; + return err; +} + +static int +dsa_slave_add_cls_matchall_police(struct net_device *dev, + struct tc_cls_matchall_offload *cls, + bool ingress) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_mall_policer_tc_entry *policer; + struct dsa_mall_tc_entry *mall_tc_entry; + struct dsa_switch *ds = dp->ds; + struct flow_action_entry *act; + int err; + + if (!ds->ops->port_policer_add) { + NL_SET_ERR_MSG_MOD(extack, + "Policing offload not implemented\n"); + return -EOPNOTSUPP; + } + + if (!ingress) { + NL_SET_ERR_MSG_MOD(extack, + "Only supported on ingress qdisc\n"); + return -EOPNOTSUPP; + } + + if (!flow_action_basic_hw_stats_check(&cls->rule->action, + cls->common.extack)) + return -EOPNOTSUPP; + + list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) { + if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) { + NL_SET_ERR_MSG_MOD(extack, + "Only one port policer allowed\n"); + return -EEXIST; } + } - list_add_tail(&mall_tc_entry->list, &p->mall_tc_list); + act = &cls->rule->action.entries[0]; + + mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); + if (!mall_tc_entry) + return -ENOMEM; + + mall_tc_entry->cookie = cls->cookie; + mall_tc_entry->type = DSA_PORT_MALL_POLICER; + policer = &mall_tc_entry->policer; + policer->rate_bytes_per_sec = act->police.rate_bytes_ps; + policer->burst = act->police.burst; + + err = ds->ops->port_policer_add(ds, dp->index, policer); + if (err) { + kfree(mall_tc_entry); + return err; } - return 0; + list_add_tail(&mall_tc_entry->list, &p->mall_tc_list); + + return err; +} + +static int dsa_slave_add_cls_matchall(struct net_device *dev, + struct tc_cls_matchall_offload *cls, + bool ingress) +{ + int err = -EOPNOTSUPP; + + if (cls->common.protocol == htons(ETH_P_ALL) && + flow_offload_has_one_action(&cls->rule->action) && + cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED) + err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress); + else if (flow_offload_has_one_action(&cls->rule->action) && + cls->rule->action.entries[0].id == FLOW_ACTION_POLICE) + err = dsa_slave_add_cls_matchall_police(dev, cls, ingress); + + return err; } static void dsa_slave_del_cls_matchall(struct net_device *dev, @@ -908,9 +982,6 @@ static void dsa_slave_del_cls_matchall(struct net_device *dev, struct dsa_mall_tc_entry *mall_tc_entry; struct dsa_switch *ds = dp->ds; - if (!ds->ops->port_mirror_del) - return; - mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie); if (!mall_tc_entry) return; @@ -919,7 +990,13 @@ static void dsa_slave_del_cls_matchall(struct net_device *dev, switch (mall_tc_entry->type) { case DSA_PORT_MALL_MIRROR: - ds->ops->port_mirror_del(ds, dp->index, &mall_tc_entry->mirror); + if (ds->ops->port_mirror_del) + ds->ops->port_mirror_del(ds, dp->index, + 
&mall_tc_entry->mirror); + break; + case DSA_PORT_MALL_POLICER: + if (ds->ops->port_policer_del) + ds->ops->port_policer_del(ds, dp->index); break; default: WARN_ON(1); @@ -946,6 +1023,64 @@ static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev, } } +static int dsa_slave_add_cls_flower(struct net_device *dev, + struct flow_cls_offload *cls, + bool ingress) +{ + struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_switch *ds = dp->ds; + int port = dp->index; + + if (!ds->ops->cls_flower_add) + return -EOPNOTSUPP; + + return ds->ops->cls_flower_add(ds, port, cls, ingress); +} + +static int dsa_slave_del_cls_flower(struct net_device *dev, + struct flow_cls_offload *cls, + bool ingress) +{ + struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_switch *ds = dp->ds; + int port = dp->index; + + if (!ds->ops->cls_flower_del) + return -EOPNOTSUPP; + + return ds->ops->cls_flower_del(ds, port, cls, ingress); +} + +static int dsa_slave_stats_cls_flower(struct net_device *dev, + struct flow_cls_offload *cls, + bool ingress) +{ + struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_switch *ds = dp->ds; + int port = dp->index; + + if (!ds->ops->cls_flower_stats) + return -EOPNOTSUPP; + + return ds->ops->cls_flower_stats(ds, port, cls, ingress); +} + +static int dsa_slave_setup_tc_cls_flower(struct net_device *dev, + struct flow_cls_offload *cls, + bool ingress) +{ + switch (cls->command) { + case FLOW_CLS_REPLACE: + return dsa_slave_add_cls_flower(dev, cls, ingress); + case FLOW_CLS_DESTROY: + return dsa_slave_del_cls_flower(dev, cls, ingress); + case FLOW_CLS_STATS: + return dsa_slave_stats_cls_flower(dev, cls, ingress); + default: + return -EOPNOTSUPP; + } +} + static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv, bool ingress) { @@ -957,6 +1092,8 @@ static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data, switch (type) { case TC_SETUP_CLSMATCHALL: return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress); + case TC_SETUP_CLSFLOWER: + return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress); default: return -EOPNOTSUPP; } @@ -1158,6 +1295,208 @@ static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, return dsa_port_vid_del(dp, vid); } +struct dsa_hw_port { + struct list_head list; + struct net_device *dev; + int old_mtu; +}; + +static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu) +{ + const struct dsa_hw_port *p; + int err; + + list_for_each_entry(p, hw_port_list, list) { + if (p->dev->mtu == mtu) + continue; + + err = dev_set_mtu(p->dev, mtu); + if (err) + goto rollback; + } + + return 0; + +rollback: + list_for_each_entry_continue_reverse(p, hw_port_list, list) { + if (p->dev->mtu == p->old_mtu) + continue; + + if (dev_set_mtu(p->dev, p->old_mtu)) + netdev_err(p->dev, "Failed to restore MTU\n"); + } + + return err; +} + +static void dsa_hw_port_list_free(struct list_head *hw_port_list) +{ + struct dsa_hw_port *p, *n; + + list_for_each_entry_safe(p, n, hw_port_list, list) + kfree(p); +} + +/* Make the hardware datapath to/from @dev limited to a common MTU */ +void dsa_bridge_mtu_normalization(struct dsa_port *dp) +{ + struct list_head hw_port_list; + struct dsa_switch_tree *dst; + int min_mtu = ETH_MAX_MTU; + struct dsa_port *other_dp; + int err; + + if (!dp->ds->mtu_enforcement_ingress) + return; + + if (!dp->bridge_dev) + return; + + INIT_LIST_HEAD(&hw_port_list); + + /* Populate the list of ports that are part of the same bridge + * as the newly 
added/modified port + */ + list_for_each_entry(dst, &dsa_tree_list, list) { + list_for_each_entry(other_dp, &dst->ports, list) { + struct dsa_hw_port *hw_port; + struct net_device *slave; + + if (other_dp->type != DSA_PORT_TYPE_USER) + continue; + + if (other_dp->bridge_dev != dp->bridge_dev) + continue; + + if (!other_dp->ds->mtu_enforcement_ingress) + continue; + + slave = other_dp->slave; + + if (min_mtu > slave->mtu) + min_mtu = slave->mtu; + + hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL); + if (!hw_port) + goto out; + + hw_port->dev = slave; + hw_port->old_mtu = slave->mtu; + + list_add(&hw_port->list, &hw_port_list); + } + } + + /* Attempt to configure the entire hardware bridge to the newly added + * interface's MTU first, regardless of whether the intention of the + * user was to raise or lower it. + */ + err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu); + if (!err) + goto out; + + /* Clearly that didn't work out so well, so just set the minimum MTU on + * all hardware bridge ports now. If this fails too, then all ports will + * still have their old MTU rolled back anyway. + */ + dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu); + +out: + dsa_hw_port_list_free(&hw_port_list); +} + +static int dsa_slave_change_mtu(struct net_device *dev, int new_mtu) +{ + struct net_device *master = dsa_slave_to_master(dev); + struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->dp->ds; + struct dsa_port *cpu_dp; + int port = p->dp->index; + int largest_mtu = 0; + int new_master_mtu; + int old_master_mtu; + int mtu_limit; + int cpu_mtu; + int err, i; + + if (!ds->ops->port_change_mtu) + return -EOPNOTSUPP; + + for (i = 0; i < ds->num_ports; i++) { + int slave_mtu; + + if (!dsa_is_user_port(ds, i)) + continue; + + /* During probe, this function will be called for each slave + * device, while not all of them have been allocated. That's + * ok, it doesn't change what the maximum is, so ignore it. + */ + if (!dsa_to_port(ds, i)->slave) + continue; + + /* Pretend that we already applied the setting, which we + * actually haven't (still haven't done all integrity checks) + */ + if (i == port) + slave_mtu = new_mtu; + else + slave_mtu = dsa_to_port(ds, i)->slave->mtu; + + if (largest_mtu < slave_mtu) + largest_mtu = slave_mtu; + } + + cpu_dp = dsa_to_port(ds, port)->cpu_dp; + + mtu_limit = min_t(int, master->max_mtu, dev->max_mtu); + old_master_mtu = master->mtu; + new_master_mtu = largest_mtu + cpu_dp->tag_ops->overhead; + if (new_master_mtu > mtu_limit) + return -ERANGE; + + /* If the master MTU isn't over limit, there's no need to check the CPU + * MTU, since that surely isn't either. + */ + cpu_mtu = largest_mtu; + + /* Start applying stuff */ + if (new_master_mtu != old_master_mtu) { + err = dev_set_mtu(master, new_master_mtu); + if (err < 0) + goto out_master_failed; + + /* We only need to propagate the MTU of the CPU port to + * upstream switches. 
+ */ + err = dsa_port_mtu_change(cpu_dp, cpu_mtu, true); + if (err) + goto out_cpu_failed; + } + + err = dsa_port_mtu_change(dp, new_mtu, false); + if (err) + goto out_port_failed; + + dev->mtu = new_mtu; + + dsa_bridge_mtu_normalization(dp); + + return 0; + +out_port_failed: + if (new_master_mtu != old_master_mtu) + dsa_port_mtu_change(cpu_dp, old_master_mtu - + cpu_dp->tag_ops->overhead, + true); +out_cpu_failed: + if (new_master_mtu != old_master_mtu) + dev_set_mtu(master, old_master_mtu); +out_master_failed: + return err; +} + static const struct ethtool_ops dsa_slave_ethtool_ops = { .get_drvinfo = dsa_slave_get_drvinfo, .get_regs_len = dsa_slave_get_regs_len, @@ -1235,6 +1574,7 @@ static const struct net_device_ops dsa_slave_netdev_ops = { .ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid, .ndo_get_devlink_port = dsa_slave_get_devlink_port, + .ndo_change_mtu = dsa_slave_change_mtu, }; static struct device_type dsa_type = { @@ -1245,7 +1585,8 @@ void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up) { const struct dsa_port *dp = dsa_to_port(ds, port); - phylink_mac_change(dp->pl, up); + if (dp->pl) + phylink_mac_change(dp->pl, up); } EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change); @@ -1405,7 +1746,10 @@ int dsa_slave_create(struct dsa_port *port) slave_dev->priv_flags |= IFF_NO_QUEUE; slave_dev->netdev_ops = &dsa_slave_netdev_ops; slave_dev->min_mtu = 0; - slave_dev->max_mtu = ETH_MAX_MTU; + if (ds->ops->port_max_mtu) + slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index); + else + slave_dev->max_mtu = ETH_MAX_MTU; SET_NETDEV_DEVTYPE(slave_dev, &dsa_type); SET_NETDEV_DEV(slave_dev, port->ds->dev); @@ -1423,6 +1767,15 @@ int dsa_slave_create(struct dsa_port *port) p->xmit = cpu_dp->tag_ops->xmit; port->slave = slave_dev; + rtnl_lock(); + ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN); + rtnl_unlock(); + if (ret && ret != -EOPNOTSUPP) { + dev_err(ds->dev, "error %d setting MTU on port %d\n", + ret, port->index); + goto out_free; + } + netif_carrier_off(slave_dev); ret = dsa_slave_phy_setup(slave_dev); @@ -1485,6 +1838,8 @@ static int dsa_slave_changeupper(struct net_device *dev, if (netif_is_bridge_master(info->upper_dev)) { if (info->linking) { err = dsa_port_bridge_join(dp, info->upper_dev); + if (!err) + dsa_bridge_mtu_normalization(dp); err = notifier_from_errno(err); } else { dsa_port_bridge_leave(dp, info->upper_dev); diff --git a/net/dsa/switch.c b/net/dsa/switch.c index df4abe897ed6..f3c32ff552b3 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -52,6 +52,40 @@ static int dsa_switch_ageing_time(struct dsa_switch *ds, return 0; } +static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port, + struct dsa_notifier_mtu_info *info) +{ + if (ds->index == info->sw_index) + return (port == info->port) || dsa_is_dsa_port(ds, port); + + if (!info->propagate_upstream) + return false; + + if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) + return true; + + return false; +} + +static int dsa_switch_mtu(struct dsa_switch *ds, + struct dsa_notifier_mtu_info *info) +{ + int port, ret; + + if (!ds->ops->port_change_mtu) + return -EOPNOTSUPP; + + for (port = 0; port < ds->num_ports; port++) { + if (dsa_switch_mtu_match(ds, port, info)) { + ret = ds->ops->port_change_mtu(ds, port, info->mtu); + if (ret) + return ret; + } + } + + return 0; +} + static int dsa_switch_bridge_join(struct dsa_switch *ds, struct dsa_notifier_bridge_info *info) { @@ -328,6 +362,9 @@ static int dsa_switch_event(struct 
notifier_block *nb, case DSA_NOTIFIER_VLAN_DEL: err = dsa_switch_vlan_del(ds, info); break; + case DSA_NOTIFIER_MTU: + err = dsa_switch_mtu(ds, info); + break; default: err = -EOPNOTSUPP; break; diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index 2fb6c26294b5..b97ad93d1c1a 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -298,47 +298,4 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, } EXPORT_SYMBOL_GPL(dsa_8021q_xmit); -/* In the DSA packet_type handler, skb->data points in the middle of the VLAN - * tag, after tpid and before tci. This is because so far, ETH_HLEN - * (DMAC, SMAC, EtherType) bytes were pulled. - * There are 2 bytes of VLAN tag left in skb->data, and upper - * layers expect the 'real' EtherType to be consumed as well. - * Coincidentally, a VLAN header is also of the same size as - * the number of bytes that need to be pulled. - * - * skb_mac_header skb->data - * | | - * v v - * | | | | | | | | | | | | | | | | | | | - * +-----------------------+-----------------------+-------+-------+-------+ - * | Destination MAC | Source MAC | TPID | TCI | EType | - * +-----------------------+-----------------------+-------+-------+-------+ - * ^ | | - * |<--VLAN_HLEN-->to <---VLAN_HLEN---> - * from | - * >>>>>>> v - * >>>>>>> | | | | | | | | | | | | | | | - * >>>>>>> +-----------------------+-----------------------+-------+ - * >>>>>>> | Destination MAC | Source MAC | EType | - * +-----------------------+-----------------------+-------+ - * ^ ^ - * (now part of | | - * skb->head) skb_mac_header skb->data - */ -struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb) -{ - u8 *from = skb_mac_header(skb); - u8 *dest = from + VLAN_HLEN; - - memmove(dest, from, ETH_HLEN - VLAN_HLEN); - skb_pull(skb, VLAN_HLEN); - skb_push(skb, ETH_HLEN); - skb_reset_mac_header(skb); - skb_reset_mac_len(skb); - skb_pull_rcsum(skb, ETH_HLEN); - - return skb; -} -EXPORT_SYMBOL_GPL(dsa_8021q_remove_header); - MODULE_LICENSE("GPL v2"); diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c index 9c3114179690..cc8512b5f9e2 100644 --- a/net/dsa/tag_brcm.c +++ b/net/dsa/tag_brcm.c @@ -140,8 +140,31 @@ static struct sk_buff *brcm_tag_rcv_ll(struct sk_buff *skb, /* Remove Broadcom tag and update checksum */ skb_pull_rcsum(skb, BRCM_TAG_LEN); + skb->offload_fwd_mark = 1; + return skb; } + +static int brcm_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto, + int *offset) +{ + /* We have been called on the DSA master network device after + * eth_type_trans() which pulled the Ethernet header already. + * Frames have one of these two layouts: + * ----------------------------------- + * | MAC DA | MAC SA | 4b tag | Type | DSA_TAG_PROTO_BRCM + * ----------------------------------- + * ----------------------------------- + * | 4b tag | MAC DA | MAC SA | Type | DSA_TAG_PROTO_BRCM_PREPEND + * ----------------------------------- + * skb->data points 2 bytes before the actual Ethernet type field and + * we have an offset of 4bytes between where skb->data and where the + * payload starts. 
+ */ + *offset = BRCM_TAG_LEN; + *proto = ((__be16 *)skb->data)[1]; + return 0; +} #endif #if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM) @@ -177,6 +200,7 @@ static const struct dsa_device_ops brcm_netdev_ops = { .xmit = brcm_tag_xmit, .rcv = brcm_tag_rcv, .overhead = BRCM_TAG_LEN, + .flow_dissect = brcm_tag_flow_dissect, }; DSA_TAG_DRIVER(brcm_netdev_ops); @@ -205,6 +229,7 @@ static const struct dsa_device_ops brcm_prepend_netdev_ops = { .xmit = brcm_tag_xmit_prepend, .rcv = brcm_tag_rcv_prepend, .overhead = BRCM_TAG_LEN, + .flow_dissect = brcm_tag_flow_dissect, }; DSA_TAG_DRIVER(brcm_prepend_netdev_ops); diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c index 8e3e7283d430..59de1315100f 100644 --- a/net/dsa/tag_ocelot.c +++ b/net/dsa/tag_ocelot.c @@ -153,7 +153,8 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb, memset(injection, 0, OCELOT_TAG_LEN); - src = dsa_upstream_port(ds, port); + /* Set the source port as the CPU port module and not the NPI port */ + src = ocelot->num_phys_ports; dest = BIT(port); bypass = true; qos_class = skb->priority; diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c index 5366ea430349..d553bf36bd41 100644 --- a/net/dsa/tag_sja1105.c +++ b/net/dsa/tag_sja1105.c @@ -250,14 +250,14 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb, { struct sja1105_meta meta = {0}; int source_port, switch_id; - struct vlan_ethhdr *hdr; + struct ethhdr *hdr; u16 tpid, vid, tci; bool is_link_local; bool is_tagged; bool is_meta; - hdr = vlan_eth_hdr(skb); - tpid = ntohs(hdr->h_vlan_proto); + hdr = eth_hdr(skb); + tpid = ntohs(hdr->h_proto); is_tagged = (tpid == ETH_P_SJA1105); is_link_local = sja1105_is_link_local(skb); is_meta = sja1105_is_meta_frame(skb); @@ -266,7 +266,12 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb, if (is_tagged) { /* Normal traffic path. */ - tci = ntohs(hdr->h_vlan_TCI); + skb_push_rcsum(skb, ETH_HLEN); + __skb_vlan_pop(skb, &tci); + skb_pull_rcsum(skb, ETH_HLEN); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + vid = tci & VLAN_VID_MASK; source_port = dsa_8021q_rx_source_port(vid); switch_id = dsa_8021q_rx_switch_id(vid); @@ -295,12 +300,6 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb, return NULL; } - /* Delete/overwrite fake VLAN header, DSA expects to not find - * it there, see dsa_switch_rcv: skb_push(skb, ETH_HLEN). - */ - if (is_tagged) - skb = dsa_8021q_remove_header(skb); - return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local, is_meta); } |
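Driver-side context for the MTU changes above: the core now calls two new dsa_switch_ops hooks, port_change_mtu() (via the DSA_NOTIFIER_MTU notifier and dsa_slave_change_mtu()) and port_max_mtu() (to cap the slave netdev's max_mtu), and honours a ds->mtu_enforcement_ingress flag in dsa_bridge_mtu_normalization(). The sketch below is not part of this diff; it is a minimal, hypothetical switch driver showing how those hooks could be wired up. The foo_* names and the FOO_MAX_LEN register layout are invented for illustration.

/* Hypothetical per-switch private data; only what the sketch needs. */
struct foo_priv {
	void __iomem *base;
};

/* Assumed per-port "maximum frame length" register layout, not a real device. */
#define FOO_MAX_LEN(port)	(0x100 + (port) * 4)

/* new_mtu is the L2 payload size seen by the stack, so account for the
 * Ethernet header and FCS when programming the hardware limit.
 */
static int foo_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct foo_priv *priv = ds->priv;

	writel(new_mtu + ETH_HLEN + ETH_FCS_LEN,
	       priv->base + FOO_MAX_LEN(port));

	return 0;
}

/* Report the largest MTU the port can carry; the core uses this for the
 * slave netdev's max_mtu. 9000-byte jumbo support is assumed here.
 */
static int foo_port_max_mtu(struct dsa_switch *ds, int port)
{
	return 9000 - ETH_HLEN - ETH_FCS_LEN;
}

static const struct dsa_switch_ops foo_switch_ops = {
	/* ...mandatory ops (get_tag_protocol, setup, ...) omitted... */
	.port_change_mtu	= foo_port_change_mtu,
	.port_max_mtu		= foo_port_max_mtu,
};

A driver whose ports cannot forward frames larger than their own ingress MTU would additionally set ds->mtu_enforcement_ingress = true in its setup path, so that dsa_bridge_mtu_normalization() keeps every user port of the same bridge at a common MTU, as implemented in slave.c above.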