Diffstat (limited to 'drivers/net/ethernet/mellanox')
77 files changed, 7933 insertions, 2554 deletions
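The series below has one dominant theme: the mlx4 Ethernet driver stops carrying XDP forwarding rings as extra entries at the tail of a single TX ring array and instead indexes every TX resource by ring type. A minimal sketch of the resulting layout, condensed from the enum cq_type hunk in mlx4_en.h and the t/i double loops in en_netdev.c further down (the *_sketch names are illustrative only; the real structures carry many more fields):

enum cq_type {
        /* keep tx types first */
        TX,
        TX_XDP,
#define MLX4_EN_NUM_TX_TYPES (TX_XDP + 1)
        RX,
};

struct mlx4_en_tx_ring;                 /* opaque in this sketch */
struct mlx4_en_cq;                      /* opaque in this sketch */

struct mlx4_en_priv_sketch {
        /* one ring/CQ array per TX type, instead of one flat array
         * with the XDP rings appended after the regular ones
         */
        struct mlx4_en_tx_ring **tx_ring[MLX4_EN_NUM_TX_TYPES];
        struct mlx4_en_cq **tx_cq[MLX4_EN_NUM_TX_TYPES];
        int tx_ring_num[MLX4_EN_NUM_TX_TYPES];
};

/* code that used to walk a flat 0..tx_ring_num range now nests a
 * loop over the type, exactly as mlx4_en_free_resources() does below
 */
static void walk_tx_rings(struct mlx4_en_priv_sketch *priv)
{
        int t, i;

        for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
                for (i = 0; i < priv->tx_ring_num[t]; i++)
                        (void)priv->tx_ring[t][i];      /* visit ring */
}

With this layout an XDP forwarding ring is simply tx_ring[TX_XDP][i], one per RX ring, which is why mlx4_xdp_set() below computes xdp_ring_num as priv->rx_ring_num (or 0 with no program attached) and the old xdp_index re-basing arithmetic disappears from en_cq.c and en_rx.c.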
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig index 5098e7f21987..22b1cc012bc9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig @@ -7,7 +7,7 @@ config MLX4_EN depends on MAY_USE_DEVLINK depends on PCI select MLX4_CORE - select PTP_1588_CLOCK + imply PTP_1588_CLOCK ---help--- This driver supports Mellanox Technologies ConnectX Ethernet devices. diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index e36bebcab3f2..a49072b4fa52 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2679,15 +2679,13 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev) if (!mailbox) return ERR_PTR(-ENOMEM); - mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL, - &mailbox->dma); + mailbox->buf = pci_pool_zalloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL, + &mailbox->dma); if (!mailbox->buf) { kfree(mailbox); return ERR_PTR(-ENOMEM); } - memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE); - return mailbox; } EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c index a5fc46bbcbe2..015198c14fa8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -38,7 +38,7 @@ /* mlx4_en_read_clock - read raw cycle counter (to be used by time counter) */ -static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc) +static u64 mlx4_en_read_clock(const struct cyclecounter *tc) { struct mlx4_en_dev *mdev = container_of(tc, struct mlx4_en_dev, cycles); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index e3be7e44ff51..09dd3776db76 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -65,7 +65,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv, cq->buf_size = cq->size * mdev->dev->caps.cqe_size; cq->ring = ring; - cq->is_tx = mode; + cq->type = mode; cq->vector = mdev->dev->caps.num_comp_vectors; /* Allocate HW buffers on provided NUMA node. @@ -104,7 +104,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, *cq->mcq.arm_db = 0; memset(cq->buf, 0, cq->buf_size); - if (cq->is_tx == RX) { + if (cq->type == RX) { if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector)) { cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask); @@ -127,25 +127,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, /* For TX we use the same irq per ring we assigned for the RX */ struct mlx4_en_cq *rx_cq; - int xdp_index; - - /* The xdp tx irq must align with the rx ring that forwards to - * it, so reindex these from 0. This should only happen when - * tx_ring_num is not a multiple of rx_ring_num. 
- */ - xdp_index = (priv->xdp_ring_num - priv->tx_ring_num) + cq_idx; - if (xdp_index >= 0) - cq_idx = xdp_index; + cq_idx = cq_idx % priv->rx_ring_num; rx_cq = priv->rx_cq[cq_idx]; cq->vector = rx_cq->vector; } - if (!cq->is_tx) + if (cq->type == RX) cq->size = priv->rx_ring[cq->ring]->actual_size; - if ((cq->is_tx && priv->hwtstamp_config.tx_type) || - (!cq->is_tx && priv->hwtstamp_config.rx_filter)) + if ((cq->type != RX && priv->hwtstamp_config.tx_type) || + (cq->type == RX && priv->hwtstamp_config.rx_filter)) timestamp_en = 1; err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, @@ -154,10 +146,10 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, if (err) goto free_eq; - cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq; + cq->mcq.comp = cq->type != RX ? mlx4_en_tx_irq : mlx4_en_rx_irq; cq->mcq.event = mlx4_en_cq_event; - if (cq->is_tx) + if (cq->type != RX) netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq, NAPI_POLL_WEIGHT); else @@ -181,7 +173,7 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) && - cq->is_tx == RX) + cq->type == RX) mlx4_release_eq(priv->mdev->dev, cq->vector); cq->vector = 0; cq->buf_size = 0; @@ -193,10 +185,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) { napi_disable(&cq->napi); - if (!cq->is_tx) { - napi_hash_del(&cq->napi); - synchronize_rcu(); - } netif_napi_del(&cq->napi); mlx4_cq_free(priv->mdev->dev, &cq->mcq); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index bdda17d2ea0f..d9c9f86a30df 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -49,16 +49,19 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv) { - int i; + int i, t; int err = 0; - for (i = 0; i < priv->tx_ring_num; i++) { - priv->tx_cq[i]->moder_cnt = priv->tx_frames; - priv->tx_cq[i]->moder_time = priv->tx_usecs; - if (priv->port_up) { - err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]); - if (err) - return err; + for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) { + for (i = 0; i < priv->tx_ring_num[t]; i++) { + priv->tx_cq[t][i]->moder_cnt = priv->tx_frames; + priv->tx_cq[t][i]->moder_time = priv->tx_usecs; + if (priv->port_up) { + err = mlx4_en_set_cq_moder(priv, + priv->tx_cq[t][i]); + if (err) + return err; + } } } @@ -192,6 +195,10 @@ static const char main_strings[][ETH_GSTRING_LEN] = { "tx_prio_7_packets", "tx_prio_7_bytes", "tx_novlan_packets", "tx_novlan_bytes", + /* xdp statistics */ + "rx_xdp_drop", + "rx_xdp_tx", + "rx_xdp_tx_full", }; static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= { @@ -336,8 +343,8 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset) switch (sset) { case ETH_SS_STATS: return bitmap_iterator_count(&it) + - (priv->tx_ring_num * 2) + - (priv->rx_ring_num * 3); + (priv->tx_ring_num[TX] * 2) + + (priv->rx_ring_num * (3 + NUM_XDP_STATS)); case ETH_SS_TEST: return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2; @@ -360,6 +367,8 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, spin_lock_bh(&priv->stats_lock); + mlx4_en_fold_software_stats(dev); + for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) 
data[index++] = ((unsigned long *)&dev->stats)[i]; @@ -397,14 +406,21 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, if (bitmap_iterator_test(&it)) data[index++] = ((unsigned long *)&priv->pkstats)[i]; - for (i = 0; i < priv->tx_ring_num; i++) { - data[index++] = priv->tx_ring[i]->packets; - data[index++] = priv->tx_ring[i]->bytes; + for (i = 0; i < NUM_XDP_STATS; i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = ((unsigned long *)&priv->xdp_stats)[i]; + + for (i = 0; i < priv->tx_ring_num[TX]; i++) { + data[index++] = priv->tx_ring[TX][i]->packets; + data[index++] = priv->tx_ring[TX][i]->bytes; } for (i = 0; i < priv->rx_ring_num; i++) { data[index++] = priv->rx_ring[i]->packets; data[index++] = priv->rx_ring[i]->bytes; data[index++] = priv->rx_ring[i]->dropped; + data[index++] = priv->rx_ring[i]->xdp_drop; + data[index++] = priv->rx_ring[i]->xdp_tx; + data[index++] = priv->rx_ring[i]->xdp_tx_full; } spin_unlock_bh(&priv->stats_lock); @@ -467,7 +483,13 @@ static void mlx4_en_get_strings(struct net_device *dev, strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[strings]); - for (i = 0; i < priv->tx_ring_num; i++) { + for (i = 0; i < NUM_XDP_STATS; i++, strings++, + bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + strcpy(data + (index++) * ETH_GSTRING_LEN, + main_strings[strings]); + + for (i = 0; i < priv->tx_ring_num[TX]; i++) { sprintf(data + (index++) * ETH_GSTRING_LEN, "tx%d_packets", i); sprintf(data + (index++) * ETH_GSTRING_LEN, @@ -480,6 +502,12 @@ static void mlx4_en_get_strings(struct net_device *dev, "rx%d_bytes", i); sprintf(data + (index++) * ETH_GSTRING_LEN, "rx%d_dropped", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_xdp_drop", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_xdp_tx", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_xdp_tx_full", i); } break; case ETH_SS_PRIV_FLAGS: @@ -1060,7 +1088,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev, if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size) && - tx_size == priv->tx_ring[0]->size) + tx_size == priv->tx_ring[TX][0]->size) return 0; tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); @@ -1105,7 +1133,7 @@ static void mlx4_en_get_ringparam(struct net_device *dev, param->tx_max_pending = MLX4_EN_MAX_TX_SIZE; param->rx_pending = priv->port_up ? 
priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size; - param->tx_pending = priv->tx_ring[0]->size; + param->tx_pending = priv->tx_ring[TX][0]->size; } static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev) @@ -1710,7 +1738,7 @@ static void mlx4_en_get_channels(struct net_device *dev, channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP; channel->rx_count = priv->rx_ring_num; - channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP; + channel->tx_count = priv->tx_ring_num[TX] / MLX4_EN_NUM_UP; } static int mlx4_en_set_channels(struct net_device *dev, @@ -1721,6 +1749,7 @@ static int mlx4_en_set_channels(struct net_device *dev, struct mlx4_en_port_profile new_prof; struct mlx4_en_priv *tmp; int port_up = 0; + int xdp_count; int err = 0; if (channel->other_count || channel->combined_count || @@ -1729,20 +1758,25 @@ static int mlx4_en_set_channels(struct net_device *dev, !channel->tx_count || !channel->rx_count) return -EINVAL; - if (channel->tx_count * MLX4_EN_NUM_UP <= priv->xdp_ring_num) { - en_err(priv, "Minimum %d tx channels required with XDP on\n", - priv->xdp_ring_num / MLX4_EN_NUM_UP + 1); - return -EINVAL; - } - tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) return -ENOMEM; mutex_lock(&mdev->state_lock); + xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0; + if (channel->tx_count * MLX4_EN_NUM_UP + xdp_count > MAX_TX_RINGS) { + err = -EINVAL; + en_err(priv, + "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n", + channel->tx_count * MLX4_EN_NUM_UP + xdp_count, + MAX_TX_RINGS); + goto out; + } + memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); new_prof.num_tx_rings_p_up = channel->tx_count; - new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP; + new_prof.tx_ring_num[TX] = channel->tx_count * MLX4_EN_NUM_UP; + new_prof.tx_ring_num[TX_XDP] = xdp_count; new_prof.rx_ring_num = channel->rx_count; err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); @@ -1756,14 +1790,13 @@ static int mlx4_en_set_channels(struct net_device *dev, mlx4_en_safe_replace_resources(priv, tmp); - netif_set_real_num_tx_queues(dev, priv->tx_ring_num - - priv->xdp_ring_num); + netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]); netif_set_real_num_rx_queues(dev, priv->rx_ring_num); if (dev->num_tc) mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP); - en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num); + en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]); en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num); if (port_up) { @@ -1774,8 +1807,8 @@ static int mlx4_en_set_channels(struct net_device *dev, err = mlx4_en_moderation_update(priv); out: - kfree(tmp); mutex_unlock(&mdev->state_lock); + kfree(tmp); return err; } @@ -1823,11 +1856,15 @@ static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags) int ret = 0; if (bf_enabled_new != bf_enabled_old) { + int t; + if (bf_enabled_new) { bool bf_supported = true; - for (i = 0; i < priv->tx_ring_num; i++) - bf_supported &= priv->tx_ring[i]->bf_alloced; + for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) + for (i = 0; i < priv->tx_ring_num[t]; i++) + bf_supported &= + priv->tx_ring[t][i]->bf_alloced; if (!bf_supported) { en_err(priv, "BlueFlame is not supported\n"); @@ -1839,8 +1876,10 @@ static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags) priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME; } - for (i = 0; i < priv->tx_ring_num; i++) - priv->tx_ring[i]->bf_enabled = bf_enabled_new; + for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) + for (i = 0; i < priv->tx_ring_num[t]; i++) + 
priv->tx_ring[t][i]->bf_enabled = + bf_enabled_new; en_info(priv, "BlueFlame %s\n", bf_enabled_new ? "Enabled" : "Disabled"); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index bf7628db098a..36a7a54bbb82 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -169,7 +169,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) params->prof[i].tx_ppp = pfctx; params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; - params->prof[i].tx_ring_num = params->num_tx_rings_p_up * + params->prof[i].tx_ring_num[TX] = params->num_tx_rings_p_up * MLX4_EN_NUM_UP; params->prof[i].rss_rings = 0; params->prof[i].inline_thold = inline_thold; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 3a47e83d3e07..bcd955339058 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -51,6 +51,9 @@ #include "mlx4_en.h" #include "en_port.h" +#define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \ + XDP_PACKET_HEADROOM)) + int mlx4_en_setup_tc(struct net_device *dev, u8 up) { struct mlx4_en_priv *priv = netdev_priv(dev); @@ -129,6 +132,9 @@ static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto) } }; +/* Must not acquire state_lock, as its corresponding work_sync + * is done under it. + */ static void mlx4_en_filter_work(struct work_struct *work) { struct mlx4_en_filter *filter = container_of(work, @@ -1214,8 +1220,8 @@ static void mlx4_en_netpoll(struct net_device *dev) struct mlx4_en_cq *cq; int i; - for (i = 0; i < priv->tx_ring_num; i++) { - cq = priv->tx_cq[i]; + for (i = 0; i < priv->tx_ring_num[TX]; i++) { + cq = priv->tx_cq[TX][i]; napi_schedule(&cq->napi); } } @@ -1299,12 +1305,14 @@ static void mlx4_en_tx_timeout(struct net_device *dev) if (netif_msg_timer(priv)) en_warn(priv, "Tx timeout called on port:%d\n", priv->port); - for (i = 0; i < priv->tx_ring_num; i++) { + for (i = 0; i < priv->tx_ring_num[TX]; i++) { + struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i]; + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i))) continue; en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n", - i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn, - priv->tx_ring[i]->cons, priv->tx_ring[i]->prod); + i, tx_ring->qpn, tx_ring->sp_cqn, + tx_ring->cons, tx_ring->prod); } priv->port_stats.tx_timeout++; @@ -1319,6 +1327,7 @@ mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) struct mlx4_en_priv *priv = netdev_priv(dev); spin_lock_bh(&priv->stats_lock); + mlx4_en_fold_software_stats(dev); netdev_stats_to_stats64(stats, &dev->stats); spin_unlock_bh(&priv->stats_lock); @@ -1328,7 +1337,7 @@ mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) { struct mlx4_en_cq *cq; - int i; + int i, t; /* If we haven't received a specific coalescing setting * (module param), we set the moderation parameters as follows: @@ -1353,10 +1362,12 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) priv->last_moder_bytes[i] = 0; } - for (i = 0; i < priv->tx_ring_num; i++) { - cq = priv->tx_cq[i]; - cq->moder_cnt = priv->tx_frames; - cq->moder_time = priv->tx_usecs; + for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) { + for (i = 0; i < 
priv->tx_ring_num[t]; i++) { + cq = priv->tx_cq[t][i]; + cq->moder_cnt = priv->tx_frames; + cq->moder_time = priv->tx_usecs; + } } /* Reset auto-moderation params */ @@ -1387,10 +1398,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) return; for (ring = 0; ring < priv->rx_ring_num; ring++) { - spin_lock_bh(&priv->stats_lock); - rx_packets = priv->rx_ring[ring]->packets; - rx_bytes = priv->rx_ring[ring]->bytes; - spin_unlock_bh(&priv->stats_lock); + rx_packets = READ_ONCE(priv->rx_ring[ring]->packets); + rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes); rx_pkt_diff = ((unsigned long) (rx_packets - priv->last_moder_packets[ring])); @@ -1526,19 +1535,13 @@ static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv, int tx_ring_idx) { - struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[tx_ring_idx]; - int rr_index; + struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx]; + int rr_index = tx_ring_idx; - rr_index = (priv->xdp_ring_num - priv->tx_ring_num) + tx_ring_idx; - if (rr_index >= 0) { - tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc; - tx_ring->recycle_ring = priv->rx_ring[rr_index]; - en_dbg(DRV, priv, - "Set tx_ring[%d]->recycle_ring = rx_ring[%d]\n", - tx_ring_idx, rr_index); - } else { - tx_ring->recycle_ring = NULL; - } + tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc; + tx_ring->recycle_ring = priv->rx_ring[rr_index]; + en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n", + TX_XDP, tx_ring_idx, rr_index); } int mlx4_en_start_port(struct net_device *dev) @@ -1548,9 +1551,8 @@ int mlx4_en_start_port(struct net_device *dev) struct mlx4_en_cq *cq; struct mlx4_en_tx_ring *tx_ring; int rx_index = 0; - int tx_index = 0; int err = 0; - int i; + int i, t; int j; u8 mc_list[16] = {0}; @@ -1635,43 +1637,51 @@ int mlx4_en_start_port(struct net_device *dev) goto rss_err; /* Configure tx cq's and rings */ - for (i = 0; i < priv->tx_ring_num; i++) { - /* Configure cq */ - cq = priv->tx_cq[i]; - err = mlx4_en_activate_cq(priv, cq, i); - if (err) { - en_err(priv, "Failed allocating Tx CQ\n"); - goto tx_err; - } - err = mlx4_en_set_cq_moder(priv, cq); - if (err) { - en_err(priv, "Failed setting cq moderation parameters\n"); - mlx4_en_deactivate_cq(priv, cq); - goto tx_err; - } - en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i); - cq->buf->wqe_index = cpu_to_be16(0xffff); - - /* Configure ring */ - tx_ring = priv->tx_ring[i]; - err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, - i / priv->num_tx_rings_p_up); - if (err) { - en_err(priv, "Failed allocating Tx ring\n"); - mlx4_en_deactivate_cq(priv, cq); - goto tx_err; - } - tx_ring->tx_queue = netdev_get_tx_queue(dev, i); + for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) { + u8 num_tx_rings_p_up = t == TX ? 
priv->num_tx_rings_p_up : 1; - mlx4_en_init_recycle_ring(priv, i); + for (i = 0; i < priv->tx_ring_num[t]; i++) { + /* Configure cq */ + cq = priv->tx_cq[t][i]; + err = mlx4_en_activate_cq(priv, cq, i); + if (err) { + en_err(priv, "Failed allocating Tx CQ\n"); + goto tx_err; + } + err = mlx4_en_set_cq_moder(priv, cq); + if (err) { + en_err(priv, "Failed setting cq moderation parameters\n"); + mlx4_en_deactivate_cq(priv, cq); + goto tx_err; + } + en_dbg(DRV, priv, + "Resetting index of collapsed CQ:%d to -1\n", i); + cq->buf->wqe_index = cpu_to_be16(0xffff); + + /* Configure ring */ + tx_ring = priv->tx_ring[t][i]; + err = mlx4_en_activate_tx_ring(priv, tx_ring, + cq->mcq.cqn, + i / num_tx_rings_p_up); + if (err) { + en_err(priv, "Failed allocating Tx ring\n"); + mlx4_en_deactivate_cq(priv, cq); + goto tx_err; + } + if (t != TX_XDP) { + tx_ring->tx_queue = netdev_get_tx_queue(dev, i); + tx_ring->recycle_ring = NULL; + } else { + mlx4_en_init_recycle_ring(priv, i); + } - /* Arm CQ for TX completions */ - mlx4_en_arm_cq(priv, cq); + /* Arm CQ for TX completions */ + mlx4_en_arm_cq(priv, cq); - /* Set initial ownership of all Tx TXBBs to SW (1) */ - for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) - *((u32 *) (tx_ring->buf + j)) = 0xffffffff; - ++tx_index; + /* Set initial ownership of all Tx TXBBs to SW (1) */ + for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) + *((u32 *)(tx_ring->buf + j)) = 0xffffffff; + } } /* Configure port */ @@ -1746,9 +1756,18 @@ int mlx4_en_start_port(struct net_device *dev) return 0; tx_err: - while (tx_index--) { - mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]); - mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]); + if (t == MLX4_EN_NUM_TX_TYPES) { + t--; + i = priv->tx_ring_num[t]; + } + while (t >= 0) { + while (i--) { + mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]); + mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]); + } + if (!t--) + break; + i = priv->tx_ring_num[t]; } mlx4_en_destroy_drop_qp(priv); rss_err: @@ -1773,7 +1792,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_mc_list *mclist, *tmp; struct ethtool_flow_id *flow, *tmp_flow; - int i; + int i, t; u8 mc_list[16] = {0}; if (!priv->port_up) { @@ -1793,8 +1812,12 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) netif_tx_disable(dev); + spin_lock_bh(&priv->stats_lock); + mlx4_en_fold_software_stats(dev); /* Set port as not active */ priv->port_up = false; + spin_unlock_bh(&priv->stats_lock); + priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); /* Promsicuous mode */ @@ -1859,14 +1882,17 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) mlx4_en_destroy_drop_qp(priv); /* Free TX Rings */ - for (i = 0; i < priv->tx_ring_num; i++) { - mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]); - mlx4_en_deactivate_cq(priv, priv->tx_cq[i]); + for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { + for (i = 0; i < priv->tx_ring_num[t]; i++) { + mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]); + mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]); + } } msleep(10); - for (i = 0; i < priv->tx_ring_num; i++) - mlx4_en_free_tx_buf(dev, priv->tx_ring[i]); + for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) + for (i = 0; i < priv->tx_ring_num[t]; i++) + mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]); if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) mlx4_en_delete_rss_steer_rules(priv); @@ -1915,6 +1941,7 @@ static void mlx4_en_clear_stats(struct net_device *dev) { struct mlx4_en_priv *priv = 
netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_tx_ring **tx_ring; int i; if (!mlx4_is_slave(mdev->dev)) @@ -1932,15 +1959,16 @@ static void mlx4_en_clear_stats(struct net_device *dev) sizeof(priv->tx_priority_flowstats)); memset(&priv->pf_stats, 0, sizeof(priv->pf_stats)); - for (i = 0; i < priv->tx_ring_num; i++) { - priv->tx_ring[i]->bytes = 0; - priv->tx_ring[i]->packets = 0; - priv->tx_ring[i]->tx_csum = 0; - priv->tx_ring[i]->tx_dropped = 0; - priv->tx_ring[i]->queue_stopped = 0; - priv->tx_ring[i]->wake_queue = 0; - priv->tx_ring[i]->tso_packets = 0; - priv->tx_ring[i]->xmit_more = 0; + tx_ring = priv->tx_ring[TX]; + for (i = 0; i < priv->tx_ring_num[TX]; i++) { + tx_ring[i]->bytes = 0; + tx_ring[i]->packets = 0; + tx_ring[i]->tx_csum = 0; + tx_ring[i]->tx_dropped = 0; + tx_ring[i]->queue_stopped = 0; + tx_ring[i]->wake_queue = 0; + tx_ring[i]->tso_packets = 0; + tx_ring[i]->xmit_more = 0; } for (i = 0; i < priv->rx_ring_num; i++) { priv->rx_ring[i]->bytes = 0; @@ -1996,17 +2024,20 @@ static int mlx4_en_close(struct net_device *dev) static void mlx4_en_free_resources(struct mlx4_en_priv *priv) { - int i; + int i, t; #ifdef CONFIG_RFS_ACCEL priv->dev->rx_cpu_rmap = NULL; #endif - for (i = 0; i < priv->tx_ring_num; i++) { - if (priv->tx_ring && priv->tx_ring[i]) - mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); - if (priv->tx_cq && priv->tx_cq[i]) - mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); + for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { + for (i = 0; i < priv->tx_ring_num[t]; i++) { + if (priv->tx_ring[t] && priv->tx_ring[t][i]) + mlx4_en_destroy_tx_ring(priv, + &priv->tx_ring[t][i]); + if (priv->tx_cq[t] && priv->tx_cq[t][i]) + mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]); + } } for (i = 0; i < priv->rx_ring_num; i++) { @@ -2022,20 +2053,22 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv) static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) { struct mlx4_en_port_profile *prof = priv->prof; - int i; + int i, t; int node; /* Create tx Rings */ - for (i = 0; i < priv->tx_ring_num; i++) { - node = cpu_to_node(i % num_online_cpus()); - if (mlx4_en_create_cq(priv, &priv->tx_cq[i], - prof->tx_ring_size, i, TX, node)) - goto err; - - if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], - prof->tx_ring_size, TXBB_SIZE, - node, i)) - goto err; + for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { + for (i = 0; i < priv->tx_ring_num[t]; i++) { + node = cpu_to_node(i % num_online_cpus()); + if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i], + prof->tx_ring_size, i, t, node)) + goto err; + + if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i], + prof->tx_ring_size, + TXBB_SIZE, node, i)) + goto err; + } } /* Create rx Rings */ @@ -2067,31 +2100,28 @@ err: if (priv->rx_cq[i]) mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); } - for (i = 0; i < priv->tx_ring_num; i++) { - if (priv->tx_ring[i]) - mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); - if (priv->tx_cq[i]) - mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); + for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { + for (i = 0; i < priv->tx_ring_num[t]; i++) { + if (priv->tx_ring[t][i]) + mlx4_en_destroy_tx_ring(priv, + &priv->tx_ring[t][i]); + if (priv->tx_cq[t][i]) + mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]); + } } return -ENOMEM; } -static void mlx4_en_shutdown(struct net_device *dev) -{ - rtnl_lock(); - netif_device_detach(dev); - mlx4_en_close(dev); - rtnl_unlock(); -} static int mlx4_en_copy_priv(struct mlx4_en_priv *dst, struct mlx4_en_priv *src, struct mlx4_en_port_profile *prof) { + int t; + 
memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config, sizeof(dst->hwtstamp_config)); dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up; - dst->tx_ring_num = prof->tx_ring_num; dst->rx_ring_num = prof->rx_ring_num; dst->flags = prof->flags; dst->mdev = src->mdev; @@ -2101,33 +2131,50 @@ static int mlx4_en_copy_priv(struct mlx4_en_priv *dst, dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + DS_SIZE * MLX4_EN_MAX_RX_FRAGS); - dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS, - GFP_KERNEL); - if (!dst->tx_ring) - return -ENOMEM; + for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { + dst->tx_ring_num[t] = prof->tx_ring_num[t]; + if (!dst->tx_ring_num[t]) + continue; - dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS, - GFP_KERNEL); - if (!dst->tx_cq) { - kfree(dst->tx_ring); - return -ENOMEM; + dst->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) * + MAX_TX_RINGS, GFP_KERNEL); + if (!dst->tx_ring[t]) + goto err_free_tx; + + dst->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) * + MAX_TX_RINGS, GFP_KERNEL); + if (!dst->tx_cq[t]) { + kfree(dst->tx_ring[t]); + goto err_free_tx; + } } + return 0; + +err_free_tx: + while (t--) { + kfree(dst->tx_ring[t]); + kfree(dst->tx_cq[t]); + } + return -ENOMEM; } static void mlx4_en_update_priv(struct mlx4_en_priv *dst, struct mlx4_en_priv *src) { + int t; memcpy(dst->rx_ring, src->rx_ring, sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num); memcpy(dst->rx_cq, src->rx_cq, sizeof(struct mlx4_en_cq *) * src->rx_ring_num); memcpy(&dst->hwtstamp_config, &src->hwtstamp_config, sizeof(dst->hwtstamp_config)); - dst->tx_ring_num = src->tx_ring_num; + for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { + dst->tx_ring_num[t] = src->tx_ring_num[t]; + dst->tx_ring[t] = src->tx_ring[t]; + dst->tx_cq[t] = src->tx_cq[t]; + } dst->rx_ring_num = src->rx_ring_num; - dst->tx_ring = src->tx_ring; - dst->tx_cq = src->tx_cq; memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile)); } @@ -2135,14 +2182,18 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, struct mlx4_en_priv *tmp, struct mlx4_en_port_profile *prof) { + int t; + mlx4_en_copy_priv(tmp, priv, prof); if (mlx4_en_alloc_resources(tmp)) { en_warn(priv, "%s: Resource allocation failed, using previous configuration\n", __func__); - kfree(tmp->tx_ring); - kfree(tmp->tx_cq); + for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { + kfree(tmp->tx_ring[t]); + kfree(tmp->tx_cq[t]); + } return -ENOMEM; } return 0; @@ -2159,8 +2210,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; - bool shutdown = mdev->dev->persist->interface_state & - MLX4_INTERFACE_STATE_SHUTDOWN; + int t; en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); @@ -2168,10 +2218,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev) if (priv->registered) { devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev, priv->port)); - if (shutdown) - mlx4_en_shutdown(dev); - else - unregister_netdev(dev); + unregister_netdev(dev); } if (priv->allocated) @@ -2189,19 +2236,33 @@ void mlx4_en_destroy_netdev(struct net_device *dev) mutex_lock(&mdev->state_lock); mdev->pndev[priv->port] = NULL; mdev->upper[priv->port] = NULL; - mutex_unlock(&mdev->state_lock); #ifdef CONFIG_RFS_ACCEL mlx4_en_cleanup_filters(priv); #endif mlx4_en_free_resources(priv); + mutex_unlock(&mdev->state_lock); + + for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { + kfree(priv->tx_ring[t]); + kfree(priv->tx_cq[t]); + } + + 
free_netdev(dev); +} + +static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); - kfree(priv->tx_ring); - kfree(priv->tx_cq); + if (mtu > MLX4_EN_MAX_XDP_MTU) { + en_err(priv, "mtu:%d > max:%d when XDP prog is attached\n", + mtu, MLX4_EN_MAX_XDP_MTU); + return false; + } - if (!shutdown) - free_netdev(dev); + return true; } static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) @@ -2213,15 +2274,10 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", dev->mtu, new_mtu); - if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { - en_err(priv, "Bad MTU size:%d.\n", new_mtu); - return -EPERM; - } - if (priv->xdp_ring_num && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) { - en_err(priv, "MTU size:%d requires frags but XDP running\n", - new_mtu); - return -EOPNOTSUPP; - } + if (priv->tx_ring_num[TX_XDP] && + !mlx4_en_check_xdp_mtu(dev, new_mtu)) + return -ENOTSUPP; + dev->mtu = new_mtu; if (netif_running(dev)) { @@ -2608,7 +2664,7 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb, static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate) { struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index]; + struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index]; struct mlx4_update_qp_params params; int err; @@ -2636,18 +2692,21 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_port_profile new_prof; struct bpf_prog *old_prog; + struct mlx4_en_priv *tmp; + int tx_changed = 0; int xdp_ring_num; int port_up = 0; int err; int i; - xdp_ring_num = prog ? ALIGN(priv->rx_ring_num, MLX4_EN_NUM_UP) : 0; + xdp_ring_num = prog ? priv->rx_ring_num : 0; /* No need to reconfigure buffers when simply swapping the * program for a new one. 
*/ - if (priv->xdp_ring_num == xdp_ring_num) { + if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) { if (prog) { prog = bpf_prog_add(prog, priv->rx_ring_num - 1); if (IS_ERR(prog)) @@ -2666,33 +2725,47 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) return 0; } - if (priv->num_frags > 1) { - en_err(priv, "Cannot set XDP if MTU requires multiple frags\n"); + if (!mlx4_en_check_xdp_mtu(dev, dev->mtu)) return -EOPNOTSUPP; - } - if (priv->tx_ring_num < xdp_ring_num + MLX4_EN_NUM_UP) { - en_err(priv, - "Minimum %d tx channels required to run XDP\n", - (xdp_ring_num + MLX4_EN_NUM_UP) / MLX4_EN_NUM_UP); - return -EINVAL; - } + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; if (prog) { prog = bpf_prog_add(prog, priv->rx_ring_num - 1); - if (IS_ERR(prog)) - return PTR_ERR(prog); + if (IS_ERR(prog)) { + err = PTR_ERR(prog); + goto out; + } } mutex_lock(&mdev->state_lock); + memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); + new_prof.tx_ring_num[TX_XDP] = xdp_ring_num; + + if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) { + tx_changed = 1; + new_prof.tx_ring_num[TX] = + MAX_TX_RINGS - ALIGN(xdp_ring_num, MLX4_EN_NUM_UP); + en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n"); + } + + err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); + if (err) { + if (prog) + bpf_prog_sub(prog, priv->rx_ring_num - 1); + goto unlock_out; + } + if (priv->port_up) { port_up = 1; mlx4_en_stop_port(dev, 1); } - priv->xdp_ring_num = xdp_ring_num; - netif_set_real_num_tx_queues(dev, priv->tx_ring_num - - priv->xdp_ring_num); + mlx4_en_safe_replace_resources(priv, tmp); + if (tx_changed) + netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]); for (i = 0; i < priv->rx_ring_num; i++) { old_prog = rcu_dereference_protected( @@ -2712,15 +2785,18 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) } } +unlock_out: mutex_unlock(&mdev->state_lock); - return 0; +out: + kfree(tmp); + return err; } static bool mlx4_xdp_attached(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); - return !!priv->xdp_ring_num; + return !!priv->tx_ring_num[TX_XDP]; } static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp) @@ -3057,6 +3133,10 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, if (!mlx4_is_slave(dev)) bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS); + last_i += NUM_PKT_STATS; + + bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS); + last_i += NUM_XDP_STATS; } int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, @@ -3064,7 +3144,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, { struct net_device *dev; struct mlx4_en_priv *priv; - int i; + int i, t; int err; dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), @@ -3072,7 +3152,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, if (dev == NULL) return -ENOMEM; - netif_set_real_num_tx_queues(dev, prof->tx_ring_num); + netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]); netif_set_real_num_rx_queues(dev, prof->rx_ring_num); SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev); @@ -3109,21 +3189,27 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | MLX4_WQE_CTRL_SOLICITED); priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; - priv->tx_ring_num = prof->tx_ring_num; priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK; netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key)); - 
priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS, - GFP_KERNEL); - if (!priv->tx_ring) { - err = -ENOMEM; - goto out; - } - priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS, - GFP_KERNEL); - if (!priv->tx_cq) { - err = -ENOMEM; - goto out; + for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { + priv->tx_ring_num[t] = prof->tx_ring_num[t]; + if (!priv->tx_ring_num[t]) + continue; + + priv->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) * + MAX_TX_RINGS, GFP_KERNEL); + if (!priv->tx_ring[t]) { + err = -ENOMEM; + goto err_free_tx; + } + priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) * + MAX_TX_RINGS, GFP_KERNEL); + if (!priv->tx_cq[t]) { + kfree(priv->tx_ring[t]); + err = -ENOMEM; + goto out; + } } priv->rx_ring_num = prof->rx_ring_num; priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0; @@ -3206,7 +3292,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, else dev->netdev_ops = &mlx4_netdev_ops; dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; - netif_set_real_num_tx_queues(dev, priv->tx_ring_num); + netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]); netif_set_real_num_rx_queues(dev, priv->rx_ring_num); dev->ethtool_ops = &mlx4_en_ethtool_ops; @@ -3296,13 +3382,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; } + /* MTU range: 46 - hw-specific max */ + dev->min_mtu = MLX4_EN_MIN_MTU; + dev->max_mtu = priv->max_mtu; + mdev->pndev[port] = dev; mdev->upper[port] = NULL; netif_carrier_off(dev); mlx4_en_set_default_moderation(priv); - en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); + en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]); en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); mlx4_en_update_loopback_state(priv->dev, priv->dev->features); @@ -3362,6 +3452,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, return 0; +err_free_tx: + while (t--) { + kfree(priv->tx_ring[t]); + kfree(priv->tx_cq[t]); + } out: mlx4_en_destroy_netdev(dev); return err; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 59473a0ebcdf..9166d90e7328 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -147,6 +147,39 @@ static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num) return ret; } +void mlx4_en_fold_software_stats(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + unsigned long packets, bytes; + int i; + + if (!priv->port_up || mlx4_is_master(mdev->dev)) + return; + + packets = 0; + bytes = 0; + for (i = 0; i < priv->rx_ring_num; i++) { + const struct mlx4_en_rx_ring *ring = priv->rx_ring[i]; + + packets += READ_ONCE(ring->packets); + bytes += READ_ONCE(ring->bytes); + } + dev->stats.rx_packets = packets; + dev->stats.rx_bytes = bytes; + + packets = 0; + bytes = 0; + for (i = 0; i < priv->tx_ring_num[TX]; i++) { + const struct mlx4_en_tx_ring *ring = priv->tx_ring[TX][i]; + + packets += READ_ONCE(ring->packets); + bytes += READ_ONCE(ring->bytes); + } + dev->stats.tx_packets = packets; + dev->stats.tx_bytes = bytes; +} + int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) { struct mlx4_counter tmp_counter_stats; @@ -159,6 +192,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) u64 in_mod = reset << 8 | port; int err; int i, counter_index; + unsigned long sw_tx_dropped = 0; unsigned 
long sw_rx_dropped = 0; mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); @@ -174,40 +208,42 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) spin_lock_bh(&priv->stats_lock); - stats->rx_packets = 0; - stats->rx_bytes = 0; + mlx4_en_fold_software_stats(dev); + priv->port_stats.rx_chksum_good = 0; priv->port_stats.rx_chksum_none = 0; priv->port_stats.rx_chksum_complete = 0; + priv->xdp_stats.rx_xdp_drop = 0; + priv->xdp_stats.rx_xdp_tx = 0; + priv->xdp_stats.rx_xdp_tx_full = 0; for (i = 0; i < priv->rx_ring_num; i++) { - stats->rx_packets += priv->rx_ring[i]->packets; - stats->rx_bytes += priv->rx_ring[i]->bytes; - sw_rx_dropped += priv->rx_ring[i]->dropped; - priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok; - priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none; - priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete; + const struct mlx4_en_rx_ring *ring = priv->rx_ring[i]; + + sw_rx_dropped += READ_ONCE(ring->dropped); + priv->port_stats.rx_chksum_good += READ_ONCE(ring->csum_ok); + priv->port_stats.rx_chksum_none += READ_ONCE(ring->csum_none); + priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete); + priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop); + priv->xdp_stats.rx_xdp_tx += READ_ONCE(ring->xdp_tx); + priv->xdp_stats.rx_xdp_tx_full += READ_ONCE(ring->xdp_tx_full); } - stats->tx_packets = 0; - stats->tx_bytes = 0; - stats->tx_dropped = 0; priv->port_stats.tx_chksum_offload = 0; priv->port_stats.queue_stopped = 0; priv->port_stats.wake_queue = 0; priv->port_stats.tso_packets = 0; priv->port_stats.xmit_more = 0; - for (i = 0; i < priv->tx_ring_num; i++) { - const struct mlx4_en_tx_ring *ring = priv->tx_ring[i]; - - stats->tx_packets += ring->packets; - stats->tx_bytes += ring->bytes; - stats->tx_dropped += ring->tx_dropped; - priv->port_stats.tx_chksum_offload += ring->tx_csum; - priv->port_stats.queue_stopped += ring->queue_stopped; - priv->port_stats.wake_queue += ring->wake_queue; - priv->port_stats.tso_packets += ring->tso_packets; - priv->port_stats.xmit_more += ring->xmit_more; + for (i = 0; i < priv->tx_ring_num[TX]; i++) { + const struct mlx4_en_tx_ring *ring = priv->tx_ring[TX][i]; + + sw_tx_dropped += READ_ONCE(ring->tx_dropped); + priv->port_stats.tx_chksum_offload += READ_ONCE(ring->tx_csum); + priv->port_stats.queue_stopped += READ_ONCE(ring->queue_stopped); + priv->port_stats.wake_queue += READ_ONCE(ring->wake_queue); + priv->port_stats.tso_packets += READ_ONCE(ring->tso_packets); + priv->port_stats.xmit_more += READ_ONCE(ring->xmit_more); } + if (mlx4_is_master(mdev->dev)) { stats->rx_packets = en_stats_adder(&mlx4_en_stats->RTOT_prio_0, &mlx4_en_stats->RTOT_prio_1, @@ -245,7 +281,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); - stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP); + stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP) + + sw_tx_dropped; /* RX stats */ priv->pkstats.rx_multicast_packets = stats->multicast; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index f2e8beddcf44..3c37e216bbf3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -96,7 +96,6 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, struct mlx4_en_rx_alloc 
page_alloc[MLX4_EN_MAX_RX_FRAGS]; const struct mlx4_en_frag_info *frag_info; struct page *page; - dma_addr_t dma; int i; for (i = 0; i < priv->num_frags; i++) { @@ -115,9 +114,10 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, for (i = 0; i < priv->num_frags; i++) { frags[i] = ring_alloc[i]; - dma = ring_alloc[i].dma + ring_alloc[i].page_offset; + frags[i].page_offset += priv->frag_info[i].rx_headroom; + rx_desc->data[i].addr = cpu_to_be64(frags[i].dma + + frags[i].page_offset); ring_alloc[i] = page_alloc[i]; - rx_desc->data[i].addr = cpu_to_be64(dma); } return 0; @@ -250,7 +250,8 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, if (ring->page_cache.index > 0) { frags[0] = ring->page_cache.buf[--ring->page_cache.index]; - rx_desc->data[0].addr = cpu_to_be64(frags[0].dma); + rx_desc->data[0].addr = cpu_to_be64(frags[0].dma + + frags[0].page_offset); return 0; } @@ -688,18 +689,23 @@ out_loopback: dev_kfree_skb_any(skb); } -static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv, - struct mlx4_en_rx_ring *ring) +static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring) { - int index = ring->prod & ring->size_mask; + u32 missing = ring->actual_size - (ring->prod - ring->cons); - while ((u32) (ring->prod - ring->cons) < ring->actual_size) { - if (mlx4_en_prepare_rx_desc(priv, ring, index, + /* Try to batch allocations, but not too much. */ + if (missing < 8) + return false; + do { + if (mlx4_en_prepare_rx_desc(priv, ring, + ring->prod & ring->size_mask, GFP_ATOMIC | __GFP_COLD)) break; ring->prod++; - index = ring->prod & ring->size_mask; - } + } while (--missing); + + return true; } /* When hardware doesn't strip the vlan, we need to calculate the checksum @@ -788,7 +794,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud struct bpf_prog *xdp_prog; int doorbell_pending; struct sk_buff *skb; - int tx_index; int index; int nr; unsigned int length; @@ -808,7 +813,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud rcu_read_lock(); xdp_prog = rcu_dereference(ring->xdp_prog); doorbell_pending = 0; - tx_index = (priv->tx_ring_num - priv->xdp_ring_num) + cq->ring; /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx * descriptor offset can be deduced from the CQE index instead of @@ -877,8 +881,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud */ length = be32_to_cpu(cqe->byte_cnt); length -= ring->fcs_del; - ring->bytes += length; - ring->packets++; l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) && (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL)); @@ -888,6 +890,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud if (xdp_prog) { struct xdp_buff xdp; dma_addr_t dma; + void *orig_data; u32 act; dma = be64_to_cpu(rx_desc->data[0].addr); @@ -895,31 +898,43 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud priv->frag_info[0].frag_size, DMA_FROM_DEVICE); - xdp.data = page_address(frags[0].page) + - frags[0].page_offset; + xdp.data_hard_start = page_address(frags[0].page); + xdp.data = xdp.data_hard_start + frags[0].page_offset; xdp.data_end = xdp.data + length; + orig_data = xdp.data; act = bpf_prog_run_xdp(xdp_prog, &xdp); + + if (xdp.data != orig_data) { + length = xdp.data_end - xdp.data; + frags[0].page_offset = xdp.data - + xdp.data_hard_start; + } + switch (act) { case XDP_PASS: break; case XDP_TX: - if 
(likely(!mlx4_en_xmit_frame(frags, dev, - length, tx_index, + if (likely(!mlx4_en_xmit_frame(ring, frags, dev, + length, cq->ring, &doorbell_pending))) goto consumed; - goto xdp_drop; /* Drop on xmit failure */ + goto xdp_drop_no_cnt; /* Drop on xmit failure */ default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: case XDP_DROP: -xdp_drop: + ring->xdp_drop++; +xdp_drop_no_cnt: if (likely(mlx4_en_rx_recycle(ring, frags))) goto consumed; goto next; } } + ring->bytes += length; + ring->packets++; + if (likely(dev->features & NETIF_F_RXCSUM)) { if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | MLX4_CQE_STATUS_UDP)) { @@ -1081,15 +1096,20 @@ consumed: out: rcu_read_unlock(); - if (doorbell_pending) - mlx4_en_xmit_doorbell(priv->tx_ring[tx_index]); + if (polled) { + if (doorbell_pending) + mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq->ring]); + + mlx4_cq_set_ci(&cq->mcq); + wmb(); /* ensure HW sees CQ consumer before we post new buffers */ + ring->cons = cq->mcq.cons_index; + } AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled); - mlx4_cq_set_ci(&cq->mcq); - wmb(); /* ensure HW sees CQ consumer before we post new buffers */ - ring->cons = cq->mcq.cons_index; - mlx4_en_refill_rx_buffers(priv, ring); - mlx4_en_update_rx_prod_db(ring); + + if (mlx4_en_refill_rx_buffers(priv, ring)) + mlx4_en_update_rx_prod_db(ring); + return polled; } @@ -1131,14 +1151,17 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) return budget; /* Current cpu is not according to smp_irq_affinity - - * probably affinity changed. need to stop this NAPI - * poll, and restart it on the right CPU + * probably affinity changed. Need to stop this NAPI + * poll, and restart it on the right CPU. + * Try to avoid returning a too small value (like 0), + * to not fool net_rx_action() and its netdev_budget */ - done = 0; + if (done) + done--; } /* Done for now */ - napi_complete_done(napi, done); - mlx4_en_arm_cq(priv, cq); + if (napi_complete_done(napi, done)) + mlx4_en_arm_cq(priv, cq); return done; } @@ -1151,37 +1174,41 @@ static const int frag_sizes[] = { void mlx4_en_calc_rx_buf(struct net_device *dev) { - enum dma_data_direction dma_dir = PCI_DMA_FROMDEVICE; struct mlx4_en_priv *priv = netdev_priv(dev); int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu); - int order = MLX4_EN_ALLOC_PREFER_ORDER; - u32 align = SMP_CACHE_BYTES; - int buf_size = 0; int i = 0; /* bpf requires buffers to be set up as 1 packet per page. * This only works when num_frags == 1. */ - if (priv->xdp_ring_num) { - dma_dir = PCI_DMA_BIDIRECTIONAL; - /* This will gain efficient xdp frame recycling at the expense - * of more costly truesize accounting + if (priv->tx_ring_num[TX_XDP]) { + priv->frag_info[0].order = 0; + priv->frag_info[0].frag_size = eff_mtu; + priv->frag_info[0].frag_prefix_size = 0; + /* This will gain efficient xdp frame recycling at the + * expense of more costly truesize accounting */ - align = PAGE_SIZE; - order = 0; - } - - while (buf_size < eff_mtu) { - priv->frag_info[i].order = order; - priv->frag_info[i].frag_size = - (eff_mtu > buf_size + frag_sizes[i]) ? 
- frag_sizes[i] : eff_mtu - buf_size; - priv->frag_info[i].frag_prefix_size = buf_size; - priv->frag_info[i].frag_stride = - ALIGN(priv->frag_info[i].frag_size, align); - priv->frag_info[i].dma_dir = dma_dir; - buf_size += priv->frag_info[i].frag_size; - i++; + priv->frag_info[0].frag_stride = PAGE_SIZE; + priv->frag_info[0].dma_dir = PCI_DMA_BIDIRECTIONAL; + priv->frag_info[0].rx_headroom = XDP_PACKET_HEADROOM; + i = 1; + } else { + int buf_size = 0; + + while (buf_size < eff_mtu) { + priv->frag_info[i].order = MLX4_EN_ALLOC_PREFER_ORDER; + priv->frag_info[i].frag_size = + (eff_mtu > buf_size + frag_sizes[i]) ? + frag_sizes[i] : eff_mtu - buf_size; + priv->frag_info[i].frag_prefix_size = buf_size; + priv->frag_info[i].frag_stride = + ALIGN(priv->frag_info[i].frag_size, + SMP_CACHE_BYTES); + priv->frag_info[i].dma_dir = PCI_DMA_FROMDEVICE; + priv->frag_info[i].rx_headroom = 0; + buf_size += priv->frag_info[i].frag_size; + i++; + } } priv->num_frags = i; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index c06346a82496..95290e1fc9fe 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -68,7 +68,7 @@ static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv) memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN); eth_zero_addr(ethh->h_source); ethh->h_proto = htons(ETH_P_ARP); - skb_set_mac_header(skb, 0); + skb_reset_mac_header(skb); for (i = 0; i < packet_size; ++i) /* fill our packet */ packet[i] = (unsigned char)(i & 0xff); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index e2509bba3e7c..5886ad78058f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -66,7 +66,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ring->size = size; ring->size_mask = size - 1; - ring->stride = stride; + ring->sp_stride = stride; ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS; tmp = size * sizeof(struct mlx4_en_tx_info); @@ -90,22 +90,22 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, goto err_info; } } - ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE); + ring->buf_size = ALIGN(size * ring->sp_stride, MLX4_EN_PAGE_SIZE); /* Allocate HW buffers on provided NUMA node */ set_dev_node(&mdev->dev->persist->pdev->dev, node); - err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); + err = mlx4_alloc_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size); set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); if (err) { en_err(priv, "Failed allocating hwq resources\n"); goto err_bounce; } - ring->buf = ring->wqres.buf.direct.buf; + ring->buf = ring->sp_wqres.buf.direct.buf; en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, ring->buf_size, - (unsigned long long) ring->wqres.buf.direct.map); + (unsigned long long) ring->sp_wqres.buf.direct.map); err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn, MLX4_RESERVE_ETH_BF_QP); @@ -114,12 +114,12 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, goto err_hwq_res; } - err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); + err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp, GFP_KERNEL); if (err) { en_err(priv, "Failed allocating qp %d\n", ring->qpn); goto err_reserve; } - ring->qp.event = mlx4_en_sqp_event; + ring->sp_qp.event = mlx4_en_sqp_event; err = 
mlx4_bf_alloc(mdev->dev, &ring->bf, node); if (err) { @@ -141,7 +141,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, if (queue_index < priv->num_tx_rings_p_up) cpumask_set_cpu(cpumask_local_spread(queue_index, priv->mdev->dev->numa_node), - &ring->affinity_mask); + &ring->sp_affinity_mask); *pring = ring; return 0; @@ -149,7 +149,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, err_reserve: mlx4_qp_release_range(mdev->dev, ring->qpn, 1); err_hwq_res: - mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); + mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size); err_bounce: kfree(ring->bounce_buf); ring->bounce_buf = NULL; @@ -171,10 +171,10 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, if (ring->bf_alloced) mlx4_bf_free(mdev->dev, &ring->bf); - mlx4_qp_remove(mdev->dev, &ring->qp); - mlx4_qp_free(mdev->dev, &ring->qp); + mlx4_qp_remove(mdev->dev, &ring->sp_qp); + mlx4_qp_free(mdev->dev, &ring->sp_qp); mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1); - mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); + mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size); kfree(ring->bounce_buf); ring->bounce_buf = NULL; kvfree(ring->tx_info); @@ -190,7 +190,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_dev *mdev = priv->mdev; int err; - ring->cqn = cq; + ring->sp_cqn = cq; ring->prod = 0; ring->cons = 0xffffffff; ring->last_nr_txbb = 1; @@ -198,21 +198,21 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, memset(ring->buf, 0, ring->buf_size); ring->free_tx_desc = mlx4_en_free_tx_desc; - ring->qp_state = MLX4_QP_STATE_RST; - ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8); + ring->sp_qp_state = MLX4_QP_STATE_RST; + ring->doorbell_qpn = cpu_to_be32(ring->sp_qp.qpn << 8); ring->mr_key = cpu_to_be32(mdev->mr.key); - mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, - ring->cqn, user_prio, &ring->context); + mlx4_en_fill_qp_context(priv, ring->size, ring->sp_stride, 1, 0, ring->qpn, + ring->sp_cqn, user_prio, &ring->sp_context); if (ring->bf_alloced) - ring->context.usr_page = + ring->sp_context.usr_page = cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev, ring->bf.uar->index)); - err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, - &ring->qp, &ring->qp_state); - if (!cpumask_empty(&ring->affinity_mask)) - netif_set_xps_queue(priv->dev, &ring->affinity_mask, + err = mlx4_qp_to_ready(mdev->dev, &ring->sp_wqres.mtt, &ring->sp_context, + &ring->sp_qp, &ring->sp_qp_state); + if (!cpumask_empty(&ring->sp_affinity_mask)) + netif_set_xps_queue(priv->dev, &ring->sp_affinity_mask, ring->queue_index); return err; @@ -223,8 +223,8 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, { struct mlx4_en_dev *mdev = priv->mdev; - mlx4_qp_modify(mdev->dev, NULL, ring->qp_state, - MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); + mlx4_qp_modify(mdev->dev, NULL, ring->sp_qp_state, + MLX4_QP_STATE_RST, NULL, 0, 0, &ring->sp_qp); } static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring) @@ -354,7 +354,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv, struct mlx4_en_rx_alloc frame = { .page = tx_info->page, .dma = tx_info->map0_dma, - .page_offset = 0, + .page_offset = XDP_PACKET_HEADROOM, .page_size = PAGE_SIZE, }; @@ -392,7 +392,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) cnt++; } - netdev_tx_reset_queue(ring->tx_queue); + if (ring->tx_queue) + netdev_tx_reset_queue(ring->tx_queue); if (cnt) en_dbg(DRV, priv, "Freed 
%d uncompleted tx descriptors\n", cnt); @@ -405,7 +406,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_cq *mcq = &cq->mcq; - struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring]; + struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->type][cq->ring]; struct mlx4_cqe *cqe; u16 index; u16 new_index, ring_index, stamp_index; @@ -807,7 +808,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) bool bf_ok; tx_ind = skb_get_queue_mapping(skb); - ring = priv->tx_ring[tx_ind]; + ring = priv->tx_ring[TX][tx_ind]; if (!priv->port_up) goto tx_drop; @@ -1078,7 +1079,8 @@ tx_drop: return NETDEV_TX_OK; } -netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame, +netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, + struct mlx4_en_rx_alloc *frame, struct net_device *dev, unsigned int length, int tx_ind, int *doorbell_pending) { @@ -1101,7 +1103,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame, BUILD_BUG_ON_MSG(ALIGN(CTRL_SIZE + DS_SIZE, TXBB_SIZE) != TXBB_SIZE, "mlx4_en_xmit_frame requires minimum size tx desc"); - ring = priv->tx_ring[tx_ind]; + ring = priv->tx_ring[TX_XDP][tx_ind]; if (!priv->port_up) goto tx_drop; @@ -1130,7 +1132,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame, tx_info->page = frame->page; frame->page = NULL; tx_info->map0_dma = dma; - tx_info->map0_byte_count = length; + tx_info->map0_byte_count = PAGE_SIZE; tx_info->nr_txbb = nr_txbb; tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN); tx_info->data_offset = (void *)data - (void *)tx_desc; @@ -1139,9 +1141,10 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame, tx_info->linear = 1; tx_info->inl = 0; - dma_sync_single_for_device(priv->ddev, dma, length, PCI_DMA_TODEVICE); + dma_sync_single_range_for_device(priv->ddev, dma, frame->page_offset, + length, PCI_DMA_TODEVICE); - data->addr = cpu_to_be64(dma); + data->addr = cpu_to_be64(dma + frame->page_offset); data->lkey = ring->mr_key; dma_wmb(); data->byte_count = cpu_to_be32(length); @@ -1153,8 +1156,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame, ((ring->prod & ring->size) ? 
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); - ring->packets++; - ring->bytes += tx_info->nr_bytes; + rx_ring->xdp_tx++; AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, length); ring->prod += nr_txbb; @@ -1178,7 +1180,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame, return NETDEV_TX_OK; tx_drop_count: - ring->tx_dropped++; + rx_ring->xdp_tx_full++; tx_drop: return NETDEV_TX_BUSY; } diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 6f4e67bc3538..5e7840a7a33b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1823,10 +1823,10 @@ static void unmap_bf_area(struct mlx4_dev *dev) io_mapping_free(mlx4_priv(dev)->bf_mapping); } -cycle_t mlx4_read_clock(struct mlx4_dev *dev) +u64 mlx4_read_clock(struct mlx4_dev *dev) { u32 clockhi, clocklo, clockhi1; - cycle_t cycles; + u64 cycles; int i; struct mlx4_priv *priv = mlx4_priv(dev); @@ -4020,49 +4020,51 @@ int mlx4_restart_one(struct pci_dev *pdev) return err; } +#define MLX_SP(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_FORCE_SENSE_PORT } +#define MLX_VF(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_IS_VF } +#define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 } + static const struct pci_device_id mlx4_pci_table[] = { - /* MT25408 "Hermon" SDR */ - { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT }, - /* MT25408 "Hermon" DDR */ - { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT }, - /* MT25408 "Hermon" QDR */ - { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT }, - /* MT25408 "Hermon" DDR PCIe gen2 */ - { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT }, - /* MT25408 "Hermon" QDR PCIe gen2 */ - { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT }, - /* MT25408 "Hermon" EN 10GigE */ - { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT }, - /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ - { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT }, - /* MT25458 ConnectX EN 10GBASE-T 10GigE */ - { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT }, - /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ - { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT }, - /* MT26468 ConnectX EN 10GigE PCIe gen2*/ - { PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT }, - /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ - { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT }, - /* MT26478 ConnectX2 40GigE PCIe gen2 */ - { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT }, - /* MT25400 Family [ConnectX-2 Virtual Function] */ - { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF }, + /* MT25408 "Hermon" */ + MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_SDR), /* SDR */ + MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR), /* DDR */ + MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR), /* QDR */ + MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2), /* DDR Gen2 */ + MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2), /* QDR Gen2 */ + MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN), /* EN 10GigE */ + MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2), /* EN 10GigE Gen2 */ + /* MT25458 ConnectX EN 10GBASE-T */ + MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN), + MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2), /* Gen2 */ + /* MT26468 ConnectX EN 10GigE PCIe Gen2*/ + MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2), + /* MT26438 ConnectX EN 40GigE PCIe Gen2 5GT/s */ + MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2), + /* MT26478 ConnectX2 40GigE PCIe Gen2 */ + 
MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX2), + /* MT25400 Family [ConnectX-2] */ + MLX_VF(0x1002), /* Virtual Function */ /* MT27500 Family [ConnectX-3] */ - { PCI_VDEVICE(MELLANOX, 0x1003), 0 }, - /* MT27500 Family [ConnectX-3 Virtual Function] */ - { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF }, - { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */ - { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */ - { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */ - { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */ - { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */ - { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */ - { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */ - { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */ - { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */ - { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */ - { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */ - { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */ + MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3), + MLX_VF(0x1004), /* Virtual Function */ + MLX_GN(0x1005), /* MT27510 Family */ + MLX_GN(0x1006), /* MT27511 Family */ + MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO), /* MT27520 Family */ + MLX_GN(0x1008), /* MT27521 Family */ + MLX_GN(0x1009), /* MT27530 Family */ + MLX_GN(0x100a), /* MT27531 Family */ + MLX_GN(0x100b), /* MT27540 Family */ + MLX_GN(0x100c), /* MT27541 Family */ + MLX_GN(0x100d), /* MT27550 Family */ + MLX_GN(0x100e), /* MT27551 Family */ + MLX_GN(0x100f), /* MT27560 Family */ + MLX_GN(0x1010), /* MT27561 Family */ + + /* + * See the mellanox_check_broken_intx_masking() quirk when + * adding devices + */ + { 0, } }; @@ -4147,11 +4149,8 @@ static void mlx4_shutdown(struct pci_dev *pdev) mlx4_info(persist->dev, "mlx4_shutdown was called\n"); mutex_lock(&persist->interface_state_mutex); - if (persist->interface_state & MLX4_INTERFACE_STATE_UP) { - /* Notify mlx4 clients that the kernel is being shut down */ - persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN; + if (persist->interface_state & MLX4_INTERFACE_STATE_UP) mlx4_unload_one(pdev); - } mutex_unlock(&persist->interface_state_mutex); } diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 94b891c118c1..1a670b681555 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -1457,7 +1457,12 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_detach); int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn, enum mlx4_net_trans_promisc_mode mode) { - struct mlx4_net_trans_rule rule; + struct mlx4_net_trans_rule rule = { + .queue_mode = MLX4_NET_TRANS_Q_FIFO, + .exclusive = 0, + .allow_loopback = 1, + }; + u64 *regid_p; switch (mode) { diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index a3528dd1e72e..ba1c6cd0cc79 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -207,8 +207,11 @@ enum { */ enum cq_type { - RX = 0, - TX = 1, + /* keep tx types first */ + TX, + TX_XDP, +#define MLX4_EN_NUM_TX_TYPES (TX_XDP + 1) + RX, }; @@ -278,46 +281,50 @@ struct mlx4_en_tx_ring { u32 last_nr_txbb; u32 cons; unsigned long wake_queue; + struct netdev_queue *tx_queue; + u32 (*free_tx_desc)(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u8 owner, + u64 timestamp, int napi_mode); + struct mlx4_en_rx_ring *recycle_ring; /* cache line used and 
dirtied in mlx4_en_xmit() */ u32 prod ____cacheline_aligned_in_smp; + unsigned int tx_dropped; unsigned long bytes; unsigned long packets; unsigned long tx_csum; unsigned long tso_packets; unsigned long xmit_more; - unsigned int tx_dropped; struct mlx4_bf bf; - unsigned long queue_stopped; /* Following part should be mostly read */ - cpumask_t affinity_mask; - struct mlx4_qp qp; - struct mlx4_hwq_resources wqres; + __be32 doorbell_qpn; + __be32 mr_key; u32 size; /* number of TXBBs */ u32 size_mask; - u16 stride; u32 full_size; - u16 cqn; /* index of port CQ associated with this ring */ u32 buf_size; - __be32 doorbell_qpn; - __be32 mr_key; void *buf; struct mlx4_en_tx_info *tx_info; - struct mlx4_en_rx_ring *recycle_ring; - u32 (*free_tx_desc)(struct mlx4_en_priv *priv, - struct mlx4_en_tx_ring *ring, - int index, u8 owner, - u64 timestamp, int napi_mode); - u8 *bounce_buf; - struct mlx4_qp_context context; int qpn; - enum mlx4_qp_state qp_state; u8 queue_index; bool bf_enabled; bool bf_alloced; - struct netdev_queue *tx_queue; - int hwtstamp_tx_type; + u8 hwtstamp_tx_type; + u8 *bounce_buf; + + /* Not used in fast path + * Only queue_stopped might be used if BQL is not properly working. + */ + unsigned long queue_stopped; + struct mlx4_hwq_resources sp_wqres; + struct mlx4_qp sp_qp; + struct mlx4_qp_context sp_context; + cpumask_t sp_affinity_mask; + enum mlx4_qp_state sp_qp_state; + u16 sp_stride; + u16 sp_cqn; /* index of port CQ associated with this ring */ } ____cacheline_aligned_in_smp; struct mlx4_en_rx_desc { @@ -347,6 +354,9 @@ struct mlx4_en_rx_ring { unsigned long csum_ok; unsigned long csum_none; unsigned long csum_complete; + unsigned long xdp_drop; + unsigned long xdp_tx; + unsigned long xdp_tx_full; unsigned long dropped; int hwtstamp_rx_filter; cpumask_var_t affinity_mask; @@ -361,7 +371,7 @@ struct mlx4_en_cq { int size; int buf_size; int vector; - enum cq_type is_tx; + enum cq_type type; u16 moder_time; u16 moder_cnt; struct mlx4_cqe *buf; @@ -372,7 +382,7 @@ struct mlx4_en_cq { struct mlx4_en_port_profile { u32 flags; - u32 tx_ring_num; + u32 tx_ring_num[MLX4_EN_NUM_TX_TYPES]; u32 rx_ring_num; u32 tx_ring_size; u32 rx_ring_size; @@ -465,7 +475,8 @@ struct mlx4_en_frag_info { u16 frag_prefix_size; u32 frag_stride; enum dma_data_direction dma_dir; - int order; + u16 order; + u16 rx_headroom; }; #ifdef CONFIG_MLX4_EN_DCB @@ -569,17 +580,16 @@ struct mlx4_en_priv { u32 flags; u8 num_tx_rings_p_up; u32 tx_work_limit; - u32 tx_ring_num; + u32 tx_ring_num[MLX4_EN_NUM_TX_TYPES]; u32 rx_ring_num; u32 rx_skb_size; struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS]; u16 num_frags; u16 log_rx_info; - int xdp_ring_num; - struct mlx4_en_tx_ring **tx_ring; + struct mlx4_en_tx_ring **tx_ring[MLX4_EN_NUM_TX_TYPES]; struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS]; - struct mlx4_en_cq **tx_cq; + struct mlx4_en_cq **tx_cq[MLX4_EN_NUM_TX_TYPES]; struct mlx4_en_cq *rx_cq[MAX_RX_RINGS]; struct mlx4_qp drop_qp; struct work_struct rx_mode_task; @@ -597,6 +607,7 @@ struct mlx4_en_priv { struct mlx4_en_flow_stats_rx rx_flowstats; struct mlx4_en_flow_stats_tx tx_flowstats; struct mlx4_en_port_stats port_stats; + struct mlx4_en_xdp_stats xdp_stats; struct mlx4_en_stats_bitmap stats_bitmap; struct list_head mc_list; struct list_head curr_list; @@ -685,7 +696,8 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq); u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback); netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 
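The mlx4_en.h hunks above turn the flat tx_ring/tx_cq pointers into per-type arrays indexed by the new cq_type values (TX for the regular stack, TX_XDP for XDP transmit) before the per-ring index. A minimal sketch of the resulting iteration pattern follows; the helper name and its byte-summing purpose are illustrative only, while the enum, array bounds and fields are the ones introduced by this patch:

/* Presumes the driver-internal mlx4_en.h definitions patched above. */
static unsigned long example_sum_tx_bytes(struct mlx4_en_priv *priv)
{
	unsigned long bytes = 0;
	int t, i;

	/* Walk both TX types: t = TX, TX_XDP (MLX4_EN_NUM_TX_TYPES total),
	 * each with its own ring count in tx_ring_num[t].
	 */
	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
		for (i = 0; i < priv->tx_ring_num[t]; i++)
			bytes += priv->tx_ring[t][i]->bytes;

	return bytes;
}

This is the same two-level indexing the en_tx.c hunks apply at the call sites, e.g. priv->tx_ring[TX][tx_ind] in mlx4_en_xmit() and priv->tx_ring[TX_XDP][tx_ind] in mlx4_en_xmit_frame().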
-netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame, +netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, + struct mlx4_en_rx_alloc *frame, struct net_device *dev, unsigned int length, int tx_ind, int *doorbell_pending); void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring); @@ -744,6 +756,7 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq); int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv); +void mlx4_en_fold_software_stats(struct net_device *dev); int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset); int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h index 7fd466c0b929..48641cb0367f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h @@ -55,6 +55,13 @@ struct mlx4_en_perf_stats { #define NUM_PERF_COUNTERS 6 }; +struct mlx4_en_xdp_stats { + unsigned long rx_xdp_drop; + unsigned long rx_xdp_tx; + unsigned long rx_xdp_tx_full; +#define NUM_XDP_STATS 3 +}; + #define NUM_MAIN_STATS 21 #define MLX4_NUM_PRIORITIES 8 @@ -107,7 +114,8 @@ enum { }; #define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \ - NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS) + NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS + \ + NUM_XDP_STATS) #define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \ sizeof(((struct net_device_stats *)0)->n)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index aae46884bf93..ddb4ca4ff930 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -14,12 +14,10 @@ config MLX5_CORE config MLX5_CORE_EN bool "Mellanox Technologies ConnectX-4 Ethernet support" depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE - select PTP_1588_CLOCK + imply PTP_1588_CLOCK default n ---help--- Ethernet support in Mellanox Technologies ConnectX-4 NIC. - Ethernet and Infiniband support in ConnectX-4 are currently mutually - exclusive. 
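The Kconfig hunk above moves MLX5_CORE_EN from "select PTP_1588_CLOCK" to "imply PTP_1588_CLOCK". Unlike select, imply only turns the PTP core's default on: the user may still set PTP_1588_CLOCK to m or n, so the Ethernet driver can be built without dragging the PTP subsystem in, with timestamping calls then typically guarded by IS_REACHABLE(CONFIG_PTP_1588_CLOCK). A minimal Kconfig illustration of the directive (DRIVER_FOO is a made-up symbol, not part of this patch):

config DRIVER_FOO
	tristate "Foo NIC support"
	depends on PCI
	imply PTP_1588_CLOCK
	---help---
	  Sample consumer only: with imply, PTP_1588_CLOCK may end up
	  y, m, or n regardless of this driver's own setting.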
config MLX5_CORE_EN_DCB bool "Data Center Bridging (DCB) Support" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 0343725d7f44..9f43beb86250 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -8,6 +8,6 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \ en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \ en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \ - en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o + en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o en_selftest.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index 2c6e3c7b7417..66bd213f35ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -106,6 +106,63 @@ void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf) } EXPORT_SYMBOL_GPL(mlx5_buf_free); +int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, + struct mlx5_frag_buf *buf, int node) +{ + int i; + + buf->size = size; + buf->npages = 1 << get_order(size); + buf->page_shift = PAGE_SHIFT; + buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list), + GFP_KERNEL); + if (!buf->frags) + goto err_out; + + for (i = 0; i < buf->npages; i++) { + struct mlx5_buf_list *frag = &buf->frags[i]; + int frag_sz = min_t(int, size, PAGE_SIZE); + + frag->buf = mlx5_dma_zalloc_coherent_node(dev, frag_sz, + &frag->map, node); + if (!frag->buf) + goto err_free_buf; + if (frag->map & ((1 << buf->page_shift) - 1)) { + dma_free_coherent(&dev->pdev->dev, frag_sz, + buf->frags[i].buf, buf->frags[i].map); + mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n", + &frag->map, buf->page_shift); + goto err_free_buf; + } + size -= frag_sz; + } + + return 0; + +err_free_buf: + while (i--) + dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->frags[i].buf, + buf->frags[i].map); + kfree(buf->frags); +err_out: + return -ENOMEM; +} + +void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf) +{ + int size = buf->size; + int i; + + for (i = 0; i < buf->npages; i++) { + int frag_sz = min_t(int, size, PAGE_SIZE); + + dma_free_coherent(&dev->pdev->dev, frag_sz, buf->frags[i].buf, + buf->frags[i].map); + size -= frag_sz; + } + kfree(buf->frags); +} + static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev, int node) { @@ -230,3 +287,12 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas) } } EXPORT_SYMBOL_GPL(mlx5_fill_page_array); + +void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas) +{ + int i; + + for (i = 0; i < buf->npages; i++) + pas[i] = cpu_to_be64(buf->frags[i].map); +} +EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 1e639f886021..3797cc7c1288 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -54,14 +54,6 @@ enum { }; enum { - NUM_LONG_LISTS = 2, - NUM_MED_LISTS = 64, - LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 + - MLX5_CMD_DATA_BLOCK_SIZE, - MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE, -}; - -enum { MLX5_CMD_DELIVERY_STAT_OK = 0x0, MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, MLX5_CMD_DELIVERY_STAT_TOK_ERR = 
0x2, @@ -268,11 +260,6 @@ static void dump_buf(void *buf, int size, int data_only, int offset) pr_debug("\n"); } -enum { - MLX5_DRIVER_STATUS_ABORTED = 0xfe, - MLX5_DRIVER_SYND = 0xbadd00de, -}; - static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, u32 *synd, u8 *status) { @@ -318,6 +305,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER: + case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT: + case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT: return MLX5_CMD_STAT_OK; case MLX5_CMD_OP_QUERY_HCA_CAP: @@ -419,11 +408,14 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_QUERY_FLOW_TABLE: case MLX5_CMD_OP_CREATE_FLOW_GROUP: case MLX5_CMD_OP_QUERY_FLOW_GROUP: - case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: case MLX5_CMD_OP_QUERY_FLOW_COUNTER: case MLX5_CMD_OP_ALLOC_ENCAP_HEADER: + case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT: + case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT: + case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT: + case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT: *status = MLX5_DRIVER_STATUS_ABORTED; *synd = MLX5_DRIVER_SYND; return -EIO; @@ -580,6 +572,12 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE); MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER); MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER); + MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT); + MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT); + MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT); + MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT); + MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT); + MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT); default: return "unknown command opcode"; } } @@ -1063,14 +1061,13 @@ static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev, if (!mailbox) return ERR_PTR(-ENOMEM); - mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags, - &mailbox->dma); + mailbox->buf = pci_pool_zalloc(dev->cmd.pool, flags, + &mailbox->dma); if (!mailbox->buf) { mlx5_core_dbg(dev, "failed allocation\n"); kfree(mailbox); return ERR_PTR(-ENOMEM); } - memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block)); mailbox->next = NULL; return mailbox; @@ -1361,10 +1358,10 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) { unsigned long flags; - if (msg->cache) { - spin_lock_irqsave(&msg->cache->lock, flags); - list_add_tail(&msg->list, &msg->cache->head); - spin_unlock_irqrestore(&msg->cache->lock, flags); + if (msg->parent) { + spin_lock_irqsave(&msg->parent->lock, flags); + list_add_tail(&msg->list, &msg->parent->head); + spin_unlock_irqrestore(&msg->parent->lock, flags); } else { mlx5_free_cmd_msg(dev, msg); } @@ -1461,30 +1458,37 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, gfp_t gfp) { struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM); + struct cmd_msg_cache *ch = NULL; struct mlx5_cmd *cmd = &dev->cmd; - struct cache_ent *ent = NULL; - - if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE) - ent = &cmd->cache.large; - else if (in_size > 16 && in_size <= MED_LIST_SIZE) - ent = &cmd->cache.med; - - if (ent) { - spin_lock_irq(&ent->lock); - if (!list_empty(&ent->head)) { - msg = list_entry(ent->head.next, typeof(*msg), list); - /* For cached lists, we must explicitly state what is - * the real size - */ - msg->len = in_size; - list_del(&msg->list); + int i; + + if (in_size <= 16) + goto cache_miss; + + for (i = 0; i < 
MLX5_NUM_COMMAND_CACHES; i++) { + ch = &cmd->cache[i]; + if (in_size > ch->max_inbox_size) + continue; + spin_lock_irq(&ch->lock); + if (list_empty(&ch->head)) { + spin_unlock_irq(&ch->lock); + continue; } - spin_unlock_irq(&ent->lock); + msg = list_entry(ch->head.next, typeof(*msg), list); + /* For cached lists, we must explicitly state what is + * the real size + */ + msg->len = in_size; + list_del(&msg->list); + spin_unlock_irq(&ch->lock); + break; } - if (IS_ERR(msg)) - msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0); + if (!IS_ERR(msg)) + return msg; +cache_miss: + msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0); return msg; } @@ -1582,58 +1586,56 @@ EXPORT_SYMBOL(mlx5_cmd_exec_cb); static void destroy_msg_cache(struct mlx5_core_dev *dev) { - struct mlx5_cmd *cmd = &dev->cmd; + struct cmd_msg_cache *ch; struct mlx5_cmd_msg *msg; struct mlx5_cmd_msg *n; + int i; - list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) { - list_del(&msg->list); - mlx5_free_cmd_msg(dev, msg); - } - - list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) { - list_del(&msg->list); - mlx5_free_cmd_msg(dev, msg); + for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) { + ch = &dev->cmd.cache[i]; + list_for_each_entry_safe(msg, n, &ch->head, list) { + list_del(&msg->list); + mlx5_free_cmd_msg(dev, msg); + } } } -static int create_msg_cache(struct mlx5_core_dev *dev) +static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = { + 512, 32, 16, 8, 2 +}; + +static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = { + 16 + MLX5_CMD_DATA_BLOCK_SIZE, + 16 + MLX5_CMD_DATA_BLOCK_SIZE * 2, + 16 + MLX5_CMD_DATA_BLOCK_SIZE * 16, + 16 + MLX5_CMD_DATA_BLOCK_SIZE * 256, + 16 + MLX5_CMD_DATA_BLOCK_SIZE * 512, +}; + +static void create_msg_cache(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; + struct cmd_msg_cache *ch; struct mlx5_cmd_msg *msg; - int err; int i; - - spin_lock_init(&cmd->cache.large.lock); - INIT_LIST_HEAD(&cmd->cache.large.head); - spin_lock_init(&cmd->cache.med.lock); - INIT_LIST_HEAD(&cmd->cache.med.head); - - for (i = 0; i < NUM_LONG_LISTS; i++) { - msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0); - if (IS_ERR(msg)) { - err = PTR_ERR(msg); - goto ex_err; + int k; + + /* Initialize and fill the caches with initial entries */ + for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) { + ch = &cmd->cache[k]; + spin_lock_init(&ch->lock); + INIT_LIST_HEAD(&ch->head); + ch->num_ent = cmd_cache_num_ent[k]; + ch->max_inbox_size = cmd_cache_ent_size[k]; + for (i = 0; i < ch->num_ent; i++) { + msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN, + ch->max_inbox_size, 0); + if (IS_ERR(msg)) + break; + msg->parent = ch; + list_add_tail(&msg->list, &ch->head); } - msg->cache = &cmd->cache.large; - list_add_tail(&msg->list, &cmd->cache.large.head); } - - for (i = 0; i < NUM_MED_LISTS; i++) { - msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0); - if (IS_ERR(msg)) { - err = PTR_ERR(msg); - goto ex_err; - } - msg->cache = &cmd->cache.med; - list_add_tail(&msg->list, &cmd->cache.med.head); - } - - return 0; - -ex_err: - destroy_msg_cache(dev); - return err; } static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) @@ -1756,11 +1758,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) cmd->mode = CMD_MODE_POLLING; - err = create_msg_cache(dev); - if (err) { - dev_err(&dev->pdev->dev, "failed to create command cache\n"); - goto err_free_page; - } + create_msg_cache(dev); set_wqname(dev); cmd->wq = create_singlethread_workqueue(cmd->wq_name); diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 7a43502a89cc..951dbd58594d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -77,9 +77,9 @@ MLX5_MPWRQ_WQE_PAGE_ORDER) #define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2) -#define MLX5E_REQUIRED_MTTS(rqs, wqes)\ - (rqs * wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8)) -#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX) +#define MLX5E_REQUIRED_MTTS(wqes) \ + (wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8)) +#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX) #define MLX5_UMR_ALIGN (2048) #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128) @@ -150,12 +150,6 @@ static inline int mlx5_max_log_rq_size(int wq_type) } } -enum { - MLX5E_INLINE_MODE_L2, - MLX5E_INLINE_MODE_VPORT_CONTEXT, - MLX5_INLINE_MODE_NOT_REQUIRED, -}; - struct mlx5e_tx_wqe { struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_eth_seg eth; @@ -173,22 +167,28 @@ struct mlx5e_umr_wqe { struct mlx5_wqe_data_seg data; }; +extern const char mlx5e_self_tests[][ETH_GSTRING_LEN]; + static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = { "rx_cqe_moder", + "rx_cqe_compress", }; enum mlx5e_priv_flag { MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0), + MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 1), }; -#define MLX5E_SET_PRIV_FLAG(priv, pflag, enable) \ - do { \ - if (enable) \ - priv->pflags |= pflag; \ - else \ - priv->pflags &= ~pflag; \ +#define MLX5E_SET_PFLAG(priv, pflag, enable) \ + do { \ + if (enable) \ + (priv)->params.pflags |= (pflag); \ + else \ + (priv)->params.pflags &= ~(pflag); \ } while (0) +#define MLX5E_GET_PFLAG(priv, pflag) (!!((priv)->params.pflags & (pflag))) + #ifdef CONFIG_MLX5_CORE_EN_DCB #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ #endif @@ -207,8 +207,7 @@ struct mlx5e_params { u16 num_channels; u8 num_tc; u8 rx_cq_period_mode; - bool rx_cqe_compress_admin; - bool rx_cqe_compress; + bool rx_cqe_compress_def; struct mlx5e_cq_moder rx_cq_moderation; struct mlx5e_cq_moder tx_cq_moderation; u16 min_rx_wqes; @@ -220,12 +219,34 @@ struct mlx5e_params { u8 toeplitz_hash_key[40]; u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; bool vlan_strip_disable; -#ifdef CONFIG_MLX5_CORE_EN_DCB - struct ieee_ets ets; -#endif bool rx_am_enabled; u32 lro_timeout; + u32 pflags; +}; + +#ifdef CONFIG_MLX5_CORE_EN_DCB +struct mlx5e_cee_config { + /* bw pct for priority group */ + u8 pg_bw_pct[CEE_DCBX_MAX_PGS]; + u8 prio_to_pg_map[CEE_DCBX_MAX_PRIO]; + bool pfc_setting[CEE_DCBX_MAX_PRIO]; + bool pfc_enable; +}; + +enum { + MLX5_DCB_CHG_RESET, + MLX5_DCB_NO_CHG, + MLX5_DCB_CHG_NO_RESET, +}; + +struct mlx5e_dcbx { + enum mlx5_dcbx_oper_mode mode; + struct mlx5e_cee_config cee_cfg; /* pending configuration */ + + /* The only setting that cannot be read from FW */ + u8 tc_tsa[IEEE_8021QAZ_MAX_TCS]; }; +#endif struct mlx5e_tstamp { rwlock_t lock; @@ -241,7 +262,7 @@ struct mlx5e_tstamp { }; enum { - MLX5E_RQ_STATE_FLUSH, + MLX5E_RQ_STATE_ENABLED, MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, MLX5E_RQ_STATE_AM, }; @@ -265,7 +286,7 @@ struct mlx5e_cq { u16 decmprs_wqe_counter; /* control */ - struct mlx5_wq_ctrl wq_ctrl; + struct mlx5_frag_wq_ctrl wq_ctrl; } ____cacheline_aligned_in_smp; struct mlx5e_rq; @@ -326,7 +347,6 @@ struct mlx5e_rq { struct { struct mlx5e_mpw_info *info; void *mtt_no_align; - u32 mtt_offset; } mpwqe; }; struct { @@ -361,6 +381,7 @@ struct mlx5e_rq { u32 rqn; struct mlx5e_channel *channel; struct mlx5e_priv *priv; + struct 
mlx5_core_mkey umr_mkey; } ____cacheline_aligned_in_smp; struct mlx5e_umr_dma_info { @@ -394,7 +415,7 @@ struct mlx5e_sq_dma { }; enum { - MLX5E_SQ_STATE_FLUSH, + MLX5E_SQ_STATE_ENABLED, MLX5E_SQ_STATE_BF_ENABLE, }; @@ -524,7 +545,7 @@ struct mlx5e_vxlan_db { struct mlx5e_l2_rule { u8 addr[ETH_ALEN + 2]; - struct mlx5_flow_rule *rule; + struct mlx5_flow_handle *rule; }; struct mlx5e_flow_table { @@ -545,10 +566,10 @@ struct mlx5e_tc_table { struct mlx5e_vlan_table { struct mlx5e_flow_table ft; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - struct mlx5_flow_rule *active_vlans_rule[VLAN_N_VID]; - struct mlx5_flow_rule *untagged_rule; - struct mlx5_flow_rule *any_vlan_rule; - bool filter_disabled; + struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID]; + struct mlx5_flow_handle *untagged_rule; + struct mlx5_flow_handle *any_vlan_rule; + bool filter_disabled; }; struct mlx5e_l2_table { @@ -566,14 +587,14 @@ struct mlx5e_l2_table { /* L3/L4 traffic type classifier */ struct mlx5e_ttc_table { struct mlx5e_flow_table ft; - struct mlx5_flow_rule *rules[MLX5E_NUM_TT]; + struct mlx5_flow_handle *rules[MLX5E_NUM_TT]; }; #define ARFS_HASH_SHIFT BITS_PER_BYTE #define ARFS_HASH_SIZE BIT(BITS_PER_BYTE) struct arfs_table { struct mlx5e_flow_table ft; - struct mlx5_flow_rule *default_rule; + struct mlx5_flow_handle *default_rule; struct hlist_head rules_hash[ARFS_HASH_SIZE]; }; @@ -668,7 +689,6 @@ struct mlx5e_priv { unsigned long state; struct mutex state_lock; /* Protects Interface state */ - struct mlx5_core_mkey umr_mkey; struct mlx5e_rq drop_rq; struct mlx5e_channel **channel; @@ -688,12 +708,15 @@ struct mlx5e_priv { struct work_struct tx_timeout_work; struct delayed_work update_stats_work; - u32 pflags; struct mlx5_core_dev *mdev; struct net_device *netdev; struct mlx5e_stats stats; struct mlx5e_tstamp tstamp; u16 q_counter; +#ifdef CONFIG_MLX5_CORE_EN_DCB + struct mlx5e_dcbx dcbx; +#endif + const struct mlx5e_profile *profile; void *ppriv; }; @@ -735,6 +758,9 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv); void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); void mlx5e_init_l2_addr(struct mlx5e_priv *priv); void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft); +int mlx5e_self_test_num(struct mlx5e_priv *priv); +void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, + u64 *buf); int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info, int location); int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, @@ -811,8 +837,7 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix) { - return rq->mpwqe.mtt_offset + - wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8); + return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8); } static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) @@ -825,6 +850,7 @@ extern const struct ethtool_ops mlx5e_ethtool_ops; #ifdef CONFIG_MLX5_CORE_EN_DCB extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops; int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets); +void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv); #endif #ifndef CONFIG_RFS_ACCEL @@ -860,7 +886,8 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir); int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev); void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); -int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev); +int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev 
*mdev, + bool enable_uc_lb); struct mlx5_eswitch_rep; int mlx5e_vport_rep_load(struct mlx5_eswitch *esw, @@ -874,6 +901,7 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv); void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv); int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr); void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); +void mlx5e_update_hw_rep_counters(struct mlx5e_priv *priv); int mlx5e_create_direct_rqts(struct mlx5e_priv *priv); void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); @@ -890,8 +918,16 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv); int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); -struct rtnl_link_stats64 * -mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout); +void mlx5e_add_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti); +void mlx5e_del_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti); + +int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, + void *sp); +bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id); +bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv); +bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv); #endif /* __MLX5_EN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index a8cb38789774..68419a01db36 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -56,7 +56,7 @@ struct arfs_tuple { struct arfs_rule { struct mlx5e_priv *priv; struct work_struct arfs_work; - struct mlx5_flow_rule *rule; + struct mlx5_flow_handle *rule; struct hlist_node hlist; int rxq; /* Flow ID passed to ndo_rx_flow_steer */ @@ -104,7 +104,7 @@ static int arfs_disable(struct mlx5e_priv *priv) tt = arfs_get_tt(i); /* Modify ttc rules destination to bypass the aRFS tables*/ err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt], - &dest); + &dest, NULL); if (err) { netdev_err(priv->netdev, "%s: modify ttc destination failed\n", @@ -137,7 +137,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv) tt = arfs_get_tt(i); /* Modify ttc rules destination to point on the aRFS FTs */ err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt], - &dest); + &dest, NULL); if (err) { netdev_err(priv->netdev, "%s: modify ttc destination failed err=%d\n", @@ -151,7 +151,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv) static void arfs_destroy_table(struct arfs_table *arfs_t) { - mlx5_del_flow_rule(arfs_t->default_rule); + mlx5_del_flow_rules(arfs_t->default_rule); mlx5e_destroy_flow_table(&arfs_t->ft); } @@ -174,6 +174,11 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv, enum arfs_type type) { struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type]; + struct mlx5_flow_act flow_act = { + .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG, + .encap_id = 0, + }; struct mlx5_flow_destination dest; struct mlx5e_tir *tir = priv->indir_tir; struct mlx5_flow_spec *spec; @@ -205,10 +210,9 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv, goto out; } - arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, spec, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 
- MLX5_FS_DEFAULT_FLOW_TAG, - &dest); + arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec, + &flow_act, + &dest, 1); if (IS_ERR(arfs_t->default_rule)) { err = PTR_ERR(arfs_t->default_rule); arfs_t->default_rule = NULL; @@ -324,7 +328,7 @@ static int arfs_create_table(struct mlx5e_priv *priv, int err; ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO, - MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL); + MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL, 0); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); ft->t = NULL; @@ -396,7 +400,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv) spin_unlock_bh(&priv->fs.arfs.arfs_lock); hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) { if (arfs_rule->rule) - mlx5_del_flow_rule(arfs_rule->rule); + mlx5_del_flow_rules(arfs_rule->rule); hlist_del(&arfs_rule->hlist); kfree(arfs_rule); } @@ -420,7 +424,7 @@ static void arfs_del_rules(struct mlx5e_priv *priv) hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) { cancel_work_sync(&rule->arfs_work); if (rule->rule) - mlx5_del_flow_rule(rule->rule); + mlx5_del_flow_rules(rule->rule); hlist_del(&rule->hlist); kfree(rule); } @@ -462,12 +466,17 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs, return NULL; } -static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv, - struct arfs_rule *arfs_rule) +static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv, + struct arfs_rule *arfs_rule) { + struct mlx5_flow_act flow_act = { + .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG, + .encap_id = 0, + }; struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; struct arfs_tuple *tuple = &arfs_rule->tuple; - struct mlx5_flow_rule *rule = NULL; + struct mlx5_flow_handle *rule = NULL; struct mlx5_flow_destination dest; struct arfs_table *arfs_table; struct mlx5_flow_spec *spec; @@ -544,9 +553,7 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv, } dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn; - rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, - &dest); + rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n", @@ -559,14 +566,14 @@ out: } static void arfs_modify_rule_rq(struct mlx5e_priv *priv, - struct mlx5_flow_rule *rule, u16 rxq) + struct mlx5_flow_handle *rule, u16 rxq) { struct mlx5_flow_destination dst; int err = 0; dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR; dst.tir_num = priv->direct_tir[rxq].tirn; - err = mlx5_modify_rule_destination(rule, &dst); + err = mlx5_modify_rule_destination(rule, &dst, NULL); if (err) netdev_warn(priv->netdev, "Failed to modfiy aRFS rule destination to rq=%d\n", rxq); @@ -578,7 +585,7 @@ static void arfs_handle_work(struct work_struct *work) struct arfs_rule, arfs_work); struct mlx5e_priv *priv = arfs_rule->priv; - struct mlx5_flow_rule *rule; + struct mlx5_flow_handle *rule; mutex_lock(&priv->state_lock); if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c index 13dc388667b6..746a92c13644 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c @@ -49,7 +49,7 @@ void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp, hwts->hwtstamp = ns_to_ktime(nsec); 
} -static cycle_t mlx5e_read_internal_timer(const struct cyclecounter *cc) +static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc) { struct mlx5e_tstamp *tstamp = container_of(cc, struct mlx5e_tstamp, cycles); @@ -94,7 +94,7 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: /* Reset CQE compression to Admin default */ - mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_admin); + mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_def); break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: @@ -111,6 +111,7 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: /* Disable CQE compression */ + netdev_warn(dev, "Disabling cqe compression"); mlx5e_modify_rx_cqe_compression(priv, false); config.rx_filter = HWTSTAMP_FILTER_ALL; break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 029e856f72a0..f175518ff07a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -137,7 +137,8 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev) mlx5_unmap_free_uar(mdev, &res->cq_uar); } -int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev) +int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev, + bool enable_uc_lb) { struct mlx5e_tir *tir; void *in; @@ -149,6 +150,10 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev) if (!in) return -ENOMEM; + if (enable_uc_lb) + MLX5_SET(modify_tir_in, in, ctx.self_lb_block, + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_); + MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 762af16ed021..7f6c225666c1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -38,16 +38,77 @@ #define MLX5E_100MB (100000) #define MLX5E_1GB (1000000) +#define MLX5E_CEE_STATE_UP 1 +#define MLX5E_CEE_STATE_DOWN 0 + +/* If dcbx mode is non-host set the dcbx mode to host. 
+ */ +static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv, + enum mlx5_dcbx_oper_mode mode) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 param[MLX5_ST_SZ_DW(dcbx_param)]; + int err; + + err = mlx5_query_port_dcbx_param(mdev, param); + if (err) + return err; + + MLX5_SET(dcbx_param, param, version_admin, mode); + if (mode != MLX5E_DCBX_PARAM_VER_OPER_HOST) + MLX5_SET(dcbx_param, param, willing_admin, 1); + + return mlx5_set_port_dcbx_param(mdev, param); +} + +static int mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv *priv) +{ + struct mlx5e_dcbx *dcbx = &priv->dcbx; + int err; + + if (!MLX5_CAP_GEN(priv->mdev, dcbx)) + return 0; + + if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST) + return 0; + + err = mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_HOST); + if (err) + return err; + + dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_HOST; + return 0; +} + static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev, struct ieee_ets *ets) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + int err = 0; + int i; if (!MLX5_CAP_GEN(priv->mdev, ets)) return -ENOTSUPP; - memcpy(ets, &priv->params.ets, sizeof(*ets)); - return 0; + ets->ets_cap = mlx5_max_tc(priv->mdev) + 1; + for (i = 0; i < ets->ets_cap; i++) { + err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]); + if (err) + return err; + } + + for (i = 0; i < ets->ets_cap; i++) { + err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]); + if (err) + return err; + if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC) + priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; + } + + memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa)); + + return err; } enum { @@ -110,9 +171,6 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets) int max_tc = mlx5_max_tc(mdev); int err; - if (!MLX5_CAP_GEN(mdev, ets)) - return -ENOTSUPP; - mlx5e_build_tc_group(ets, tc_group, max_tc); mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc); @@ -124,7 +182,14 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets) if (err) return err; - return mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw); + err = mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw); + + if (err) + return err; + + memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa)); + + return err; } static int mlx5e_dbcnl_validate_ets(struct net_device *netdev, @@ -170,6 +235,9 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev, struct mlx5e_priv *priv = netdev_priv(netdev); int err; + if (!MLX5_CAP_GEN(priv->mdev, ets)) + return -ENOTSUPP; + err = mlx5e_dbcnl_validate_ets(netdev, ets); if (err) return err; @@ -178,9 +246,6 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev, if (err) return err; - memcpy(&priv->params.ets, ets, sizeof(*ets)); - priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1; - return 0; } @@ -222,13 +287,39 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev, static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev) { - return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_dcbx *dcbx = &priv->dcbx; + u8 mode = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_VER_CEE; + + if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST) + mode |= DCB_CAP_DCBX_HOST; + + return mode; } static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode) { + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_dcbx *dcbx = &priv->dcbx; + + if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) { + if (dcbx->mode == 
MLX5E_DCBX_PARAM_VER_OPER_AUTO) + return 0; + + /* set dcbx to fw controlled */ + if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) { + dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO; + return 0; + } + + return 1; + } + + if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev))) + return 1; + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || - (mode & DCB_CAP_DCBX_VER_CEE) || + !(mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE) || !(mode & DCB_CAP_DCBX_HOST)) return 1; @@ -304,6 +395,284 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev, return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit); } +static u8 mlx5e_dcbnl_setall(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg; + struct mlx5_core_dev *mdev = priv->mdev; + struct ieee_ets ets; + struct ieee_pfc pfc; + int err = -ENOTSUPP; + int i; + + if (!MLX5_CAP_GEN(mdev, ets)) + goto out; + + memset(&ets, 0, sizeof(ets)); + memset(&pfc, 0, sizeof(pfc)); + + ets.ets_cap = IEEE_8021QAZ_MAX_TCS; + for (i = 0; i < CEE_DCBX_MAX_PGS; i++) { + ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i]; + ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i]; + ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; + ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i]; + } + + err = mlx5e_dbcnl_validate_ets(netdev, &ets); + if (err) { + netdev_err(netdev, + "%s, Failed to validate ETS: %d\n", __func__, err); + goto out; + } + + err = mlx5e_dcbnl_ieee_setets_core(priv, &ets); + if (err) { + netdev_err(netdev, + "%s, Failed to set ETS: %d\n", __func__, err); + goto out; + } + + /* Set PFC */ + pfc.pfc_cap = mlx5_max_tc(mdev) + 1; + if (!cee_cfg->pfc_enable) + pfc.pfc_en = 0; + else + for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) + pfc.pfc_en |= cee_cfg->pfc_setting[i] << i; + + err = mlx5e_dcbnl_ieee_setpfc(netdev, &pfc); + if (err) { + netdev_err(netdev, + "%s, Failed to set PFC: %d\n", __func__, err); + goto out; + } +out: + return err ? 
MLX5_DCB_NO_CHG : MLX5_DCB_CHG_RESET; +} + +static u8 mlx5e_dcbnl_getstate(struct net_device *netdev) +{ + return MLX5E_CEE_STATE_UP; +} + +static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev, + u8 *perm_addr) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + if (!perm_addr) + return; + + mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr); +} + +static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev, + int priority, u8 prio_type, + u8 pgid, u8 bw_pct, u8 up_map) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg; + + if (priority >= CEE_DCBX_MAX_PRIO) { + netdev_err(netdev, + "%s, priority is out of range\n", __func__); + return; + } + + if (pgid >= CEE_DCBX_MAX_PGS) { + netdev_err(netdev, + "%s, priority group is out of range\n", __func__); + return; + } + + cee_cfg->prio_to_pg_map[priority] = pgid; +} + +static void mlx5e_dcbnl_setpgbwgcfgtx(struct net_device *netdev, + int pgid, u8 bw_pct) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg; + + if (pgid >= CEE_DCBX_MAX_PGS) { + netdev_err(netdev, + "%s, priority group is out of range\n", __func__); + return; + } + + cee_cfg->pg_bw_pct[pgid] = bw_pct; +} + +static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev, + int priority, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + + if (priority >= CEE_DCBX_MAX_PRIO) { + netdev_err(netdev, + "%s, priority is out of range\n", __func__); + return; + } + + *prio_type = 0; + *bw_pct = 0; + *up_map = 0; + + if (mlx5_query_port_prio_tc(mdev, priority, pgid)) + *pgid = 0; +} + +static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev, + int pgid, u8 *bw_pct) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + + if (pgid >= CEE_DCBX_MAX_PGS) { + netdev_err(netdev, + "%s, priority group is out of range\n", __func__); + return; + } + + if (mlx5_query_port_tc_bw_alloc(mdev, pgid, bw_pct)) + *bw_pct = 0; +} + +static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev, + int priority, u8 setting) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg; + + if (priority >= CEE_DCBX_MAX_PRIO) { + netdev_err(netdev, + "%s, priority is out of range\n", __func__); + return; + } + + if (setting > 1) + return; + + cee_cfg->pfc_setting[priority] = setting; +} + +static int +mlx5e_dcbnl_get_priority_pfc(struct net_device *netdev, + int priority, u8 *setting) +{ + struct ieee_pfc pfc; + int err; + + err = mlx5e_dcbnl_ieee_getpfc(netdev, &pfc); + + if (err) + *setting = 0; + else + *setting = (pfc.pfc_en >> priority) & 0x01; + + return err; +} + +static void mlx5e_dcbnl_getpfccfg(struct net_device *netdev, + int priority, u8 *setting) +{ + if (priority >= CEE_DCBX_MAX_PRIO) { + netdev_err(netdev, + "%s, priority is out of range\n", __func__); + return; + } + + if (!setting) + return; + + mlx5e_dcbnl_get_priority_pfc(netdev, priority, setting); +} + +static u8 mlx5e_dcbnl_getcap(struct net_device *netdev, + int capid, u8 *cap) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u8 rval = 0; + + switch (capid) { + case DCB_CAP_ATTR_PG: + *cap = true; + break; + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_UP2TC: + *cap = false; + break; + case DCB_CAP_ATTR_PG_TCS: + *cap = 1 << 
mlx5_max_tc(mdev); + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 1 << mlx5_max_tc(mdev); + break; + case DCB_CAP_ATTR_GSP: + *cap = false; + break; + case DCB_CAP_ATTR_BCN: + *cap = false; + break; + case DCB_CAP_ATTR_DCBX: + *cap = (DCB_CAP_DCBX_LLD_MANAGED | + DCB_CAP_DCBX_VER_CEE | + DCB_CAP_DCBX_STATIC); + break; + default: + *cap = 0; + rval = 1; + break; + } + + return rval; +} + +static int mlx5e_dcbnl_getnumtcs(struct net_device *netdev, + int tcs_id, u8 *num) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + + switch (tcs_id) { + case DCB_NUMTCS_ATTR_PG: + case DCB_NUMTCS_ATTR_PFC: + *num = mlx5_max_tc(mdev) + 1; + break; + default: + return -EINVAL; + } + + return 0; +} + +static u8 mlx5e_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct ieee_pfc pfc; + + if (mlx5e_dcbnl_ieee_getpfc(netdev, &pfc)) + return MLX5E_CEE_STATE_DOWN; + + return pfc.pfc_en ? MLX5E_CEE_STATE_UP : MLX5E_CEE_STATE_DOWN; +} + +static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg; + + if ((state != MLX5E_CEE_STATE_UP) && (state != MLX5E_CEE_STATE_DOWN)) + return; + + cee_cfg->pfc_enable = state; +} + const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = { .ieee_getets = mlx5e_dcbnl_ieee_getets, .ieee_setets = mlx5e_dcbnl_ieee_setets, @@ -313,4 +682,70 @@ const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = { .ieee_setpfc = mlx5e_dcbnl_ieee_setpfc, .getdcbx = mlx5e_dcbnl_getdcbx, .setdcbx = mlx5e_dcbnl_setdcbx, + +/* CEE interfaces */ + .setall = mlx5e_dcbnl_setall, + .getstate = mlx5e_dcbnl_getstate, + .getpermhwaddr = mlx5e_dcbnl_getpermhwaddr, + + .setpgtccfgtx = mlx5e_dcbnl_setpgtccfgtx, + .setpgbwgcfgtx = mlx5e_dcbnl_setpgbwgcfgtx, + .getpgtccfgtx = mlx5e_dcbnl_getpgtccfgtx, + .getpgbwgcfgtx = mlx5e_dcbnl_getpgbwgcfgtx, + + .setpfccfg = mlx5e_dcbnl_setpfccfg, + .getpfccfg = mlx5e_dcbnl_getpfccfg, + .getcap = mlx5e_dcbnl_getcap, + .getnumtcs = mlx5e_dcbnl_getnumtcs, + .getpfcstate = mlx5e_dcbnl_getpfcstate, + .setpfcstate = mlx5e_dcbnl_setpfcstate, }; + +static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv, + enum mlx5_dcbx_oper_mode *mode) +{ + u32 out[MLX5_ST_SZ_DW(dcbx_param)]; + + *mode = MLX5E_DCBX_PARAM_VER_OPER_HOST; + + if (!mlx5_query_port_dcbx_param(priv->mdev, out)) + *mode = MLX5_GET(dcbx_param, out, version_oper); + + /* From driver's point of view, we only care if the mode + * is host (HOST) or non-host (AUTO) + */ + if (*mode != MLX5E_DCBX_PARAM_VER_OPER_HOST) + *mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO; +} + +static void mlx5e_ets_init(struct mlx5e_priv *priv) +{ + int i; + struct ieee_ets ets; + + memset(&ets, 0, sizeof(ets)); + ets.ets_cap = mlx5_max_tc(priv->mdev) + 1; + for (i = 0; i < ets.ets_cap; i++) { + ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; + ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR; + ets.prio_tc[i] = i; + } + + memcpy(priv->dcbx.tc_tsa, ets.tc_tsa, sizeof(ets.tc_tsa)); + + /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */ + ets.prio_tc[0] = 1; + ets.prio_tc[1] = 0; + + mlx5e_dcbnl_ieee_setets_core(priv, &ets); +} + +void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) +{ + struct mlx5e_dcbx *dcbx = &priv->dcbx; + + if (MLX5_CAP_GEN(priv->mdev, dcbx)) + mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode); + + mlx5e_ets_init(priv); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 27ff401cec20..352462af8d51 100644 
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -171,11 +171,17 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) return NUM_SW_COUNTERS + MLX5E_NUM_Q_CNTRS(priv) + NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS + + NUM_PCIE_COUNTERS + MLX5E_NUM_RQ_STATS(priv) + MLX5E_NUM_SQ_STATS(priv) + - MLX5E_NUM_PFC_COUNTERS(priv); + MLX5E_NUM_PFC_COUNTERS(priv) + + ARRAY_SIZE(mlx5e_pme_status_desc) + + ARRAY_SIZE(mlx5e_pme_error_desc); + case ETH_SS_PRIV_FLAGS: return ARRAY_SIZE(mlx5e_priv_flags); + case ETH_SS_TEST: + return mlx5e_self_test_num(priv); /* fallthrough */ default: return -EOPNOTSUPP; @@ -213,6 +219,14 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format); + for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pcie_perf_stats_desc[i].format); + + for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pcie_tas_stats_desc[i].format); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) sprintf(data + (idx++) * ETH_GSTRING_LEN, @@ -237,6 +251,13 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) } } + /* port module event counters */ + for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format); + + for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format); + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return; @@ -267,6 +288,9 @@ static void mlx5e_get_strings(struct net_device *dev, break; case ETH_SS_TEST: + for (i = 0; i < mlx5e_self_test_num(priv); i++) + strcpy(data + i * ETH_GSTRING_LEN, + mlx5e_self_tests[i]); break; case ETH_SS_STATS: @@ -279,6 +303,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_priv *mlx5_priv; int i, j, tc, prio, idx = 0; unsigned long pfc_combined; @@ -314,6 +339,14 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters, pport_2819_stats_desc, i); + for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, + pcie_perf_stats_desc, i); + + for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_tas_counters, + pcie_tas_stats_desc, i); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], @@ -335,6 +368,16 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, } } + /* port module event counters */ + mlx5_priv = &priv->mdev->priv; + for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++) + data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters, + mlx5e_pme_status_desc, i); + + for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++) + data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters, + mlx5e_pme_error_desc, i); + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return; @@ -456,8 +499,7 @@ static int mlx5e_set_ringparam(struct net_device *dev, return -EINVAL; } - num_mtts = 
MLX5E_REQUIRED_MTTS(priv->params.num_channels, - rx_pending_wqes); + num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes); if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && !MLX5E_VALID_NUM_MTTS(num_mtts)) { netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n", @@ -522,7 +564,6 @@ static int mlx5e_set_channels(struct net_device *dev, unsigned int count = ch->combined_count; bool arfs_enabled; bool was_opened; - u32 num_mtts; int err = 0; if (!count) { @@ -541,14 +582,6 @@ static int mlx5e_set_channels(struct net_device *dev, return -EINVAL; } - num_mtts = MLX5E_REQUIRED_MTTS(count, BIT(priv->params.log_rq_size)); - if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && - !MLX5E_VALID_NUM_MTTS(num_mtts)) { - netdev_info(dev, "%s: rx count (%d) request can't be satisfied, try to reduce.\n", - __func__, count); - return -EINVAL; - } - if (priv->params.num_channels == count) return 0; @@ -1438,6 +1471,35 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable) return err; } +static int set_pflag_rx_cqe_compress(struct net_device *netdev, + bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + int err = 0; + bool reset; + + if (!MLX5_CAP_GEN(mdev, cqe_compression)) + return -ENOTSUPP; + + if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { + netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n"); + return -EINVAL; + } + + reset = test_bit(MLX5E_STATE_OPENED, &priv->state); + + if (reset) + mlx5e_close_locked(netdev); + + MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, enable); + priv->params.rx_cqe_compress_def = enable; + + if (reset) + err = mlx5e_open_locked(netdev); + return err; +} + static int mlx5e_handle_pflag(struct net_device *netdev, u32 wanted_flags, enum mlx5e_priv_flag flag, @@ -1445,7 +1507,7 @@ static int mlx5e_handle_pflag(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); bool enable = !!(wanted_flags & flag); - u32 changes = wanted_flags ^ priv->pflags; + u32 changes = wanted_flags ^ priv->params.pflags; int err; if (!(changes & flag)) @@ -1458,7 +1520,7 @@ static int mlx5e_handle_pflag(struct net_device *netdev, return err; } - MLX5E_SET_PRIV_FLAG(priv, flag, enable); + MLX5E_SET_PFLAG(priv, flag, enable); return 0; } @@ -1468,20 +1530,26 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags) int err; mutex_lock(&priv->state_lock); - err = mlx5e_handle_pflag(netdev, pflags, MLX5E_PFLAG_RX_CQE_BASED_MODER, set_pflag_rx_cqe_based_moder); + if (err) + goto out; + + err = mlx5e_handle_pflag(netdev, pflags, + MLX5E_PFLAG_RX_CQE_COMPRESS, + set_pflag_rx_cqe_compress); +out: mutex_unlock(&priv->state_lock); - return err ? 
-EINVAL : 0; + return err; } static u32 mlx5e_get_priv_flags(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); - return priv->pflags; + return priv->params.pflags; } static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) @@ -1535,5 +1603,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .get_module_info = mlx5e_get_module_info, .get_module_eeprom = mlx5e_get_module_eeprom, .get_priv_flags = mlx5e_get_priv_flags, - .set_priv_flags = mlx5e_set_priv_flags + .set_priv_flags = mlx5e_set_priv_flags, + .self_test = mlx5e_self_test, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 36fbc6b21a33..1fe80de5d68f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -158,9 +158,14 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, enum mlx5e_vlan_rule_type rule_type, u16 vid, struct mlx5_flow_spec *spec) { + struct mlx5_flow_act flow_act = { + .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG, + .encap_id = 0, + }; struct mlx5_flow_table *ft = priv->fs.vlan.ft.t; struct mlx5_flow_destination dest; - struct mlx5_flow_rule **rule_p; + struct mlx5_flow_handle **rule_p; int err = 0; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; @@ -187,10 +192,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, break; } - *rule_p = mlx5_add_flow_rule(ft, spec, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, - &dest); + *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(*rule_p)) { err = PTR_ERR(*rule_p); @@ -229,20 +231,20 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, switch (rule_type) { case MLX5E_VLAN_RULE_TYPE_UNTAGGED: if (priv->fs.vlan.untagged_rule) { - mlx5_del_flow_rule(priv->fs.vlan.untagged_rule); + mlx5_del_flow_rules(priv->fs.vlan.untagged_rule); priv->fs.vlan.untagged_rule = NULL; } break; case MLX5E_VLAN_RULE_TYPE_ANY_VID: if (priv->fs.vlan.any_vlan_rule) { - mlx5_del_flow_rule(priv->fs.vlan.any_vlan_rule); + mlx5_del_flow_rules(priv->fs.vlan.any_vlan_rule); priv->fs.vlan.any_vlan_rule = NULL; } break; case MLX5E_VLAN_RULE_TYPE_MATCH_VID: mlx5e_vport_context_update_vlans(priv); if (priv->fs.vlan.active_vlans_rule[vid]) { - mlx5_del_flow_rule(priv->fs.vlan.active_vlans_rule[vid]); + mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]); priv->fs.vlan.active_vlans_rule[vid] = NULL; } mlx5e_vport_context_update_vlans(priv); @@ -560,7 +562,7 @@ static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc) for (i = 0; i < MLX5E_NUM_TT; i++) { if (!IS_ERR_OR_NULL(ttc->rules[i])) { - mlx5_del_flow_rule(ttc->rules[i]); + mlx5_del_flow_rules(ttc->rules[i]); ttc->rules[i] = NULL; } } @@ -616,13 +618,19 @@ static struct { }, }; -static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, - struct mlx5_flow_table *ft, - struct mlx5_flow_destination *dest, - u16 etype, - u8 proto) +static struct mlx5_flow_handle * +mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, + struct mlx5_flow_table *ft, + struct mlx5_flow_destination *dest, + u16 etype, + u8 proto) { - struct mlx5_flow_rule *rule; + struct mlx5_flow_act flow_act = { + .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG, + .encap_id = 0, + }; + struct mlx5_flow_handle *rule; struct mlx5_flow_spec *spec; int err = 0; @@ -643,10 +651,7 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, 
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype); } - rule = mlx5_add_flow_rule(ft, spec, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, - dest); + rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); netdev_err(priv->netdev, "%s: add rule failed\n", __func__); @@ -660,7 +665,7 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv) { struct mlx5_flow_destination dest; struct mlx5e_ttc_table *ttc; - struct mlx5_flow_rule **rules; + struct mlx5_flow_handle **rules; struct mlx5_flow_table *ft; int tt; int err; @@ -776,7 +781,7 @@ static int mlx5e_create_ttc_table(struct mlx5e_priv *priv) int err; ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO, - MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL); + MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL, 0); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); ft->t = NULL; @@ -801,7 +806,7 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, struct mlx5e_l2_rule *ai) { if (!IS_ERR_OR_NULL(ai->rule)) { - mlx5_del_flow_rule(ai->rule); + mlx5_del_flow_rules(ai->rule); ai->rule = NULL; } } @@ -809,6 +814,11 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, struct mlx5e_l2_rule *ai, int type) { + struct mlx5_flow_act flow_act = { + .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG, + .encap_id = 0, + }; struct mlx5_flow_table *ft = priv->fs.l2.ft.t; struct mlx5_flow_destination dest; struct mlx5_flow_spec *spec; @@ -847,9 +857,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, break; } - ai->rule = mlx5_add_flow_rule(ft, spec, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); + ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(ai->rule)) { netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n", __func__, mv_dmac); @@ -947,7 +955,7 @@ static int mlx5e_create_l2_table(struct mlx5e_priv *priv) ft->num_groups = 0; ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO, - MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL); + MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL, 0); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); @@ -1037,7 +1045,7 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv) ft->num_groups = 0; ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO, - MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL); + MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL, 0); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index d17c24227900..3691451c728c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -36,7 +36,7 @@ struct mlx5e_ethtool_rule { struct list_head list; struct ethtool_rx_flow_spec flow_spec; - struct mlx5_flow_rule *rule; + struct mlx5_flow_handle *rule; struct mlx5e_ethtool_table *eth_ft; }; @@ -99,7 +99,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv, MLX5E_ETHTOOL_NUM_ENTRIES); ft = mlx5_create_auto_grouped_flow_table(ns, prio, table_size, - MLX5E_ETHTOOL_NUM_GROUPS, 0); + MLX5E_ETHTOOL_NUM_GROUPS, 0, 0); if (IS_ERR(ft)) return (void *)ft; @@ -284,15 +284,16 @@ static bool outer_header_zero(u32 *match_criteria) size - 1); } -static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv, - struct mlx5_flow_table *ft, - struct 
ethtool_rx_flow_spec *fs) +static struct mlx5_flow_handle * +add_ethtool_flow_rule(struct mlx5e_priv *priv, + struct mlx5_flow_table *ft, + struct ethtool_rx_flow_spec *fs) { struct mlx5_flow_destination *dst = NULL; + struct mlx5_flow_act flow_act = {0}; struct mlx5_flow_spec *spec; - struct mlx5_flow_rule *rule; + struct mlx5_flow_handle *rule; int err = 0; - u32 action; spec = mlx5_vzalloc(sizeof(*spec)); if (!spec) @@ -303,7 +304,7 @@ static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv, goto free; if (fs->ring_cookie == RX_CLS_FLOW_DISC) { - action = MLX5_FLOW_CONTEXT_ACTION_DROP; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; } else { dst = kzalloc(sizeof(*dst), GFP_KERNEL); if (!dst) { @@ -313,12 +314,12 @@ static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv, dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR; dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn; - action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; } spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria)); - rule = mlx5_add_flow_rule(ft, spec, action, - MLX5_FS_DEFAULT_FLOW_TAG, dst); + flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; + rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n", @@ -335,7 +336,7 @@ static void del_ethtool_rule(struct mlx5e_priv *priv, struct mlx5e_ethtool_rule *eth_rule) { if (eth_rule->rule) - mlx5_del_flow_rule(eth_rule->rule); + mlx5_del_flow_rules(eth_rule->rule); list_del(&eth_rule->list); priv->fs.ethtool.tot_num_rules--; put_flow_table(eth_rule->eth_ft); @@ -475,7 +476,7 @@ int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv, { struct mlx5e_ethtool_table *eth_ft; struct mlx5e_ethtool_rule *eth_rule; - struct mlx5_flow_rule *rule; + struct mlx5_flow_handle *rule; int num_tuples; int err;
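
All the flow-steering hunks above apply one API rework: the loose action and flow_tag arguments of mlx5_add_flow_rule() are packed into a struct mlx5_flow_act, rules are added through mlx5_add_flow_rules() with an explicit destination array and count, the returned type changes from struct mlx5_flow_rule to the opaque struct mlx5_flow_handle (released with mlx5_del_flow_rules()), and mlx5_create_flow_table() grows a trailing flags argument. A minimal before/after sketch using only symbols visible in this diff; it is an illustration, not an excerpt from the patch:

	/* ft, spec and dest are prepared by the caller, as in the hunks above */
	static struct mlx5_flow_handle *
	fwd_rule_reworked_api(struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_flow_destination *dest)
	{
		struct mlx5_flow_act flow_act = {
			.action   = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
			.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
			.encap_id = 0,
		};

		/* old API equivalent:
		 *	rule = mlx5_add_flow_rule(ft, spec,
		 *				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		 *				  MLX5_FS_DEFAULT_FLOW_TAG, dest);
		 *	...
		 *	mlx5_del_flow_rule(rule);
		 */
		return mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	}

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 84e8b250e2af..cbfa38fc72c0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -84,7 +84,8 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type) switch (priv->params.rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; - priv->params.mpwqe_log_stride_sz = priv->params.rx_cqe_compress ? + priv->params.mpwqe_log_stride_sz = + MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ?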
MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS : MLX5_MPWRQ_LOG_STRIDE_SIZE; priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - @@ -101,7 +102,7 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type) priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ, BIT(priv->params.log_rq_size), BIT(priv->params.mpwqe_log_stride_sz), - priv->params.rx_cqe_compress_admin); + MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS)); } static void mlx5e_set_rq_priv_params(struct mlx5e_priv *priv) @@ -290,12 +291,36 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv) &qcnt->rx_out_of_buffer); } +static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv) +{ + struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie; + struct mlx5_core_dev *mdev = priv->mdev; + int sz = MLX5_ST_SZ_BYTES(mpcnt_reg); + void *out; + u32 *in; + + in = mlx5_vzalloc(sz); + if (!in) + return; + + out = pcie_stats->pcie_perf_counters; + MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); + + out = pcie_stats->pcie_tas_counters; + MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); + + kvfree(in); +} + void mlx5e_update_stats(struct mlx5e_priv *priv) { mlx5e_update_q_counter(priv); mlx5e_update_vport_counters(priv); mlx5e_update_pport_counters(priv); mlx5e_update_sw_counters(priv); + mlx5e_update_pcie_counters(priv); } void mlx5e_update_stats_work(struct work_struct *work) @@ -446,14 +471,50 @@ static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq) kfree(rq->mpwqe.info); } -static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv) +static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv, + u64 npages, u8 page_shift, + struct mlx5_core_mkey *umr_mkey) { - struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv; + struct mlx5_core_dev *mdev = priv->mdev; + int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); + void *mkc; + u32 *in; + int err; - if (rep && rep->vport != FDB_UPLINK_VPORT) - return true; + if (!MLX5E_VALID_NUM_MTTS(npages)) + return -EINVAL; - return false; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + + MLX5_SET(mkc, mkc, free, 1); + MLX5_SET(mkc, mkc, umr_en, 1); + MLX5_SET(mkc, mkc, lw, 1); + MLX5_SET(mkc, mkc, lr, 1); + MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT); + + MLX5_SET(mkc, mkc, qpn, 0xffffff); + MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn); + MLX5_SET64(mkc, mkc, len, npages << page_shift); + MLX5_SET(mkc, mkc, translations_octword_size, + MLX5_MTT_OCTW(npages)); + MLX5_SET(mkc, mkc, log_page_size, page_shift); + + err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen); + + kvfree(in); + return err; +} + +static int mlx5e_create_rq_umr_mkey(struct mlx5e_rq *rq) +{ + struct mlx5e_priv *priv = rq->priv; + u64 num_mtts = MLX5E_REQUIRED_MTTS(BIT(priv->params.log_rq_size)); + + return mlx5e_create_umr_mkey(priv, num_mtts, PAGE_SHIFT, &rq->umr_mkey); } static int mlx5e_create_rq(struct mlx5e_channel *c, @@ -489,7 +550,13 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, rq->channel = c; rq->ix = c->ix; rq->priv = c->priv; - rq->xdp_prog = priv->xdp_prog; + + rq->xdp_prog = priv->xdp_prog ? 
bpf_prog_inc(priv->xdp_prog) : NULL; + if (IS_ERR(rq->xdp_prog)) { + err = PTR_ERR(rq->xdp_prog); + rq->xdp_prog = NULL; + goto err_rq_wq_destroy; + } rq->buff.map_dir = DMA_FROM_DEVICE; if (rq->xdp_prog) @@ -506,18 +573,20 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, rq->alloc_wqe = mlx5e_alloc_rx_mpwqe; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; - rq->mpwqe.mtt_offset = c->ix * - MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size)); - rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz); rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides); rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides; byte_count = rq->buff.wqe_sz; - rq->mkey_be = cpu_to_be32(c->priv->umr_mkey.key); - err = mlx5e_rq_alloc_mpwqe_info(rq, c); + + err = mlx5e_create_rq_umr_mkey(rq); if (err) goto err_rq_wq_destroy; + rq->mkey_be = cpu_to_be32(rq->umr_mkey.key); + + err = mlx5e_rq_alloc_mpwqe_info(rq, c); + if (err) + goto err_destroy_umr_mkey; break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ rq->dma_info = kzalloc_node(wq_sz * sizeof(*rq->dma_info), @@ -566,12 +635,14 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, rq->page_cache.head = 0; rq->page_cache.tail = 0; - if (rq->xdp_prog) - bpf_prog_add(rq->xdp_prog, 1); - return 0; +err_destroy_umr_mkey: + mlx5_core_destroy_mkey(mdev, &rq->umr_mkey); + err_rq_wq_destroy: + if (rq->xdp_prog) + bpf_prog_put(rq->xdp_prog); mlx5_wq_destroy(&rq->wq_ctrl); return err; @@ -587,6 +658,7 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq) switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: mlx5e_rq_free_mpwqe_info(rq); + mlx5_core_destroy_mkey(rq->priv->mdev, &rq->umr_mkey); break; default: /* MLX5_WQ_TYPE_LINKED_LIST */ kfree(rq->dma_info); @@ -759,6 +831,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, if (err) goto err_destroy_rq; + set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); if (err) goto err_disable_rq; @@ -773,6 +846,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, return 0; err_disable_rq: + clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); mlx5e_disable_rq(rq); err_destroy_rq: mlx5e_destroy_rq(rq); @@ -782,7 +856,7 @@ err_destroy_rq: static void mlx5e_close_rq(struct mlx5e_rq *rq) { - set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state); + clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */ cancel_work_sync(&rq->am.work); @@ -938,7 +1012,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; sq->max_inline = param->max_inline; sq->min_inline_mode = - MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5E_INLINE_MODE_VPORT_CONTEXT ? + MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT ? param->min_inline_mode : 0; err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu)); @@ -1006,7 +1080,6 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode); MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 
0 : 1); - MLX5_SET(sqc, sqc, flush_in_error_en, 1); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); MLX5_SET(wq, wq, uar_page, sq->uar.index); @@ -1083,6 +1156,7 @@ static int mlx5e_open_sq(struct mlx5e_channel *c, if (err) goto err_destroy_sq; + set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY, false, 0); if (err) @@ -1096,6 +1170,7 @@ static int mlx5e_open_sq(struct mlx5e_channel *c, return 0; err_disable_sq: + clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); mlx5e_disable_sq(sq); err_destroy_sq: mlx5e_destroy_sq(sq); @@ -1112,7 +1187,7 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq) static void mlx5e_close_sq(struct mlx5e_sq *sq) { - set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state); + clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); /* prevent netif_tx_wake_queue */ napi_synchronize(&sq->channel->napi); @@ -1181,7 +1256,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c, static void mlx5e_destroy_cq(struct mlx5e_cq *cq) { - mlx5_wq_destroy(&cq->wq_ctrl); + mlx5_cqwq_destroy(&cq->wq_ctrl); } static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) @@ -1198,7 +1273,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) int err; inlen = MLX5_ST_SZ_BYTES(create_cq_in) + - sizeof(u64) * cq->wq_ctrl.buf.npages; + sizeof(u64) * cq->wq_ctrl.frag_buf.npages; in = mlx5_vzalloc(inlen); if (!in) return -ENOMEM; @@ -1207,15 +1282,15 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) memcpy(cqc, param->cqc, sizeof(param->cqc)); - mlx5_fill_page_array(&cq->wq_ctrl.buf, - (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); + mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf, + (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); - MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - + MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); @@ -1533,7 +1608,6 @@ err_close_icosq_cq: err_napi_del: netif_napi_del(&c->napi); - napi_hash_del(&c->napi); kfree(c); return err; @@ -1554,9 +1628,6 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) mlx5e_close_cq(&c->icosq.cq); netif_napi_del(&c->napi); - napi_hash_del(&c->napi); - synchronize_rcu(); - kfree(c); } @@ -1649,7 +1720,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, } MLX5_SET(cqc, cqc, log_cq_size, log_cq_size); - if (priv->params.rx_cqe_compress) { + if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS)) { MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM); MLX5_SET(cqc, cqc, cqe_comp_en, 1); } @@ -2121,7 +2192,7 @@ int mlx5e_open_locked(struct net_device *netdev) goto err_clear_state_opened_flag; } - err = mlx5e_refresh_tirs_self_loopback_enable(priv->mdev); + err = mlx5e_refresh_tirs_self_loopback(priv->mdev, false); if (err) { netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n", __func__, err); @@ -2639,7 +2710,7 @@ mqprio: return mlx5e_setup_tc(dev, tc->tc); } -struct rtnl_link_stats64 * +static struct rtnl_link_stats64 * mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -2647,13 +2718,20 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) struct 
mlx5e_vport_stats *vstats = &priv->stats.vport; struct mlx5e_pport_stats *pstats = &priv->stats.pport; - stats->rx_packets = sstats->rx_packets; - stats->rx_bytes = sstats->rx_bytes; - stats->tx_packets = sstats->tx_packets; - stats->tx_bytes = sstats->tx_bytes; + if (mlx5e_is_uplink_rep(priv)) { + stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok); + stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok); + stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok); + stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok); + } else { + stats->rx_packets = sstats->rx_packets; + stats->rx_bytes = sstats->rx_bytes; + stats->tx_packets = sstats->tx_packets; + stats->tx_bytes = sstats->tx_bytes; + stats->tx_dropped = sstats->tx_queue_dropped; + } stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer; - stats->tx_dropped = sstats->tx_queue_dropped; stats->rx_length_errors = PPORT_802_3_GET(pstats, a_in_range_length_errors) + @@ -2850,31 +2928,13 @@ static int mlx5e_set_features(struct net_device *netdev, return err ? -EINVAL : 0; } -#define MXL5_HW_MIN_MTU 64 -#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN) - static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) { struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5_core_dev *mdev = priv->mdev; bool was_opened; - u16 max_mtu; - u16 min_mtu; int err = 0; bool reset; - mlx5_query_port_max_mtu(mdev, &max_mtu, 1); - - max_mtu = MLX5E_HW2SW_MTU(max_mtu); - min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU); - - if (new_mtu > max_mtu || new_mtu < min_mtu) { - netdev_err(netdev, - "%s: Bad MTU (%d), valid range is: [%d..%d]\n", - __func__, new_mtu, min_mtu, max_mtu); - return -EINVAL; - } - mutex_lock(&priv->state_lock); reset = !priv->params.lro_en && @@ -2944,6 +3004,20 @@ static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting) return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting); } + +static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, + int max_tx_rate) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + + if (min_tx_rate) + return -EOPNOTSUPP; + + return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1, + max_tx_rate); +} + static int mlx5_vport_link2ifla(u8 esw_link) { switch (esw_link) { @@ -3000,8 +3074,8 @@ static int mlx5e_get_vf_stats(struct net_device *dev, vf_stats); } -static void mlx5e_add_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) +void mlx5e_add_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -3014,8 +3088,8 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev, mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1); } -static void mlx5e_del_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) +void mlx5e_del_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -3092,7 +3166,7 @@ static void mlx5e_tx_timeout(struct net_device *dev) if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i))) continue; sched_work = true; - set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state); + clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n", i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc); } @@ -3109,6 +3183,11 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) bool reset, 
was_opened; int i; + if (prog && prog->xdp_adjust_head) { + netdev_err(netdev, "Does not support bpf_xdp_adjust_head()\n"); + return -EOPNOTSUPP; + } + mutex_lock(&priv->state_lock); if ((netdev->features & NETIF_F_LRO) && prog) { @@ -3123,11 +3202,21 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) if (was_opened && reset) mlx5e_close_locked(netdev); + if (was_opened && !reset) { + /* num_channels is invariant here, so we can take the + * batched reference right upfront. + */ + prog = bpf_prog_add(prog, priv->params.num_channels); + if (IS_ERR(prog)) { + err = PTR_ERR(prog); + goto unlock; + } + } - /* exchange programs */ + /* exchange programs, extra prog reference we got from caller + * as long as we don't fail from this point onwards. + */ old_prog = xchg(&priv->xdp_prog, prog); - if (prog) - bpf_prog_add(prog, 1); if (old_prog) bpf_prog_put(old_prog); @@ -3143,17 +3232,16 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) /* exchanging programs w/o reset, we update ref counts on behalf * of the channels RQs here. */ - bpf_prog_add(prog, priv->params.num_channels); for (i = 0; i < priv->params.num_channels; i++) { struct mlx5e_channel *c = priv->channel[i]; - set_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state); + clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state); napi_synchronize(&c->napi); /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */ old_prog = xchg(&c->rq.xdp_prog, prog); - clear_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state); + set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state); /* napi_schedule in case we have missed anything */ set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags); napi_schedule(&c->napi); @@ -3251,6 +3339,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { .ndo_set_vf_vlan = mlx5e_set_vf_vlan, .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk, .ndo_set_vf_trust = mlx5e_set_vf_trust, + .ndo_set_vf_rate = mlx5e_set_vf_rate, .ndo_get_vf_config = mlx5e_get_vf_config, .ndo_set_vf_link_state = mlx5e_set_vf_link_state, .ndo_get_vf_stats = mlx5e_get_vf_stats, @@ -3259,6 +3348,8 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mlx5e_netpoll, #endif + .ndo_has_offload_stats = mlx5e_has_offload_stats, + .ndo_get_offload_stats = mlx5e_get_offload_stats, }; static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) @@ -3295,24 +3386,6 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev) 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/; } -#ifdef CONFIG_MLX5_CORE_EN_DCB -static void mlx5e_ets_init(struct mlx5e_priv *priv) -{ - int i; - - priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1; - for (i = 0; i < priv->params.ets.ets_cap; i++) { - priv->params.ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; - priv->params.ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR; - priv->params.ets.prio_tc[i] = i; - } - - /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */ - priv->params.ets.prio_tc[0] = 1; - priv->params.ets.prio_tc[1] = 0; -} -#endif - void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev, u32 *indirection_rqt, int len, int num_channels) @@ -3387,14 +3460,13 @@ static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline_mode) { switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) { - case MLX5E_INLINE_MODE_L2: + case MLX5_CAP_INLINE_MODE_L2: *min_inline_mode = MLX5_INLINE_MODE_L2; break; - case MLX5E_INLINE_MODE_VPORT_CONTEXT: - mlx5_query_nic_vport_min_inline(mdev, - min_inline_mode); + case 
MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: + mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode); break; - case MLX5_INLINE_MODE_NOT_REQUIRED: + case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: *min_inline_mode = MLX5_INLINE_MODE_NONE; break; } @@ -3436,17 +3508,16 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; /* set CQE compression */ - priv->params.rx_cqe_compress_admin = false; + priv->params.rx_cqe_compress_def = false; if (MLX5_CAP_GEN(mdev, cqe_compression) && MLX5_CAP_GEN(mdev, vport_group_manager)) { mlx5e_get_max_linkspeed(mdev, &link_speed); mlx5e_get_pci_bw(mdev, &pci_bw); mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n", link_speed, pci_bw); - priv->params.rx_cqe_compress_admin = + priv->params.rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw); } - priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin; mlx5e_set_rq_priv_params(priv); if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) @@ -3477,12 +3548,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); /* Initialize pflags */ - MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, - priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE); - -#ifdef CONFIG_MLX5_CORE_EN_DCB - mlx5e_ets_init(priv); -#endif + MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, + priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE); + MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, priv->params.rx_cqe_compress_def); mutex_init(&priv->state_lock); @@ -3520,7 +3588,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) if (MLX5_CAP_GEN(mdev, vport_group_manager)) { netdev->netdev_ops = &mlx5e_netdev_ops_sriov; #ifdef CONFIG_MLX5_CORE_EN_DCB - netdev->dcbnl_ops = &mlx5e_dcbnl_ops; + if (MLX5_CAP_GEN(mdev, qos)) + netdev->dcbnl_ops = &mlx5e_dcbnl_ops; #endif } else { netdev->netdev_ops = &mlx5e_netdev_ops_basic; @@ -3616,43 +3685,6 @@ static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv) mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter); } -static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv) -{ - struct mlx5_core_dev *mdev = priv->mdev; - u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev), - BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW)); - int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); - void *mkc; - u32 *in; - int err; - - in = mlx5_vzalloc(inlen); - if (!in) - return -ENOMEM; - - mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); - - npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages); - - MLX5_SET(mkc, mkc, free, 1); - MLX5_SET(mkc, mkc, umr_en, 1); - MLX5_SET(mkc, mkc, lw, 1); - MLX5_SET(mkc, mkc, lr, 1); - MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT); - - MLX5_SET(mkc, mkc, qpn, 0xffffff); - MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn); - MLX5_SET64(mkc, mkc, len, npages << PAGE_SHIFT); - MLX5_SET(mkc, mkc, translations_octword_size, - MLX5_MTT_OCTW(npages)); - MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT); - - err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen); - - kvfree(in); - return err; -} - static void mlx5e_nic_init(struct mlx5_core_dev *mdev, struct net_device *netdev, const struct mlx5e_profile *profile, @@ -3674,6 +3706,9 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) if (MLX5_CAP_GEN(mdev, vport_group_manager)) mlx5_eswitch_unregister_vport_rep(esw, 0); + + if (priv->xdp_prog) + bpf_prog_put(priv->xdp_prog); } static int 
mlx5e_init_nic_rx(struct mlx5e_priv *priv) @@ -3756,7 +3791,7 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) } #ifdef CONFIG_MLX5_CORE_EN_DCB - mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets); + mlx5e_dcbnl_initialize(priv); #endif return 0; } @@ -3784,7 +3819,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) rep.load = mlx5e_nic_rep_load; rep.unload = mlx5e_nic_rep_unload; rep.vport = FDB_UPLINK_VPORT; - rep.priv_data = priv; + rep.netdev = netdev; mlx5_eswitch_register_vport_rep(esw, 0, &rep); } } @@ -3849,21 +3884,16 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev) { const struct mlx5e_profile *profile; struct mlx5e_priv *priv; + u16 max_mtu; int err; priv = netdev_priv(netdev); profile = priv->profile; clear_bit(MLX5E_STATE_DESTROYING, &priv->state); - err = mlx5e_create_umr_mkey(priv); - if (err) { - mlx5_core_err(mdev, "create umr mkey failed, %d\n", err); - goto out; - } - err = profile->init_tx(priv); if (err) - goto err_destroy_umr_mkey; + goto out; err = mlx5e_open_drop_rq(priv); if (err) { @@ -3879,6 +3909,11 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev) mlx5e_init_l2_addr(priv); + /* MTU range: 68 - hw-specific max */ + netdev->min_mtu = ETH_MIN_MTU; + mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1); + netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu); + mlx5e_set_dev_port_mtu(netdev); if (profile->enable) @@ -3898,9 +3933,6 @@ err_close_drop_rq: err_cleanup_tx: profile->cleanup_tx(priv); -err_destroy_umr_mkey: - mlx5_core_destroy_mkey(mdev, &priv->umr_mkey); - out: return err; } @@ -3949,7 +3981,6 @@ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev) profile->cleanup_rx(priv); mlx5e_close_drop_rq(priv); profile->cleanup_tx(priv); - mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey); cancel_delayed_work_sync(&priv->update_stats_work); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index bf1c09ca73c0..850378893b25 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -72,7 +72,29 @@ static void mlx5e_rep_get_strings(struct net_device *dev, } } -static void mlx5e_update_sw_rep_counters(struct mlx5e_priv *priv) +static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_eswitch_rep *rep = priv->ppriv; + struct rtnl_link_stats64 *vport_stats; + struct ifla_vf_stats vf_stats; + int err; + + err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats); + if (err) { + pr_warn("vport %d error %d reading stats\n", rep->vport, err); + return; + } + + vport_stats = &priv->stats.vf_vport; + /* flip tx/rx as we are reporting the counters for the switch vport */ + vport_stats->rx_packets = vf_stats.tx_packets; + vport_stats->rx_bytes = vf_stats.tx_bytes; + vport_stats->tx_packets = vf_stats.rx_packets; + vport_stats->tx_bytes = vf_stats.rx_bytes; +} + +static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv) { struct mlx5e_sw_stats *s = &priv->stats.sw; struct mlx5e_rq_stats *rq_stats; @@ -95,6 +117,12 @@ static void mlx5e_update_sw_rep_counters(struct mlx5e_priv *priv) } } +static void mlx5e_rep_update_stats(struct mlx5e_priv *priv) +{ + mlx5e_rep_update_sw_counters(priv); + mlx5e_rep_update_hw_counters(priv); +} + static void mlx5e_rep_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { @@ -106,7 +134,7 @@ static void 
mlx5e_rep_get_ethtool_stats(struct net_device *dev, mutex_lock(&priv->state_lock); if (test_bit(MLX5E_STATE_OPENED, &priv->state)) - mlx5e_update_sw_rep_counters(priv); + mlx5e_rep_update_sw_counters(priv); mutex_unlock(&priv->state_lock); for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++) @@ -180,7 +208,8 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) { - struct mlx5e_priv *priv = rep->priv_data; + struct net_device *netdev = rep->netdev; + struct mlx5e_priv *priv = netdev_priv(netdev); if (test_bit(MLX5E_STATE_OPENED, &priv->state)) return mlx5e_add_sqs_fwd_rules(priv); @@ -198,7 +227,8 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) { - struct mlx5e_priv *priv = rep->priv_data; + struct net_device *netdev = rep->netdev; + struct mlx5e_priv *priv = netdev_priv(netdev); if (test_bit(MLX5E_STATE_OPENED, &priv->state)) mlx5e_remove_sqs_fwd_rules(priv); @@ -208,6 +238,35 @@ void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, mlx5e_tc_init(priv); } +static int mlx5e_rep_open(struct net_device *dev) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_eswitch_rep *rep = priv->ppriv; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + int err; + + err = mlx5e_open(dev); + if (err) + return err; + + err = mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP); + if (!err) + netif_carrier_on(dev); + + return 0; +} + +static int mlx5e_rep_close(struct net_device *dev) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_eswitch_rep *rep = priv->ppriv; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + (void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN); + + return mlx5e_close(dev); +} + static int mlx5e_rep_get_phys_port_name(struct net_device *dev, char *buf, size_t len) { @@ -230,6 +289,14 @@ static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle, if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) return -EOPNOTSUPP; + if (tc->egress_dev) { + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct net_device *uplink_dev = mlx5_eswitch_get_uplink_netdev(esw); + + return uplink_dev->netdev_ops->ndo_setup_tc(uplink_dev, handle, + proto, tc); + } + switch (tc->type) { case TC_SETUP_CLSFLOWER: switch (tc->cls_flower->command) { @@ -245,17 +312,92 @@ static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle, } } +bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) +{ + struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + if (rep && rep->vport == FDB_UPLINK_VPORT && esw->mode == SRIOV_OFFLOADS) + return true; + + return false; +} + +bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv) +{ + struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv; + + if (rep && rep->vport != FDB_UPLINK_VPORT) + return true; + + return false; +} + +bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + switch (attr_id) { + case IFLA_OFFLOAD_XSTATS_CPU_HIT: + if (mlx5e_is_vf_vport_rep(priv) || mlx5e_is_uplink_rep(priv)) + return true; + } + + return false; +} + +static int +mlx5e_get_sw_stats64(const struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_sw_stats *sstats = &priv->stats.sw; + + stats->rx_packets = 
sstats->rx_packets; + stats->rx_bytes = sstats->rx_bytes; + stats->tx_packets = sstats->tx_packets; + stats->tx_bytes = sstats->tx_bytes; + + stats->tx_dropped = sstats->tx_queue_dropped; + + return 0; +} + +int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, + void *sp) +{ + switch (attr_id) { + case IFLA_OFFLOAD_XSTATS_CPU_HIT: + return mlx5e_get_sw_stats64(dev, sp); + } + + return -EINVAL; +} + +static struct rtnl_link_stats64 * +mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + memcpy(stats, &priv->stats.vf_vport, sizeof(*stats)); + return stats; +} + static const struct switchdev_ops mlx5e_rep_switchdev_ops = { .switchdev_port_attr_get = mlx5e_attr_get, }; static const struct net_device_ops mlx5e_netdev_ops_rep = { - .ndo_open = mlx5e_open, - .ndo_stop = mlx5e_close, + .ndo_open = mlx5e_rep_open, + .ndo_stop = mlx5e_rep_close, .ndo_start_xmit = mlx5e_xmit, .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name, .ndo_setup_tc = mlx5e_rep_ndo_setup_tc, - .ndo_get_stats64 = mlx5e_get_stats, + .ndo_get_stats64 = mlx5e_rep_get_stats, + .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, + .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, + .ndo_has_offload_stats = mlx5e_has_offload_stats, + .ndo_get_offload_stats = mlx5e_get_offload_stats, }; static void mlx5e_build_rep_netdev_priv(struct mlx5_core_dev *mdev, @@ -328,7 +470,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_eswitch_rep *rep = priv->ppriv; struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5_flow_rule *flow_rule; + struct mlx5_flow_handle *flow_rule; int err; int i; @@ -360,7 +502,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) return 0; err_del_flow_rule: - mlx5_del_flow_rule(rep->vport_rx_rule); + mlx5_del_flow_rules(rep->vport_rx_rule); err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv); err_destroy_direct_rqts: @@ -375,7 +517,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) int i; mlx5e_tc_cleanup(priv); - mlx5_del_flow_rule(rep->vport_rx_rule); + mlx5_del_flow_rules(rep->vport_rx_rule); mlx5e_destroy_direct_tirs(priv); for (i = 0; i < priv->params.num_channels; i++) mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); @@ -405,7 +547,7 @@ static struct mlx5e_profile mlx5e_rep_profile = { .cleanup_rx = mlx5e_cleanup_rep_rx, .init_tx = mlx5e_init_rep_tx, .cleanup_tx = mlx5e_cleanup_nic_tx, - .update_stats = mlx5e_update_sw_rep_counters, + .update_stats = mlx5e_rep_update_stats, .max_nch = mlx5e_get_rep_max_num_channels, .max_tc = 1, }; @@ -423,7 +565,7 @@ int mlx5e_vport_rep_load(struct mlx5_eswitch *esw, return -EINVAL; } - rep->priv_data = netdev_priv(netdev); + rep->netdev = netdev; err = mlx5e_attach_netdev(esw->dev, netdev); if (err) { @@ -445,7 +587,7 @@ err_detach_netdev: mlx5e_detach_netdev(esw->dev, netdev); err_destroy_netdev: - mlx5e_destroy_netdev(esw->dev, rep->priv_data); + mlx5e_destroy_netdev(esw->dev, netdev_priv(netdev)); return err; @@ -454,10 +596,9 @@ err_destroy_netdev: void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) { - struct mlx5e_priv *priv = rep->priv_data; - struct net_device *netdev = priv->netdev; + struct net_device *netdev = rep->netdev; unregister_netdev(netdev); mlx5e_detach_netdev(esw->dev, netdev); - mlx5e_destroy_netdev(esw->dev, priv); + mlx5e_destroy_netdev(esw->dev, netdev_priv(netdev)); }
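
The pair of ndos added to mlx5e_netdev_ops_rep above work as a capability probe plus a getter: ndo_has_offload_stats() answers whether IFLA_OFFLOAD_XSTATS_CPU_HIT is supported for the device, and ndo_get_offload_stats() then fills a struct rtnl_link_stats64 with the software ("CPU hit") counters. A caller-side sketch with a hypothetical helper name, assuming only the signatures in this diff and the standard net_device_ops layout:

	#include <linux/netdevice.h>

	/* hypothetical helper: fetch a rep's CPU-hit stats the way the
	 * rtnetlink offload-xstats code consumes these two hooks */
	static int query_cpu_hit_stats(struct net_device *dev,
				       struct rtnl_link_stats64 *sp)
	{
		const struct net_device_ops *ops = dev->netdev_ops;

		if (!ops->ndo_has_offload_stats ||
		    !ops->ndo_has_offload_stats(dev, IFLA_OFFLOAD_XSTATS_CPU_HIT))
			return -EOPNOTSUPP;

		/* for mlx5e reps this lands in mlx5e_get_offload_stats() above */
		return ops->ndo_get_offload_stats(IFLA_OFFLOAD_XSTATS_CPU_HIT,
						  dev, sp);
	}

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 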
b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index c6de6fba5843..0e2fb3ed1790 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -164,14 +164,14 @@ void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val) mutex_lock(&priv->state_lock); - if (priv->params.rx_cqe_compress == val) + if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val) goto unlock; was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); if (was_opened) mlx5e_close_locked(priv->netdev); - priv->params.rx_cqe_compress = val; + MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, val); if (was_opened) mlx5e_open_locked(priv->netdev); @@ -340,7 +340,7 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix) while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) { sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; sq->db.ico_wqe[pi].num_wqebbs = 1; - mlx5e_send_nop(sq, true); + mlx5e_send_nop(sq, false); } wqe = mlx5_wq_cyc_get_wqe(wq, pi); @@ -412,7 +412,7 @@ void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state); - if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) { + if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) { mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]); return; } @@ -445,7 +445,7 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) } #define RQ_CANNOT_POST(rq) \ - (test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state) || \ + (!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state) || \ test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) @@ -737,10 +737,10 @@ static inline struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, u16 wqe_counter, u32 cqe_bcnt) { - struct bpf_prog *xdp_prog = READ_ONCE(rq->xdp_prog); struct mlx5e_dma_info *di; struct sk_buff *skb; void *va, *data; + bool consumed; di = &rq->dma_info[wqe_counter]; va = page_address(di->page); @@ -759,7 +759,11 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, return NULL; } - if (mlx5e_xdp_handle(rq, xdp_prog, di, data, cqe_bcnt)) + rcu_read_lock(); + consumed = mlx5e_xdp_handle(rq, READ_ONCE(rq->xdp_prog), di, data, + cqe_bcnt); + rcu_read_unlock(); + if (consumed) return NULL; /* page/packet was consumed by XDP */ skb = build_skb(va, RQ_PAGE_SIZE(rq)); @@ -924,7 +928,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) struct mlx5e_sq *xdp_sq = &rq->channel->xdp_sq; int work_done = 0; - if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) + if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return 0; if (cq->decmprs_left) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c new file mode 100644 index 000000000000..65442c36a6e1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -0,0 +1,352 @@ +/* + * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/ip.h> +#include <linux/udp.h> +#include <net/udp.h> +#include "en.h" + +enum { + MLX5E_ST_LINK_STATE, + MLX5E_ST_LINK_SPEED, + MLX5E_ST_HEALTH_INFO, +#ifdef CONFIG_INET + MLX5E_ST_LOOPBACK, +#endif + MLX5E_ST_NUM, +}; + +const char mlx5e_self_tests[MLX5E_ST_NUM][ETH_GSTRING_LEN] = { + "Link Test", + "Speed Test", + "Health Test", +#ifdef CONFIG_INET + "Loopback Test", +#endif +}; + +int mlx5e_self_test_num(struct mlx5e_priv *priv) +{ + return ARRAY_SIZE(mlx5e_self_tests); +} + +static int mlx5e_test_health_info(struct mlx5e_priv *priv) +{ + struct mlx5_core_health *health = &priv->mdev->priv.health; + + return health->sick ? 1 : 0; +} + +static int mlx5e_test_link_state(struct mlx5e_priv *priv) +{ + u8 port_state; + + if (!netif_carrier_ok(priv->netdev)) + return 1; + + port_state = mlx5_query_vport_state(priv->mdev, MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0); + return port_state == VPORT_STATE_UP ? 
0 : 1; +} + +static int mlx5e_test_link_speed(struct mlx5e_priv *priv) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + u32 eth_proto_oper; + int i; + + if (!netif_carrier_ok(priv->netdev)) + return 1; + + if (mlx5_query_port_ptys(priv->mdev, out, sizeof(out), MLX5_PTYS_EN, 1)) + return 1; + + eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; i++) { + if (eth_proto_oper & MLX5E_PROT_MASK(i)) + return 0; + } + return 1; +} + +#ifdef CONFIG_INET +/* loopback test */ +#define MLX5E_TEST_PKT_SIZE (MLX5_MPWRQ_SMALL_PACKET_THRESHOLD - NET_IP_ALIGN) +static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST"; +#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL + +struct mlx5ehdr { + __be32 version; + __be64 magic; + char text[ETH_GSTRING_LEN]; +}; + +static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) +{ + struct sk_buff *skb = NULL; + struct mlx5ehdr *mlxh; + struct ethhdr *ethh; + struct udphdr *udph; + struct iphdr *iph; + int datalen, iplen; + + datalen = MLX5E_TEST_PKT_SIZE - + (sizeof(*ethh) + sizeof(*iph) + sizeof(*udph)); + + skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE); + if (!skb) { + netdev_err(priv->netdev, "\tFailed to alloc loopback skb\n"); + return NULL; + } + + prefetchw(skb->data); + skb_reserve(skb, NET_IP_ALIGN); + + /* Reserve for ethernet and IP header */ + ethh = (struct ethhdr *)skb_push(skb, ETH_HLEN); + skb_reset_mac_header(skb); + + skb_set_network_header(skb, skb->len); + iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr)); + + skb_set_transport_header(skb, skb->len); + udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); + + /* Fill ETH header */ + ether_addr_copy(ethh->h_dest, priv->netdev->dev_addr); + eth_zero_addr(ethh->h_source); + ethh->h_proto = htons(ETH_P_IP); + + /* Fill UDP header */ + udph->source = htons(9); + udph->dest = htons(9); /* Discard Protocol */ + udph->len = htons(datalen + sizeof(struct udphdr)); + udph->check = 0; + + /* Fill IP header */ + iph->ihl = 5; + iph->ttl = 32; + iph->version = 4; + iph->protocol = IPPROTO_UDP; + iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen; + iph->tot_len = htons(iplen); + iph->frag_off = 0; + iph->saddr = 0; + iph->daddr = 0; + iph->tos = 0; + iph->id = 0; + ip_send_check(iph); + + /* Fill test header and data */ + mlxh = (struct mlx5ehdr *)skb_put(skb, sizeof(*mlxh)); + mlxh->version = 0; + mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC); + strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text)); + datalen -= sizeof(*mlxh); + memset(skb_put(skb, datalen), 0, datalen); + + skb->csum = 0; + skb->ip_summed = CHECKSUM_PARTIAL; + udp4_hwcsum(skb, iph->saddr, iph->daddr); + + skb->protocol = htons(ETH_P_IP); + skb->pkt_type = PACKET_HOST; + skb->dev = priv->netdev; + + return skb; +} + +struct mlx5e_lbt_priv { + struct packet_type pt; + struct completion comp; + bool loopback_ok; +}; + +static int +mlx5e_test_loopback_validate(struct sk_buff *skb, + struct net_device *ndev, + struct packet_type *pt, + struct net_device *orig_ndev) +{ + struct mlx5e_lbt_priv *lbtp = pt->af_packet_priv; + struct mlx5ehdr *mlxh; + struct ethhdr *ethh; + struct udphdr *udph; + struct iphdr *iph; + + /* We are only going to peek, no need to clone the SKB */ + if (skb->protocol != htons(ETH_P_IP)) + goto out; + + if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb)) + goto out; + + ethh = (struct ethhdr *)skb_mac_header(skb); + if (!ether_addr_equal(ethh->h_dest, orig_ndev->dev_addr)) + goto out; + + iph = ip_hdr(skb); + if 
(iph->protocol != IPPROTO_UDP) + goto out; + + udph = udp_hdr(skb); + if (udph->dest != htons(9)) + goto out; + + mlxh = (struct mlx5ehdr *)((char *)udph + sizeof(*udph)); + if (mlxh->magic != cpu_to_be64(MLX5E_TEST_MAGIC)) + goto out; /* so close ! */ + + /* bingo */ + lbtp->loopback_ok = true; + complete(&lbtp->comp); +out: + kfree_skb(skb); + return 0; +} + +static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv, + struct mlx5e_lbt_priv *lbtp) +{ + int err = 0; + + err = mlx5e_refresh_tirs_self_loopback(priv->mdev, true); + if (err) { + netdev_err(priv->netdev, + "\tFailed to enable UC loopback err(%d)\n", err); + return err; + } + + lbtp->loopback_ok = false; + init_completion(&lbtp->comp); + + lbtp->pt.type = htons(ETH_P_ALL); + lbtp->pt.func = mlx5e_test_loopback_validate; + lbtp->pt.dev = priv->netdev; + lbtp->pt.af_packet_priv = lbtp; + dev_add_pack(&lbtp->pt); + return err; +} + +static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv, + struct mlx5e_lbt_priv *lbtp) +{ + dev_remove_pack(&lbtp->pt); + mlx5e_refresh_tirs_self_loopback(priv->mdev, false); +} + +#define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200)) +static int mlx5e_test_loopback(struct mlx5e_priv *priv) +{ + struct mlx5e_lbt_priv *lbtp; + struct sk_buff *skb = NULL; + int err; + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + netdev_err(priv->netdev, + "\tCan't perform loopback test while device is down\n"); + return -ENODEV; + } + + lbtp = kzalloc(sizeof(*lbtp), GFP_KERNEL); + if (!lbtp) + return -ENOMEM; + lbtp->loopback_ok = false; + + err = mlx5e_test_loopback_setup(priv, lbtp); + if (err) + goto out; + + skb = mlx5e_test_get_udp_skb(priv); + if (!skb) { + err = -ENOMEM; + goto cleanup; + } + + skb_set_queue_mapping(skb, 0); + err = dev_queue_xmit(skb); + if (err) { + netdev_err(priv->netdev, + "\tFailed to xmit loopback packet err(%d)\n", + err); + goto cleanup; + } + + wait_for_completion_timeout(&lbtp->comp, MLX5E_LB_VERIFY_TIMEOUT); + err = !lbtp->loopback_ok; + +cleanup: + mlx5e_test_loopback_cleanup(priv, lbtp); +out: + kfree(lbtp); + return err; +} +#endif + +static int (*mlx5e_st_func[MLX5E_ST_NUM])(struct mlx5e_priv *) = { + mlx5e_test_link_state, + mlx5e_test_link_speed, + mlx5e_test_health_info, +#ifdef CONFIG_INET + mlx5e_test_loopback, +#endif +}; + +void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, + u64 *buf) +{ + struct mlx5e_priv *priv = netdev_priv(ndev); + int i; + + memset(buf, 0, sizeof(u64) * MLX5E_ST_NUM); + + mutex_lock(&priv->state_lock); + netdev_info(ndev, "Self test begin..\n"); + + for (i = 0; i < MLX5E_ST_NUM; i++) { + netdev_info(ndev, "\t[%d] %s start..\n", + i, mlx5e_self_tests[i]); + buf[i] = mlx5e_st_func[i](priv); + netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", + i, mlx5e_self_tests[i], buf[i]); + } + + mutex_unlock(&priv->state_lock); + + for (i = 0; i < MLX5E_ST_NUM; i++) { + if (buf[i]) { + etest->flags |= ETH_TEST_FL_FAILED; + break; + } + } + netdev_info(ndev, "Self test out: status flags(0x%x)\n", + etest->flags); +}
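
The new en_selftest.c is wired into ethtool_ops as .self_test (see the en_ethtool.c hunk earlier), so the whole battery runs from userspace with "ethtool -t <ifname>". For the loopback case, mlx5e_test_get_udp_skb() builds a self-addressed UDP frame and mlx5e_test_loopback_validate() must see it back, magic intact, within MLX5E_LB_VERIFY_TIMEOUT. A schematic of that frame, reconstructed from the builder code above (field widths implied by the code, not spelled out in the patch):

	/*
	 *  |<------------------ MLX5E_TEST_PKT_SIZE ------------------>|
	 *  +--------+-------+------------+----------------------+------+
	 *  | ethhdr | iphdr |   udphdr   | mlx5ehdr             | zero |
	 *  |        |       | dport == 9 | version, magic, text | pad  |
	 *  +--------+-------+------------+----------------------+------+
	 *
	 * The validator checks the IPv4 ethertype, destination MAC,
	 * IPPROTO_UDP, UDP dport 9 and the 64-bit MLX5E_TEST_MAGIC before
	 * completing lbtp->comp.
	 */

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 57452fdc5154..f202f872f57f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -39,7 +39,7 @@ #define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \ (*(u32 *)((char *)ptr + dsc[i].offset)) #define MLX5E_READ_CTR32_BE(ptr, dsc, i) \ - be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset)) + be32_to_cpu(*(__be32 *)((char *)ptr + 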
dsc[i].offset)) #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld) #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld) @@ -276,6 +276,32 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, }; +#define PCIE_PERF_OFF(c) \ + MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c) +#define PCIE_PERF_GET(pcie_stats, c) \ + MLX5_GET(mpcnt_reg, pcie_stats->pcie_perf_counters, \ + counter_set.pcie_perf_cntrs_grp_data_layout.c) +#define PCIE_TAS_OFF(c) \ + MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_tas_cntrs_grp_data_layout.c) +#define PCIE_TAS_GET(pcie_stats, c) \ + MLX5_GET(mpcnt_reg, pcie_stats->pcie_tas_counters, \ + counter_set.pcie_tas_cntrs_grp_data_layout.c) + +struct mlx5e_pcie_stats { + __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)]; + __be64 pcie_tas_counters[MLX5_ST_SZ_QW(mpcnt_reg)]; +}; + +static const struct counter_desc pcie_perf_stats_desc[] = { + { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) }, + { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) }, +}; + +static const struct counter_desc pcie_tas_stats_desc[] = { + { "tx_pci_transport_nonfatal_msg", PCIE_TAS_OFF(non_fatal_err_msg_sent) }, + { "tx_pci_transport_fatal_msg", PCIE_TAS_OFF(fatal_err_msg_sent) }, +}; + struct mlx5e_rq_stats { u64 packets; u64 bytes; @@ -360,6 +386,8 @@ static const struct counter_desc sq_stats_desc[] = { #define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc) #define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc) #define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc) +#define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc) +#define NUM_PCIE_TAS_COUNTERS ARRAY_SIZE(pcie_tas_stats_desc) #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \ ARRAY_SIZE(pport_per_prio_traffic_stats_desc) #define NUM_PPORT_PER_PRIO_PFC_COUNTERS \ @@ -369,6 +397,7 @@ static const struct counter_desc sq_stats_desc[] = { NUM_PPORT_2819_COUNTERS + \ NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ NUM_PPORT_PRIO) +#define NUM_PCIE_COUNTERS (NUM_PCIE_PERF_COUNTERS + NUM_PCIE_TAS_COUNTERS) #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) @@ -377,6 +406,25 @@ struct mlx5e_stats { struct mlx5e_qcounter_stats qcnt; struct mlx5e_vport_stats vport; struct mlx5e_pport_stats pport; + struct mlx5e_pcie_stats pcie; + struct rtnl_link_stats64 vf_vport; +}; + +static const struct counter_desc mlx5e_pme_status_desc[] = { + { "module_plug", 0 }, + { "module_unplug", 8 }, +}; + +static const struct counter_desc mlx5e_pme_error_desc[] = { + { "module_pwr_budget_exd", 0 }, /* power budget exceed */ + { "module_long_range", 8 }, /* long range for non MLNX cable */ + { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */ + { "module_no_eeprom", 24 }, /* no eeprom/retry time out */ + { "module_enforce_part", 32 }, /* enforce part number list */ + { "module_unknown_id", 40 }, /* unknown identifier */ + { "module_high_temp", 48 }, /* high temperature */ + { "module_bad_shorted", 56 }, /* bad or shorted cable/module */ + { "module_unknown_status", 64 }, }; #endif /* __MLX5_EN_STATS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 6bb21b31cfeb..f8829b517156 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -31,6 +31,7 @@ */ #include <net/flow_dissector.h> +#include 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 6bb21b31cfeb..f8829b517156 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -31,6 +31,7 @@
  */
 
 #include <net/flow_dissector.h>
+#include <net/sch_generic.h>
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_gact.h>
 #include <net/tc_act/tc_skbedit.h>
@@ -40,28 +41,43 @@
 #include <net/switchdev.h>
 #include <net/tc_act/tc_mirred.h>
 #include <net/tc_act/tc_vlan.h>
+#include <net/tc_act/tc_tunnel_key.h>
+#include <net/vxlan.h>
 #include "en.h"
 #include "en_tc.h"
 #include "eswitch.h"
+#include "vxlan.h"
 
 struct mlx5e_tc_flow {
 	struct rhash_head	node;
 	u64			cookie;
-	struct mlx5_flow_rule	*rule;
+	struct mlx5_flow_handle *rule;
+	struct list_head	encap; /* flows sharing the same encap */
 	struct mlx5_esw_flow_attr *attr;
 };
 
+enum {
+	MLX5_HEADER_TYPE_VXLAN = 0x0,
+	MLX5_HEADER_TYPE_NVGRE = 0x1,
+};
+
 #define MLX5E_TC_TABLE_NUM_ENTRIES 1024
 #define MLX5E_TC_TABLE_NUM_GROUPS 4
 
-static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
-						    struct mlx5_flow_spec *spec,
-						    u32 action, u32 flow_tag)
+static struct mlx5_flow_handle *
+mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
+		      struct mlx5_flow_spec *spec,
+		      u32 action, u32 flow_tag)
 {
 	struct mlx5_core_dev *dev = priv->mdev;
 	struct mlx5_flow_destination dest = { 0 };
+	struct mlx5_flow_act flow_act = {
+		.action = action,
+		.flow_tag = flow_tag,
+		.encap_id = 0,
+	};
 	struct mlx5_fc *counter = NULL;
-	struct mlx5_flow_rule *rule;
+	struct mlx5_flow_handle *rule;
 	bool table_created = false;
 
 	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
@@ -82,7 +98,7 @@ static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 					    MLX5E_TC_PRIO,
 					    MLX5E_TC_TABLE_NUM_ENTRIES,
 					    MLX5E_TC_TABLE_NUM_GROUPS,
-					    0);
+					    0, 0);
 		if (IS_ERR(priv->fs.tc.t)) {
 			netdev_err(priv->netdev,
 				   "Failed to create tc offload table\n");
@@ -94,9 +110,7 @@ static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 	}
 
 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	rule = mlx5_add_flow_rule(priv->fs.tc.t, spec,
-				  action, flow_tag,
-				  &dest);
+	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);
 	if (IS_ERR(rule))
 		goto err_add_rule;
 
@@ -114,9 +128,10 @@ err_create_ft:
 	return rule;
 }
 
-static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
-						    struct mlx5_flow_spec *spec,
-						    struct mlx5_esw_flow_attr *attr)
+static struct mlx5_flow_handle *
+mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
+		      struct mlx5_flow_spec *spec,
+		      struct mlx5_esw_flow_attr *attr)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	int err;
@@ -128,19 +143,39 @@ static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
 }
 
+static void mlx5e_detach_encap(struct mlx5e_priv *priv,
+			       struct mlx5e_tc_flow *flow)
+{
+	struct list_head *next = flow->encap.next;
+
+	list_del(&flow->encap);
+	if (list_empty(next)) {
+		struct mlx5_encap_entry *e;
+
+		e = list_entry(next, struct mlx5_encap_entry, flows);
+		if (e->n) {
+			mlx5_encap_dealloc(priv->mdev, e->encap_id);
+			neigh_release(e->n);
+		}
+		hlist_del_rcu(&e->encap_hlist);
+		kfree(e);
+	}
+}
+
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
-			      struct mlx5_flow_rule *rule,
-			      struct mlx5_esw_flow_attr *attr)
+			      struct mlx5e_tc_flow *flow)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5_fc *counter = NULL;
 
-	counter = mlx5_flow_rule_counter(rule);
+	counter = mlx5_flow_rule_counter(flow->rule);
 
-	if (esw && esw->mode == SRIOV_OFFLOADS)
-		mlx5_eswitch_del_vlan_action(esw, attr);
+	mlx5_del_flow_rules(flow->rule);
 
-	mlx5_del_flow_rule(rule);
+	if (esw && esw->mode == SRIOV_OFFLOADS) {
+		mlx5_eswitch_del_vlan_action(esw, flow->attr);
+		if 
(flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) + mlx5e_detach_encap(priv, flow); + } mlx5_fc_destroy(priv->mdev, counter); @@ -150,8 +185,125 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, } } -static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, - struct tc_cls_flower_offload *f) +static void parse_vxlan_attr(struct mlx5_flow_spec *spec, + struct tc_cls_flower_offload *f) +{ + void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers); + void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers); + void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters); + void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters); + + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP); + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_dissector_key_keyid *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_KEYID, + f->key); + struct flow_dissector_key_keyid *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_KEYID, + f->mask); + MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni, + be32_to_cpu(mask->keyid)); + MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni, + be32_to_cpu(key->keyid)); + } +} + +static int parse_tunnel_attr(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + struct tc_cls_flower_offload *f) +{ + void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers); + void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers); + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) { + struct flow_dissector_key_ports *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_PORTS, + f->key); + struct flow_dissector_key_ports *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_PORTS, + f->mask); + + /* Full udp dst port must be given */ + if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) + return -EOPNOTSUPP; + + /* udp src port isn't supported */ + if (memchr_inv(&mask->src, 0, sizeof(mask->src))) + return -EOPNOTSUPP; + + if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) && + MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) + parse_vxlan_attr(spec, f); + else + return -EOPNOTSUPP; + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, + udp_dport, ntohs(mask->dst)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + udp_dport, ntohs(key->dst)); + + } else { /* udp dst port must be given */ + return -EOPNOTSUPP; + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { + struct flow_dissector_key_ipv4_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, + f->key); + struct flow_dissector_key_ipv4_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, + f->mask); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, + src_ipv4_src_ipv6.ipv4_layout.ipv4, + ntohl(mask->src)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + src_ipv4_src_ipv6.ipv4_layout.ipv4, + ntohl(key->src)); + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4, + ntohl(mask->dst)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4, + ntohl(key->dst)); + } + + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, 
ethertype, ETH_P_IP); + + /* Enforce DMAC when offloading incoming tunneled flows. + * Flow counters require a match on the DMAC. + */ + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + dmac_47_16), priv->netdev->dev_addr); + + /* let software handle IP fragments */ + MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0); + + return 0; +} + +static int __parse_cls_flower(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + struct tc_cls_flower_offload *f, + u8 *min_inline) { void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers); @@ -160,6 +312,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec u16 addr_type = 0; u8 ip_proto = 0; + *min_inline = MLX5_INLINE_MODE_L2; + if (f->dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_BASIC) | @@ -167,18 +321,61 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec BIT(FLOW_DISSECTOR_KEY_VLAN) | BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_PORTS))) { + BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | + BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | + BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) { netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n", f->dissector->used_keys); return -EOPNOTSUPP; } + if ((dissector_uses_key(f->dissector, + FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) || + dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) || + dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) && + dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { + struct flow_dissector_key_control *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_CONTROL, + f->key); + switch (key->addr_type) { + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + if (parse_tunnel_attr(priv, spec, f)) + return -EOPNOTSUPP; + break; + default: + return -EOPNOTSUPP; + } + + /* In decap flow, header pointers should point to the inner + * headers, outer header were already set by parse_tunnel_attr + */ + headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + inner_headers); + headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + inner_headers); + } + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { struct flow_dissector_key_control *key = skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_CONTROL, f->key); + + struct flow_dissector_key_control *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_CONTROL, + f->mask); addr_type = key->addr_type; + + if (mask->flags & FLOW_DIS_IS_FRAGMENT) { + MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, + key->flags & FLOW_DIS_IS_FRAGMENT); + } } if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { @@ -201,6 +398,9 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec mask->ip_proto); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, key->ip_proto); + + if (mask->ip_proto) + *min_inline = MLX5_INLINE_MODE_IP; } if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { @@ -271,6 +471,9 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec 
*spec memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), &key->dst, sizeof(key->dst)); + + if (mask->src || mask->dst) + *min_inline = MLX5_INLINE_MODE_IP; } if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { @@ -296,6 +499,10 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6), &key->dst, sizeof(key->dst)); + + if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY || + ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY) + *min_inline = MLX5_INLINE_MODE_IP; } if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { @@ -336,11 +543,39 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec "Only UDP and TCP transport are supported\n"); return -EINVAL; } + + if (mask->src || mask->dst) + *min_inline = MLX5_INLINE_MODE_TCP_UDP; } return 0; } +static int parse_cls_flower(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + struct tc_cls_flower_offload *f) +{ + struct mlx5_core_dev *dev = priv->mdev; + struct mlx5_eswitch *esw = dev->priv.eswitch; + struct mlx5_eswitch_rep *rep = priv->ppriv; + u8 min_inline; + int err; + + err = __parse_cls_flower(priv, spec, f, &min_inline); + + if (!err && esw->mode == SRIOV_OFFLOADS && + rep->vport != FDB_UPLINK_VPORT) { + if (min_inline > esw->offloads.inline_mode) { + netdev_warn(priv->netdev, + "Flow is not offloaded due to min inline setting, required %d actual %d\n", + min_inline, esw->offloads.inline_mode); + return -EOPNOTSUPP; + } + } + + return err; +} + static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, u32 *action, u32 *flow_tag) { @@ -387,11 +622,243 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return 0; } +static inline int cmp_encap_info(struct mlx5_encap_info *a, + struct mlx5_encap_info *b) +{ + return memcmp(a, b, sizeof(*a)); +} + +static inline int hash_encap_info(struct mlx5_encap_info *info) +{ + return jhash(info, sizeof(*info), 0); +} + +static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct net_device **out_dev, + struct flowi4 *fl4, + struct neighbour **out_n, + __be32 *saddr, + int *out_ttl) +{ + struct rtable *rt; + struct neighbour *n = NULL; + int ttl; + +#if IS_ENABLED(CONFIG_INET) + rt = ip_route_output_key(dev_net(mirred_dev), fl4); + if (IS_ERR(rt)) { + pr_warn("%s: no route to %pI4\n", __func__, &fl4->daddr); + return -EOPNOTSUPP; + } +#else + return -EOPNOTSUPP; +#endif + + if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) { + pr_warn("%s: Can't offload the flow, netdevices aren't on the same HW e-switch\n", + __func__); + ip_rt_put(rt); + return -EOPNOTSUPP; + } + + ttl = ip4_dst_hoplimit(&rt->dst); + n = dst_neigh_lookup(&rt->dst, &fl4->daddr); + ip_rt_put(rt); + if (!n) + return -ENOMEM; + + *out_n = n; + *saddr = fl4->saddr; + *out_ttl = ttl; + *out_dev = rt->dst.dev; + + return 0; +} + +static int gen_vxlan_header_ipv4(struct net_device *out_dev, + char buf[], + unsigned char h_dest[ETH_ALEN], + int ttl, + __be32 daddr, + __be32 saddr, + __be16 udp_dst_port, + __be32 vx_vni) +{ + int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN; + struct ethhdr *eth = (struct ethhdr *)buf; + struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr)); + struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr)); + struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct 
udphdr)); + + memset(buf, 0, encap_size); + + ether_addr_copy(eth->h_dest, h_dest); + ether_addr_copy(eth->h_source, out_dev->dev_addr); + eth->h_proto = htons(ETH_P_IP); + + ip->daddr = daddr; + ip->saddr = saddr; + + ip->ttl = ttl; + ip->protocol = IPPROTO_UDP; + ip->version = 0x4; + ip->ihl = 0x5; + + udp->dest = udp_dst_port; + vxh->vx_flags = VXLAN_HF_VNI; + vxh->vx_vni = vxlan_vni_field(vx_vni); + + return encap_size; +} + +static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5_encap_entry *e, + struct net_device **out_dev) +{ + int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); + struct flowi4 fl4 = {}; + struct neighbour *n; + char *encap_header; + int encap_size; + __be32 saddr; + int ttl; + int err; + + encap_header = kzalloc(max_encap_size, GFP_KERNEL); + if (!encap_header) + return -ENOMEM; + + switch (e->tunnel_type) { + case MLX5_HEADER_TYPE_VXLAN: + fl4.flowi4_proto = IPPROTO_UDP; + fl4.fl4_dport = e->tun_info.tp_dst; + break; + default: + err = -EOPNOTSUPP; + goto out; + } + fl4.daddr = e->tun_info.daddr; + + err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev, + &fl4, &n, &saddr, &ttl); + if (err) + goto out; + + e->n = n; + e->out_dev = *out_dev; + + if (!(n->nud_state & NUD_VALID)) { + err = -ENOTSUPP; + goto out; + } + + neigh_ha_snapshot(e->h_dest, n, *out_dev); + + switch (e->tunnel_type) { + case MLX5_HEADER_TYPE_VXLAN: + encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header, + e->h_dest, ttl, + e->tun_info.daddr, + saddr, e->tun_info.tp_dst, + e->tun_info.tun_id); + break; + default: + err = -EOPNOTSUPP; + goto out; + } + + err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, + encap_size, encap_header, &e->encap_id); +out: + kfree(encap_header); + return err; +} + +static int mlx5e_attach_encap(struct mlx5e_priv *priv, + struct ip_tunnel_info *tun_info, + struct net_device *mirred_dev, + struct mlx5_esw_flow_attr *attr) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + unsigned short family = ip_tunnel_info_af(tun_info); + struct ip_tunnel_key *key = &tun_info->key; + struct mlx5_encap_info info; + struct mlx5_encap_entry *e; + struct net_device *out_dev; + uintptr_t hash_key; + bool found = false; + int tunnel_type; + int err; + + /* udp dst port must be given */ + if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst))) + return -EOPNOTSUPP; + + if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) && + MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { + info.tp_dst = key->tp_dst; + info.tun_id = tunnel_id_to_key32(key->tun_id); + tunnel_type = MLX5_HEADER_TYPE_VXLAN; + } else { + return -EOPNOTSUPP; + } + + switch (family) { + case AF_INET: + info.daddr = key->u.ipv4.dst; + break; + default: + return -EOPNOTSUPP; + } + + hash_key = hash_encap_info(&info); + + hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, + encap_hlist, hash_key) { + if (!cmp_encap_info(&e->tun_info, &info)) { + found = true; + break; + } + } + + if (found) { + attr->encap = e; + return 0; + } + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) + return -ENOMEM; + + e->tun_info = info; + e->tunnel_type = tunnel_type; + INIT_LIST_HEAD(&e->flows); + + err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev); + if (err) + goto out_err; + + attr->encap = e; + hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key); + + return err; + +out_err: + kfree(e); + return err; +} + static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, - struct mlx5_esw_flow_attr 
*attr) + struct mlx5e_tc_flow *flow) { + struct mlx5_esw_flow_attr *attr = flow->attr; + struct ip_tunnel_info *info = NULL; const struct tc_action *a; LIST_HEAD(actions); + bool encap = false; + int err; if (tc_no_actions(exts)) return -EINVAL; @@ -407,22 +874,44 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, continue; } - if (is_tcf_mirred_redirect(a)) { + if (is_tcf_mirred_egress_redirect(a)) { int ifindex = tcf_mirred_ifindex(a); struct net_device *out_dev; struct mlx5e_priv *out_priv; out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex); - if (!switchdev_port_same_parent_id(priv->netdev, out_dev)) { + if (switchdev_port_same_parent_id(priv->netdev, + out_dev)) { + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + out_priv = netdev_priv(out_dev); + attr->out_rep = out_priv->ppriv; + } else if (encap) { + err = mlx5e_attach_encap(priv, info, + out_dev, attr); + if (err) + return err; + list_add(&flow->encap, &attr->encap->flows); + attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP | + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + out_priv = netdev_priv(attr->encap->out_dev); + attr->out_rep = out_priv->ppriv; + } else { pr_err("devices %s %s not on same switch HW, can't offload forwarding\n", priv->netdev->name, out_dev->name); return -EINVAL; } + continue; + } - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - out_priv = netdev_priv(out_dev); - attr->out_rep = out_priv->ppriv; + if (is_tcf_tunnel_set(a)) { + info = tcf_tunnel_info(a); + if (info) + encap = true; + else + return -EOPNOTSUPP; continue; } @@ -439,6 +928,11 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, continue; } + if (is_tcf_tunnel_release(a)) { + attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; + continue; + } + return -EINVAL; } return 0; @@ -453,25 +947,17 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, u32 flow_tag, action; struct mlx5e_tc_flow *flow; struct mlx5_flow_spec *spec; - struct mlx5_flow_rule *old = NULL; - struct mlx5_esw_flow_attr *old_attr = NULL; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; if (esw && esw->mode == SRIOV_OFFLOADS) fdb_flow = true; - flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, - tc->ht_params); - if (flow) { - old = flow->rule; - old_attr = flow->attr; - } else { - if (fdb_flow) - flow = kzalloc(sizeof(*flow) + sizeof(struct mlx5_esw_flow_attr), - GFP_KERNEL); - else - flow = kzalloc(sizeof(*flow), GFP_KERNEL); - } + if (fdb_flow) + flow = kzalloc(sizeof(*flow) + + sizeof(struct mlx5_esw_flow_attr), + GFP_KERNEL); + else + flow = kzalloc(sizeof(*flow), GFP_KERNEL); spec = mlx5_vzalloc(sizeof(*spec)); if (!spec || !flow) { @@ -487,7 +973,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, if (fdb_flow) { flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1); - err = parse_tc_fdb_actions(priv, f->exts, flow->attr); + err = parse_tc_fdb_actions(priv, f->exts, flow); if (err < 0) goto err_free; flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr); @@ -508,17 +994,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, if (err) goto err_del_rule; - if (old) - mlx5e_tc_del_flow(priv, old, old_attr); - goto out; err_del_rule: - mlx5_del_flow_rule(flow->rule); + mlx5_del_flow_rules(flow->rule); err_free: - if (!old) - kfree(flow); + kfree(flow); out: kvfree(spec); return err; @@ -537,7 +1019,8 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv, 
rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params); - mlx5e_tc_del_flow(priv, flow->rule, flow->attr); + mlx5e_tc_del_flow(priv, flow); + kfree(flow); @@ -594,7 +1077,7 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg) struct mlx5e_tc_flow *flow = ptr; struct mlx5e_priv *priv = arg; - mlx5e_tc_del_flow(priv, flow->rule, flow->attr); + mlx5e_tc_del_flow(priv, flow); kfree(flow); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 70a717382357..cfb68371c397 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -409,7 +409,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) sq = container_of(cq, struct mlx5e_sq, cq); - if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state))) + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) return false; npkts = 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 5703f19a6a24..e5c12a732aa1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -56,7 +56,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) struct mlx5_cqe64 *cqe; u16 sqcc; - if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state))) + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) return; cqe = mlx5e_get_cqe(cq); @@ -113,7 +113,7 @@ static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq) sq = container_of(cq, struct mlx5e_sq, cq); - if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state))) + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) return false; /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index aaca09002ca6..8ffcc8808e50 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -139,6 +139,8 @@ static const char *eqe_type_str(u8 type) return "MLX5_EVENT_TYPE_PORT_CHANGE"; case MLX5_EVENT_TYPE_GPIO_EVENT: return "MLX5_EVENT_TYPE_GPIO_EVENT"; + case MLX5_EVENT_TYPE_PORT_MODULE_EVENT: + return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT"; case MLX5_EVENT_TYPE_REMOTE_CONFIG: return "MLX5_EVENT_TYPE_REMOTE_CONFIG"; case MLX5_EVENT_TYPE_DB_BF_CONGESTION: @@ -285,6 +287,11 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) mlx5_eswitch_vport_event(dev->priv.eswitch, eqe); break; #endif + + case MLX5_EVENT_TYPE_PORT_MODULE_EVENT: + mlx5_port_module_event(dev, eqe); + break; + default: mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn); @@ -469,7 +476,7 @@ void mlx5_eq_cleanup(struct mlx5_core_dev *dev) int mlx5_start_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = &dev->priv.eq_table; - u32 async_event_mask = MLX5_ASYNC_EVENT_MASK; + u64 async_event_mask = MLX5_ASYNC_EVENT_MASK; int err; if (MLX5_CAP_GEN(dev, pg)) @@ -480,6 +487,11 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) mlx5_core_is_pf(dev)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE); + if (MLX5_CAP_GEN(dev, port_module_event)) + async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT); + else + mlx5_core_dbg(dev, "port_module_event is not set\n"); + err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, "mlx5_cmd_eq", &dev->priv.uuari.uars[0]); diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index be1f7333ab7f..d6807c3cc461 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -56,7 +56,7 @@ struct esw_uc_addr { /* E-Switch MC FDB table hash node */ struct esw_mc_addr { /* SRIOV only */ struct l2addr_node node; - struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */ + struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */ u32 refcnt; }; @@ -65,7 +65,7 @@ struct vport_addr { struct l2addr_node node; u8 action; u32 vport; - struct mlx5_flow_rule *flow_rule; /* SRIOV only */ + struct mlx5_flow_handle *flow_rule; /* SRIOV only */ /* A flag indicating that mac was added due to mc promiscuous vport */ bool mc_promisc; }; @@ -237,13 +237,14 @@ static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index) } /* E-Switch FDB */ -static struct mlx5_flow_rule * +static struct mlx5_flow_handle * __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN]) { int match_header = (is_zero_ether_addr(mac_c) ? 0 : MLX5_MATCH_OUTER_HEADERS); - struct mlx5_flow_rule *flow_rule = NULL; + struct mlx5_flow_handle *flow_rule = NULL; + struct mlx5_flow_act flow_act = {0}; struct mlx5_flow_destination dest; struct mlx5_flow_spec *spec; void *mv_misc = NULL; @@ -285,10 +286,10 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n", dmac_v, dmac_c, vport); spec->match_criteria_enable = match_header; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; flow_rule = - mlx5_add_flow_rule(esw->fdb_table.fdb, spec, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - 0, &dest); + mlx5_add_flow_rules(esw->fdb_table.fdb, spec, + &flow_act, &dest, 1); if (IS_ERR(flow_rule)) { esw_warn(esw->dev, "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", @@ -300,7 +301,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, return flow_rule; } -static struct mlx5_flow_rule * +static struct mlx5_flow_handle * esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport) { u8 mac_c[ETH_ALEN]; @@ -309,7 +310,7 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport) return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac); } -static struct mlx5_flow_rule * +static struct mlx5_flow_handle * esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport) { u8 mac_c[ETH_ALEN]; @@ -322,7 +323,7 @@ esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport) return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v); } -static struct mlx5_flow_rule * +static struct mlx5_flow_handle * esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport) { u8 mac_c[ETH_ALEN]; @@ -361,7 +362,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports) memset(flow_group_in, 0, inlen); table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); - fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0); + fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0, 0); if (IS_ERR(fdb)) { err = PTR_ERR(fdb); esw_warn(dev, "Failed to create FDB Table err %d\n", err); @@ -515,7 +516,7 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) del_l2_table_entry(esw->dev, esw_uc->table_index); if (vaddr->flow_rule) - mlx5_del_flow_rule(vaddr->flow_rule); + 
mlx5_del_flow_rules(vaddr->flow_rule); vaddr->flow_rule = NULL; l2addr_hash_del(esw_uc); @@ -562,7 +563,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw, case MLX5_ACTION_DEL: if (!iter_vaddr) continue; - mlx5_del_flow_rule(iter_vaddr->flow_rule); + mlx5_del_flow_rules(iter_vaddr->flow_rule); l2addr_hash_del(iter_vaddr); break; } @@ -632,7 +633,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) esw_mc->uplink_rule); if (vaddr->flow_rule) - mlx5_del_flow_rule(vaddr->flow_rule); + mlx5_del_flow_rules(vaddr->flow_rule); vaddr->flow_rule = NULL; /* If the multicast mac is added as a result of mc promiscuous vport, @@ -645,7 +646,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) update_allmulti_vports(esw, vaddr, esw_mc); if (esw_mc->uplink_rule) - mlx5_del_flow_rule(esw_mc->uplink_rule); + mlx5_del_flow_rules(esw_mc->uplink_rule); l2addr_hash_del(esw_mc); return 0; @@ -828,14 +829,14 @@ static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num, UPLINK_VPORT); allmulti_addr->refcnt++; } else if (vport->allmulti_rule) { - mlx5_del_flow_rule(vport->allmulti_rule); + mlx5_del_flow_rules(vport->allmulti_rule); vport->allmulti_rule = NULL; if (--allmulti_addr->refcnt > 0) goto promisc; if (allmulti_addr->uplink_rule) - mlx5_del_flow_rule(allmulti_addr->uplink_rule); + mlx5_del_flow_rules(allmulti_addr->uplink_rule); allmulti_addr->uplink_rule = NULL; } @@ -847,7 +848,7 @@ promisc: vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw, vport_num); } else if (vport->promisc_rule) { - mlx5_del_flow_rule(vport->promisc_rule); + mlx5_del_flow_rules(vport->promisc_rule); vport->promisc_rule = NULL; } } @@ -1018,10 +1019,10 @@ static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) - mlx5_del_flow_rule(vport->egress.allowed_vlan); + mlx5_del_flow_rules(vport->egress.allowed_vlan); if (!IS_ERR_OR_NULL(vport->egress.drop_rule)) - mlx5_del_flow_rule(vport->egress.drop_rule); + mlx5_del_flow_rules(vport->egress.drop_rule); vport->egress.allowed_vlan = NULL; vport->egress.drop_rule = NULL; @@ -1179,10 +1180,10 @@ static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { if (!IS_ERR_OR_NULL(vport->ingress.drop_rule)) - mlx5_del_flow_rule(vport->ingress.drop_rule); + mlx5_del_flow_rules(vport->ingress.drop_rule); if (!IS_ERR_OR_NULL(vport->ingress.allow_rule)) - mlx5_del_flow_rule(vport->ingress.allow_rule); + mlx5_del_flow_rules(vport->ingress.allow_rule); vport->ingress.drop_rule = NULL; vport->ingress.allow_rule = NULL; @@ -1212,6 +1213,7 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, static int esw_vport_ingress_config(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { + struct mlx5_flow_act flow_act = {0}; struct mlx5_flow_spec *spec; int err = 0; u8 *smac_v; @@ -1264,10 +1266,10 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, } spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; vport->ingress.allow_rule = - mlx5_add_flow_rule(vport->ingress.acl, spec, - MLX5_FLOW_CONTEXT_ACTION_ALLOW, - 0, NULL); + mlx5_add_flow_rules(vport->ingress.acl, spec, + &flow_act, NULL, 0); if (IS_ERR(vport->ingress.allow_rule)) { err = PTR_ERR(vport->ingress.allow_rule); esw_warn(esw->dev, @@ -1278,10 +1280,10 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, } memset(spec, 0, 
sizeof(*spec)); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; vport->ingress.drop_rule = - mlx5_add_flow_rule(vport->ingress.acl, spec, - MLX5_FLOW_CONTEXT_ACTION_DROP, - 0, NULL); + mlx5_add_flow_rules(vport->ingress.acl, spec, + &flow_act, NULL, 0); if (IS_ERR(vport->ingress.drop_rule)) { err = PTR_ERR(vport->ingress.drop_rule); esw_warn(esw->dev, @@ -1301,6 +1303,7 @@ out: static int esw_vport_egress_config(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { + struct mlx5_flow_act flow_act = {0}; struct mlx5_flow_spec *spec; int err = 0; @@ -1338,10 +1341,10 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan); spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; vport->egress.allowed_vlan = - mlx5_add_flow_rule(vport->egress.acl, spec, - MLX5_FLOW_CONTEXT_ACTION_ALLOW, - 0, NULL); + mlx5_add_flow_rules(vport->egress.acl, spec, + &flow_act, NULL, 0); if (IS_ERR(vport->egress.allowed_vlan)) { err = PTR_ERR(vport->egress.allowed_vlan); esw_warn(esw->dev, @@ -1353,10 +1356,10 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, /* Drop others rule (star rule) */ memset(spec, 0, sizeof(*spec)); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; vport->egress.drop_rule = - mlx5_add_flow_rule(vport->egress.acl, spec, - MLX5_FLOW_CONTEXT_ACTION_DROP, - 0, NULL); + mlx5_add_flow_rules(vport->egress.acl, spec, + &flow_act, NULL, 0); if (IS_ERR(vport->egress.drop_rule)) { err = PTR_ERR(vport->egress.drop_rule); esw_warn(esw->dev, @@ -1369,6 +1372,147 @@ out: return err; } +/* Vport QoS management */ +static int esw_create_tsar(struct mlx5_eswitch *esw) +{ + u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; + struct mlx5_core_dev *dev = esw->dev; + int err; + + if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) + return 0; + + if (esw->qos.enabled) + return -EEXIST; + + err = mlx5_create_scheduling_element_cmd(dev, + SCHEDULING_HIERARCHY_E_SWITCH, + &tsar_ctx, + &esw->qos.root_tsar_id); + if (err) { + esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err); + return err; + } + + esw->qos.enabled = true; + return 0; +} + +static void esw_destroy_tsar(struct mlx5_eswitch *esw) +{ + int err; + + if (!esw->qos.enabled) + return; + + err = mlx5_destroy_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + esw->qos.root_tsar_id); + if (err) + esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err); + + esw->qos.enabled = false; +} + +static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, + u32 initial_max_rate) +{ + u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; + struct mlx5_vport *vport = &esw->vports[vport_num]; + struct mlx5_core_dev *dev = esw->dev; + void *vport_elem; + int err = 0; + + if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) || + !MLX5_CAP_QOS(dev, esw_scheduling)) + return 0; + + if (vport->qos.enabled) + return -EEXIST; + + MLX5_SET(scheduling_context, &sched_ctx, element_type, + SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); + vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx, + element_attributes); + MLX5_SET(vport_element, vport_elem, vport_number, vport_num); + MLX5_SET(scheduling_context, &sched_ctx, parent_element_id, + esw->qos.root_tsar_id); + MLX5_SET(scheduling_context, &sched_ctx, max_average_bw, + initial_max_rate); + + err = mlx5_create_scheduling_element_cmd(dev, + SCHEDULING_HIERARCHY_E_SWITCH, + &sched_ctx, + 
&vport->qos.esw_tsar_ix); + if (err) { + esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n", + vport_num, err); + return err; + } + + vport->qos.enabled = true; + return 0; +} + +static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num) +{ + struct mlx5_vport *vport = &esw->vports[vport_num]; + int err = 0; + + if (!vport->qos.enabled) + return; + + err = mlx5_destroy_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + vport->qos.esw_tsar_ix); + if (err) + esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n", + vport_num, err); + + vport->qos.enabled = false; +} + +static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num, + u32 max_rate) +{ + u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; + struct mlx5_vport *vport = &esw->vports[vport_num]; + struct mlx5_core_dev *dev = esw->dev; + void *vport_elem; + u32 bitmask = 0; + int err = 0; + + if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) + return -EOPNOTSUPP; + + if (!vport->qos.enabled) + return -EIO; + + MLX5_SET(scheduling_context, &sched_ctx, element_type, + SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); + vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx, + element_attributes); + MLX5_SET(vport_element, vport_elem, vport_number, vport_num); + MLX5_SET(scheduling_context, &sched_ctx, parent_element_id, + esw->qos.root_tsar_id); + MLX5_SET(scheduling_context, &sched_ctx, max_average_bw, + max_rate); + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; + + err = mlx5_modify_scheduling_element_cmd(dev, + SCHEDULING_HIERARCHY_E_SWITCH, + &sched_ctx, + vport->qos.esw_tsar_ix, + bitmask); + if (err) { + esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n", + vport_num, err); + return err; + } + + return 0; +} + static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) { ((u8 *)node_guid)[7] = mac[0]; @@ -1404,6 +1548,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw, esw_vport_egress_config(esw, vport); } } + static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, int enable_events) { @@ -1417,6 +1562,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, /* Restore old vport configuration */ esw_apply_vport_conf(esw, vport); + /* Attach vport to the eswitch rate limiter */ + if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate)) + esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num); + /* Sync with current vport context */ vport->enabled_events = enable_events; vport->enabled = true; @@ -1455,7 +1604,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) */ esw_vport_change_handle_locked(vport); vport->enabled_events = 0; - + esw_vport_disable_qos(esw, vport_num); if (vport_num && esw->mode == SRIOV_LEGACY) { mlx5_modify_vport_admin_state(esw->dev, MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, @@ -1501,6 +1650,10 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) if (err) goto abort; + err = esw_create_tsar(esw); + if (err) + esw_warn(esw->dev, "Failed to create eswitch TSAR"); + enabled_events = (mode == SRIOV_LEGACY) ? 
SRIOV_VPORT_EVENTS : UC_ADDR_CHANGE; for (i = 0; i <= nvfs; i++) esw_enable_vport(esw, i, enabled_events); @@ -1535,7 +1688,9 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) esw_disable_vport(esw, i); if (mc_promisc && mc_promisc->uplink_rule) - mlx5_del_flow_rule(mc_promisc->uplink_rule); + mlx5_del_flow_rules(mc_promisc->uplink_rule); + + esw_destroy_tsar(esw); if (esw->mode == SRIOV_LEGACY) esw_destroy_legacy_fdb_table(esw); @@ -1627,6 +1782,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) goto abort; } + hash_init(esw->offloads.encap_tbl); mutex_init(&esw->state_lock); for (vport_num = 0; vport_num < total_vports; vport_num++) { @@ -1642,6 +1798,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) esw->total_vports = total_vports; esw->enabled_vports = 0; esw->mode = SRIOV_NONE; + esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE; dev->priv.eswitch = esw; return 0; @@ -1795,6 +1952,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, ivi->qos = evport->info.qos; ivi->spoofchk = evport->info.spoofchk; ivi->trusted = evport->info.trusted; + ivi->max_tx_rate = evport->info.max_rate; mutex_unlock(&esw->state_lock); return 0; @@ -1888,6 +2046,27 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, return 0; } +int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, + int vport, u32 max_rate) +{ + struct mlx5_vport *evport; + int err = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (!LEGAL_VPORT(esw, vport)) + return -EINVAL; + + mutex_lock(&esw->state_lock); + evport = &esw->vports[vport]; + err = esw_vport_qos_config(esw, vport, max_rate); + if (!err) + evport->info.max_rate = max_rate; + + mutex_unlock(&esw->state_lock); + return err; +} + int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, int vport, struct ifla_vf_stats *vf_stats) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 2e2938e08cda..8661dd3f542c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -97,16 +97,16 @@ struct vport_ingress { struct mlx5_flow_group *allow_spoofchk_only_grp; struct mlx5_flow_group *allow_untagged_only_grp; struct mlx5_flow_group *drop_grp; - struct mlx5_flow_rule *allow_rule; - struct mlx5_flow_rule *drop_rule; + struct mlx5_flow_handle *allow_rule; + struct mlx5_flow_handle *drop_rule; }; struct vport_egress { struct mlx5_flow_table *acl; struct mlx5_flow_group *allowed_vlans_grp; struct mlx5_flow_group *drop_grp; - struct mlx5_flow_rule *allowed_vlan; - struct mlx5_flow_rule *drop_rule; + struct mlx5_flow_handle *allowed_vlan; + struct mlx5_flow_handle *drop_rule; }; struct mlx5_vport_info { @@ -115,6 +115,7 @@ struct mlx5_vport_info { u8 qos; u64 node_guid; int link_state; + u32 max_rate; bool spoofchk; bool trusted; }; @@ -124,8 +125,8 @@ struct mlx5_vport { int vport; struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE]; struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE]; - struct mlx5_flow_rule *promisc_rule; - struct mlx5_flow_rule *allmulti_rule; + struct mlx5_flow_handle *promisc_rule; + struct mlx5_flow_handle *allmulti_rule; struct work_struct vport_change_handler; struct vport_ingress ingress; @@ -133,6 +134,11 @@ struct mlx5_vport { struct mlx5_vport_info info; + struct { + bool enabled; + u32 esw_tsar_ix; + } qos; + bool enabled; u16 enabled_events; }; @@ -156,7 +162,7 @@ struct mlx5_eswitch_fdb { struct mlx5_flow_table *fdb; struct mlx5_flow_group *send_to_vport_grp; struct mlx5_flow_group 
*miss_grp; - struct mlx5_flow_rule *miss_rule; + struct mlx5_flow_handle *miss_rule; int vlan_push_pop_refcount; } offloads; }; @@ -169,7 +175,7 @@ enum { }; struct mlx5_esw_sq { - struct mlx5_flow_rule *send_to_vport_rule; + struct mlx5_flow_handle *send_to_vport_rule; struct list_head list; }; @@ -180,9 +186,9 @@ struct mlx5_eswitch_rep { struct mlx5_eswitch_rep *rep); u16 vport; u8 hw_id[ETH_ALEN]; - void *priv_data; + struct net_device *netdev; - struct mlx5_flow_rule *vport_rx_rule; + struct mlx5_flow_handle *vport_rx_rule; struct list_head vport_sqs_list; u16 vlan; u32 vlan_refcount; @@ -193,6 +199,8 @@ struct mlx5_esw_offload { struct mlx5_flow_table *ft_offloads; struct mlx5_flow_group *vport_rx_group; struct mlx5_eswitch_rep *vport_reps; + DECLARE_HASHTABLE(encap_tbl, 8); + u8 inline_mode; }; struct mlx5_eswitch { @@ -209,6 +217,12 @@ struct mlx5_eswitch { */ struct mutex state_lock; struct esw_mc_addr *mc_promisc; + + struct { + bool enabled; + u32 root_tsar_id; + } qos; + struct mlx5_esw_offload offloads; int mode; }; @@ -234,6 +248,8 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, int vport, bool spoofchk); int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, int vport_num, bool setting); +int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, + int vport, u32 max_rate); int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int vport, struct ifla_vf_info *ivi); int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, @@ -243,11 +259,11 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, struct mlx5_flow_spec; struct mlx5_esw_flow_attr; -struct mlx5_flow_rule * +struct mlx5_flow_handle * mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, struct mlx5_esw_flow_attr *attr); -struct mlx5_flow_rule * +struct mlx5_flow_handle * mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn); enum { @@ -258,6 +274,24 @@ enum { #define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x40 #define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80 +struct mlx5_encap_info { + __be32 daddr; + __be32 tun_id; + __be16 tp_dst; +}; + +struct mlx5_encap_entry { + struct hlist_node encap_hlist; + struct list_head flows; + u32 encap_id; + struct neighbour *n; + struct mlx5_encap_info tun_info; + unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ + + struct net_device *out_dev; + int tunnel_type; +}; + struct mlx5_esw_flow_attr { struct mlx5_eswitch_rep *in_rep; struct mlx5_eswitch_rep *out_rep; @@ -265,6 +299,7 @@ struct mlx5_esw_flow_attr { int action; u16 vlan; bool vlan_handled; + struct mlx5_encap_entry *encap; }; int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw, @@ -275,11 +310,15 @@ void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw, int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode); int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode); +int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode); +int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode); +int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode); void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, int vport_index, struct mlx5_eswitch_rep *rep); void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, int vport_index); +struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw); int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr); diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index d239f5d0ea36..466e161010f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -43,33 +43,36 @@ enum { FDB_SLOW_PATH }; -struct mlx5_flow_rule * +struct mlx5_flow_handle * mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, struct mlx5_esw_flow_attr *attr) { - struct mlx5_flow_destination dest = { 0 }; + struct mlx5_flow_destination dest[2] = {}; + struct mlx5_flow_act flow_act = {0}; struct mlx5_fc *counter = NULL; - struct mlx5_flow_rule *rule; + struct mlx5_flow_handle *rule; void *misc; - int action; + int i = 0; if (esw->mode != SRIOV_OFFLOADS) return ERR_PTR(-EOPNOTSUPP); /* per flow vlan pop/push is emulated, don't set that into the firmware */ - action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); + flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); - if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { - dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest.vport_num = attr->out_rep->vport; - action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest[i].vport_num = attr->out_rep->vport; + i++; + } + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { counter = mlx5_fc_create(esw->dev, true); if (IS_ERR(counter)) return ERR_CAST(counter); - dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter = counter; + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest[i].counter = counter; + i++; } misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); @@ -80,10 +83,14 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS; + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) + spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; - rule = mlx5_add_flow_rule((struct mlx5_flow_table *)esw->fdb_table.fdb, - spec, action, 0, &dest); + if (attr->encap) + flow_act.encap_id = attr->encap->encap_id; + rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb, + spec, &flow_act, dest, i); if (IS_ERR(rule)) mlx5_fc_destroy(esw->dev, counter); @@ -270,11 +277,12 @@ out: return err; } -static struct mlx5_flow_rule * +static struct mlx5_flow_handle * mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn) { + struct mlx5_flow_act flow_act = {0}; struct mlx5_flow_destination dest; - struct mlx5_flow_rule *flow_rule; + struct mlx5_flow_handle *flow_rule; struct mlx5_flow_spec *spec; void *misc; @@ -296,10 +304,10 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.vport_num = vport; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - 0, &dest); + flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec, + &flow_act, &dest, 1); if (IS_ERR(flow_rule)) esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule)); out: @@ -316,7 +324,7 @@ void 
mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw, return; list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) { - mlx5_del_flow_rule(esw_sq->send_to_vport_rule); + mlx5_del_flow_rules(esw_sq->send_to_vport_rule); list_del(&esw_sq->list); kfree(esw_sq); } @@ -326,7 +334,7 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep, u16 *sqns_array, int sqns_num) { - struct mlx5_flow_rule *flow_rule; + struct mlx5_flow_handle *flow_rule; struct mlx5_esw_sq *esw_sq; int err; int i; @@ -362,8 +370,9 @@ out_err: static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) { + struct mlx5_flow_act flow_act = {0}; struct mlx5_flow_destination dest; - struct mlx5_flow_rule *flow_rule = NULL; + struct mlx5_flow_handle *flow_rule = NULL; struct mlx5_flow_spec *spec; int err = 0; @@ -376,10 +385,10 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.vport_num = 0; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - 0, &dest); + flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec, + &flow_act, &dest, 1); if (IS_ERR(flow_rule)) { err = PTR_ERR(flow_rule); esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err); @@ -406,6 +415,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) u32 *flow_group_in; void *match_criteria; int table_size, ix, err = 0; + u32 flags = 0; flow_group_in = mlx5_vzalloc(inlen); if (!flow_group_in) @@ -420,9 +430,14 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n", MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); + if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) && + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)) + flags |= MLX5_FLOW_TABLE_TUNNEL_EN; + fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH, ESW_OFFLOADS_NUM_ENTRIES, - ESW_OFFLOADS_NUM_GROUPS, 0); + ESW_OFFLOADS_NUM_GROUPS, 0, + flags); if (IS_ERR(fdb)) { err = PTR_ERR(fdb); esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err); @@ -431,7 +446,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) esw->fdb_table.fdb = fdb; table_size = nvports + MAX_PF_SQ + 1; - fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0); + fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0, 0); if (IS_ERR(fdb)) { err = PTR_ERR(fdb); esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err); @@ -502,7 +517,7 @@ static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw) return; esw_debug(esw->dev, "Destroy offloads FDB Table\n"); - mlx5_del_flow_rule(esw->fdb_table.offloads.miss_rule); + mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule); mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); @@ -523,7 +538,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw) return -ENOMEM; } - ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0); + ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0); if (IS_ERR(ft_offloads)) { err = PTR_ERR(ft_offloads); esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err); @@ -586,11 +601,12 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw) 
mlx5_destroy_flow_group(esw->offloads.vport_rx_group); } -struct mlx5_flow_rule * +struct mlx5_flow_handle * mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn) { + struct mlx5_flow_act flow_act = {0}; struct mlx5_flow_destination dest; - struct mlx5_flow_rule *flow_rule; + struct mlx5_flow_handle *flow_rule; struct mlx5_flow_spec *spec; void *misc; @@ -611,9 +627,9 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn) dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; dest.tir_num = tirn; - flow_rule = mlx5_add_flow_rule(esw->offloads.ft_offloads, spec, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - 0, &dest); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec, + &flow_act, &dest, 1); if (IS_ERR(flow_rule)) { esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule)); goto out; @@ -641,6 +657,14 @@ static int esw_offloads_start(struct mlx5_eswitch *esw) if (err1) esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err); } + if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { + if (mlx5_eswitch_inline_mode_get(esw, + num_vfs, + &esw->offloads.inline_mode)) { + esw->offloads.inline_mode = MLX5_INLINE_MODE_L2; + esw_warn(esw->dev, "Inline mode is different between vports\n"); + } + } return err; } @@ -755,6 +779,50 @@ static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode) return 0; } +static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode) +{ + switch (mode) { + case DEVLINK_ESWITCH_INLINE_MODE_NONE: + *mlx5_mode = MLX5_INLINE_MODE_NONE; + break; + case DEVLINK_ESWITCH_INLINE_MODE_LINK: + *mlx5_mode = MLX5_INLINE_MODE_L2; + break; + case DEVLINK_ESWITCH_INLINE_MODE_NETWORK: + *mlx5_mode = MLX5_INLINE_MODE_IP; + break; + case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT: + *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode) +{ + switch (mlx5_mode) { + case MLX5_INLINE_MODE_NONE: + *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE; + break; + case MLX5_INLINE_MODE_L2: + *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK; + break; + case MLX5_INLINE_MODE_IP: + *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK; + break; + case MLX5_INLINE_MODE_TCP_UDP: + *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT; + break; + default: + return -EINVAL; + } + + return 0; +} + int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) { struct mlx5_core_dev *dev; @@ -799,6 +867,95 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); } +int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_eswitch *esw = dev->priv.eswitch; + int num_vports = esw->enabled_vports; + int err; + int vport; + u8 mlx5_mode; + + if (!MLX5_CAP_GEN(dev, vport_group_manager)) + return -EOPNOTSUPP; + + if (esw->mode == SRIOV_NONE) + return -EOPNOTSUPP; + + if (MLX5_CAP_ETH(dev, wqe_inline_mode) != + MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) + return -EOPNOTSUPP; + + err = esw_inline_mode_from_devlink(mode, &mlx5_mode); + if (err) + goto out; + + for (vport = 1; vport < num_vports; vport++) { + err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); + if (err) { + esw_warn(dev, "Failed to set min inline on vport %d\n", + vport); + goto revert_inline_mode; + } + } + + esw->offloads.inline_mode = mlx5_mode; + 
return 0; + +revert_inline_mode: + while (--vport > 0) + mlx5_modify_nic_vport_min_inline(dev, + vport, + esw->offloads.inline_mode); +out: + return err; +} + +int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_eswitch *esw = dev->priv.eswitch; + + if (!MLX5_CAP_GEN(dev, vport_group_manager)) + return -EOPNOTSUPP; + + if (esw->mode == SRIOV_NONE) + return -EOPNOTSUPP; + + if (MLX5_CAP_ETH(dev, wqe_inline_mode) != + MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) + return -EOPNOTSUPP; + + return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); +} + +int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode) +{ + struct mlx5_core_dev *dev = esw->dev; + int vport; + u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2; + + if (!MLX5_CAP_GEN(dev, vport_group_manager)) + return -EOPNOTSUPP; + + if (esw->mode == SRIOV_NONE) + return -EOPNOTSUPP; + + if (MLX5_CAP_ETH(dev, wqe_inline_mode) != + MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) + return -EOPNOTSUPP; + + for (vport = 1; vport <= nvfs; vport++) { + mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode); + if (vport > 1 && prev_mlx5_mode != mlx5_mode) + return -EINVAL; + prev_mlx5_mode = mlx5_mode; + } + + *mode = mlx5_mode; + return 0; +} + void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, int vport_index, struct mlx5_eswitch_rep *__rep) @@ -813,7 +970,7 @@ void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, rep->load = __rep->load; rep->unload = __rep->unload; rep->vport = __rep->vport; - rep->priv_data = __rep->priv_data; + rep->netdev = __rep->netdev; ether_addr_copy(rep->hw_id, __rep->hw_id); INIT_LIST_HEAD(&rep->vport_sqs_list); @@ -833,3 +990,13 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, rep->valid = false; } + +struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw) +{ +#define UPLINK_REP_INDEX 0 + struct mlx5_esw_offload *offloads = &esw->offloads; + struct mlx5_eswitch_rep *rep; + + rep = &offloads->vport_reps[UPLINK_REP_INDEX]; + return rep->netdev; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 113c32326333..c4478ecd8056 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -37,6 +37,7 @@ #include "fs_core.h" #include "fs_cmd.h" #include "mlx5_core.h" +#include "eswitch.h" int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft) @@ -61,8 +62,9 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, enum fs_flow_table_op_mod op_mod, enum fs_flow_table_type type, unsigned int level, unsigned int log_size, struct mlx5_flow_table - *next_ft, unsigned int *table_id) + *next_ft, unsigned int *table_id, u32 flags) { + int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN); u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0}; u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0}; int err; @@ -78,6 +80,9 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, MLX5_SET(create_flow_table_in, in, other_vport, 1); } + MLX5_SET(create_flow_table_in, in, decap_en, en_encap_decap); + MLX5_SET(create_flow_table_in, in, encap_en, en_encap_decap); + switch (op_mod) { case FS_FT_OP_MOD_NORMAL: if (next_ft) { @@ -243,6 +248,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(flow_context, in_flow_context, group_id, group_id); MLX5_SET(flow_context, in_flow_context, flow_tag, 
fte->flow_tag); MLX5_SET(flow_context, in_flow_context, action, fte->action); + MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id); in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context, match_value); memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param)); @@ -453,27 +459,32 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, *bytes = MLX5_GET64(traffic_counter, stats, octets); } -#define MAX_ENCAP_SIZE (128) - -int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev, - int header_type, - size_t size, - void *encap_header, - u32 *encap_id) +int mlx5_encap_alloc(struct mlx5_core_dev *dev, + int header_type, + size_t size, + void *encap_header, + u32 *encap_id) { + int max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size); u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)]; - u32 in[MLX5_ST_SZ_DW(alloc_encap_header_in) + - (MAX_ENCAP_SIZE / sizeof(u32))]; - void *encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, - encap_header); - void *header = MLX5_ADDR_OF(encap_header_in, encap_header_in, - encap_header); - int inlen = header - (void *)in + size; + void *encap_header_in; + void *header; + int inlen; int err; + u32 *in; - if (size > MAX_ENCAP_SIZE) + if (size > MLX5_CAP_ESW(dev, max_encap_header_size)) return -EINVAL; + in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + max_encap_size, + GFP_KERNEL); + if (!in) + return -ENOMEM; + + encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header); + header = MLX5_ADDR_OF(encap_header_in, encap_header_in, encap_header); + inlen = header - (void *)in + size; + memset(in, 0, inlen); MLX5_SET(alloc_encap_header_in, in, opcode, MLX5_CMD_OP_ALLOC_ENCAP_HEADER); @@ -485,10 +496,11 @@ int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev, err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); *encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id); + kfree(in); return err; } -void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id) +void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id) { u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)]; u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index c5bc4686c832..8fad80688536 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -38,7 +38,7 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, enum fs_flow_table_op_mod op_mod, enum fs_flow_table_type type, unsigned int level, unsigned int log_size, struct mlx5_flow_table - *next_ft, unsigned int *table_id); + *next_ft, unsigned int *table_id, u32 flags); int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft); @@ -89,11 +89,4 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b, u16 id, u64 *packets, u64 *bytes); -int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev, - int header_type, - size_t size, - void *encap_header, - u32 *encap_id); -void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id); - #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 914e5466f729..a263d8904a4c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -153,6 +153,11 @@ static void del_rule(struct fs_node *node); static void del_flow_table(struct fs_node *node); static void del_flow_group(struct fs_node *node); 
static void del_fte(struct fs_node *node); +static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, + struct mlx5_flow_destination *d2); +static struct mlx5_flow_rule * +find_flow_rule(struct fs_fte *fte, + struct mlx5_flow_destination *dest); static void tree_init_node(struct fs_node *node, unsigned int refcount, @@ -369,6 +374,7 @@ static void del_rule(struct fs_node *node) struct mlx5_core_dev *dev = get_dev(node); int match_len = MLX5_ST_SZ_BYTES(fte_match_param); int err; + bool update_fte = false; match_value = mlx5_vzalloc(match_len); if (!match_value) { @@ -387,13 +393,23 @@ static void del_rule(struct fs_node *node) list_del(&rule->next_ft); mutex_unlock(&rule->dest_attr.ft->lock); } + + if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER && + --fte->dests_size) { + modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); + fte->action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; + update_fte = true; + goto out; + } + if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && --fte->dests_size) { modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST), - err = mlx5_cmd_update_fte(dev, ft, - fg->id, - modify_mask, - fte); + update_fte = true; + } +out: + if (update_fte && fte->dests_size) { + err = mlx5_cmd_update_fte(dev, ft, fg->id, modify_mask, fte); if (err) mlx5_core_warn(dev, "%s can't del rule fg id=%d fte_index=%d\n", @@ -444,8 +460,7 @@ static void del_flow_group(struct fs_node *node) fg->id, ft->id); } -static struct fs_fte *alloc_fte(u8 action, - u32 flow_tag, +static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act, u32 *match_value, unsigned int index) { @@ -457,9 +472,10 @@ static struct fs_fte *alloc_fte(u8 action, memcpy(fte->val, match_value, sizeof(fte->val)); fte->node.type = FS_TYPE_FLOW_ENTRY; - fte->flow_tag = flow_tag; + fte->flow_tag = flow_act->flow_tag; fte->index = index; - fte->action = action; + fte->action = flow_act->action; + fte->encap_id = flow_act->encap_id; return fte; } @@ -489,7 +505,8 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in) static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte, enum fs_flow_table_type table_type, - enum fs_flow_table_op_mod op_mod) + enum fs_flow_table_op_mod op_mod, + u32 flags) { struct mlx5_flow_table *ft; @@ -503,6 +520,7 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_ft ft->type = table_type; ft->vport = vport; ft->max_fte = max_fte; + ft->flags = flags; INIT_LIST_HEAD(&ft->fwd_rules); mutex_init(&ft->lock); @@ -641,8 +659,8 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio return err; } -int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, - struct mlx5_flow_destination *dest) +static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, + struct mlx5_flow_destination *dest) { struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; @@ -667,6 +685,28 @@ int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, return err; } +int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle, + struct mlx5_flow_destination *new_dest, + struct mlx5_flow_destination *old_dest) +{ + int i; + + if (!old_dest) { + if (handle->num_rules != 1) + return -EINVAL; + return _mlx5_modify_rule_destination(handle->rule[0], + new_dest); + } + + for (i = 0; i < handle->num_rules; i++) { + if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr)) + return _mlx5_modify_rule_destination(handle->rule[i], + new_dest); + } + + return -EINVAL; +} + /* Modify/set FWD 
rules that point on old_next_ft to point on new_next_ft */ static int connect_fwd_rules(struct mlx5_core_dev *dev, struct mlx5_flow_table *new_next_ft, @@ -689,7 +729,7 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev, list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules); mutex_unlock(&old_next_ft->lock); list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) { - err = mlx5_modify_rule_destination(iter, &dest); + err = _mlx5_modify_rule_destination(iter, &dest); if (err) pr_err("mlx5_core: failed to modify rule to point on flow table %d\n", new_next_ft->id); @@ -739,7 +779,8 @@ static void list_add_flow_table(struct mlx5_flow_table *ft, static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns, enum fs_flow_table_op_mod op_mod, u16 vport, int prio, - int max_fte, u32 level) + int max_fte, u32 level, + u32 flags) { struct mlx5_flow_table *next_ft = NULL; struct mlx5_flow_table *ft; @@ -772,7 +813,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa vport, max_fte ? roundup_pow_of_two(max_fte) : 0, root->table_type, - op_mod); + op_mod, flags); if (!ft) { err = -ENOMEM; goto unlock_root; @@ -782,7 +823,8 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0; next_ft = find_next_chained_ft(fs_prio); err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type, - ft->level, log_table_sz, next_ft, &ft->id); + ft->level, log_table_sz, next_ft, &ft->id, + ft->flags); if (err) goto free_ft; @@ -807,10 +849,11 @@ unlock_root: struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, int prio, int max_fte, - u32 level) + u32 level, + u32 flags) { return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, 0, prio, - max_fte, level); + max_fte, level, flags); } struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, @@ -818,7 +861,7 @@ struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace u32 level, u16 vport) { return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, vport, prio, - max_fte, level); + max_fte, level, 0); } struct mlx5_flow_table *mlx5_create_lag_demux_flow_table( @@ -826,7 +869,7 @@ struct mlx5_flow_table *mlx5_create_lag_demux_flow_table( int prio, u32 level) { return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_LAG_DEMUX, 0, prio, 0, - level); + level, 0); } EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table); @@ -834,14 +877,15 @@ struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_nam int prio, int num_flow_table_entries, int max_num_groups, - u32 level) + u32 level, + u32 flags) { struct mlx5_flow_table *ft; if (max_num_groups > num_flow_table_entries) return ERR_PTR(-EINVAL); - ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level); + ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level, flags); if (IS_ERR(ft)) return ft; @@ -918,55 +962,133 @@ static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest) return rule; } -/* fte should not be deleted while calling this function */ -static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte, - struct mlx5_flow_group *fg, - struct mlx5_flow_destination *dest) +static struct mlx5_flow_handle *alloc_handle(int num_rules) +{ + struct mlx5_flow_handle *handle; + + handle = kzalloc(sizeof(*handle) + sizeof(handle->rule[0]) * + num_rules, GFP_KERNEL); + if (!handle) + return NULL; + + handle->num_rules = num_rules; + 
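+	/* rule[] is a flexible array (see struct mlx5_flow_handle in
+	 * fs_core.h): one mlx5_flow_rule slot per destination */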
+ return handle; +} + +static void destroy_flow_handle(struct fs_fte *fte, + struct mlx5_flow_handle *handle, + struct mlx5_flow_destination *dest, + int i) +{ + for (; --i >= 0;) { + if (atomic_dec_and_test(&handle->rule[i]->node.refcount)) { + fte->dests_size--; + list_del(&handle->rule[i]->node.list); + kfree(handle->rule[i]); + } + } + kfree(handle); +} + +static struct mlx5_flow_handle * +create_flow_handle(struct fs_fte *fte, + struct mlx5_flow_destination *dest, + int dest_num, + int *modify_mask, + bool *new_rule) { + struct mlx5_flow_handle *handle; + struct mlx5_flow_rule *rule = NULL; + static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); + static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); + int type; + int i = 0; + + handle = alloc_handle((dest_num) ? dest_num : 1); + if (!handle) + return ERR_PTR(-ENOMEM); + + do { + if (dest) { + rule = find_flow_rule(fte, dest + i); + if (rule) { + atomic_inc(&rule->node.refcount); + goto rule_found; + } + } + + *new_rule = true; + rule = alloc_rule(dest + i); + if (!rule) + goto free_rules; + + /* Add dest to dests list- we need flow tables to be in the + * end of the list for forward to next prio rules. + */ + tree_init_node(&rule->node, 1, del_rule); + if (dest && + dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) + list_add(&rule->node.list, &fte->node.children); + else + list_add_tail(&rule->node.list, &fte->node.children); + if (dest) { + fte->dests_size++; + + type = dest[i].type == + MLX5_FLOW_DESTINATION_TYPE_COUNTER; + *modify_mask |= type ? count : dst; + } +rule_found: + handle->rule[i] = rule; + } while (++i < dest_num); + + return handle; + +free_rules: + destroy_flow_handle(fte, handle, dest, i); + return ERR_PTR(-ENOMEM); +} + +/* fte should not be deleted while calling this function */ +static struct mlx5_flow_handle * +add_rule_fte(struct fs_fte *fte, + struct mlx5_flow_group *fg, + struct mlx5_flow_destination *dest, + int dest_num, + bool update_action) +{ + struct mlx5_flow_handle *handle; struct mlx5_flow_table *ft; - struct mlx5_flow_rule *rule; int modify_mask = 0; int err; + bool new_rule = false; - rule = alloc_rule(dest); - if (!rule) - return ERR_PTR(-ENOMEM); + handle = create_flow_handle(fte, dest, dest_num, &modify_mask, + &new_rule); + if (IS_ERR(handle) || !new_rule) + goto out; - fs_get_obj(ft, fg->node.parent); - /* Add dest to dests list- we need flow tables to be in the - * end of the list for forward to next prio rules. - */ - tree_init_node(&rule->node, 1, del_rule); - if (dest && dest->type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) - list_add(&rule->node.list, &fte->node.children); - else - list_add_tail(&rule->node.list, &fte->node.children); - if (dest) { - fte->dests_size++; + if (update_action) + modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); - modify_mask |= dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ? 
- BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS) : - BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); - } - - if (fte->dests_size == 1 || !dest) + fs_get_obj(ft, fg->node.parent); + if (!(fte->status & FS_FTE_STATUS_EXISTING)) err = mlx5_cmd_create_fte(get_dev(&ft->node), ft, fg->id, fte); else err = mlx5_cmd_update_fte(get_dev(&ft->node), ft, fg->id, modify_mask, fte); if (err) - goto free_rule; + goto free_handle; fte->status |= FS_FTE_STATUS_EXISTING; - return rule; +out: + return handle; -free_rule: - list_del(&rule->node.list); - kfree(rule); - if (dest) - fte->dests_size--; +free_handle: + destroy_flow_handle(fte, handle, dest, handle->num_rules); return ERR_PTR(err); } @@ -995,15 +1117,14 @@ static unsigned int get_free_fte_index(struct mlx5_flow_group *fg, /* prev is output, prev->next = new_fte */ static struct fs_fte *create_fte(struct mlx5_flow_group *fg, u32 *match_value, - u8 action, - u32 flow_tag, + struct mlx5_flow_act *flow_act, struct list_head **prev) { struct fs_fte *fte; int index; index = get_free_fte_index(fg, prev); - fte = alloc_fte(action, flow_tag, match_value, index); + fte = alloc_fte(flow_act, match_value, index); if (IS_ERR(fte)) return fte; @@ -1067,71 +1188,81 @@ out: return fg; } +static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, + struct mlx5_flow_destination *d2) +{ + if (d1->type == d2->type) { + if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT && + d1->vport_num == d2->vport_num) || + (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE && + d1->ft == d2->ft) || + (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR && + d1->tir_num == d2->tir_num)) + return true; + } + + return false; +} + static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte, struct mlx5_flow_destination *dest) { struct mlx5_flow_rule *rule; list_for_each_entry(rule, &fte->node.children, node.list) { - if (rule->dest_attr.type == dest->type) { - if ((dest->type == MLX5_FLOW_DESTINATION_TYPE_VPORT && - dest->vport_num == rule->dest_attr.vport_num) || - (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE && - dest->ft == rule->dest_attr.ft) || - (dest->type == MLX5_FLOW_DESTINATION_TYPE_TIR && - dest->tir_num == rule->dest_attr.tir_num)) - return rule; - } + if (mlx5_flow_dests_cmp(&rule->dest_attr, dest)) + return rule; } return NULL; } -static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg, - u32 *match_value, - u8 action, - u32 flow_tag, - struct mlx5_flow_destination *dest) +static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg, + u32 *match_value, + struct mlx5_flow_act *flow_act, + struct mlx5_flow_destination *dest, + int dest_num) { - struct fs_fte *fte; - struct mlx5_flow_rule *rule; + struct mlx5_flow_handle *handle; struct mlx5_flow_table *ft; struct list_head *prev; + struct fs_fte *fte; + int i; nested_lock_ref_node(&fg->node, FS_MUTEX_PARENT); fs_for_each_fte(fte, fg) { nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD); if (compare_match_value(&fg->mask, match_value, &fte->val) && - action == fte->action && flow_tag == fte->flow_tag) { - rule = find_flow_rule(fte, dest); - if (rule) { - atomic_inc(&rule->node.refcount); - unlock_ref_node(&fte->node); - unlock_ref_node(&fg->node); - return rule; + (flow_act->action & fte->action) && + flow_act->flow_tag == fte->flow_tag) { + int old_action = fte->action; + + fte->action |= flow_act->action; + handle = add_rule_fte(fte, fg, dest, dest_num, + old_action != flow_act->action); + if (IS_ERR(handle)) { + fte->action = old_action; + goto unlock_fte; + } else { + goto 
add_rules; } - rule = add_rule_fte(fte, fg, dest); - unlock_ref_node(&fte->node); - if (IS_ERR(rule)) - goto unlock_fg; - else - goto add_rule; } unlock_ref_node(&fte->node); } fs_get_obj(ft, fg->node.parent); if (fg->num_ftes >= fg->max_ftes) { - rule = ERR_PTR(-ENOSPC); + handle = ERR_PTR(-ENOSPC); goto unlock_fg; } - fte = create_fte(fg, match_value, action, flow_tag, &prev); + fte = create_fte(fg, match_value, flow_act, &prev); if (IS_ERR(fte)) { - rule = (void *)fte; + handle = (void *)fte; goto unlock_fg; } tree_init_node(&fte->node, 0, del_fte); - rule = add_rule_fte(fte, fg, dest); - if (IS_ERR(rule)) { + nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD); + handle = add_rule_fte(fte, fg, dest, dest_num, false); + if (IS_ERR(handle)) { kfree(fte); goto unlock_fg; } @@ -1140,19 +1271,24 @@ static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg, tree_add_node(&fte->node, &fg->node); list_add(&fte->node.list, prev); -add_rule: - tree_add_node(&rule->node, &fte->node); +add_rules: + for (i = 0; i < handle->num_rules; i++) { + if (atomic_read(&handle->rule[i]->node.refcount) == 1) + tree_add_node(&handle->rule[i]->node, &fte->node); + } +unlock_fte: + unlock_ref_node(&fte->node); unlock_fg: unlock_ref_node(&fg->node); - return rule; + return handle; } -struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule) +struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handle) { struct mlx5_flow_rule *dst; struct fs_fte *fte; - fs_get_obj(fte, rule->node.parent); + fs_get_obj(fte, handle->rule[0]->node.parent); fs_for_each_dst(dst, fte) { if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) @@ -1170,8 +1306,8 @@ static bool counter_is_valid(struct mlx5_fc *counter, u32 action) if (!counter) return false; - /* Hardware support counter for a drop action only */ - return action == (MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT); + return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP | + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)); } static bool dest_is_valid(struct mlx5_flow_destination *dest, @@ -1191,18 +1327,22 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest, return true; } -static struct mlx5_flow_rule * -_mlx5_add_flow_rule(struct mlx5_flow_table *ft, - struct mlx5_flow_spec *spec, - u32 action, - u32 flow_tag, - struct mlx5_flow_destination *dest) +static struct mlx5_flow_handle * +_mlx5_add_flow_rules(struct mlx5_flow_table *ft, + struct mlx5_flow_spec *spec, + struct mlx5_flow_act *flow_act, + struct mlx5_flow_destination *dest, + int dest_num) + { struct mlx5_flow_group *g; - struct mlx5_flow_rule *rule; + struct mlx5_flow_handle *rule; + int i; - if (!dest_is_valid(dest, action, ft)) - return ERR_PTR(-EINVAL); + for (i = 0; i < dest_num; i++) { + if (!dest_is_valid(&dest[i], flow_act->action, ft)) + return ERR_PTR(-EINVAL); + } nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT); fs_for_each_fg(g, ft) @@ -1211,7 +1351,7 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft, g->mask.match_criteria, spec->match_criteria)) { rule = add_rule_fg(g, spec->match_value, - action, flow_tag, dest); + flow_act, dest, dest_num); if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC) goto unlock; } @@ -1223,8 +1363,7 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft, goto unlock; } - rule = add_rule_fg(g, spec->match_value, - action, flow_tag, dest); + rule = add_rule_fg(g, spec->match_value, flow_act, dest, dest_num); if (IS_ERR(rule)) { /* Remove assumes refcount > 0 and autogroup creates a group * with a refcount = 0. 
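For context, a minimal caller-side sketch of the reworked API: one mlx5_add_flow_rules() call now takes a struct mlx5_flow_act plus an array of destinations and returns a single handle covering every rule it created. The ft, spec, next_ft and counter names below are illustrative, and the dest union member for the counter is assumed from the MLX5_FLOW_DESTINATION_TYPE_COUNTER handling above.

	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_handle *handle;

	/* forward to the next table and count the hits */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;

	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[0].ft = next_ft;
	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter = counter;	/* assumed union member name */

	handle = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... */

	mlx5_del_flow_rules(handle);	/* removes both rules at once */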
@@ -1245,22 +1384,22 @@ static bool fwd_next_prio_supported(struct mlx5_flow_table *ft) (MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs))); } -struct mlx5_flow_rule * -mlx5_add_flow_rule(struct mlx5_flow_table *ft, - struct mlx5_flow_spec *spec, - u32 action, - u32 flow_tag, - struct mlx5_flow_destination *dest) +struct mlx5_flow_handle * +mlx5_add_flow_rules(struct mlx5_flow_table *ft, + struct mlx5_flow_spec *spec, + struct mlx5_flow_act *flow_act, + struct mlx5_flow_destination *dest, + int dest_num) { struct mlx5_flow_root_namespace *root = find_root(&ft->node); struct mlx5_flow_destination gen_dest; struct mlx5_flow_table *next_ft = NULL; - struct mlx5_flow_rule *rule = NULL; - u32 sw_action = action; + struct mlx5_flow_handle *handle = NULL; + u32 sw_action = flow_act->action; struct fs_prio *prio; fs_get_obj(prio, ft->node.parent); - if (action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { + if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { if (!fwd_next_prio_supported(ft)) return ERR_PTR(-EOPNOTSUPP); if (dest) @@ -1271,34 +1410,40 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft, gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; gen_dest.ft = next_ft; dest = &gen_dest; - action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + dest_num = 1; + flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; } else { mutex_unlock(&root->chain_lock); return ERR_PTR(-EOPNOTSUPP); } } - rule = _mlx5_add_flow_rule(ft, spec, action, flow_tag, dest); + handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, dest_num); if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { - if (!IS_ERR_OR_NULL(rule) && - (list_empty(&rule->next_ft))) { + if (!IS_ERR_OR_NULL(handle) && + (list_empty(&handle->rule[0]->next_ft))) { mutex_lock(&next_ft->lock); - list_add(&rule->next_ft, &next_ft->fwd_rules); + list_add(&handle->rule[0]->next_ft, + &next_ft->fwd_rules); mutex_unlock(&next_ft->lock); - rule->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; + handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; } mutex_unlock(&root->chain_lock); } - return rule; + return handle; } -EXPORT_SYMBOL(mlx5_add_flow_rule); +EXPORT_SYMBOL(mlx5_add_flow_rules); -void mlx5_del_flow_rule(struct mlx5_flow_rule *rule) +void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) { - tree_remove_node(&rule->node); + int i; + + for (i = handle->num_rules - 1; i >= 0; i--) + tree_remove_node(&handle->rule[i]->node); + kfree(handle); } -EXPORT_SYMBOL(mlx5_del_flow_rule); +EXPORT_SYMBOL(mlx5_del_flow_rules); /* Assuming prio->node.children(flow tables) is sorted by level */ static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft) @@ -1678,7 +1823,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering) ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR); if (!ns) return -EINVAL; - ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL); + ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0); if (IS_ERR(ft)) { mlx5_core_err(steering->dev, "Failed to create last anchor flow table"); return PTR_ERR(ft); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 71ff03bceabb..8e668c63f69e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -94,6 +94,11 @@ struct mlx5_flow_rule { u32 sw_action; }; +struct mlx5_flow_handle { + int num_rules; + struct mlx5_flow_rule *rule[]; +}; + 
/* Type of children is mlx5_flow_group */ struct mlx5_flow_table { struct fs_node node; @@ -112,6 +117,7 @@ struct mlx5_flow_table { struct mutex lock; /* FWD rules that point on this flow table */ struct list_head fwd_rules; + u32 flags; }; struct mlx5_fc_cache { @@ -145,6 +151,7 @@ struct fs_fte { u32 flow_tag; u32 index; u32 action; + u32 encap_id; enum fs_fte_status status; struct mlx5_fc *counter; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 3b026c151cf2..7431f633de31 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -75,7 +75,7 @@ static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter) struct rb_node *parent = NULL; while (*new) { - struct mlx5_fc *this = container_of(*new, struct mlx5_fc, node); + struct mlx5_fc *this = rb_entry(*new, struct mlx5_fc, node); int result = counter->id - this->id; parent = *new; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 3eb931585b3e..54e5a786f191 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -46,7 +46,6 @@ #include <linux/mlx5/srq.h> #include <linux/debugfs.h> #include <linux/kmod.h> -#include <linux/delay.h> #include <linux/mlx5/mlx5_ifc.h> #ifdef CONFIG_RFS_ACCEL #include <linux/cpu_rmap.h> @@ -63,13 +62,13 @@ MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRIVER_VERSION); -int mlx5_core_debug_mask; -module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644); +unsigned int mlx5_core_debug_mask; +module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644); MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0"); #define MLX5_DEFAULT_PROF 2 -static int prof_sel = MLX5_DEFAULT_PROF; -module_param_named(prof_sel, prof_sel, int, 0444); +static unsigned int prof_sel = MLX5_DEFAULT_PROF; +module_param_named(prof_sel, prof_sel, uint, 0444); MODULE_PARM_DESC(prof_sel, "profile selector. 
Valid range 0 - 2"); enum { @@ -175,6 +174,41 @@ static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili) return err; } +static void mlx5_set_driver_version(struct mlx5_core_dev *dev) +{ + int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in, + driver_version); + u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {0}; + u8 out[MLX5_ST_SZ_BYTES(set_driver_version_out)] = {0}; + int remaining_size = driver_ver_sz; + char *string; + + if (!MLX5_CAP_GEN(dev, driver_version)) + return; + + string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version); + + strncpy(string, "Linux", remaining_size); + + remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); + strncat(string, ",", remaining_size); + + remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); + strncat(string, DRIVER_NAME, remaining_size); + + remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); + strncat(string, ",", remaining_size); + + remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); + strncat(string, DRIVER_VERSION, remaining_size); + + /*Send the command*/ + MLX5_SET(set_driver_version_in, in, opcode, + MLX5_CMD_OP_SET_DRIVER_VERSION); + + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + static int set_dma_caps(struct pci_dev *pdev) { int err; @@ -523,7 +557,7 @@ int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id) return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev) +u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev) { u32 timer_h, timer_h1, timer_l; @@ -533,7 +567,7 @@ cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev) if (timer_h != timer_h1) /* wrap around */ timer_l = ioread32be(&dev->iseg->internal_timer_l); - return (cycle_t)timer_l | (cycle_t)timer_h1 << 32; + return (u64)timer_l | (u64)timer_h1 << 32; } static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) @@ -733,13 +767,15 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev) u8 status; mlx5_cmd_mbox_status(query_out, &status, &syndrome); - if (status == MLX5_CMD_STAT_BAD_OP_ERR) { - pr_debug("Only ISSI 0 is supported\n"); - return 0; + if (!status || syndrome == MLX5_DRIVER_SYND) { + mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n", + err, status, syndrome); + return err; } - pr_err("failed to query ISSI err(%d)\n", err); - return err; + mlx5_core_warn(dev, "Query ISSI is not supported by FW, ISSI is 0\n"); + dev->issi = 0; + return 0; } sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0); @@ -753,7 +789,8 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev) err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), set_out, sizeof(set_out)); if (err) { - pr_err("failed to set ISSI=1 err(%d)\n", err); + mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n", + err); return err; } @@ -1015,6 +1052,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_pagealloc_stop; } + mlx5_set_driver_version(dev); + mlx5_start_health_poll(dev); err = mlx5_query_hca_caps(dev); @@ -1202,6 +1241,8 @@ static const struct devlink_ops mlx5_devlink_ops = { #ifdef CONFIG_MLX5_CORE_EN .eswitch_mode_set = mlx5_devlink_eswitch_mode_set, .eswitch_mode_get = mlx5_devlink_eswitch_mode_get, + .eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set, + .eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get, #endif }; @@ -1228,13 +1269,6 @@ static int init_one(struct pci_dev *pdev, dev->pdev = pdev; dev->event = 
mlx5_core_event; - - if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) { - mlx5_core_warn(dev, - "selected profile out of range, selecting default (%d)\n", - MLX5_DEFAULT_PROF); - prof_sel = MLX5_DEFAULT_PROF; - } dev->profile = &profile[prof_sel]; INIT_LIST_HEAD(&priv->ctx_list); @@ -1424,6 +1458,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5, PCIe 3.0 */ { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */ { PCI_VDEVICE(MELLANOX, 0x1019) }, /* ConnectX-5, PCIe 4.0 */ + { PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5, PCIe 4.0 VF */ { 0, } }; @@ -1451,10 +1486,22 @@ static struct pci_driver mlx5_core_driver = { .sriov_configure = mlx5_core_sriov_configure, }; +static void mlx5_core_verify_params(void) +{ + if (prof_sel >= ARRAY_SIZE(profile)) { + pr_warn("mlx5_core: WARNING: Invalid module parameter prof_sel %d, valid range 0-%zu, changing back to default(%d)\n", + prof_sel, + ARRAY_SIZE(profile) - 1, + MLX5_DEFAULT_PROF); + prof_sel = MLX5_DEFAULT_PROF; + } +} + static int __init init(void) { int err; + mlx5_core_verify_params(); mlx5_register_debugfs(); err = pci_register_driver(&mlx5_core_driver); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 187662c8ea96..d4a99c9757cb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -44,11 +44,11 @@ #define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev)) -extern int mlx5_core_debug_mask; +extern uint mlx5_core_debug_mask; #define mlx5_core_dbg(__dev, format, ...) \ - dev_dbg(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \ - (__dev)->priv.name, __func__, __LINE__, current->pid, \ + dev_dbg(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ ##__VA_ARGS__) #define mlx5_core_dbg_mask(__dev, mask, format, ...) \ @@ -63,8 +63,8 @@ do { \ ##__VA_ARGS__) #define mlx5_core_warn(__dev, format, ...) \ - dev_warn(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \ - (__dev)->priv.name, __func__, __LINE__, current->pid, \ + dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ ##__VA_ARGS__) #define mlx5_core_info(__dev, format, ...) 
\ @@ -75,12 +75,18 @@ enum { MLX5_CMD_TIME, /* print command execution time */ }; +enum { + MLX5_DRIVER_STATUS_ABORTED = 0xfe, + MLX5_DRIVER_SYND = 0xbadd00de, +}; + int mlx5_query_hca_caps(struct mlx5_core_dev *dev); int mlx5_query_board_id(struct mlx5_core_dev *dev); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); +void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); void mlx5_enter_error_state(struct mlx5_core_dev *dev); void mlx5_disable_device(struct mlx5_core_dev *dev); void mlx5_recover_device(struct mlx5_core_dev *dev); @@ -92,8 +98,15 @@ int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs); bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev); int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id); int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id); +int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, + void *context, u32 *element_id); +int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, + void *context, u32 element_id, + u32 modify_bitmask); +int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, + u32 element_id); int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev); -cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev); +u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev); u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx); struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn); void mlx5_cq_tasklet_cb(unsigned long data); @@ -114,6 +127,12 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev); void mlx5_dev_list_lock(void); void mlx5_dev_list_unlock(void); int mlx5_dev_list_trylock(void); +int mlx5_encap_alloc(struct mlx5_core_dev *dev, + int header_type, + size_t size, + void *encap_header, + u32 *encap_id); +void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id); bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 34e7184e23c9..d2ec9d232a70 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -548,6 +548,26 @@ int mlx5_max_tc(struct mlx5_core_dev *mdev) return num_tc - 1; } +int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out) +{ + u32 in[MLX5_ST_SZ_DW(dcbx_param)] = {0}; + + MLX5_SET(dcbx_param, in, port_number, 1); + + return mlx5_core_access_reg(mdev, in, sizeof(in), out, + sizeof(in), MLX5_REG_DCBX_PARAM, 0, 0); +} + +int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in) +{ + u32 out[MLX5_ST_SZ_DW(dcbx_param)]; + + MLX5_SET(dcbx_param, in, port_number, 1); + + return mlx5_core_access_reg(mdev, in, sizeof(out), out, + sizeof(out), MLX5_REG_DCBX_PARAM, 0, 1); +} + int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc) { u32 in[MLX5_ST_SZ_DW(qtct_reg)] = {0}; @@ -572,6 +592,28 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc) } EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc); +int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev, + u8 prio, u8 *tc) +{ + u32 in[MLX5_ST_SZ_DW(qtct_reg)]; + u32 out[MLX5_ST_SZ_DW(qtct_reg)]; + int err; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(qtct_reg, in, port_number, 1); + MLX5_SET(qtct_reg, in, prio, 
prio); + + err = mlx5_core_access_reg(mdev, in, sizeof(in), out, + sizeof(out), MLX5_REG_QTCT, 0, 0); + if (!err) + *tc = MLX5_GET(qtct_reg, out, tclass); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc); + static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, int inlen) { @@ -625,6 +667,27 @@ int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw) } EXPORT_SYMBOL_GPL(mlx5_set_port_tc_bw_alloc); +int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev, + u8 tc, u8 *bw_pct) +{ + u32 out[MLX5_ST_SZ_DW(qetc_reg)]; + void *ets_tcn_conf; + int err; + + err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out)); + if (err) + return err; + + ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out, + tc_configuration[tc]); + + *bw_pct = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf, + bw_allocation); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_tc_bw_alloc); + int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, u8 *max_bw_value, u8 *max_bw_units) @@ -746,3 +809,60 @@ void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, *supported = !!(MLX5_GET(pcmr_reg, out, fcs_cap)); *enabled = !!(MLX5_GET(pcmr_reg, out, fcs_chk)); } + +static const char *mlx5_pme_status[MLX5_MODULE_STATUS_NUM] = { + "Cable plugged", /* MLX5_MODULE_STATUS_PLUGGED = 0x1 */ + "Cable unplugged", /* MLX5_MODULE_STATUS_UNPLUGGED = 0x2 */ + "Cable error", /* MLX5_MODULE_STATUS_ERROR = 0x3 */ +}; + +static const char *mlx5_pme_error[MLX5_MODULE_EVENT_ERROR_NUM] = { + "Power budget exceeded", + "Long Range for non MLNX cable", + "Bus stuck(I2C or data shorted)", + "No EEPROM/retry timeout", + "Enforce part number list", + "Unknown identifier", + "High Temperature", + "Bad or shorted cable/module", + "Unknown status", +}; + +void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) +{ + enum port_module_event_status_type module_status; + enum port_module_event_error_type error_type; + struct mlx5_eqe_port_module *module_event_eqe; + struct mlx5_priv *priv = &dev->priv; + u8 module_num; + + module_event_eqe = &eqe->data.port_module; + module_num = module_event_eqe->module; + module_status = module_event_eqe->module_status & + PORT_MODULE_EVENT_MODULE_STATUS_MASK; + error_type = module_event_eqe->error_type & + PORT_MODULE_EVENT_ERROR_TYPE_MASK; + + if (module_status < MLX5_MODULE_STATUS_ERROR) { + priv->pme_stats.status_counters[module_status - 1]++; + } else if (module_status == MLX5_MODULE_STATUS_ERROR) { + if (error_type >= MLX5_MODULE_EVENT_ERROR_UNKNOWN) + /* Unknown error type */ + error_type = MLX5_MODULE_EVENT_ERROR_UNKNOWN; + priv->pme_stats.error_counters[error_type]++; + } + + if (!printk_ratelimit()) + return; + + if (module_status < MLX5_MODULE_STATUS_ERROR) + mlx5_core_info(dev, + "Port module event: module %u, %s\n", + module_num, mlx5_pme_status[module_status - 1]); + + else if (module_status == MLX5_MODULE_STATUS_ERROR) + mlx5_core_info(dev, + "Port module event[error]: module %u, %s, %s\n", + module_num, mlx5_pme_status[module_status - 1], + mlx5_pme_error[error_type]); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c index 104902a93a0b..e651e4c02867 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c @@ -36,6 +36,71 @@ #include <linux/mlx5/cmd.h> #include "mlx5_core.h" +/* Scheduling element fw management */ +int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, + void *ctx, u32 *element_id) +{ + u32 
in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0}; + void *schedc; + int err; + + schedc = MLX5_ADDR_OF(create_scheduling_element_in, in, + scheduling_context); + MLX5_SET(create_scheduling_element_in, in, opcode, + MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT); + MLX5_SET(create_scheduling_element_in, in, scheduling_hierarchy, + hierarchy); + memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context)); + + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (err) + return err; + + *element_id = MLX5_GET(create_scheduling_element_out, out, + scheduling_element_id); + return 0; +} + +int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, + void *ctx, u32 element_id, + u32 modify_bitmask) +{ + u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {0}; + void *schedc; + + schedc = MLX5_ADDR_OF(modify_scheduling_element_in, in, + scheduling_context); + MLX5_SET(modify_scheduling_element_in, in, opcode, + MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT); + MLX5_SET(modify_scheduling_element_in, in, scheduling_element_id, + element_id); + MLX5_SET(modify_scheduling_element_in, in, modify_bitmask, + modify_bitmask); + MLX5_SET(modify_scheduling_element_in, in, scheduling_hierarchy, + hierarchy); + memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context)); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, + u32 element_id) +{ + u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {0}; + + MLX5_SET(destroy_scheduling_element_in, in, opcode, + MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT); + MLX5_SET(destroy_scheduling_element_in, in, scheduling_element_id, + element_id); + MLX5_SET(destroy_scheduling_element_in, in, scheduling_hierarchy, + hierarchy); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + /* Finds an entry where we can register the given rate * If the rate already exists, return the entry where it is registered, * otherwise return the first available entry. 
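A usage sketch for the new scheduling-element commands (mdev, hierarchy and modify_bitmask are illustrative; the scheduling_context fields themselves are left to MLX5_SET() calls not shown here): create hands back the element_id that modify and destroy then key on.

	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	u32 element_id;
	int err;

	/* populate sched_ctx via MLX5_SET(scheduling_context, ...) first */
	err = mlx5_create_scheduling_element_cmd(mdev, hierarchy,
						 sched_ctx, &element_id);
	if (err)
		return err;

	/* modify_bitmask selects which context fields to apply */
	err = mlx5_modify_scheduling_element_cmd(mdev, hierarchy, sched_ctx,
						 element_id, modify_bitmask);
	if (err)
		return err;

	return mlx5_destroy_scheduling_element_cmd(mdev, hierarchy,
						   element_id);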
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 525f17af108e..269e4401c342 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -113,15 +113,17 @@ static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in, return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); } -void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, - u8 *min_inline_mode) +int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, + u16 vport, u8 *min_inline) { u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0}; + int err; - mlx5_query_nic_vport_context(mdev, 0, out, sizeof(out)); - - *min_inline_mode = MLX5_GET(query_nic_vport_context_out, out, - nic_vport_context.min_wqe_inline_mode); + err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out)); + if (!err) + *min_inline = MLX5_GET(query_nic_vport_context_out, out, + nic_vport_context.min_wqe_inline_mode); + return err; } EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index 821a087c7ae2..921673c42bc9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -101,13 +101,15 @@ err_db_free: int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *cqc, struct mlx5_cqwq *wq, - struct mlx5_wq_ctrl *wq_ctrl) + struct mlx5_frag_wq_ctrl *wq_ctrl) { int err; - wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz); - wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size); - wq->sz_m1 = (1 << wq->log_sz) - 1; + wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz); + wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size); + wq->sz_m1 = (1 << wq->log_sz) - 1; + wq->log_frag_strides = PAGE_SHIFT - wq->log_stride; + wq->frag_sz_m1 = (1 << wq->log_frag_strides) - 1; err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { @@ -115,14 +117,16 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, return err; } - err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq), - &wq_ctrl->buf, param->buf_numa_node); + err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq), + &wq_ctrl->frag_buf, + param->buf_numa_node); if (err) { - mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err); + mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", + err); goto err_db_free; } - wq->buf = wq_ctrl->buf.direct.buf; + wq->frag_buf = wq_ctrl->frag_buf; wq->db = wq_ctrl->db.db; wq_ctrl->mdev = mdev; @@ -184,3 +188,9 @@ void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl) mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf); mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db); } + +void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl) +{ + mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->frag_buf); + mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index 6c2a8f95093c..d8afed898c31 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -47,6 +47,12 @@ struct mlx5_wq_ctrl { struct mlx5_db db; }; +struct mlx5_frag_wq_ctrl { + struct mlx5_core_dev *mdev; + struct mlx5_frag_buf frag_buf; + struct mlx5_db db; +}; + struct mlx5_wq_cyc { void *buf; __be32 *db; @@ -55,12 +61,14 @@ struct mlx5_wq_cyc { }; struct mlx5_cqwq { - void *buf; + struct mlx5_frag_buf frag_buf; 
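+	/* CQEs now live in page-sized fragments instead of one
+	 * physically contiguous buffer; mlx5_cqwq_get_wqe() below
+	 * maps an entry index to its fragment */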
__be32 *db; u32 sz_m1; + u32 frag_sz_m1; u32 cc; /* consumer counter */ u8 log_sz; u8 log_stride; + u8 log_frag_strides; }; struct mlx5_wq_ll { @@ -81,7 +89,7 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *cqc, struct mlx5_cqwq *wq, - struct mlx5_wq_ctrl *wq_ctrl); + struct mlx5_frag_wq_ctrl *wq_ctrl); u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq); int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, @@ -90,6 +98,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq); void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl); +void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl); static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr) { @@ -116,7 +125,10 @@ static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq) static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix) { - return wq->buf + (ix << wq->log_stride); + unsigned int frag = (ix >> wq->log_frag_strides); + + return wq->frag_buf.frags[frag].buf + + ((wq->frag_sz_m1 & ix) << wq->log_stride); } static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq) diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 5989f7cb5462..16f44b9aa076 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -19,6 +19,15 @@ config MLXSW_CORE_HWMON ---help--- Say Y here if you want to expose HWMON interface on mlxsw devices. +config MLXSW_CORE_THERMAL + bool "Thermal zone support for Mellanox Technologies Switch ASICs" + depends on MLXSW_CORE && THERMAL + depends on !(MLXSW_CORE=y && THERMAL=m) + default y + ---help--- + Say Y here if you want to automatically control fans speed according + ambient temperature reported by ASIC. + config MLXSW_PCI tristate "PCI bus implementation for Mellanox Technologies Switch ASICs" depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE @@ -29,9 +38,30 @@ config MLXSW_PCI To compile this driver as a module, choose M here: the module will be called mlxsw_pci. +config MLXSW_I2C + tristate "I2C bus implementation for Mellanox Technologies Switch ASICs" + depends on I2C && MLXSW_CORE + default m + ---help--- + This is I2C bus implementation for Mellanox Technologies Switch ASICs. + + To compile this driver as a module, choose M here: the + module will be called mlxsw_i2c. + +config MLXSW_SWITCHIB + tristate "Mellanox Technologies SwitchIB and SwitchIB-2 support" + depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV + default m + ---help--- + This driver supports Mellanox Technologies SwitchIB and SwitchIB-2 + Infiniband Switch ASICs. + + To compile this driver as a module, choose M here: the + module will be called mlxsw_switchib. 
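Referring back to mlx5_cqwq_get_wqe() in the wq.h hunk above, a worked instance of the fragment arithmetic (numbers assume 64-byte CQEs and 4K pages, so log_stride = 6, log_frag_strides = PAGE_SHIFT - 6 = 6 and frag_sz_m1 = 63): looking up entry ix = 130 resolves as

	frag   = ix >> wq->log_frag_strides;	/* 130 >> 6 = 2, third fragment */
	within = wq->frag_sz_m1 & ix;		/* 130 & 63 = 2, third CQE there */
	cqe    = wq->frag_buf.frags[frag].buf +
		 (within << wq->log_stride);	/* 2 << 6 = byte offset 128 */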
+ config MLXSW_SWITCHX2 tristate "Mellanox Technologies SwitchX-2 support" - depends on MLXSW_CORE && NET_SWITCHDEV + depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV default m ---help--- This driver supports Mellanox Technologies SwitchX-2 Ethernet @@ -42,7 +72,7 @@ config MLXSW_SWITCHX2 config MLXSW_SPECTRUM tristate "Mellanox Technologies Spectrum support" - depends on MLXSW_CORE && NET_SWITCHDEV && VLAN_8021Q + depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q default m ---help--- This driver supports Mellanox Technologies Spectrum Ethernet @@ -58,3 +88,14 @@ config MLXSW_SPECTRUM_DCB ---help--- Say Y here if you want to use Data Center Bridging (DCB) in the driver. + +config MLXSW_MINIMAL + tristate "Mellanox Technologies minimal I2C support" + depends on MLXSW_CORE && MLXSW_I2C + default m + ---help--- + This driver supports I2C access for Mellanox Technologies Switch + ASICs. + + To compile this driver as a module, choose M here: the + module will be called mlxsw_minimal. diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index d20ae1838a64..fe8dadba15ab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -1,8 +1,13 @@ obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o mlxsw_core-objs := core.o mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o +mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o mlxsw_pci-objs := pci.o +obj-$(CONFIG_MLXSW_I2C) += mlxsw_i2c.o +mlxsw_i2c-objs := i2c.o +obj-$(CONFIG_MLXSW_SWITCHIB) += mlxsw_switchib.o +mlxsw_switchib-objs := switchib.o obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o mlxsw_switchx2-objs := switchx2.o obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o @@ -10,3 +15,5 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_switchdev.o spectrum_router.o \ spectrum_kvdl.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o +obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o +mlxsw_minimal-objs := minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index 28271bedd957..56e19b0d2f8f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -513,6 +513,11 @@ static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core) * are no more sources in the table, will return resource id 0xFFF to indicate * it. 
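 * The MLXSW_CMD_QUERY_RESOURCES_* constants added below bound that
 * walk: id 0xffff marks the end of the table, and at most 100 queries
 * of 32 entries each are issued.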
*/ + +#define MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID 0xffff +#define MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES 100 +#define MLXSW_CMD_QUERY_RESOURCES_PER_QUERY 32 + static inline int mlxsw_cmd_query_resources(struct mlxsw_core *mlxsw_core, char *out_mbox, int index) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index aa33d58b9f81..57a98849551b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -67,6 +67,7 @@ #include "trap.h" #include "emad.h" #include "reg.h" +#include "resources.h" static LIST_HEAD(mlxsw_core_driver_list); static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock); @@ -76,6 +77,7 @@ static const char mlxsw_core_driver_name[] = "mlxsw_core"; static struct dentry *mlxsw_core_dbg_root; static struct workqueue_struct *mlxsw_wq; +static struct workqueue_struct *mlxsw_owq; struct mlxsw_core_pcpu_stats { u64 trap_rx_packets[MLXSW_TRAP_ID_MAX]; @@ -89,6 +91,23 @@ struct mlxsw_core_pcpu_stats { u32 port_rx_invalid; }; +struct mlxsw_core_port { + struct devlink_port devlink_port; + void *port_driver_priv; + u8 local_port; +}; + +void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port) +{ + return mlxsw_core_port->port_driver_priv; +} +EXPORT_SYMBOL(mlxsw_core_port_driver_priv); + +static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port) +{ + return mlxsw_core_port->port_driver_priv != NULL; +} + struct mlxsw_core { struct mlxsw_driver *driver; const struct mlxsw_bus *bus; @@ -111,8 +130,10 @@ struct mlxsw_core { struct { u8 *mapping; /* lag_id+port_index to local_port mapping */ } lag; - struct mlxsw_resources resources; + struct mlxsw_res res; struct mlxsw_hwmon *hwmon; + struct mlxsw_thermal *thermal; + struct mlxsw_core_port ports[MLXSW_PORT_MAX_PORTS]; unsigned long driver_priv[0]; /* driver_priv has to be always the last item */ }; @@ -552,33 +573,18 @@ free_skb: dev_kfree_skb(skb); } -static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = { - .func = mlxsw_emad_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_ETHEMAD, -}; - -static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core) -{ - char htgt_pl[MLXSW_REG_HTGT_LEN]; - char hpkt_pl[MLXSW_REG_HPKT_LEN]; - int err; - - mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD); - err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); - if (err) - return err; - - mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, - MLXSW_TRAP_ID_ETHEMAD); - return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); -} +static const struct mlxsw_listener mlxsw_emad_rx_listener = + MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false, + EMAD, DISCARD); static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) { u64 tid; int err; + if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) + return 0; + /* Set the upper 32 bits of the transaction ID field to a random * number. This allows us to discard EMADs addressed to other * devices. 
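A minimal sketch of the listener idiom this introduces (the example_rx_func handler, trap id and priv pointer are hypothetical; the argument order simply mirrors the MLXSW_RXL() instance above): registration programs the trap via HPKT with the listener's action, and unregistration reprograms it with the unreg action before dropping the listener.

	static const struct mlxsw_listener example_listener =
		MLXSW_RXL(example_rx_func, ETHEMAD, TRAP_TO_CPU, false,
			  EMAD, DISCARD);

	err = mlxsw_core_trap_register(mlxsw_core, &example_listener, priv);
	if (err)
		return err;

	/* ... */

	mlxsw_core_trap_unregister(mlxsw_core, &example_listener, priv);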
@@ -590,39 +596,33 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) INIT_LIST_HEAD(&mlxsw_core->emad.trans_list); spin_lock_init(&mlxsw_core->emad.trans_list_lock); - err = mlxsw_core_rx_listener_register(mlxsw_core, - &mlxsw_emad_rx_listener, - mlxsw_core); + err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener, + mlxsw_core); if (err) return err; - err = mlxsw_emad_traps_set(mlxsw_core); + err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core); if (err) goto err_emad_trap_set; - mlxsw_core->emad.use_emad = true; return 0; err_emad_trap_set: - mlxsw_core_rx_listener_unregister(mlxsw_core, - &mlxsw_emad_rx_listener, - mlxsw_core); + mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, + mlxsw_core); return err; } static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core) { - char hpkt_pl[MLXSW_REG_HPKT_LEN]; - mlxsw_core->emad.use_emad = false; - mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, - MLXSW_TRAP_ID_ETHEMAD); - mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); + if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) + return; - mlxsw_core_rx_listener_unregister(mlxsw_core, - &mlxsw_emad_rx_listener, - mlxsw_core); + mlxsw_core->emad.use_emad = false; + mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, + mlxsw_core); } static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, @@ -822,17 +822,6 @@ static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind) spin_lock(&mlxsw_core_driver_list_lock); mlxsw_driver = __driver_find(kind); - if (!mlxsw_driver) { - spin_unlock(&mlxsw_core_driver_list_lock); - request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind); - spin_lock(&mlxsw_core_driver_list_lock); - mlxsw_driver = __driver_find(kind); - } - if (mlxsw_driver) { - if (!try_module_get(mlxsw_driver->owner)) - mlxsw_driver = NULL; - } - spin_unlock(&mlxsw_core_driver_list_lock); return mlxsw_driver; } @@ -844,9 +833,6 @@ static void mlxsw_core_driver_put(const char *kind) spin_lock(&mlxsw_core_driver_list_lock); mlxsw_driver = __driver_find(kind); spin_unlock(&mlxsw_core_driver_list_lock); - if (!mlxsw_driver) - return; - module_put(mlxsw_driver->owner); } static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core) @@ -933,6 +919,21 @@ static void *__dl_port(struct devlink_port *devlink_port) return container_of(devlink_port, struct mlxsw_core_port, devlink_port); } +static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port, + enum devlink_port_type port_type) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); + + if (!mlxsw_driver->port_type_set) + return -EOPNOTSUPP; + + return mlxsw_driver->port_type_set(mlxsw_core, + mlxsw_core_port->local_port, + port_type); +} + static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port, unsigned int sb_index, u16 pool_index, u32 *p_threshold) @@ -941,7 +942,8 @@ static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port, struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); - if (!mlxsw_driver->sb_port_pool_get) + if (!mlxsw_driver->sb_port_pool_get || + !mlxsw_core_port_check(mlxsw_core_port)) return -EOPNOTSUPP; return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index, pool_index, p_threshold); @@ -955,7 +957,8 @@ static int mlxsw_devlink_sb_port_pool_set(struct devlink_port 
*devlink_port, struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); - if (!mlxsw_driver->sb_port_pool_set) + if (!mlxsw_driver->sb_port_pool_set || + !mlxsw_core_port_check(mlxsw_core_port)) return -EOPNOTSUPP; return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index, pool_index, threshold); @@ -971,7 +974,8 @@ mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port, struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); - if (!mlxsw_driver->sb_tc_pool_bind_get) + if (!mlxsw_driver->sb_tc_pool_bind_get || + !mlxsw_core_port_check(mlxsw_core_port)) return -EOPNOTSUPP; return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index, tc_index, pool_type, @@ -988,7 +992,8 @@ mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port, struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); - if (!mlxsw_driver->sb_tc_pool_bind_set) + if (!mlxsw_driver->sb_tc_pool_bind_set || + !mlxsw_core_port_check(mlxsw_core_port)) return -EOPNOTSUPP; return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index, tc_index, pool_type, @@ -1026,7 +1031,8 @@ mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port, struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); - if (!mlxsw_driver->sb_occ_port_pool_get) + if (!mlxsw_driver->sb_occ_port_pool_get || + !mlxsw_core_port_check(mlxsw_core_port)) return -EOPNOTSUPP; return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index, pool_index, p_cur, p_max); @@ -1042,7 +1048,8 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port, struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); - if (!mlxsw_driver->sb_occ_tc_port_bind_get) + if (!mlxsw_driver->sb_occ_tc_port_bind_get || + !mlxsw_core_port_check(mlxsw_core_port)) return -EOPNOTSUPP; return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port, sb_index, tc_index, @@ -1050,6 +1057,7 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port, } static const struct devlink_ops mlxsw_devlink_ops = { + .port_type_set = mlxsw_devlink_port_type_set, .port_split = mlxsw_devlink_port_split, .port_unsplit = mlxsw_devlink_port_unsplit, .sb_pool_get = mlxsw_devlink_sb_pool_get, @@ -1101,14 +1109,15 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, } err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, - &mlxsw_core->resources); + &mlxsw_core->res); if (err) goto err_bus_init; - if (mlxsw_core->resources.max_lag_valid && - mlxsw_core->resources.max_ports_in_lag_valid) { - alloc_size = sizeof(u8) * mlxsw_core->resources.max_lag * - mlxsw_core->resources.max_ports_in_lag; + if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) && + MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) { + alloc_size = sizeof(u8) * + MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) * + MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL); if (!mlxsw_core->lag.mapping) { err = -ENOMEM; @@ -1128,9 +1137,16 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_hwmon_init; - err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info); + err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info, + 
&mlxsw_core->thermal); if (err) - goto err_driver_init; + goto err_thermal_init; + + if (mlxsw_driver->init) { + err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info); + if (err) + goto err_driver_init; + } err = mlxsw_core_debugfs_init(mlxsw_core); if (err) @@ -1139,8 +1155,11 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, return 0; err_debugfs_init: - mlxsw_core->driver->fini(mlxsw_core); + if (mlxsw_core->driver->fini) + mlxsw_core->driver->fini(mlxsw_core); err_driver_init: + mlxsw_thermal_fini(mlxsw_core->thermal); +err_thermal_init: err_hwmon_init: devlink_unregister(devlink); err_devlink_register: @@ -1165,11 +1184,13 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core) struct devlink *devlink = priv_to_devlink(mlxsw_core); mlxsw_core_debugfs_fini(mlxsw_core); - mlxsw_core->driver->fini(mlxsw_core); + if (mlxsw_core->driver->fini) + mlxsw_core->driver->fini(mlxsw_core); + mlxsw_thermal_fini(mlxsw_core->thermal); devlink_unregister(devlink); mlxsw_emad_fini(mlxsw_core); - mlxsw_core->bus->fini(mlxsw_core->bus_priv); kfree(mlxsw_core->lag.mapping); + mlxsw_core->bus->fini(mlxsw_core->bus_priv); free_percpu(mlxsw_core->pcpu_stats); devlink_free(devlink); mlxsw_core_driver_put(device_kind); @@ -1346,6 +1367,75 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_event_listener_unregister); +static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core, + const struct mlxsw_listener *listener, + void *priv) +{ + if (listener->is_event) + return mlxsw_core_event_listener_register(mlxsw_core, + &listener->u.event_listener, + priv); + else + return mlxsw_core_rx_listener_register(mlxsw_core, + &listener->u.rx_listener, + priv); +} + +static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core, + const struct mlxsw_listener *listener, + void *priv) +{ + if (listener->is_event) + mlxsw_core_event_listener_unregister(mlxsw_core, + &listener->u.event_listener, + priv); + else + mlxsw_core_rx_listener_unregister(mlxsw_core, + &listener->u.rx_listener, + priv); +} + +int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core, + const struct mlxsw_listener *listener, void *priv) +{ + char hpkt_pl[MLXSW_REG_HPKT_LEN]; + int err; + + err = mlxsw_core_listener_register(mlxsw_core, listener, priv); + if (err) + return err; + + mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id, + listener->trap_group, listener->is_ctrl); + err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); + if (err) + goto err_trap_set; + + return 0; + +err_trap_set: + mlxsw_core_listener_unregister(mlxsw_core, listener, priv); + return err; +} +EXPORT_SYMBOL(mlxsw_core_trap_register); + +void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core, + const struct mlxsw_listener *listener, + void *priv) +{ + char hpkt_pl[MLXSW_REG_HPKT_LEN]; + + if (!listener->is_event) { + mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action, + listener->trap_id, listener->trap_group, + listener->is_ctrl); + mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); + } + + mlxsw_core_listener_unregister(mlxsw_core, listener, priv); +} +EXPORT_SYMBOL(mlxsw_core_trap_unregister); + static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core) { return atomic64_inc_return(&mlxsw_core->emad.tid); @@ -1615,7 +1705,7 @@ EXPORT_SYMBOL(mlxsw_core_skb_receive); static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core, u16 lag_id, u8 port_index) { - return mlxsw_core->resources.max_ports_in_lag * lag_id 
+ + return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id + port_index; } @@ -1644,7 +1734,7 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, { int i; - for (i = 0; i < mlxsw_core->resources.max_ports_in_lag; i++) { + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) { int index = mlxsw_core_lag_mapping_index(mlxsw_core, lag_id, i); @@ -1654,34 +1744,97 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear); -struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core) +bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core, + enum mlxsw_res_id res_id) { - return &mlxsw_core->resources; + return mlxsw_res_valid(&mlxsw_core->res, res_id); } -EXPORT_SYMBOL(mlxsw_core_resources_get); +EXPORT_SYMBOL(mlxsw_core_res_valid); -int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, - struct mlxsw_core_port *mlxsw_core_port, u8 local_port, - struct net_device *dev, bool split, u32 split_group) +u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core, + enum mlxsw_res_id res_id) +{ + return mlxsw_res_get(&mlxsw_core->res, res_id); +} +EXPORT_SYMBOL(mlxsw_core_res_get); + +int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port) { struct devlink *devlink = priv_to_devlink(mlxsw_core); + struct mlxsw_core_port *mlxsw_core_port = + &mlxsw_core->ports[local_port]; struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; + int err; - if (split) - devlink_port_split_set(devlink_port, split_group); - devlink_port_type_eth_set(devlink_port, dev); - return devlink_port_register(devlink, devlink_port, local_port); + mlxsw_core_port->local_port = local_port; + err = devlink_port_register(devlink, devlink_port, local_port); + if (err) + memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port)); + return err; } EXPORT_SYMBOL(mlxsw_core_port_init); -void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port) +void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port) { + struct mlxsw_core_port *mlxsw_core_port = + &mlxsw_core->ports[local_port]; struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; devlink_port_unregister(devlink_port); + memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port)); } EXPORT_SYMBOL(mlxsw_core_port_fini); +void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port, + void *port_driver_priv, struct net_device *dev, + bool split, u32 split_group) +{ + struct mlxsw_core_port *mlxsw_core_port = + &mlxsw_core->ports[local_port]; + struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; + + mlxsw_core_port->port_driver_priv = port_driver_priv; + if (split) + devlink_port_split_set(devlink_port, split_group); + devlink_port_type_eth_set(devlink_port, dev); +} +EXPORT_SYMBOL(mlxsw_core_port_eth_set); + +void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port, + void *port_driver_priv) +{ + struct mlxsw_core_port *mlxsw_core_port = + &mlxsw_core->ports[local_port]; + struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; + + mlxsw_core_port->port_driver_priv = port_driver_priv; + devlink_port_type_ib_set(devlink_port, NULL); +} +EXPORT_SYMBOL(mlxsw_core_port_ib_set); + +void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port, + void *port_driver_priv) +{ + struct mlxsw_core_port *mlxsw_core_port = + &mlxsw_core->ports[local_port]; + struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; + + mlxsw_core_port->port_driver_priv = 
port_driver_priv; + devlink_port_type_clear(devlink_port); +} +EXPORT_SYMBOL(mlxsw_core_port_clear); + +enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core, + u8 local_port) +{ + struct mlxsw_core_port *mlxsw_core_port = + &mlxsw_core->ports[local_port]; + struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; + + return devlink_port->type; +} +EXPORT_SYMBOL(mlxsw_core_port_type_get); + static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, const char *buf, size_t size) { @@ -1748,6 +1901,18 @@ int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay) } EXPORT_SYMBOL(mlxsw_core_schedule_dw); +int mlxsw_core_schedule_odw(struct delayed_work *dwork, unsigned long delay) +{ + return queue_delayed_work(mlxsw_owq, dwork, delay); +} +EXPORT_SYMBOL(mlxsw_core_schedule_odw); + +void mlxsw_core_flush_owq(void) +{ + flush_workqueue(mlxsw_owq); +} +EXPORT_SYMBOL(mlxsw_core_flush_owq); + static int __init mlxsw_core_module_init(void) { int err; @@ -1755,6 +1920,12 @@ static int __init mlxsw_core_module_init(void) mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0); if (!mlxsw_wq) return -ENOMEM; + mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM, + mlxsw_core_driver_name); + if (!mlxsw_owq) { + err = -ENOMEM; + goto err_alloc_ordered_workqueue; + } mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL); if (!mlxsw_core_dbg_root) { err = -ENOMEM; @@ -1763,6 +1934,8 @@ static int __init mlxsw_core_module_init(void) return 0; err_debugfs_create_dir: + destroy_workqueue(mlxsw_owq); +err_alloc_ordered_workqueue: destroy_workqueue(mlxsw_wq); return err; } @@ -1770,6 +1943,7 @@ err_debugfs_create_dir: static void __exit mlxsw_core_module_exit(void) { debugfs_remove_recursive(mlxsw_core_dbg_root); + destroy_workqueue(mlxsw_owq); destroy_workqueue(mlxsw_wq); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index c4f550b6f783..a7f94fbc898b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -48,17 +48,11 @@ #include "trap.h" #include "reg.h" - #include "cmd.h" - -#define MLXSW_MODULE_ALIAS_PREFIX "mlxsw-driver-" -#define MODULE_MLXSW_DRIVER_ALIAS(kind) \ - MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind) - -#define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2" -#define MLXSW_DEVICE_KIND_SPECTRUM "spectrum" +#include "resources.h" struct mlxsw_core; +struct mlxsw_core_port; struct mlxsw_driver; struct mlxsw_bus; struct mlxsw_bus_info; @@ -96,6 +90,50 @@ struct mlxsw_event_listener { enum mlxsw_event_trap_id trap_id; }; +struct mlxsw_listener { + u16 trap_id; + union { + struct mlxsw_rx_listener rx_listener; + struct mlxsw_event_listener event_listener; + } u; + enum mlxsw_reg_hpkt_action action; + enum mlxsw_reg_hpkt_action unreg_action; + u8 trap_group; + bool is_ctrl; /* should go via control buffer or not */ + bool is_event; +}; + +#define MLXSW_RXL(_func, _trap_id, _action, _is_ctrl, _trap_group, \ + _unreg_action) \ + { \ + .trap_id = MLXSW_TRAP_ID_##_trap_id, \ + .u.rx_listener = \ + { \ + .func = _func, \ + .local_port = MLXSW_PORT_DONT_CARE, \ + .trap_id = MLXSW_TRAP_ID_##_trap_id, \ + }, \ + .action = MLXSW_REG_HPKT_ACTION_##_action, \ + .unreg_action = MLXSW_REG_HPKT_ACTION_##_unreg_action, \ + .trap_group = MLXSW_REG_HTGT_TRAP_GROUP_##_trap_group, \ + .is_ctrl = _is_ctrl, \ + .is_event = false, \ + } + +#define MLXSW_EVENTL(_func, _trap_id, _trap_group) \ + { \ + .trap_id = 
MLXSW_TRAP_ID_##_trap_id, \ + .u.event_listener = \ + { \ + .func = _func, \ + .trap_id = MLXSW_TRAP_ID_##_trap_id, \ + }, \ + .action = MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, \ + .trap_group = MLXSW_REG_HTGT_TRAP_GROUP_##_trap_group, \ + .is_ctrl = false, \ + .is_event = true, \ + } + int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core, const struct mlxsw_rx_listener *rxl, void *priv); @@ -110,6 +148,13 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core, const struct mlxsw_event_listener *el, void *priv); +int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core, + const struct mlxsw_listener *listener, + void *priv); +void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core, + const struct mlxsw_listener *listener, + void *priv); + typedef void mlxsw_reg_trans_cb_t(struct mlxsw_core *mlxsw_core, char *payload, size_t payload_len, unsigned long cb_priv); @@ -148,25 +193,22 @@ u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, u16 lag_id, u8 local_port); -struct mlxsw_core_port { - struct devlink_port devlink_port; -}; - -static inline void * -mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port) -{ - /* mlxsw_core_port is ensured to always be the first field in driver - * port structure. - */ - return mlxsw_core_port; -} - -int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, - struct mlxsw_core_port *mlxsw_core_port, u8 local_port, - struct net_device *dev, bool split, u32 split_group); -void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port); +void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port); +int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port); +void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port); +void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port, + void *port_driver_priv, struct net_device *dev, + bool split, u32 split_group); +void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port, + void *port_driver_priv); +void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port, + void *port_driver_priv); +enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core, + u8 local_port); int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay); +int mlxsw_core_schedule_odw(struct delayed_work *dwork, unsigned long delay); +void mlxsw_core_flush_owq(void); #define MLXSW_CONFIG_PROFILE_SWID_COUNT 8 @@ -221,11 +263,13 @@ struct mlxsw_config_profile { struct mlxsw_driver { struct list_head list; const char *kind; - struct module *owner; size_t priv_size; int (*init)(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info); void (*fini)(struct mlxsw_core *mlxsw_core); + int (*basic_trap_groups_set)(struct mlxsw_core *mlxsw_core); + int (*port_type_set)(struct mlxsw_core *mlxsw_core, u8 local_port, + enum devlink_port_type new_type); int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port, unsigned int count); int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port); @@ -266,45 +310,25 @@ struct mlxsw_driver { const struct mlxsw_config_profile *profile; }; -struct mlxsw_resources { - u32 max_span_valid:1, - max_lag_valid:1, - max_ports_in_lag_valid:1, - kvd_size_valid:1, - kvd_single_min_size_valid:1, - kvd_double_min_size_valid:1, - max_virtual_routers_valid:1, - max_system_ports_valid:1, - max_vlan_groups_valid:1, - max_regions_valid:1, - max_rif_valid:1; - u8 max_span; 
- u8 max_lag; - u8 max_ports_in_lag; - u32 kvd_size; - u32 kvd_single_min_size; - u32 kvd_double_min_size; - u16 max_virtual_routers; - u16 max_system_ports; - u16 max_vlan_groups; - u16 max_regions; - u16 max_rif; +bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core, + enum mlxsw_res_id res_id); - /* Internal resources. - * Determined by the SW, not queried from the HW. - */ - u32 kvd_single_size; - u32 kvd_double_size; - u32 kvd_linear_size; -}; +#define MLXSW_CORE_RES_VALID(res, short_res_id) \ + mlxsw_core_res_valid(res, MLXSW_RES_ID_##short_res_id) + +u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core, + enum mlxsw_res_id res_id); -struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core); +#define MLXSW_CORE_RES_GET(res, short_res_id) \ + mlxsw_core_res_get(res, MLXSW_RES_ID_##short_res_id) + +#define MLXSW_BUS_F_TXRX BIT(0) struct mlxsw_bus { const char *kind; int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core, const struct mlxsw_config_profile *profile, - struct mlxsw_resources *resources); + struct mlxsw_res *res); void (*fini)(void *bus_priv); bool (*skb_transmit_busy)(void *bus_priv, const struct mlxsw_tx_info *tx_info); @@ -315,6 +339,7 @@ struct mlxsw_bus { char *in_mbox, size_t in_mbox_size, char *out_mbox, size_t out_mbox_size, u8 *p_status); + u8 features; }; struct mlxsw_bus_info { @@ -350,4 +375,28 @@ static inline int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, #endif +struct mlxsw_thermal; + +#ifdef CONFIG_MLXSW_CORE_THERMAL + +int mlxsw_thermal_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info, + struct mlxsw_thermal **p_thermal); +void mlxsw_thermal_fini(struct mlxsw_thermal *thermal); + +#else + +static inline int mlxsw_thermal_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info, + struct mlxsw_thermal **p_thermal) +{ + return 0; +} + +static inline void mlxsw_thermal_fini(struct mlxsw_thermal *thermal) +{ +} + +#endif + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index 1ac8bf187168..ab710e37af99 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -262,7 +262,7 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon, static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon) { - char mtcap_pl[MLXSW_REG_MTCAP_LEN]; + char mtcap_pl[MLXSW_REG_MTCAP_LEN] = {0}; char mtmp_pl[MLXSW_REG_MTMP_LEN]; u8 sensor_count; int i; @@ -295,7 +295,7 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon) static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon) { - char mfcr_pl[MLXSW_REG_MFCR_LEN]; + char mfcr_pl[MLXSW_REG_MFCR_LEN] = {0}; enum mlxsw_reg_mfcr_pwm_frequency freq; unsigned int type_index; unsigned int num; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c new file mode 100644 index 000000000000..d866c98c1a97 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -0,0 +1,442 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/core_thermal.c + * Copyright (c) 2016 Ivan Vecera <cera@cera.cz> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/device.h> +#include <linux/sysfs.h> +#include <linux/thermal.h> +#include <linux/err.h> + +#include "core.h" + +#define MLXSW_THERMAL_POLL_INT 1000 /* ms */ +#define MLXSW_THERMAL_MAX_TEMP 110000 /* 110C */ +#define MLXSW_THERMAL_MAX_STATE 10 +#define MLXSW_THERMAL_MAX_DUTY 255 + +struct mlxsw_thermal_trip { + int type; + int temp; + int min_state; + int max_state; +}; + +static const struct mlxsw_thermal_trip default_thermal_trips[] = { + { /* In range - 0-40% PWM */ + .type = THERMAL_TRIP_ACTIVE, + .temp = 75000, + .min_state = 0, + .max_state = (4 * MLXSW_THERMAL_MAX_STATE) / 10, + }, + { /* High - 40-100% PWM */ + .type = THERMAL_TRIP_ACTIVE, + .temp = 80000, + .min_state = (4 * MLXSW_THERMAL_MAX_STATE) / 10, + .max_state = MLXSW_THERMAL_MAX_STATE, + }, + { + /* Very high - 100% PWM */ + .type = THERMAL_TRIP_ACTIVE, + .temp = 85000, + .min_state = MLXSW_THERMAL_MAX_STATE, + .max_state = MLXSW_THERMAL_MAX_STATE, + }, + { /* Warning */ + .type = THERMAL_TRIP_HOT, + .temp = 105000, + .min_state = MLXSW_THERMAL_MAX_STATE, + .max_state = MLXSW_THERMAL_MAX_STATE, + }, + { /* Critical - soft poweroff */ + .type = THERMAL_TRIP_CRITICAL, + .temp = MLXSW_THERMAL_MAX_TEMP, + .min_state = MLXSW_THERMAL_MAX_STATE, + .max_state = MLXSW_THERMAL_MAX_STATE, + } +}; + +#define MLXSW_THERMAL_NUM_TRIPS ARRAY_SIZE(default_thermal_trips) + +/* Make sure all trips are writable */ +#define MLXSW_THERMAL_TRIP_MASK (BIT(MLXSW_THERMAL_NUM_TRIPS) - 1) + +struct mlxsw_thermal { + struct mlxsw_core *core; + const struct mlxsw_bus_info *bus_info; + struct thermal_zone_device *tzdev; + struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX]; + struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS]; + enum thermal_device_mode mode; +}; + +static inline u8 mlxsw_state_to_duty(int state) +{ + return DIV_ROUND_CLOSEST(state * MLXSW_THERMAL_MAX_DUTY, + MLXSW_THERMAL_MAX_STATE); +} + +static inline int mlxsw_duty_to_state(u8 duty) +{ + return DIV_ROUND_CLOSEST(duty * MLXSW_THERMAL_MAX_STATE, + MLXSW_THERMAL_MAX_DUTY); +} + +static int mlxsw_get_cooling_device_idx(struct 
mlxsw_thermal *thermal, + struct thermal_cooling_device *cdev) +{ + int i; + + for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) + if (thermal->cdevs[i] == cdev) + return i; + + return -ENODEV; +} + +static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev, + struct thermal_cooling_device *cdev) +{ + struct mlxsw_thermal *thermal = tzdev->devdata; + struct device *dev = thermal->bus_info->dev; + int i, err; + + /* If the cooling device is one of ours, bind it */ + if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0) + return 0; + + for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) { + const struct mlxsw_thermal_trip *trip = &thermal->trips[i]; + + err = thermal_zone_bind_cooling_device(tzdev, i, cdev, + trip->max_state, + trip->min_state, + THERMAL_WEIGHT_DEFAULT); + if (err < 0) { + dev_err(dev, "Failed to bind cooling device to trip %d\n", i); + return err; + } + } + return 0; +} + +static int mlxsw_thermal_unbind(struct thermal_zone_device *tzdev, + struct thermal_cooling_device *cdev) +{ + struct mlxsw_thermal *thermal = tzdev->devdata; + struct device *dev = thermal->bus_info->dev; + int i; + int err; + + /* If the cooling device is one of ours, unbind it */ + if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0) + return 0; + + for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) { + err = thermal_zone_unbind_cooling_device(tzdev, i, cdev); + if (err < 0) { + dev_err(dev, "Failed to unbind cooling device\n"); + return err; + } + } + return 0; +} + +static int mlxsw_thermal_get_mode(struct thermal_zone_device *tzdev, + enum thermal_device_mode *mode) +{ + struct mlxsw_thermal *thermal = tzdev->devdata; + + *mode = thermal->mode; + + return 0; +} + +static int mlxsw_thermal_set_mode(struct thermal_zone_device *tzdev, + enum thermal_device_mode mode) +{ + struct mlxsw_thermal *thermal = tzdev->devdata; + + mutex_lock(&tzdev->lock); + + if (mode == THERMAL_DEVICE_ENABLED) + tzdev->polling_delay = MLXSW_THERMAL_POLL_INT; + else + tzdev->polling_delay = 0; + + mutex_unlock(&tzdev->lock); + + thermal->mode = mode; + thermal_zone_device_update(tzdev, THERMAL_EVENT_UNSPECIFIED); + + return 0; +} + +static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev, + int *p_temp) +{ + struct mlxsw_thermal *thermal = tzdev->devdata; + struct device *dev = thermal->bus_info->dev; + char mtmp_pl[MLXSW_REG_MTMP_LEN]; + unsigned int temp; + int err; + + mlxsw_reg_mtmp_pack(mtmp_pl, 0, false, false); + + err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl); + if (err) { + dev_err(dev, "Failed to query temp sensor\n"); + return err; + } + mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL); + + *p_temp = (int) temp; + return 0; +} + +static int mlxsw_thermal_get_trip_type(struct thermal_zone_device *tzdev, + int trip, + enum thermal_trip_type *p_type) +{ + struct mlxsw_thermal *thermal = tzdev->devdata; + + if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS) + return -EINVAL; + + *p_type = thermal->trips[trip].type; + return 0; +} + +static int mlxsw_thermal_get_trip_temp(struct thermal_zone_device *tzdev, + int trip, int *p_temp) +{ + struct mlxsw_thermal *thermal = tzdev->devdata; + + if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS) + return -EINVAL; + + *p_temp = thermal->trips[trip].temp; + return 0; +} + +static int mlxsw_thermal_set_trip_temp(struct thermal_zone_device *tzdev, + int trip, int temp) +{ + struct mlxsw_thermal *thermal = tzdev->devdata; + + if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS || + temp > MLXSW_THERMAL_MAX_TEMP) + return -EINVAL; + + thermal->trips[trip].temp = temp; + return 0; +}
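The zone and cooling-device callbacks above lean on the mlxsw_state_to_duty()/mlxsw_duty_to_state() helpers shown earlier in this file: the thermal core works in discrete cooling states (0..MLXSW_THERMAL_MAX_STATE) while the MFSC register takes an 8-bit PWM duty cycle, and DIV_ROUND_CLOSEST keeps the scaling symmetric in both directions. A standalone sketch (not part of the patch; the kernel macro is re-defined so it builds in userspace) checking that the mapping round-trips for every state:

```c
#include <assert.h>
#include <stdio.h>

#define MAX_STATE 10	/* MLXSW_THERMAL_MAX_STATE */
#define MAX_DUTY 255	/* MLXSW_THERMAL_MAX_DUTY */
/* same rounding as the kernel's DIV_ROUND_CLOSEST for non-negative x */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static unsigned char state_to_duty(int state)
{
	return DIV_ROUND_CLOSEST(state * MAX_DUTY, MAX_STATE);
}

static int duty_to_state(unsigned char duty)
{
	return DIV_ROUND_CLOSEST(duty * MAX_STATE, MAX_DUTY);
}

int main(void)
{
	int state;

	for (state = 0; state <= MAX_STATE; state++) {
		unsigned char duty = state_to_duty(state);

		/* e.g. state 4 -> duty 102, and back to state 4 */
		printf("state %2d -> duty %3u\n", state, duty);
		assert(duty_to_state(duty) == state);
	}
	return 0;
}
```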
+ +static struct thermal_zone_device_ops mlxsw_thermal_ops = { + .bind = mlxsw_thermal_bind, + .unbind = mlxsw_thermal_unbind, + .get_mode = mlxsw_thermal_get_mode, + .set_mode = mlxsw_thermal_set_mode, + .get_temp = mlxsw_thermal_get_temp, + .get_trip_type = mlxsw_thermal_get_trip_type, + .get_trip_temp = mlxsw_thermal_get_trip_temp, + .set_trip_temp = mlxsw_thermal_set_trip_temp, +}; + +static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev, + unsigned long *p_state) +{ + *p_state = MLXSW_THERMAL_MAX_STATE; + return 0; +} + +static int mlxsw_thermal_get_cur_state(struct thermal_cooling_device *cdev, + unsigned long *p_state) + +{ + struct mlxsw_thermal *thermal = cdev->devdata; + struct device *dev = thermal->bus_info->dev; + char mfsc_pl[MLXSW_REG_MFSC_LEN]; + int err, idx; + u8 duty; + + idx = mlxsw_get_cooling_device_idx(thermal, cdev); + if (idx < 0) + return idx; + + mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0); + err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl); + if (err) { + dev_err(dev, "Failed to query PWM duty\n"); + return err; + } + + duty = mlxsw_reg_mfsc_pwm_duty_cycle_get(mfsc_pl); + *p_state = mlxsw_duty_to_state(duty); + return 0; +} + +static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long state) + +{ + struct mlxsw_thermal *thermal = cdev->devdata; + struct device *dev = thermal->bus_info->dev; + char mfsc_pl[MLXSW_REG_MFSC_LEN]; + int err, idx; + + idx = mlxsw_get_cooling_device_idx(thermal, cdev); + if (idx < 0) + return idx; + + mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state)); + err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl); + if (err) { + dev_err(dev, "Failed to write PWM duty\n"); + return err; + } + return 0; +} + +static const struct thermal_cooling_device_ops mlxsw_cooling_ops = { + .get_max_state = mlxsw_thermal_get_max_state, + .get_cur_state = mlxsw_thermal_get_cur_state, + .set_cur_state = mlxsw_thermal_set_cur_state, +}; + +int mlxsw_thermal_init(struct mlxsw_core *core, + const struct mlxsw_bus_info *bus_info, + struct mlxsw_thermal **p_thermal) +{ + char mfcr_pl[MLXSW_REG_MFCR_LEN] = { 0 }; + enum mlxsw_reg_mfcr_pwm_frequency freq; + struct device *dev = bus_info->dev; + struct mlxsw_thermal *thermal; + u16 tacho_active; + u8 pwm_active; + int err, i; + + thermal = devm_kzalloc(dev, sizeof(*thermal), + GFP_KERNEL); + if (!thermal) + return -ENOMEM; + + thermal->core = core; + thermal->bus_info = bus_info; + memcpy(thermal->trips, default_thermal_trips, sizeof(thermal->trips)); + + err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl); + if (err) { + dev_err(dev, "Failed to probe PWMs\n"); + goto err_free_thermal; + } + mlxsw_reg_mfcr_unpack(mfcr_pl, &freq, &tacho_active, &pwm_active); + + for (i = 0; i < MLXSW_MFCR_TACHOS_MAX; i++) { + if (tacho_active & BIT(i)) { + char mfsl_pl[MLXSW_REG_MFSL_LEN]; + + mlxsw_reg_mfsl_pack(mfsl_pl, i, 0, 0); + + /* We need to query the register to preserve maximum */ + err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsl), + mfsl_pl); + if (err) + goto err_free_thermal; + + /* set the minimal RPMs to 0 */ + mlxsw_reg_mfsl_tach_min_set(mfsl_pl, 0); + err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsl), + mfsl_pl); + if (err) + goto err_free_thermal; + } + } + for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) { + if (pwm_active & BIT(i)) { + struct thermal_cooling_device *cdev; + + cdev = thermal_cooling_device_register("Fan", thermal, + &mlxsw_cooling_ops); + if (IS_ERR(cdev)) { + err = PTR_ERR(cdev); + dev_err(dev, "Failed to 
register cooling device\n"); + goto err_unreg_cdevs; + } + thermal->cdevs[i] = cdev; + } + } + + thermal->tzdev = thermal_zone_device_register("mlxsw", + MLXSW_THERMAL_NUM_TRIPS, + MLXSW_THERMAL_TRIP_MASK, + thermal, + &mlxsw_thermal_ops, + NULL, 0, + MLXSW_THERMAL_POLL_INT); + if (IS_ERR(thermal->tzdev)) { + err = PTR_ERR(thermal->tzdev); + dev_err(dev, "Failed to register thermal zone\n"); + goto err_unreg_cdevs; + } + + thermal->mode = THERMAL_DEVICE_ENABLED; + *p_thermal = thermal; + return 0; +err_unreg_cdevs: + for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) + if (thermal->cdevs[i]) + thermal_cooling_device_unregister(thermal->cdevs[i]); +err_free_thermal: + devm_kfree(dev, thermal); + return err; +} + +void mlxsw_thermal_fini(struct mlxsw_thermal *thermal) +{ + int i; + + if (thermal->tzdev) { + thermal_zone_device_unregister(thermal->tzdev); + thermal->tzdev = NULL; + } + + for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) { + if (thermal->cdevs[i]) { + thermal_cooling_device_unregister(thermal->cdevs[i]); + thermal->cdevs[i] = NULL; + } + } + + devm_kfree(thermal->bus_info->dev, thermal); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c new file mode 100644 index 000000000000..e50c8db2602a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c @@ -0,0 +1,582 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/i2c.c + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/slab.h> + +#include "cmd.h" +#include "core.h" +#include "i2c.h" + +static const char mlxsw_i2c_driver_name[] = "mlxsw_i2c"; + +#define MLXSW_I2C_CIR2_BASE 0x72000 +#define MLXSW_I2C_CIR_STATUS_OFF 0x18 +#define MLXSW_I2C_CIR2_OFF_STATUS (MLXSW_I2C_CIR2_BASE + \ + MLXSW_I2C_CIR_STATUS_OFF) +#define MLXSW_I2C_OPMOD_SHIFT 12 +#define MLXSW_I2C_GO_BIT_SHIFT 23 +#define MLXSW_I2C_CIR_CTRL_STATUS_SHIFT 24 +#define MLXSW_I2C_GO_BIT BIT(MLXSW_I2C_GO_BIT_SHIFT) +#define MLXSW_I2C_GO_OPMODE BIT(MLXSW_I2C_OPMOD_SHIFT) +#define MLXSW_I2C_SET_IMM_CMD (MLXSW_I2C_GO_OPMODE | \ + MLXSW_CMD_OPCODE_QUERY_FW) +#define MLXSW_I2C_PUSH_IMM_CMD (MLXSW_I2C_GO_BIT | \ + MLXSW_I2C_SET_IMM_CMD) +#define MLXSW_I2C_SET_CMD (MLXSW_CMD_OPCODE_ACCESS_REG) +#define MLXSW_I2C_PUSH_CMD (MLXSW_I2C_GO_BIT | MLXSW_I2C_SET_CMD) +#define MLXSW_I2C_TLV_HDR_SIZE 0x10 +#define MLXSW_I2C_ADDR_WIDTH 4 +#define MLXSW_I2C_PUSH_CMD_SIZE (MLXSW_I2C_ADDR_WIDTH + 4) +#define MLXSW_I2C_READ_SEMA_SIZE 4 +#define MLXSW_I2C_PREP_SIZE (MLXSW_I2C_ADDR_WIDTH + 28) +#define MLXSW_I2C_MBOX_SIZE 20 +#define MLXSW_I2C_MBOX_OUT_PARAM_OFF 12 +#define MLXSW_I2C_MAX_BUFF_SIZE 32 +#define MLXSW_I2C_MBOX_OFFSET_BITS 20 +#define MLXSW_I2C_MBOX_SIZE_BITS 12 +#define MLXSW_I2C_ADDR_BUF_SIZE 4 +#define MLXSW_I2C_BLK_MAX 32 +#define MLXSW_I2C_RETRY 5 +#define MLXSW_I2C_TIMEOUT_MSECS 5000 + +/** + * struct mlxsw_i2c - device private data: + * @cmd.mb_size_in: input mailbox size; + * @cmd.mb_off_in: input mailbox offset in register space; + * @cmd.mb_size_out: output mailbox size; + * @cmd.mb_off_out: output mailbox offset in register space; + * @cmd.lock: command execution lock; + * @dev: I2C device; + * @core: switch core pointer; + * @bus_info: bus info block; + */ +struct mlxsw_i2c { + struct { + u32 mb_size_in; + u32 mb_off_in; + u32 mb_size_out; + u32 mb_off_out; + struct mutex lock; + } cmd; + struct device *dev; + struct mlxsw_core *core; + struct mlxsw_bus_info bus_info; +}; + +#define MLXSW_I2C_READ_MSG(_client, _addr_buf, _buf, _len) { \ + { .addr = (_client)->addr, \ + .buf = (_addr_buf), \ + .len = MLXSW_I2C_ADDR_BUF_SIZE, \ + .flags = 0 }, \ + { .addr = (_client)->addr, \ + .buf = (_buf), \ + .len = (_len), \ + .flags = I2C_M_RD } } + +#define MLXSW_I2C_WRITE_MSG(_client, _buf, _len) \ + { .addr = (_client)->addr, \ + .buf = (u8 *)(_buf), \ + .len = (_len), \ + .flags = 0 } + +/* Routine converts in and out mail boxes offset and size. */ +static inline void +mlxsw_i2c_convert_mbox(struct mlxsw_i2c *mlxsw_i2c, u8 *buf) +{ + u32 tmp; + + /* Local in/out mailboxes: 20 bits for offset, 12 for size */ + tmp = be32_to_cpup((__be32 *) buf); + mlxsw_i2c->cmd.mb_off_in = tmp & + GENMASK(MLXSW_I2C_MBOX_OFFSET_BITS - 1, 0); + mlxsw_i2c->cmd.mb_size_in = (tmp & GENMASK(31, + MLXSW_I2C_MBOX_OFFSET_BITS)) >> + MLXSW_I2C_MBOX_OFFSET_BITS; + + tmp = be32_to_cpup((__be32 *) (buf + MLXSW_I2C_ADDR_WIDTH)); + mlxsw_i2c->cmd.mb_off_out = tmp & + GENMASK(MLXSW_I2C_MBOX_OFFSET_BITS - 1, 0); + mlxsw_i2c->cmd.mb_size_out = (tmp & GENMASK(31, + MLXSW_I2C_MBOX_OFFSET_BITS)) >> + MLXSW_I2C_MBOX_OFFSET_BITS; +} + +/* Routine obtains register size from mail box buffer. 
*/ +static inline int mlxsw_i2c_get_reg_size(u8 *in_mbox) +{ + u16 tmp = be16_to_cpup((__be16 *) (in_mbox + MLXSW_I2C_TLV_HDR_SIZE)); + + return (tmp & 0x7ff) * 4 + MLXSW_I2C_TLV_HDR_SIZE; +} + +/* Routine sets I2C device internal offset in the transaction buffer. */ +static inline void mlxsw_i2c_set_slave_addr(u8 *buf, u32 off) +{ + __be32 *val = (__be32 *) buf; + + *val = htonl(off); +} + +/* Routine waits until go bit is cleared. */ +static int mlxsw_i2c_wait_go_bit(struct i2c_client *client, + struct mlxsw_i2c *mlxsw_i2c, u8 *p_status) +{ + u8 addr_buf[MLXSW_I2C_ADDR_BUF_SIZE]; + u8 buf[MLXSW_I2C_READ_SEMA_SIZE]; + int len = MLXSW_I2C_READ_SEMA_SIZE; + struct i2c_msg read_sema[] = + MLXSW_I2C_READ_MSG(client, addr_buf, buf, len); + bool wait_done = false; + unsigned long end; + int i = 0, err; + + mlxsw_i2c_set_slave_addr(addr_buf, MLXSW_I2C_CIR2_OFF_STATUS); + + end = jiffies + msecs_to_jiffies(MLXSW_I2C_TIMEOUT_MSECS); + do { + u32 ctrl; + + err = i2c_transfer(client->adapter, read_sema, + ARRAY_SIZE(read_sema)); + + ctrl = be32_to_cpu(*(__be32 *) buf); + if (err == ARRAY_SIZE(read_sema)) { + if (!(ctrl & MLXSW_I2C_GO_BIT)) { + wait_done = true; + *p_status = ctrl >> + MLXSW_I2C_CIR_CTRL_STATUS_SHIFT; + break; + } + } + cond_resched(); + } while ((time_before(jiffies, end)) || (i++ < MLXSW_I2C_RETRY)); + + if (wait_done) { + if (*p_status) + err = -EIO; + } else { + return -ETIMEDOUT; + } + + return err > 0 ? 0 : err; +} + +/* Routine posts a command to ASIC through mail box. */ +static int mlxsw_i2c_write_cmd(struct i2c_client *client, + struct mlxsw_i2c *mlxsw_i2c, + int immediate) +{ + __be32 push_cmd_buf[MLXSW_I2C_PUSH_CMD_SIZE / 4] = { + 0, cpu_to_be32(MLXSW_I2C_PUSH_IMM_CMD) + }; + __be32 prep_cmd_buf[MLXSW_I2C_PREP_SIZE / 4] = { + 0, 0, 0, 0, 0, 0, + cpu_to_be32(client->adapter->nr & 0xffff), + cpu_to_be32(MLXSW_I2C_SET_IMM_CMD) + }; + struct i2c_msg push_cmd = + MLXSW_I2C_WRITE_MSG(client, push_cmd_buf, + MLXSW_I2C_PUSH_CMD_SIZE); + struct i2c_msg prep_cmd = + MLXSW_I2C_WRITE_MSG(client, prep_cmd_buf, MLXSW_I2C_PREP_SIZE); + int err; + + if (!immediate) { + push_cmd_buf[1] = cpu_to_be32(MLXSW_I2C_PUSH_CMD); + prep_cmd_buf[7] = cpu_to_be32(MLXSW_I2C_SET_CMD); + } + mlxsw_i2c_set_slave_addr((u8 *)prep_cmd_buf, + MLXSW_I2C_CIR2_BASE); + mlxsw_i2c_set_slave_addr((u8 *)push_cmd_buf, + MLXSW_I2C_CIR2_OFF_STATUS); + + /* Prepare Command Interface Register for transaction */ + err = i2c_transfer(client->adapter, &prep_cmd, 1); + if (err < 0) + return err; + else if (err != 1) + return -EIO; + + /* Write out Command Interface Register GO bit to push transaction */ + err = i2c_transfer(client->adapter, &push_cmd, 1); + if (err < 0) + return err; + else if (err != 1) + return -EIO; + + return 0; +} + +/* Routine obtains mail box offsets from ASIC register space. */ +static int mlxsw_i2c_get_mbox(struct i2c_client *client, + struct mlxsw_i2c *mlxsw_i2c) +{ + u8 addr_buf[MLXSW_I2C_ADDR_BUF_SIZE]; + u8 buf[MLXSW_I2C_MBOX_SIZE]; + struct i2c_msg mbox_cmd[] = + MLXSW_I2C_READ_MSG(client, addr_buf, buf, MLXSW_I2C_MBOX_SIZE); + int err; + + /* Read mail boxes offsets. */ + mlxsw_i2c_set_slave_addr(addr_buf, MLXSW_I2C_CIR2_BASE); + err = i2c_transfer(client->adapter, mbox_cmd, 2); + if (err != 2) { + dev_err(&client->dev, "Could not obtain mail boxes\n"); + if (!err) + return -EIO; + else + return err; + } + + /* Convert mail boxes. */ + mlxsw_i2c_convert_mbox(mlxsw_i2c, &buf[MLXSW_I2C_MBOX_OUT_PARAM_OFF]); + + return err; +}
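mlxsw_i2c_get_mbox() reads MLXSW_I2C_MBOX_SIZE bytes starting at the CIR2 base and hands the relevant words to mlxsw_i2c_convert_mbox() earlier in the file, which splits each 32-bit value into a 20-bit mailbox offset (MLXSW_I2C_MBOX_OFFSET_BITS) and a 12-bit size. A short standalone illustration of that unpacking (the sample word is invented; GENMASK is re-defined for a userspace build):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MBOX_OFFSET_BITS 20	/* MLXSW_I2C_MBOX_OFFSET_BITS */
#define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	/* hypothetical QUERY_FW output word: size 0x100 in the top
	 * 12 bits, offset 0x72100 in the bottom 20 bits
	 */
	uint32_t word = 0x10072100;
	uint32_t off = word & GENMASK(MBOX_OFFSET_BITS - 1, 0);
	uint32_t size = (word & GENMASK(31, MBOX_OFFSET_BITS)) >>
			MBOX_OFFSET_BITS;

	/* prints: mailbox offset 0x72100, size 0x100 */
	printf("mailbox offset 0x%05" PRIx32 ", size 0x%03" PRIx32 "\n",
	       off, size);
	return 0;
}
```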
+ +/* Routine sends I2C write transaction to ASIC device. */ +static int +mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num, + u8 *p_status) +{ + struct i2c_client *client = to_i2c_client(dev); + struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client); + unsigned long timeout = msecs_to_jiffies(MLXSW_I2C_TIMEOUT_MSECS); + u8 tran_buf[MLXSW_I2C_MAX_BUFF_SIZE + MLXSW_I2C_ADDR_BUF_SIZE]; + int off = mlxsw_i2c->cmd.mb_off_in, chunk_size, i, j; + unsigned long end; + struct i2c_msg write_tran = + MLXSW_I2C_WRITE_MSG(client, tran_buf, MLXSW_I2C_PUSH_CMD_SIZE); + int err; + + for (i = 0; i < num; i++) { + chunk_size = (in_mbox_size > MLXSW_I2C_BLK_MAX) ? + MLXSW_I2C_BLK_MAX : in_mbox_size; + write_tran.len = MLXSW_I2C_ADDR_WIDTH + chunk_size; + mlxsw_i2c_set_slave_addr(tran_buf, off); + memcpy(&tran_buf[MLXSW_I2C_ADDR_BUF_SIZE], in_mbox + + chunk_size * i, chunk_size); + + j = 0; + end = jiffies + timeout; + do { + err = i2c_transfer(client->adapter, &write_tran, 1); + if (err == 1) + break; + + cond_resched(); + } while ((time_before(jiffies, end)) || + (j++ < MLXSW_I2C_RETRY)); + + if (err != 1) { + if (!err) + err = -EIO; + return err; + } + + off += chunk_size; + in_mbox_size -= chunk_size; + } + + /* Prepare and write out Command Interface Register for transaction. */ + err = mlxsw_i2c_write_cmd(client, mlxsw_i2c, 0); + if (err) { + dev_err(&client->dev, "Could not start transaction"); + return -EIO; + } + + /* Wait until go bit is cleared. */ + err = mlxsw_i2c_wait_go_bit(client, mlxsw_i2c, p_status); + if (err) { + dev_err(&client->dev, "HW semaphore is not released"); + return err; + } + + /* Validate transaction completion status. */ + if (*p_status) { + dev_err(&client->dev, "Bad transaction completion status %x\n", + *p_status); + return -EIO; + } + + return err > 0 ? 0 : err; +}
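One detail of the write path worth calling out: a payload is never pushed in a single transfer. mlxsw_i2c_write() feeds the register contents into the input mailbox in MLXSW_I2C_BLK_MAX (32-byte) blocks, each i2c_msg carrying a 4-byte mailbox address ahead of the data, and the caller precomputes num as the rounded-up block count. A standalone sketch of the resulting transfer layout (mailbox offset hypothetical):

```c
#include <stdio.h>

#define BLK_MAX 32	/* MLXSW_I2C_BLK_MAX */
#define ADDR_WIDTH 4	/* MLXSW_I2C_ADDR_WIDTH */

int main(void)
{
	unsigned int reg_size = 92;	/* example register payload, bytes */
	unsigned int off = 0x72100;	/* hypothetical cmd.mb_off_in */
	unsigned int num = (reg_size + BLK_MAX - 1) / BLK_MAX;
	unsigned int i;

	printf("%u bytes -> %u i2c transfers\n", reg_size, num);
	for (i = 0; i < num; i++) {
		unsigned int chunk = reg_size > BLK_MAX ? BLK_MAX : reg_size;

		/* 4-byte mailbox address + up to 32 payload bytes */
		printf("  xfer %u: mailbox addr 0x%05x, %u bytes on the wire\n",
		       i, off, ADDR_WIDTH + chunk);
		off += chunk;
		reg_size -= chunk;
	}
	return 0;
}
```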
+ +/* Routine executes I2C command. */ +static int +mlxsw_i2c_cmd(struct device *dev, size_t in_mbox_size, u8 *in_mbox, + size_t out_mbox_size, u8 *out_mbox, u8 *status) +{ + struct i2c_client *client = to_i2c_client(dev); + struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client); + unsigned long timeout = msecs_to_jiffies(MLXSW_I2C_TIMEOUT_MSECS); + u8 tran_buf[MLXSW_I2C_ADDR_BUF_SIZE]; + int num, chunk_size, reg_size, i, j; + int off = mlxsw_i2c->cmd.mb_off_out; + unsigned long end; + struct i2c_msg read_tran[] = + MLXSW_I2C_READ_MSG(client, tran_buf, NULL, 0); + int err; + + WARN_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32)); + + reg_size = mlxsw_i2c_get_reg_size(in_mbox); + num = reg_size / MLXSW_I2C_BLK_MAX; + if (reg_size % MLXSW_I2C_BLK_MAX) + num++; + + if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) { + dev_err(&client->dev, "Could not acquire lock"); + return -EINVAL; + } + + err = mlxsw_i2c_write(dev, reg_size, in_mbox, num, status); + if (err) + goto cmd_fail; + + /* No out mailbox in case of write transaction. */ + if (!out_mbox) { + mutex_unlock(&mlxsw_i2c->cmd.lock); + return 0; + } + + /* Send read transaction to get output mailbox content. */ + read_tran[1].buf = out_mbox; + for (i = 0; i < num; i++) { + chunk_size = (reg_size > MLXSW_I2C_BLK_MAX) ? + MLXSW_I2C_BLK_MAX : reg_size; + read_tran[1].len = chunk_size; + mlxsw_i2c_set_slave_addr(tran_buf, off); + + j = 0; + end = jiffies + timeout; + do { + err = i2c_transfer(client->adapter, read_tran, + ARRAY_SIZE(read_tran)); + if (err == ARRAY_SIZE(read_tran)) + break; + + cond_resched(); + } while ((time_before(jiffies, end)) || + (j++ < MLXSW_I2C_RETRY)); + + if (err != ARRAY_SIZE(read_tran)) { + if (!err) + err = -EIO; + + goto cmd_fail; + } + + off += chunk_size; + reg_size -= chunk_size; + read_tran[1].buf += chunk_size; + } + + mutex_unlock(&mlxsw_i2c->cmd.lock); + + return 0; + +cmd_fail: + mutex_unlock(&mlxsw_i2c->cmd.lock); + return err; +} + +static int mlxsw_i2c_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, + u32 in_mod, bool out_mbox_direct, + char *in_mbox, size_t in_mbox_size, + char *out_mbox, size_t out_mbox_size, + u8 *status) +{ + struct mlxsw_i2c *mlxsw_i2c = bus_priv; + + return mlxsw_i2c_cmd(mlxsw_i2c->dev, in_mbox_size, in_mbox, + out_mbox_size, out_mbox, status); +} + +static bool mlxsw_i2c_skb_transmit_busy(void *bus_priv, + const struct mlxsw_tx_info *tx_info) +{ + return false; +} + +static int mlxsw_i2c_skb_transmit(void *bus_priv, struct sk_buff *skb, + const struct mlxsw_tx_info *tx_info) +{ + return 0; +} + +static int +mlxsw_i2c_init(void *bus_priv, struct mlxsw_core *mlxsw_core, + const struct mlxsw_config_profile *profile, + struct mlxsw_res *resources) +{ + struct mlxsw_i2c *mlxsw_i2c = bus_priv; + + mlxsw_i2c->core = mlxsw_core; + + return 0; +} + +static void mlxsw_i2c_fini(void *bus_priv) +{ + struct mlxsw_i2c *mlxsw_i2c = bus_priv; + + mlxsw_i2c->core = NULL; +} + +static const struct mlxsw_bus mlxsw_i2c_bus = { + .kind = "i2c", + .init = mlxsw_i2c_init, + .fini = mlxsw_i2c_fini, + .skb_transmit_busy = mlxsw_i2c_skb_transmit_busy, + .skb_transmit = mlxsw_i2c_skb_transmit, + .cmd_exec = mlxsw_i2c_cmd_exec, +}; + +static int mlxsw_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct mlxsw_i2c *mlxsw_i2c; + u8 status; + int err; + + mlxsw_i2c = devm_kzalloc(&client->dev, sizeof(*mlxsw_i2c), GFP_KERNEL); + if (!mlxsw_i2c) + return -ENOMEM; + + i2c_set_clientdata(client, mlxsw_i2c); + mutex_init(&mlxsw_i2c->cmd.lock); + + /* In order to use mailboxes through the i2c, special area is reserved + * on the i2c address space that can be used for input and output + * mailboxes. Such mailboxes are called local mailboxes. When using a + * local mailbox, software should specify 0 as the Input/Output + * parameters. The location of the Local Mailbox addresses on the i2c + * space can be retrieved through the QUERY_FW command. + * For this purpose QUERY_FW is to be issued with opcode modifier equal + * 0x01. For such command the output parameter is an immediate value. + * Here QUERY_FW command is invoked for ASIC probing and for getting + * local mailbox addresses from immediate output parameters. + */ + + /* Prepare and write out Command Interface Register for transaction */ + err = mlxsw_i2c_write_cmd(client, mlxsw_i2c, 1); + if (err) { + dev_err(&client->dev, "Could not start transaction"); + goto errout; + } + + /* Wait until go bit is cleared. */ + err = mlxsw_i2c_wait_go_bit(client, mlxsw_i2c, &status); + if (err) { + dev_err(&client->dev, "HW semaphore is not released"); + goto errout; + } + + /* Validate transaction completion status. */ + if (status) { + dev_err(&client->dev, "Bad transaction completion status %x\n", + status); + err = -EIO; + goto errout; + } + + /* Get mailbox offsets. 
*/ + err = mlxsw_i2c_get_mbox(client, mlxsw_i2c); + if (err < 0) { + dev_err(&client->dev, "Fail to get mailboxes\n"); + goto errout; + } + + dev_info(&client->dev, "%s mb size=%x off=0x%08x out mb size=%x off=0x%08x\n", + id->name, mlxsw_i2c->cmd.mb_size_in, + mlxsw_i2c->cmd.mb_off_in, mlxsw_i2c->cmd.mb_size_out, + mlxsw_i2c->cmd.mb_off_out); + + /* Register device bus. */ + mlxsw_i2c->bus_info.device_kind = id->name; + mlxsw_i2c->bus_info.device_name = client->name; + mlxsw_i2c->bus_info.dev = &client->dev; + mlxsw_i2c->dev = &client->dev; + + err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info, + &mlxsw_i2c_bus, mlxsw_i2c); + if (err) { + dev_err(&client->dev, "Fail to register core bus\n"); + return err; + } + + return 0; + +errout: + i2c_set_clientdata(client, NULL); + + return err; +} + +static int mlxsw_i2c_remove(struct i2c_client *client) +{ + struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client); + + mlxsw_core_bus_device_unregister(mlxsw_i2c->core); + mutex_destroy(&mlxsw_i2c->cmd.lock); + + return 0; +} + +int mlxsw_i2c_driver_register(struct i2c_driver *i2c_driver) +{ + i2c_driver->probe = mlxsw_i2c_probe; + i2c_driver->remove = mlxsw_i2c_remove; + return i2c_add_driver(i2c_driver); +} +EXPORT_SYMBOL(mlxsw_i2c_driver_register); + +void mlxsw_i2c_driver_unregister(struct i2c_driver *i2c_driver) +{ + i2c_del_driver(i2c_driver); +} +EXPORT_SYMBOL(mlxsw_i2c_driver_unregister); + +MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>"); +MODULE_DESCRIPTION("Mellanox switch I2C interface driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.h b/drivers/net/ethernet/mellanox/mlxsw/i2c.h new file mode 100644 index 000000000000..daa24b213ea4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.h @@ -0,0 +1,60 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/i2c.h + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_I2C_H +#define _MLXSW_I2C_H + +#include <linux/i2c.h> + +#if IS_ENABLED(CONFIG_MLXSW_I2C) + +int mlxsw_i2c_driver_register(struct i2c_driver *i2c_driver); +void mlxsw_i2c_driver_unregister(struct i2c_driver *i2c_driver); + +#else + +static inline int +mlxsw_i2c_driver_register(struct i2c_driver *i2c_driver) +{ + return -ENODEV; +} + +static inline void +mlxsw_i2c_driver_unregister(struct i2c_driver *i2c_driver) +{ +} + +#endif + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/ib.h b/drivers/net/ethernet/mellanox/mlxsw/ib.h new file mode 100644 index 000000000000..ce313aaa6336 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/ib.h @@ -0,0 +1,39 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/ib.h + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016 Elad Raz <eladr@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef _MLXSW_IB_H +#define _MLXSW_IB_H + +#define MLXSW_IB_DEFAULT_MTU 4096 + +#endif /* _MLXSW_IB_H */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h index a94dbda6590b..3c95e3ddd9c2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/item.h +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -55,7 +55,7 @@ struct mlxsw_item { }; static inline unsigned int -__mlxsw_item_offset(struct mlxsw_item *item, unsigned short index, +__mlxsw_item_offset(const struct mlxsw_item *item, unsigned short index, size_t typesize) { BUG_ON(index && !item->step); @@ -72,7 +72,8 @@ __mlxsw_item_offset(struct mlxsw_item *item, unsigned short index, typesize); } -static inline u16 __mlxsw_item_get16(char *buf, struct mlxsw_item *item, +static inline u16 __mlxsw_item_get16(const char *buf, + const struct mlxsw_item *item, unsigned short index) { unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16)); @@ -87,7 +88,7 @@ static inline u16 __mlxsw_item_get16(char *buf, struct mlxsw_item *item, return tmp; } -static inline void __mlxsw_item_set16(char *buf, struct mlxsw_item *item, +static inline void __mlxsw_item_set16(char *buf, const struct mlxsw_item *item, unsigned short index, u16 val) { unsigned int offset = __mlxsw_item_offset(item, index, @@ -105,7 +106,8 @@ static inline void __mlxsw_item_set16(char *buf, struct mlxsw_item *item, b[offset] = cpu_to_be16(tmp); } -static inline u32 __mlxsw_item_get32(char *buf, struct mlxsw_item *item, +static inline u32 __mlxsw_item_get32(const char *buf, + const struct mlxsw_item *item, unsigned short index) { unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32)); @@ -120,7 +122,7 @@ static inline u32 __mlxsw_item_get32(char *buf, struct mlxsw_item *item, return tmp; } -static inline void __mlxsw_item_set32(char *buf, struct mlxsw_item *item, +static inline void __mlxsw_item_set32(char *buf, const struct mlxsw_item *item, unsigned short index, u32 val) { unsigned int offset = __mlxsw_item_offset(item, index, @@ -138,7 +140,8 @@ static inline void __mlxsw_item_set32(char *buf, struct mlxsw_item *item, b[offset] = cpu_to_be32(tmp); } -static inline u64 __mlxsw_item_get64(char *buf, struct mlxsw_item *item, +static inline u64 __mlxsw_item_get64(const char *buf, + const struct mlxsw_item *item, unsigned short index) { unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64)); @@ -153,7 +156,7 @@ static inline u64 __mlxsw_item_get64(char *buf, struct mlxsw_item *item, return tmp; } -static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item, +static inline void __mlxsw_item_set64(char *buf, const struct mlxsw_item *item, unsigned short index, u64 val) { unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64)); @@ -170,8 +173,8 @@ static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item, b[offset] = cpu_to_be64(tmp); } -static inline void __mlxsw_item_memcpy_from(char *buf, char *dst, - struct mlxsw_item *item, +static inline void __mlxsw_item_memcpy_from(const char *buf, char *dst, + const struct mlxsw_item *item, unsigned short index) { unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char)); @@ -180,7 +183,7 @@ static inline void __mlxsw_item_memcpy_from(char *buf, char *dst, } static inline void __mlxsw_item_memcpy_to(char *buf, const char *src, - struct mlxsw_item *item, + const struct mlxsw_item *item, unsigned short index) { unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char)); @@ -189,7 +192,8 @@ static 
inline void __mlxsw_item_memcpy_to(char *buf, const char *src, } static inline u16 -__mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift) +__mlxsw_item_bit_array_offset(const struct mlxsw_item *item, + u16 index, u8 *shift) { u16 max_index, be_index; u16 offset; /* byte offset inside the array */ @@ -212,7 +216,8 @@ __mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift) return item->offset + offset; } -static inline u8 __mlxsw_item_bit_array_get(char *buf, struct mlxsw_item *item, +static inline u8 __mlxsw_item_bit_array_get(const char *buf, + const struct mlxsw_item *item, u16 index) { u8 shift, tmp; @@ -224,7 +229,8 @@ static inline u8 __mlxsw_item_bit_array_get(char *buf, struct mlxsw_item *item, return tmp; } -static inline void __mlxsw_item_bit_array_set(char *buf, struct mlxsw_item *item, +static inline void __mlxsw_item_bit_array_set(char *buf, + const struct mlxsw_item *item, u16 index, u8 val) { u8 shift, tmp; @@ -254,7 +260,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bits = _sizebits,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \ +static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ { \ return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ } \ @@ -275,7 +281,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .name = #_type "_" #_cname "_" #_iname, \ }; \ static inline u16 \ -mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \ +mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\ { \ return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), \ index); \ @@ -295,7 +301,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bits = _sizebits,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \ +static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ { \ return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ } \ @@ -316,7 +322,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .name = #_type "_" #_cname "_" #_iname, \ }; \ static inline u32 \ -mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \ +mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\ { \ return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), \ index); \ @@ -336,7 +342,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .size = {.bits = _sizebits,}, \ .name = #_type "_" #_cname "_" #_iname, \ }; \ -static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \ +static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \ { \ return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ } \ @@ -357,7 +363,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .name = #_type "_" #_cname "_" #_iname, \ }; \ static inline u64 \ -mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \ +mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\ { \ return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), \ index); \ @@ -377,7 +383,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .name = #_type "_" #_cname "_" #_iname, \ }; \ static inline void \ 
-mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst) \ +mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, char *dst) \ { \ __mlxsw_item_memcpy_from(buf, dst, \ &__ITEM_NAME(_type, _cname, _iname), 0); \ @@ -399,7 +405,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .name = #_type "_" #_cname "_" #_iname, \ }; \ static inline void \ -mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, \ +mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, \ unsigned short index, \ char *dst) \ { \ @@ -424,7 +430,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ .name = #_type "_" #_cname "_" #_iname, \ }; \ static inline u8 \ -mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, u16 index) \ +mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, u16 index) \ { \ return __mlxsw_item_bit_array_get(buf, \ &__ITEM_NAME(_type, _cname, _iname), \ diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c new file mode 100644 index 000000000000..3dd16267b76c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -0,0 +1,97 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/minimal.c + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/i2c.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/types.h> + +#include "core.h" +#include "i2c.h" + +static const char mlxsw_minimal_driver_name[] = "mlxsw_minimal"; + +static const struct mlxsw_config_profile mlxsw_minimal_config_profile; + +static struct mlxsw_driver mlxsw_minimal_driver = { + .kind = mlxsw_minimal_driver_name, + .priv_size = 1, + .profile = &mlxsw_minimal_config_profile, +}; + +static const struct i2c_device_id mlxsw_minimal_i2c_id[] = { + { "mlxsw_minimal", 0}, + { }, +}; + +static struct i2c_driver mlxsw_minimal_i2c_driver = { + .driver.name = "mlxsw_minimal", + .class = I2C_CLASS_HWMON, + .id_table = mlxsw_minimal_i2c_id, +}; + +static int __init mlxsw_minimal_module_init(void) +{ + int err; + + err = mlxsw_core_driver_register(&mlxsw_minimal_driver); + if (err) + return err; + + err = mlxsw_i2c_driver_register(&mlxsw_minimal_i2c_driver); + if (err) + goto err_i2c_driver_register; + + return 0; + +err_i2c_driver_register: + mlxsw_core_driver_unregister(&mlxsw_minimal_driver); + + return err; +} + +static void __exit mlxsw_minimal_module_exit(void) +{ + mlxsw_i2c_driver_unregister(&mlxsw_minimal_i2c_driver); + mlxsw_core_driver_unregister(&mlxsw_minimal_driver); +} + +module_init(mlxsw_minimal_module_init); +module_exit(mlxsw_minimal_module_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>"); +MODULE_DESCRIPTION("Mellanox minimal driver"); +MODULE_DEVICE_TABLE(i2c, mlxsw_minimal_i2c_id); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 912f71f84209..a223c85dfde0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -48,33 +48,17 @@ #include <linux/seq_file.h> #include <linux/string.h> +#include "pci_hw.h" #include "pci.h" #include "core.h" #include "cmd.h" #include "port.h" +#include "resources.h" static const char mlxsw_pci_driver_name[] = "mlxsw_pci"; -static const struct pci_device_id mlxsw_pci_id_table[] = { - {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0}, - {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, - {0, } -}; - static struct dentry *mlxsw_pci_dbg_root; -static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id) -{ - switch (id->device) { - case PCI_DEVICE_ID_MELLANOX_SWITCHX2: - return MLXSW_DEVICE_KIND_SWITCHX2; - case PCI_DEVICE_ID_MELLANOX_SPECTRUM: - return MLXSW_DEVICE_KIND_SPECTRUM; - default: - BUG(); - } -} - #define mlxsw_pci_write32(mlxsw_pci, reg, val) \ iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg)) #define mlxsw_pci_read32(mlxsw_pci, reg) \ @@ -238,8 +222,9 @@ static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit) return owner_bit != !!(q->consumer_counter & q->count); } -static char *mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q, - u32 (*get_elem_owner_func)(char *)) +static char * +mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q, + u32 (*get_elem_owner_func)(const char *)) { struct mlxsw_pci_queue_elem_info *elem_info; char *elem; @@ -1154,76 +1139,8 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci, mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask); } -#define MLXSW_RESOURCES_TABLE_END_ID 0xffff -#define MLXSW_MAX_SPAN_ID 0x2420 -#define MLXSW_MAX_LAG_ID 0x2520 -#define MLXSW_MAX_PORTS_IN_LAG_ID 0x2521 -#define MLXSW_KVD_SIZE_ID 0x1001 -#define 
MLXSW_KVD_SINGLE_MIN_SIZE_ID 0x1002 -#define MLXSW_KVD_DOUBLE_MIN_SIZE_ID 0x1003 -#define MLXSW_MAX_VIRTUAL_ROUTERS_ID 0x2C01 -#define MLXSW_MAX_SYSTEM_PORT_ID 0x2502 -#define MLXSW_MAX_VLAN_GROUPS_ID 0x2906 -#define MLXSW_MAX_REGIONS_ID 0x2901 -#define MLXSW_MAX_RIF_ID 0x2C02 -#define MLXSW_RESOURCES_QUERY_MAX_QUERIES 100 -#define MLXSW_RESOURCES_PER_QUERY 32 - -static void mlxsw_pci_resources_query_parse(int id, u64 val, - struct mlxsw_resources *resources) -{ - switch (id) { - case MLXSW_MAX_SPAN_ID: - resources->max_span = val; - resources->max_span_valid = 1; - break; - case MLXSW_MAX_LAG_ID: - resources->max_lag = val; - resources->max_lag_valid = 1; - break; - case MLXSW_MAX_PORTS_IN_LAG_ID: - resources->max_ports_in_lag = val; - resources->max_ports_in_lag_valid = 1; - break; - case MLXSW_KVD_SIZE_ID: - resources->kvd_size = val; - resources->kvd_size_valid = 1; - break; - case MLXSW_KVD_SINGLE_MIN_SIZE_ID: - resources->kvd_single_min_size = val; - resources->kvd_single_min_size_valid = 1; - break; - case MLXSW_KVD_DOUBLE_MIN_SIZE_ID: - resources->kvd_double_min_size = val; - resources->kvd_double_min_size_valid = 1; - break; - case MLXSW_MAX_VIRTUAL_ROUTERS_ID: - resources->max_virtual_routers = val; - resources->max_virtual_routers_valid = 1; - break; - case MLXSW_MAX_SYSTEM_PORT_ID: - resources->max_system_ports = val; - resources->max_system_ports_valid = 1; - break; - case MLXSW_MAX_VLAN_GROUPS_ID: - resources->max_vlan_groups = val; - resources->max_vlan_groups_valid = 1; - break; - case MLXSW_MAX_REGIONS_ID: - resources->max_regions = val; - resources->max_regions_valid = 1; - break; - case MLXSW_MAX_RIF_ID: - resources->max_rif = val; - resources->max_rif_valid = 1; - break; - default: - break; - } -} - static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox, - struct mlxsw_resources *resources, + struct mlxsw_res *res, u8 query_enabled) { int index, i; @@ -1237,19 +1154,20 @@ static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox, mlxsw_cmd_mbox_zero(mbox); - for (index = 0; index < MLXSW_RESOURCES_QUERY_MAX_QUERIES; index++) { + for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES; + index++) { err = mlxsw_cmd_query_resources(mlxsw_pci->core, mbox, index); if (err) return err; - for (i = 0; i < MLXSW_RESOURCES_PER_QUERY; i++) { + for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) { id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i); data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i); - if (id == MLXSW_RESOURCES_TABLE_END_ID) + if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID) return 0; - mlxsw_pci_resources_query_parse(id, data, resources); + mlxsw_res_parse(res, id, data); } } @@ -1259,13 +1177,14 @@ static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox, return -EIO; } -static int mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile, - struct mlxsw_resources *resources) +static int +mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile, + struct mlxsw_res *res) { - u32 singles_size, doubles_size, linear_size; + u32 single_size, double_size, linear_size; - if (!resources->kvd_single_min_size_valid || - !resources->kvd_double_min_size_valid || + if (!MLXSW_RES_VALID(res, KVD_SINGLE_MIN_SIZE) || + !MLXSW_RES_VALID(res, KVD_DOUBLE_MIN_SIZE) || !profile->used_kvd_split_data) return -EIO; @@ -1277,31 +1196,31 @@ static int mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *pr * Both sizes must be a multiplications of the * granularity 
from the profile. */ - doubles_size = (resources->kvd_size - linear_size); - doubles_size *= profile->kvd_hash_double_parts; - doubles_size /= (profile->kvd_hash_double_parts + - profile->kvd_hash_single_parts); - doubles_size /= profile->kvd_hash_granularity; - doubles_size *= profile->kvd_hash_granularity; - singles_size = resources->kvd_size - doubles_size - - linear_size; + double_size = MLXSW_RES_GET(res, KVD_SIZE) - linear_size; + double_size *= profile->kvd_hash_double_parts; + double_size /= profile->kvd_hash_double_parts + + profile->kvd_hash_single_parts; + double_size /= profile->kvd_hash_granularity; + double_size *= profile->kvd_hash_granularity; + single_size = MLXSW_RES_GET(res, KVD_SIZE) - double_size - + linear_size; /* Check results are legal. */ - if (singles_size < resources->kvd_single_min_size || - doubles_size < resources->kvd_double_min_size || - resources->kvd_size < linear_size) + if (single_size < MLXSW_RES_GET(res, KVD_SINGLE_MIN_SIZE) || + double_size < MLXSW_RES_GET(res, KVD_DOUBLE_MIN_SIZE) || + MLXSW_RES_GET(res, KVD_SIZE) < linear_size) return -EIO; - resources->kvd_single_size = singles_size; - resources->kvd_double_size = doubles_size; - resources->kvd_linear_size = linear_size; + MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size); + MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size); + MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size); return 0; } static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, const struct mlxsw_config_profile *profile, - struct mlxsw_resources *resources) + struct mlxsw_res *res) { int i; int err; @@ -1390,22 +1309,22 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set( mbox, profile->adaptive_routing_group_cap); } - if (resources->kvd_size_valid) { - err = mlxsw_pci_profile_get_kvd_sizes(profile, resources); + if (MLXSW_RES_VALID(res, KVD_SIZE)) { + err = mlxsw_pci_profile_get_kvd_sizes(profile, res); if (err) return err; mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1); mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox, - resources->kvd_linear_size); + MLXSW_RES_GET(res, KVD_LINEAR_SIZE)); mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox, 1); mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox, - resources->kvd_single_size); + MLXSW_RES_GET(res, KVD_SINGLE_SIZE)); mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set( mbox, 1); mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox, - resources->kvd_double_size); + MLXSW_RES_GET(res, KVD_DOUBLE_SIZE)); } for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++) @@ -1543,7 +1462,7 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci, static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, const struct mlxsw_config_profile *profile, - struct mlxsw_resources *resources) + struct mlxsw_res *res) { struct mlxsw_pci *mlxsw_pci = bus_priv; struct pci_dev *pdev = mlxsw_pci->pdev; @@ -1602,12 +1521,12 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, if (err) goto err_boardinfo; - err = mlxsw_pci_resources_query(mlxsw_pci, mbox, resources, + err = mlxsw_pci_resources_query(mlxsw_pci, mbox, res, profile->resource_query_enable); if (err) goto err_query_resources; - err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, resources); + err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res); if (err) goto err_config_profile; @@ -1617,7 +1536,7 @@ static int mlxsw_pci_init(void 
*bus_priv, struct mlxsw_core *mlxsw_core, err = request_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci_eq_irq_handler, 0, - mlxsw_pci_driver_name, mlxsw_pci); + mlxsw_pci->bus_info.device_kind, mlxsw_pci); if (err) { dev_err(&pdev->dev, "IRQ request failed\n"); goto err_request_eq_irq; @@ -1836,6 +1755,7 @@ static const struct mlxsw_bus mlxsw_pci_bus = { .skb_transmit_busy = mlxsw_pci_skb_transmit_busy, .skb_transmit = mlxsw_pci_skb_transmit, .cmd_exec = mlxsw_pci_cmd_exec, + .features = MLXSW_BUS_F_TXRX, }; static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, @@ -1863,6 +1783,7 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + const char *driver_name = pdev->driver->name; struct mlxsw_pci *mlxsw_pci; int err; @@ -1876,7 +1797,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_pci_enable_device; } - err = pci_request_regions(pdev, mlxsw_pci_driver_name); + err = pci_request_regions(pdev, driver_name); if (err) { dev_err(&pdev->dev, "pci_request_regions failed\n"); goto err_pci_request_regions; @@ -1927,7 +1848,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_msix_init; } - mlxsw_pci->bus_info.device_kind = mlxsw_pci_device_kind_get(id); + mlxsw_pci->bus_info.device_kind = driver_name; mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev); mlxsw_pci->bus_info.dev = &pdev->dev; @@ -1979,33 +1900,30 @@ static void mlxsw_pci_remove(struct pci_dev *pdev) kfree(mlxsw_pci); } -static struct pci_driver mlxsw_pci_driver = { - .name = mlxsw_pci_driver_name, - .id_table = mlxsw_pci_id_table, - .probe = mlxsw_pci_probe, - .remove = mlxsw_pci_remove, -}; +int mlxsw_pci_driver_register(struct pci_driver *pci_driver) +{ + pci_driver->probe = mlxsw_pci_probe; + pci_driver->remove = mlxsw_pci_remove; + return pci_register_driver(pci_driver); +} +EXPORT_SYMBOL(mlxsw_pci_driver_register); -static int __init mlxsw_pci_module_init(void) +void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver) { - int err; + pci_unregister_driver(pci_driver); +} +EXPORT_SYMBOL(mlxsw_pci_driver_unregister); +static int __init mlxsw_pci_module_init(void) +{ mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL); if (!mlxsw_pci_dbg_root) return -ENOMEM; - err = pci_register_driver(&mlxsw_pci_driver); - if (err) - goto err_register_driver; return 0; - -err_register_driver: - debugfs_remove_recursive(mlxsw_pci_dbg_root); - return err; } static void __exit mlxsw_pci_module_exit(void) { - pci_unregister_driver(&mlxsw_pci_driver); debugfs_remove_recursive(mlxsw_pci_dbg_root); } @@ -2015,4 +1933,3 @@ module_exit(mlxsw_pci_module_exit); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); MODULE_DESCRIPTION("Mellanox switch PCI interface driver"); -MODULE_DEVICE_TABLE(pci, mlxsw_pci_id_table); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h index d942a3e6fa41..d65582325cd5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/pci.h - * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -35,197 +35,31 @@ #ifndef _MLXSW_PCI_H #define _MLXSW_PCI_H -#include <linux/bitops.h> +#include <linux/pci.h> -#include "item.h" +#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738 +#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84 +#define PCI_DEVICE_ID_MELLANOX_SWITCHIB 0xcb20 +#define PCI_DEVICE_ID_MELLANOX_SWITCHIB2 0xcf08 -#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738 -#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84 -#define MLXSW_PCI_BAR0_SIZE (1024 * 1024) /* 1MB */ -#define MLXSW_PCI_PAGE_SIZE 4096 +#if IS_ENABLED(CONFIG_MLXSW_PCI) -#define MLXSW_PCI_CIR_BASE 0x71000 -#define MLXSW_PCI_CIR_IN_PARAM_HI MLXSW_PCI_CIR_BASE -#define MLXSW_PCI_CIR_IN_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x04) -#define MLXSW_PCI_CIR_IN_MODIFIER (MLXSW_PCI_CIR_BASE + 0x08) -#define MLXSW_PCI_CIR_OUT_PARAM_HI (MLXSW_PCI_CIR_BASE + 0x0C) -#define MLXSW_PCI_CIR_OUT_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x10) -#define MLXSW_PCI_CIR_TOKEN (MLXSW_PCI_CIR_BASE + 0x14) -#define MLXSW_PCI_CIR_CTRL (MLXSW_PCI_CIR_BASE + 0x18) -#define MLXSW_PCI_CIR_CTRL_GO_BIT BIT(23) -#define MLXSW_PCI_CIR_CTRL_EVREQ_BIT BIT(22) -#define MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT 12 -#define MLXSW_PCI_CIR_CTRL_STATUS_SHIFT 24 -#define MLXSW_PCI_CIR_TIMEOUT_MSECS 1000 +int mlxsw_pci_driver_register(struct pci_driver *pci_driver); +void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver); -#define MLXSW_PCI_SW_RESET 0xF0010 -#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) -#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 -#define MLXSW_PCI_FW_READY 0xA1844 -#define MLXSW_PCI_FW_READY_MASK 0xFF -#define MLXSW_PCI_FW_READY_MAGIC 0x5E +#else -#define MLXSW_PCI_DOORBELL_SDQ_OFFSET 0x000 -#define MLXSW_PCI_DOORBELL_RDQ_OFFSET 0x200 -#define MLXSW_PCI_DOORBELL_CQ_OFFSET 0x400 -#define MLXSW_PCI_DOORBELL_EQ_OFFSET 0x600 -#define MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET 0x800 -#define MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET 0xA00 +static inline int +mlxsw_pci_driver_register(struct pci_driver *pci_driver) +{ + return 0; +} -#define MLXSW_PCI_DOORBELL(offset, type_offset, num) \ - ((offset) + (type_offset) + (num) * 4) +static inline void +mlxsw_pci_driver_unregister(struct pci_driver *pci_driver) +{ +} -#define MLXSW_PCI_CQS_MAX 96 -#define MLXSW_PCI_EQS_COUNT 2 -#define MLXSW_PCI_EQ_ASYNC_NUM 0 -#define MLXSW_PCI_EQ_COMP_NUM 1 - -#define MLXSW_PCI_AQ_PAGES 8 -#define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES) -#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */ -#define MLXSW_PCI_CQE_SIZE 16 /* 16 bytes per element */ -#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */ -#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE) -#define MLXSW_PCI_CQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE) -#define MLXSW_PCI_EQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE) -#define MLXSW_PCI_EQE_UPDATE_COUNT 0x80 - -#define MLXSW_PCI_WQE_SG_ENTRIES 3 -#define MLXSW_PCI_WQE_TYPE_ETHERNET 0xA - -/* pci_wqe_c - * If set it indicates that a completion should be reported upon - * execution of this descriptor. 
- */ -MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1); - -/* pci_wqe_lp - * Local Processing, set if packet should be processed by the local - * switch hardware: - * For Ethernet EMAD (Direct Route and non Direct Route) - - * must be set if packet destination is local device - * For InfiniBand CTL - must be set if packet destination is local device - * Otherwise it must be clear - * Local Process packets must not exceed the size of 2K (including payload - * and headers). - */ -MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1); - -/* pci_wqe_type - * Packet type. - */ -MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4); - -/* pci_wqe_byte_count - * Size of i-th scatter/gather entry, 0 if entry is unused. - */ -MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false); - -/* pci_wqe_address - * Physical address of i-th scatter/gather entry. - * Gather Entries must be 2Byte aligned. - */ -MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false); - -/* pci_cqe_lag - * Packet arrives from a port which is a LAG - */ -MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1); - -/* pci_cqe_system_port/lag_id - * When lag=0: System port on which the packet was received - * When lag=1: - * bits [15:4] LAG ID on which the packet was received - * bits [3:0] sub_port on which the packet was received - */ -MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16); -MLXSW_ITEM32(pci, cqe, lag_id, 0x00, 4, 12); -MLXSW_ITEM32(pci, cqe, lag_port_index, 0x00, 0, 4); - -/* pci_cqe_wqe_counter - * WQE count of the WQEs completed on the associated dqn - */ -MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16); - -/* pci_cqe_byte_count - * Byte count of received packets including additional two - * Reserved Bytes that are append to the end of the frame. - * Reserved for Send CQE. - */ -MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14); - -/* pci_cqe_trap_id - * Trap ID that captured the packet. - */ -MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8); - -/* pci_cqe_crc - * Length include CRC. Indicates the length field includes - * the packet's CRC. - */ -MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1); - -/* pci_cqe_e - * CQE with Error. - */ -MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1); - -/* pci_cqe_sr - * 1 - Send Queue - * 0 - Receive Queue - */ -MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1); - -/* pci_cqe_dqn - * Descriptor Queue (DQ) Number. - */ -MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5); - -/* pci_cqe_owner - * Ownership bit. - */ -MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1); - -/* pci_eqe_event_type - * Event type. - */ -MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8); -#define MLXSW_PCI_EQE_EVENT_TYPE_COMP 0x00 -#define MLXSW_PCI_EQE_EVENT_TYPE_CMD 0x0A - -/* pci_eqe_event_sub_type - * Event type. - */ -MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8); - -/* pci_eqe_cqn - * Completion Queue that triggeret this EQE. - */ -MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7); - -/* pci_eqe_owner - * Ownership bit. 
- */ -MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1); - -/* pci_eqe_cmd_token - * Command completion event - token - */ -MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16); - -/* pci_eqe_cmd_status - * Command completion event - status - */ -MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8); - -/* pci_eqe_cmd_out_param_h - * Command completion event - output parameter - higher part - */ -MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32); - -/* pci_eqe_cmd_out_param_l - * Command completion event - output parameter - lower part - */ -MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32); +#endif #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h new file mode 100644 index 000000000000..d147ddd97997 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -0,0 +1,229 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/pci_hw.h + * Copyright (c) 2015-2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015-2016 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MLXSW_PCI_HW_H +#define _MLXSW_PCI_HW_H + +#include <linux/bitops.h> + +#include "item.h" + +#define MLXSW_PCI_BAR0_SIZE (1024 * 1024) /* 1MB */ +#define MLXSW_PCI_PAGE_SIZE 4096 + +#define MLXSW_PCI_CIR_BASE 0x71000 +#define MLXSW_PCI_CIR_IN_PARAM_HI MLXSW_PCI_CIR_BASE +#define MLXSW_PCI_CIR_IN_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x04) +#define MLXSW_PCI_CIR_IN_MODIFIER (MLXSW_PCI_CIR_BASE + 0x08) +#define MLXSW_PCI_CIR_OUT_PARAM_HI (MLXSW_PCI_CIR_BASE + 0x0C) +#define MLXSW_PCI_CIR_OUT_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x10) +#define MLXSW_PCI_CIR_TOKEN (MLXSW_PCI_CIR_BASE + 0x14) +#define MLXSW_PCI_CIR_CTRL (MLXSW_PCI_CIR_BASE + 0x18) +#define MLXSW_PCI_CIR_CTRL_GO_BIT BIT(23) +#define MLXSW_PCI_CIR_CTRL_EVREQ_BIT BIT(22) +#define MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT 12 +#define MLXSW_PCI_CIR_CTRL_STATUS_SHIFT 24 +#define MLXSW_PCI_CIR_TIMEOUT_MSECS 1000 + +#define MLXSW_PCI_SW_RESET 0xF0010 +#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) +#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 +#define MLXSW_PCI_FW_READY 0xA1844 +#define MLXSW_PCI_FW_READY_MASK 0xFFFF +#define MLXSW_PCI_FW_READY_MAGIC 0x5E + +#define MLXSW_PCI_DOORBELL_SDQ_OFFSET 0x000 +#define MLXSW_PCI_DOORBELL_RDQ_OFFSET 0x200 +#define MLXSW_PCI_DOORBELL_CQ_OFFSET 0x400 +#define MLXSW_PCI_DOORBELL_EQ_OFFSET 0x600 +#define MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET 0x800 +#define MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET 0xA00 + +#define MLXSW_PCI_DOORBELL(offset, type_offset, num) \ + ((offset) + (type_offset) + (num) * 4) + +#define MLXSW_PCI_CQS_MAX 96 +#define MLXSW_PCI_EQS_COUNT 2 +#define MLXSW_PCI_EQ_ASYNC_NUM 0 +#define MLXSW_PCI_EQ_COMP_NUM 1 + +#define MLXSW_PCI_AQ_PAGES 8 +#define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES) +#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */ +#define MLXSW_PCI_CQE_SIZE 16 /* 16 bytes per element */ +#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */ +#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE) +#define MLXSW_PCI_CQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE) +#define MLXSW_PCI_EQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE) +#define MLXSW_PCI_EQE_UPDATE_COUNT 0x80 + +#define MLXSW_PCI_WQE_SG_ENTRIES 3 +#define MLXSW_PCI_WQE_TYPE_ETHERNET 0xA + +/* pci_wqe_c + * If set it indicates that a completion should be reported upon + * execution of this descriptor. + */ +MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1); + +/* pci_wqe_lp + * Local Processing, set if packet should be processed by the local + * switch hardware: + * For Ethernet EMAD (Direct Route and non Direct Route) - + * must be set if packet destination is local device + * For InfiniBand CTL - must be set if packet destination is local device + * Otherwise it must be clear + * Local Process packets must not exceed the size of 2K (including payload + * and headers). + */ +MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1); + +/* pci_wqe_type + * Packet type. + */ +MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4); + +/* pci_wqe_byte_count + * Size of i-th scatter/gather entry, 0 if entry is unused. + */ +MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false); + +/* pci_wqe_address + * Physical address of i-th scatter/gather entry. + * Gather Entries must be 2Byte aligned. 
+ */ +MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false); + +/* pci_cqe_lag + * Packet arrives from a port which is a LAG + */ +MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1); + +/* pci_cqe_system_port/lag_id + * When lag=0: System port on which the packet was received + * When lag=1: + * bits [15:4] LAG ID on which the packet was received + * bits [3:0] sub_port on which the packet was received + */ +MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16); +MLXSW_ITEM32(pci, cqe, lag_id, 0x00, 4, 12); +MLXSW_ITEM32(pci, cqe, lag_port_index, 0x00, 0, 4); + +/* pci_cqe_wqe_counter + * WQE count of the WQEs completed on the associated dqn + */ +MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16); + +/* pci_cqe_byte_count + * Byte count of received packets including additional two + * Reserved Bytes that are appended to the end of the frame. + * Reserved for Send CQE. + */ +MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14); + +/* pci_cqe_trap_id + * Trap ID that captured the packet. + */ +MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8); + +/* pci_cqe_crc + * Length includes CRC. Indicates the length field includes + * the packet's CRC. + */ +MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1); + +/* pci_cqe_e + * CQE with Error. + */ +MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1); + +/* pci_cqe_sr + * 1 - Send Queue + * 0 - Receive Queue + */ +MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1); + +/* pci_cqe_dqn + * Descriptor Queue (DQ) Number. + */ +MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5); + +/* pci_cqe_owner + * Ownership bit. + */ +MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1); + +/* pci_eqe_event_type + * Event type. + */ +MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8); +#define MLXSW_PCI_EQE_EVENT_TYPE_COMP 0x00 +#define MLXSW_PCI_EQE_EVENT_TYPE_CMD 0x0A + +/* pci_eqe_event_sub_type + * Event sub-type. + */ +MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8); + +/* pci_eqe_cqn + * Completion Queue that triggered this EQE. + */ +MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7); + +/* pci_eqe_owner + * Ownership bit.
+ */ +MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1); + +/* pci_eqe_cmd_token + * Command completion event - token + */ +MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16); + +/* pci_eqe_cmd_status + * Command completion event - status + */ +MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8); + +/* pci_eqe_cmd_out_param_h + * Command completion event - output parameter - higher part + */ +MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32); + +/* pci_eqe_cmd_out_param_l + * Command completion event - output parameter - lower part + */ +MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h index af371a82c35b..3d42146473b3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/port.h +++ b/drivers/net/ethernet/mellanox/mlxsw/port.h @@ -44,6 +44,7 @@ #define MLXSW_PORT_SWID_DISABLED_PORT 255 #define MLXSW_PORT_SWID_ALL_SWIDS 254 +#define MLXSW_PORT_SWID_TYPE_IB 1 #define MLXSW_PORT_SWID_TYPE_ETH 2 #define MLXSW_PORT_MID 0xd000 @@ -51,6 +52,9 @@ #define MLXSW_PORT_MAX_PHY_PORTS 0x40 #define MLXSW_PORT_MAX_PORTS (MLXSW_PORT_MAX_PHY_PORTS + 1) +#define MLXSW_PORT_MAX_IB_PHY_PORTS 36 +#define MLXSW_PORT_MAX_IB_PORTS (MLXSW_PORT_MAX_IB_PHY_PORTS + 1) + #define MLXSW_PORT_DEVID_BITS_OFFSET 10 #define MLXSW_PORT_PHY_BITS_OFFSET 4 #define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 6460c7256f2b..1357fe04391b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -48,8 +48,16 @@ struct mlxsw_reg_info { u16 id; u16 len; /* In u8 */ + const char *name; }; +#define MLXSW_REG_DEFINE(_name, _id, _len) \ +static const struct mlxsw_reg_info mlxsw_reg_##_name = { \ + .id = _id, \ + .len = _len, \ + .name = #_name, \ +} + #define MLXSW_REG(type) (&mlxsw_reg_##type) #define MLXSW_REG_LEN(type) MLXSW_REG(type)->len #define MLXSW_REG_ZERO(type, payload) memset(payload, 0, MLXSW_REG(type)->len) @@ -61,10 +69,7 @@ struct mlxsw_reg_info { #define MLXSW_REG_SGCR_ID 0x2000 #define MLXSW_REG_SGCR_LEN 0x10 -static const struct mlxsw_reg_info mlxsw_reg_sgcr = { - .id = MLXSW_REG_SGCR_ID, - .len = MLXSW_REG_SGCR_LEN, -}; +MLXSW_REG_DEFINE(sgcr, MLXSW_REG_SGCR_ID, MLXSW_REG_SGCR_LEN); /* reg_sgcr_llb * Link Local Broadcast (Default=0) @@ -87,10 +92,7 @@ static inline void mlxsw_reg_sgcr_pack(char *payload, bool llb) #define MLXSW_REG_SPAD_ID 0x2002 #define MLXSW_REG_SPAD_LEN 0x10 -static const struct mlxsw_reg_info mlxsw_reg_spad = { - .id = MLXSW_REG_SPAD_ID, - .len = MLXSW_REG_SPAD_LEN, -}; +MLXSW_REG_DEFINE(spad, MLXSW_REG_SPAD_ID, MLXSW_REG_SPAD_LEN); /* reg_spad_base_mac * Base MAC address for the switch partitions. @@ -109,10 +111,7 @@ MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6); #define MLXSW_REG_SMID_ID 0x2007 #define MLXSW_REG_SMID_LEN 0x240 -static const struct mlxsw_reg_info mlxsw_reg_smid = { - .id = MLXSW_REG_SMID_ID, - .len = MLXSW_REG_SMID_LEN, -}; +MLXSW_REG_DEFINE(smid, MLXSW_REG_SMID_ID, MLXSW_REG_SMID_LEN); /* reg_smid_swid * Switch partition ID. 
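The MLXSW_REG_DEFINE() macro introduced above replaces the open-coded mlxsw_reg_info initializers and additionally records a .name string for each register. A minimal sketch of what that enables for generic code; the helper and its message are illustrative only, not part of this patch:

/* Illustrative only: with .name populated, common code can report which
 * register an access failed on without per-register string tables.
 */
static void example_reg_report_err(const struct mlxsw_reg_info *reg, int err)
{
	pr_err("mlxsw: reg %s (id 0x%x, len 0x%x) access failed: %d\n",
	       reg->name, reg->id, reg->len, err);
}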
@@ -156,10 +155,7 @@ static inline void mlxsw_reg_smid_pack(char *payload, u16 mid, #define MLXSW_REG_SSPR_ID 0x2008 #define MLXSW_REG_SSPR_LEN 0x8 -static const struct mlxsw_reg_info mlxsw_reg_sspr = { - .id = MLXSW_REG_SSPR_ID, - .len = MLXSW_REG_SSPR_LEN, -}; +MLXSW_REG_DEFINE(sspr, MLXSW_REG_SSPR_ID, MLXSW_REG_SSPR_LEN); /* reg_sspr_m * Master - if set, then the record describes the master system port. @@ -215,10 +211,7 @@ static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port) #define MLXSW_REG_SFDAT_ID 0x2009 #define MLXSW_REG_SFDAT_LEN 0x8 -static const struct mlxsw_reg_info mlxsw_reg_sfdat = { - .id = MLXSW_REG_SFDAT_ID, - .len = MLXSW_REG_SFDAT_LEN, -}; +MLXSW_REG_DEFINE(sfdat, MLXSW_REG_SFDAT_ID, MLXSW_REG_SFDAT_LEN); /* reg_sfdat_swid * Switch partition ID. @@ -256,10 +249,7 @@ static inline void mlxsw_reg_sfdat_pack(char *payload, u32 age_time) #define MLXSW_REG_SFD_LEN (MLXSW_REG_SFD_BASE_LEN + \ MLXSW_REG_SFD_REC_LEN * MLXSW_REG_SFD_REC_MAX_COUNT) -static const struct mlxsw_reg_info mlxsw_reg_sfd = { - .id = MLXSW_REG_SFD_ID, - .len = MLXSW_REG_SFD_LEN, -}; +MLXSW_REG_DEFINE(sfd, MLXSW_REG_SFD_ID, MLXSW_REG_SFD_LEN); /* reg_sfd_swid * Switch partition ID for queries. Reserved on Write. @@ -580,10 +570,7 @@ mlxsw_reg_sfd_mc_pack(char *payload, int rec_index, #define MLXSW_REG_SFN_LEN (MLXSW_REG_SFN_BASE_LEN + \ MLXSW_REG_SFN_REC_LEN * MLXSW_REG_SFN_REC_MAX_COUNT) -static const struct mlxsw_reg_info mlxsw_reg_sfn = { - .id = MLXSW_REG_SFN_ID, - .len = MLXSW_REG_SFN_LEN, -}; +MLXSW_REG_DEFINE(sfn, MLXSW_REG_SFN_ID, MLXSW_REG_SFN_LEN); /* reg_sfn_swid * Switch partition ID. @@ -701,10 +688,7 @@ static inline void mlxsw_reg_sfn_mac_lag_unpack(char *payload, int rec_index, #define MLXSW_REG_SPMS_ID 0x200D #define MLXSW_REG_SPMS_LEN 0x404 -static const struct mlxsw_reg_info mlxsw_reg_spms = { - .id = MLXSW_REG_SPMS_ID, - .len = MLXSW_REG_SPMS_LEN, -}; +MLXSW_REG_DEFINE(spms, MLXSW_REG_SPMS_ID, MLXSW_REG_SPMS_LEN); /* reg_spms_local_port * Local port number. @@ -748,10 +732,7 @@ static inline void mlxsw_reg_spms_vid_pack(char *payload, u16 vid, #define MLXSW_REG_SPVID_ID 0x200E #define MLXSW_REG_SPVID_LEN 0x08 -static const struct mlxsw_reg_info mlxsw_reg_spvid = { - .id = MLXSW_REG_SPVID_ID, - .len = MLXSW_REG_SPVID_LEN, -}; +MLXSW_REG_DEFINE(spvid, MLXSW_REG_SPVID_ID, MLXSW_REG_SPVID_LEN); /* reg_spvid_local_port * Local port number. @@ -792,10 +773,7 @@ static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid) #define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \ MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT) -static const struct mlxsw_reg_info mlxsw_reg_spvm = { - .id = MLXSW_REG_SPVM_ID, - .len = MLXSW_REG_SPVM_LEN, -}; +MLXSW_REG_DEFINE(spvm, MLXSW_REG_SPVM_ID, MLXSW_REG_SPVM_LEN); /* reg_spvm_pt * Priority tagged. If this bit is set, packets forwarded to the port with @@ -891,10 +869,7 @@ static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port, #define MLXSW_REG_SPAFT_ID 0x2010 #define MLXSW_REG_SPAFT_LEN 0x08 -static const struct mlxsw_reg_info mlxsw_reg_spaft = { - .id = MLXSW_REG_SPAFT_ID, - .len = MLXSW_REG_SPAFT_LEN, -}; +MLXSW_REG_DEFINE(spaft, MLXSW_REG_SPAFT_ID, MLXSW_REG_SPAFT_LEN); /* reg_spaft_local_port * Local port number. 
@@ -947,10 +922,7 @@ static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port, #define MLXSW_REG_SFGC_ID 0x2011 #define MLXSW_REG_SFGC_LEN 0x10 -static const struct mlxsw_reg_info mlxsw_reg_sfgc = { - .id = MLXSW_REG_SFGC_ID, - .len = MLXSW_REG_SFGC_LEN, -}; +MLXSW_REG_DEFINE(sfgc, MLXSW_REG_SFGC_ID, MLXSW_REG_SFGC_LEN); enum mlxsw_reg_sfgc_type { MLXSW_REG_SFGC_TYPE_BROADCAST, @@ -1045,10 +1017,7 @@ mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type, #define MLXSW_REG_SFTR_ID 0x2012 #define MLXSW_REG_SFTR_LEN 0x420 -static const struct mlxsw_reg_info mlxsw_reg_sftr = { - .id = MLXSW_REG_SFTR_ID, - .len = MLXSW_REG_SFTR_LEN, -}; +MLXSW_REG_DEFINE(sftr, MLXSW_REG_SFTR_ID, MLXSW_REG_SFTR_LEN); /* reg_sftr_swid * Switch partition ID with which to associate the port. @@ -1118,10 +1087,7 @@ static inline void mlxsw_reg_sftr_pack(char *payload, #define MLXSW_REG_SFDF_ID 0x2013 #define MLXSW_REG_SFDF_LEN 0x14 -static const struct mlxsw_reg_info mlxsw_reg_sfdf = { - .id = MLXSW_REG_SFDF_ID, - .len = MLXSW_REG_SFDF_LEN, -}; +MLXSW_REG_DEFINE(sfdf, MLXSW_REG_SFDF_ID, MLXSW_REG_SFDF_LEN); /* reg_sfdf_swid * Switch partition ID. @@ -1205,10 +1171,7 @@ MLXSW_ITEM32(reg, sfdf, lag_fid_lag_id, 0x08, 0, 10); #define MLXSW_REG_SLDR_ID 0x2014 #define MLXSW_REG_SLDR_LEN 0x0C /* counting in only one port in list */ -static const struct mlxsw_reg_info mlxsw_reg_sldr = { - .id = MLXSW_REG_SLDR_ID, - .len = MLXSW_REG_SLDR_LEN, -}; +MLXSW_REG_DEFINE(sldr, MLXSW_REG_SLDR_ID, MLXSW_REG_SLDR_LEN); enum mlxsw_reg_sldr_op { /* Indicates a creation of a new LAG-ID, lag_id must be valid */ @@ -1288,10 +1251,7 @@ static inline void mlxsw_reg_sldr_lag_remove_port_pack(char *payload, u8 lag_id, #define MLXSW_REG_SLCR_ID 0x2015 #define MLXSW_REG_SLCR_LEN 0x10 -static const struct mlxsw_reg_info mlxsw_reg_slcr = { - .id = MLXSW_REG_SLCR_ID, - .len = MLXSW_REG_SLCR_LEN, -}; +MLXSW_REG_DEFINE(slcr, MLXSW_REG_SLCR_ID, MLXSW_REG_SLCR_LEN); enum mlxsw_reg_slcr_pp { /* Global Configuration (for all ports) */ @@ -1404,10 +1364,7 @@ static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash) #define MLXSW_REG_SLCOR_ID 0x2016 #define MLXSW_REG_SLCOR_LEN 0x10 -static const struct mlxsw_reg_info mlxsw_reg_slcor = { - .id = MLXSW_REG_SLCOR_ID, - .len = MLXSW_REG_SLCOR_LEN, -}; +MLXSW_REG_DEFINE(slcor, MLXSW_REG_SLCOR_ID, MLXSW_REG_SLCOR_LEN); enum mlxsw_reg_slcor_col { /* Port is added with collector disabled */ @@ -1490,10 +1447,7 @@ static inline void mlxsw_reg_slcor_col_disable_pack(char *payload, #define MLXSW_REG_SPMLR_ID 0x2018 #define MLXSW_REG_SPMLR_LEN 0x8 -static const struct mlxsw_reg_info mlxsw_reg_spmlr = { - .id = MLXSW_REG_SPMLR_ID, - .len = MLXSW_REG_SPMLR_LEN, -}; +MLXSW_REG_DEFINE(spmlr, MLXSW_REG_SPMLR_ID, MLXSW_REG_SPMLR_LEN); /* reg_spmlr_local_port * Local port number. @@ -1544,10 +1498,7 @@ static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port, #define MLXSW_REG_SVFA_ID 0x201C #define MLXSW_REG_SVFA_LEN 0x10 -static const struct mlxsw_reg_info mlxsw_reg_svfa = { - .id = MLXSW_REG_SVFA_ID, - .len = MLXSW_REG_SVFA_LEN, -}; +MLXSW_REG_DEFINE(svfa, MLXSW_REG_SVFA_ID, MLXSW_REG_SVFA_LEN); /* reg_svfa_swid * Switch partition ID. 
@@ -1636,10 +1587,7 @@ static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port, #define MLXSW_REG_SVPE_ID 0x201E #define MLXSW_REG_SVPE_LEN 0x4 -static const struct mlxsw_reg_info mlxsw_reg_svpe = { - .id = MLXSW_REG_SVPE_ID, - .len = MLXSW_REG_SVPE_LEN, -}; +MLXSW_REG_DEFINE(svpe, MLXSW_REG_SVPE_ID, MLXSW_REG_SVPE_LEN); /* reg_svpe_local_port * Local port number @@ -1672,10 +1620,7 @@ static inline void mlxsw_reg_svpe_pack(char *payload, u8 local_port, #define MLXSW_REG_SFMR_ID 0x201F #define MLXSW_REG_SFMR_LEN 0x18 -static const struct mlxsw_reg_info mlxsw_reg_sfmr = { - .id = MLXSW_REG_SFMR_ID, - .len = MLXSW_REG_SFMR_LEN, -}; +MLXSW_REG_DEFINE(sfmr, MLXSW_REG_SFMR_ID, MLXSW_REG_SFMR_LEN); enum mlxsw_reg_sfmr_op { MLXSW_REG_SFMR_OP_CREATE_FID, @@ -1762,10 +1707,7 @@ static inline void mlxsw_reg_sfmr_pack(char *payload, MLXSW_REG_SPVMLR_REC_LEN * \ MLXSW_REG_SPVMLR_REC_MAX_COUNT) -static const struct mlxsw_reg_info mlxsw_reg_spvmlr = { - .id = MLXSW_REG_SPVMLR_ID, - .len = MLXSW_REG_SPVMLR_LEN, -}; +MLXSW_REG_DEFINE(spvmlr, MLXSW_REG_SPVMLR_ID, MLXSW_REG_SPVMLR_LEN); /* reg_spvmlr_local_port * Local ingress port. @@ -1815,6 +1757,146 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port, } } +/* QPCR - QoS Policer Configuration Register + * ----------------------------------------- + * The QPCR register is used to create policers that limit + * the rate of bytes or packets via some trap group. + */ +#define MLXSW_REG_QPCR_ID 0x4004 +#define MLXSW_REG_QPCR_LEN 0x28 + +MLXSW_REG_DEFINE(qpcr, MLXSW_REG_QPCR_ID, MLXSW_REG_QPCR_LEN); + +enum mlxsw_reg_qpcr_g { + MLXSW_REG_QPCR_G_GLOBAL = 2, + MLXSW_REG_QPCR_G_STORM_CONTROL = 3, +}; + +/* reg_qpcr_g + * The policer type. + * Access: Index + */ +MLXSW_ITEM32(reg, qpcr, g, 0x00, 14, 2); + +/* reg_qpcr_pid + * Policer ID. + * Access: Index + */ +MLXSW_ITEM32(reg, qpcr, pid, 0x00, 0, 14); + +/* reg_qpcr_color_aware + * Is the policer aware of colors. + * Must be 0 (unaware) for cpu port. + * Access: RW for unbounded policer. RO for bounded policer. + */ +MLXSW_ITEM32(reg, qpcr, color_aware, 0x04, 15, 1); + +/* reg_qpcr_bytes + * Is the policer limit for bytes per sec or packets per sec. + * 0 - packets + * 1 - bytes + * Access: RW for unbounded policer. RO for bounded policer. + */ +MLXSW_ITEM32(reg, qpcr, bytes, 0x04, 14, 1); + +enum mlxsw_reg_qpcr_ir_units { + MLXSW_REG_QPCR_IR_UNITS_M, + MLXSW_REG_QPCR_IR_UNITS_K, +}; + +/* reg_qpcr_ir_units + * Policer's units for cir and eir fields (for bytes limits only) + * 1 - 10^3 + * 0 - 10^6 + * Access: OP + */ +MLXSW_ITEM32(reg, qpcr, ir_units, 0x04, 12, 1); + +enum mlxsw_reg_qpcr_rate_type { + MLXSW_REG_QPCR_RATE_TYPE_SINGLE = 1, + MLXSW_REG_QPCR_RATE_TYPE_DOUBLE = 2, +}; + +/* reg_qpcr_rate_type + * Policer can have one limit (single rate) or 2 limits with specific operation + * for packets that exceed the lower rate but not the upper one. + * (For cpu port must be single rate) + * Access: RW for unbounded policer. RO for bounded policer. + */ +MLXSW_ITEM32(reg, qpcr, rate_type, 0x04, 8, 2); + +/* reg_qpcr_cbs + * Policer's committed burst size. + * The policer is working with time slices of 50 nano sec. By default every + * slice is granted the proportionate share of the committed rate. If we want to + * allow a slice to exceed that share (while still keeping the rate per sec) we + * can allow burst. The burst size is between the default proportionate share + * (and no lower than 8) to 32Gb.
(Even though giving a number higher than the + * committed rate will result in exceeding the rate). The burst size must be a + * power of 2, determined by 2^cbs. + * Access: RW + */ +MLXSW_ITEM32(reg, qpcr, cbs, 0x08, 24, 6); + +/* reg_qpcr_cir + * Policer's committed rate. + * The rate used for single rate, the lower rate for double rate. + * For bytes limits, the rate will be this value * the unit from ir_units. + * (Resolution error is up to 1%). + * Access: RW + */ +MLXSW_ITEM32(reg, qpcr, cir, 0x0C, 0, 32); + +/* reg_qpcr_eir + * Policer's exceed rate. + * The higher rate for double rate, reserved for single rate. + * For bytes limits, the rate will be this value * the unit from ir_units. + * (Resolution error is up to 1%). + * Access: RW + */ +MLXSW_ITEM32(reg, qpcr, eir, 0x10, 0, 32); + +#define MLXSW_REG_QPCR_DOUBLE_RATE_ACTION 2 + +/* reg_qpcr_exceed_action + * What to do with packets between the 2 limits for double rate. + * Access: RW for unbounded policer. RO for bounded policer. + */ +MLXSW_ITEM32(reg, qpcr, exceed_action, 0x14, 0, 4); + +enum mlxsw_reg_qpcr_action { + /* Discard */ + MLXSW_REG_QPCR_ACTION_DISCARD = 1, + /* Forward and set color to red. + * If the packet is intended for the cpu port, it will be dropped. + */ + MLXSW_REG_QPCR_ACTION_FORWARD = 2, +}; + +/* reg_qpcr_violate_action + * What to do with packets that cross the cir limit (for single rate) or the eir + * limit (for double rate). + * Access: RW for unbounded policer. RO for bounded policer. + */ +MLXSW_ITEM32(reg, qpcr, violate_action, 0x18, 0, 4); + +static inline void mlxsw_reg_qpcr_pack(char *payload, u16 pid, + enum mlxsw_reg_qpcr_ir_units ir_units, + bool bytes, u32 cir, u16 cbs) +{ + MLXSW_REG_ZERO(qpcr, payload); + mlxsw_reg_qpcr_pid_set(payload, pid); + mlxsw_reg_qpcr_g_set(payload, MLXSW_REG_QPCR_G_GLOBAL); + mlxsw_reg_qpcr_rate_type_set(payload, MLXSW_REG_QPCR_RATE_TYPE_SINGLE); + mlxsw_reg_qpcr_violate_action_set(payload, + MLXSW_REG_QPCR_ACTION_DISCARD); + mlxsw_reg_qpcr_cir_set(payload, cir); + mlxsw_reg_qpcr_ir_units_set(payload, ir_units); + mlxsw_reg_qpcr_bytes_set(payload, bytes); + mlxsw_reg_qpcr_cbs_set(payload, cbs); +} + /* QTCT - QoS Switch Traffic Class Table * ------------------------------------- * Configures the mapping between the packet switch priority and the @@ -1823,10 +1905,7 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port, #define MLXSW_REG_QTCT_ID 0x400A #define MLXSW_REG_QTCT_LEN 0x08 -static const struct mlxsw_reg_info mlxsw_reg_qtct = { - .id = MLXSW_REG_QTCT_ID, - .len = MLXSW_REG_QTCT_LEN, -}; +MLXSW_REG_DEFINE(qtct, MLXSW_REG_QTCT_ID, MLXSW_REG_QTCT_LEN); /* reg_qtct_local_port * Local port number. @@ -1875,10 +1954,7 @@ static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port, #define MLXSW_REG_QEEC_ID 0x400D #define MLXSW_REG_QEEC_LEN 0x1C -static const struct mlxsw_reg_info mlxsw_reg_qeec = { - .id = MLXSW_REG_QEEC_ID, - .len = MLXSW_REG_QEEC_LEN, -}; +MLXSW_REG_DEFINE(qeec, MLXSW_REG_QEEC_ID, MLXSW_REG_QEEC_LEN); /* reg_qeec_local_port * Local port number. @@ -2000,10 +2076,7 @@ static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port, #define MLXSW_REG_PMLP_ID 0x5002 #define MLXSW_REG_PMLP_LEN 0x40 -static const struct mlxsw_reg_info mlxsw_reg_pmlp = { - .id = MLXSW_REG_PMLP_ID, - .len = MLXSW_REG_PMLP_LEN, -}; +MLXSW_REG_DEFINE(pmlp, MLXSW_REG_PMLP_ID, MLXSW_REG_PMLP_LEN); /* reg_pmlp_rxtx * 0 - Tx value is used for both Tx and Rx.
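The QPCR policer interface added above ends in mlxsw_reg_qpcr_pack(), which only covers the global single-rate discard case. A hedged usage sketch, assuming an mlxsw_core handle in scope; the policer id, rate and burst values are made up for illustration:

char qpcr_pl[MLXSW_REG_QPCR_LEN];
int err;

/* Hypothetical values: policer 1 limited to 128 packets/sec (bytes=false,
 * so ir_units is ignored), committed burst of 2^7 packets.
 */
mlxsw_reg_qpcr_pack(qpcr_pl, 1, MLXSW_REG_QPCR_IR_UNITS_M, false, 128, 7);
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);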
@@ -2059,10 +2132,7 @@ static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port) #define MLXSW_REG_PMTU_ID 0x5003 #define MLXSW_REG_PMTU_LEN 0x10 -static const struct mlxsw_reg_info mlxsw_reg_pmtu = { - .id = MLXSW_REG_PMTU_ID, - .len = MLXSW_REG_PMTU_LEN, -}; +MLXSW_REG_DEFINE(pmtu, MLXSW_REG_PMTU_ID, MLXSW_REG_PMTU_LEN); /* reg_pmtu_local_port * Local port number. @@ -2116,10 +2186,7 @@ static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port, #define MLXSW_REG_PTYS_ID 0x5004 #define MLXSW_REG_PTYS_LEN 0x40 -static const struct mlxsw_reg_info mlxsw_reg_ptys = { - .id = MLXSW_REG_PTYS_ID, - .len = MLXSW_REG_PTYS_LEN, -}; +MLXSW_REG_DEFINE(ptys, MLXSW_REG_PTYS_ID, MLXSW_REG_PTYS_LEN); /* reg_ptys_local_port * Local port number. @@ -2127,6 +2194,7 @@ static const struct mlxsw_reg_info mlxsw_reg_ptys = { */ MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8); +#define MLXSW_REG_PTYS_PROTO_MASK_IB BIT(0) #define MLXSW_REG_PTYS_PROTO_MASK_ETH BIT(2) /* reg_ptys_proto_mask @@ -2185,18 +2253,61 @@ MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4); */ MLXSW_ITEM32(reg, ptys, eth_proto_cap, 0x0C, 0, 32); +/* reg_ptys_ib_link_width_cap + * IB port supported widths. + * Access: RO + */ +MLXSW_ITEM32(reg, ptys, ib_link_width_cap, 0x10, 16, 16); + +#define MLXSW_REG_PTYS_IB_SPEED_SDR BIT(0) +#define MLXSW_REG_PTYS_IB_SPEED_DDR BIT(1) +#define MLXSW_REG_PTYS_IB_SPEED_QDR BIT(2) +#define MLXSW_REG_PTYS_IB_SPEED_FDR10 BIT(3) +#define MLXSW_REG_PTYS_IB_SPEED_FDR BIT(4) +#define MLXSW_REG_PTYS_IB_SPEED_EDR BIT(5) + +/* reg_ptys_ib_proto_cap + * IB port supported speeds and protocols. + * Access: RO + */ +MLXSW_ITEM32(reg, ptys, ib_proto_cap, 0x10, 0, 16); + /* reg_ptys_eth_proto_admin * Speed and protocol to set port to. * Access: RW */ MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32); +/* reg_ptys_ib_link_width_admin + * IB width to set port to. + * Access: RW + */ +MLXSW_ITEM32(reg, ptys, ib_link_width_admin, 0x1C, 16, 16); + +/* reg_ptys_ib_proto_admin + * IB speeds and protocols to set port to. + * Access: RW + */ +MLXSW_ITEM32(reg, ptys, ib_proto_admin, 0x1C, 0, 16); + /* reg_ptys_eth_proto_oper * The current speed and protocol configured for the port. * Access: RO */ MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32); +/* reg_ptys_ib_link_width_oper + * The current IB link width of the port. + * Access: RO + */ +MLXSW_ITEM32(reg, ptys, ib_link_width_oper, 0x28, 16, 16); + +/* reg_ptys_ib_proto_oper + * The current IB speed and protocol. + * Access: RO + */ +MLXSW_ITEM32(reg, ptys, ib_proto_oper, 0x28, 0, 16); + /* reg_ptys_eth_proto_lp_advertise * The protocols that were advertised by the link partner during * autonegotiation.
@@ -2204,8 +2315,8 @@ MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32); */ MLXSW_ITEM32(reg, ptys, eth_proto_lp_advertise, 0x30, 0, 32); -static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port, - u32 proto_admin) +static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port, + u32 proto_admin) { MLXSW_REG_ZERO(ptys, payload); mlxsw_reg_ptys_local_port_set(payload, local_port); @@ -2213,9 +2324,10 @@ static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port, mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin); } -static inline void mlxsw_reg_ptys_unpack(char *payload, u32 *p_eth_proto_cap, - u32 *p_eth_proto_adm, - u32 *p_eth_proto_oper) +static inline void mlxsw_reg_ptys_eth_unpack(char *payload, + u32 *p_eth_proto_cap, + u32 *p_eth_proto_adm, + u32 *p_eth_proto_oper) { if (p_eth_proto_cap) *p_eth_proto_cap = mlxsw_reg_ptys_eth_proto_cap_get(payload); @@ -2225,6 +2337,33 @@ static inline void mlxsw_reg_ptys_unpack(char *payload, u32 *p_eth_proto_cap, *p_eth_proto_oper = mlxsw_reg_ptys_eth_proto_oper_get(payload); } +static inline void mlxsw_reg_ptys_ib_pack(char *payload, u8 local_port, + u16 proto_admin, u16 link_width) +{ + MLXSW_REG_ZERO(ptys, payload); + mlxsw_reg_ptys_local_port_set(payload, local_port); + mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_IB); + mlxsw_reg_ptys_ib_proto_admin_set(payload, proto_admin); + mlxsw_reg_ptys_ib_link_width_admin_set(payload, link_width); +} + +static inline void mlxsw_reg_ptys_ib_unpack(char *payload, u16 *p_ib_proto_cap, + u16 *p_ib_link_width_cap, + u16 *p_ib_proto_oper, + u16 *p_ib_link_width_oper) +{ + if (p_ib_proto_cap) + *p_ib_proto_cap = mlxsw_reg_ptys_ib_proto_cap_get(payload); + if (p_ib_link_width_cap) + *p_ib_link_width_cap = + mlxsw_reg_ptys_ib_link_width_cap_get(payload); + if (p_ib_proto_oper) + *p_ib_proto_oper = mlxsw_reg_ptys_ib_proto_oper_get(payload); + if (p_ib_link_width_oper) + *p_ib_link_width_oper = + mlxsw_reg_ptys_ib_link_width_oper_get(payload); +} + /* PPAD - Port Physical Address Register * ------------------------------------- * The PPAD register configures the per port physical MAC address. @@ -2232,10 +2371,7 @@ static inline void mlxsw_reg_ptys_unpack(char *payload, u32 *p_eth_proto_cap, #define MLXSW_REG_PPAD_ID 0x5005 #define MLXSW_REG_PPAD_LEN 0x10 -static const struct mlxsw_reg_info mlxsw_reg_ppad = { - .id = MLXSW_REG_PPAD_ID, - .len = MLXSW_REG_PPAD_LEN, -}; +MLXSW_REG_DEFINE(ppad, MLXSW_REG_PPAD_ID, MLXSW_REG_PPAD_LEN); /* reg_ppad_single_base_mac * 0: base_mac, local port should be 0 and mac[7:0] is @@ -2273,10 +2409,7 @@ static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac, #define MLXSW_REG_PAOS_ID 0x5006 #define MLXSW_REG_PAOS_LEN 0x10 -static const struct mlxsw_reg_info mlxsw_reg_paos = { - .id = MLXSW_REG_PAOS_ID, - .len = MLXSW_REG_PAOS_LEN, -}; +MLXSW_REG_DEFINE(paos, MLXSW_REG_PAOS_ID, MLXSW_REG_PAOS_LEN); /* reg_paos_swid * Switch partition ID with which to associate the port. @@ -2356,10 +2489,7 @@ static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port, #define MLXSW_REG_PFCC_ID 0x5007 #define MLXSW_REG_PFCC_LEN 0x20 -static const struct mlxsw_reg_info mlxsw_reg_pfcc = { - .id = MLXSW_REG_PFCC_ID, - .len = MLXSW_REG_PFCC_LEN, -}; +MLXSW_REG_DEFINE(pfcc, MLXSW_REG_PFCC_ID, MLXSW_REG_PFCC_LEN); /* reg_pfcc_local_port * Local port number. 
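The eth/ib split of the PTYS pack/unpack helpers above lets an InfiniBand-aware caller query port capabilities without touching the Ethernet fields. A minimal sketch, assuming local_port and an mlxsw_core handle in scope and using the existing mlxsw_reg_query() helper:

char ptys_pl[MLXSW_REG_PTYS_LEN];
u16 ib_proto_cap, ib_width_cap;
int err;

/* proto_admin and link_width of 0 make this a pure query */
mlxsw_reg_ptys_ib_pack(ptys_pl, local_port, 0, 0);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(ptys), ptys_pl);
if (err)
	return err;
mlxsw_reg_ptys_ib_unpack(ptys_pl, &ib_proto_cap, &ib_width_cap, NULL, NULL);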
@@ -2495,10 +2625,7 @@ static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port) #define MLXSW_REG_PPCNT_ID 0x5008 #define MLXSW_REG_PPCNT_LEN 0x100 -static const struct mlxsw_reg_info mlxsw_reg_ppcnt = { - .id = MLXSW_REG_PPCNT_ID, - .len = MLXSW_REG_PPCNT_LEN, -}; +MLXSW_REG_DEFINE(ppcnt, MLXSW_REG_PPCNT_ID, MLXSW_REG_PPCNT_LEN); /* reg_ppcnt_swid * For HCA: must be always 0. @@ -2761,6 +2888,27 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port, mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc); } +/* PLIB - Port Local to InfiniBand Port + * ------------------------------------ + * The PLIB register performs mapping from Local Port into InfiniBand Port. + */ +#define MLXSW_REG_PLIB_ID 0x500A +#define MLXSW_REG_PLIB_LEN 0x10 + +MLXSW_REG_DEFINE(plib, MLXSW_REG_PLIB_ID, MLXSW_REG_PLIB_LEN); + +/* reg_plib_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32(reg, plib, local_port, 0x00, 16, 8); + +/* reg_plib_ib_port + * InfiniBand port remapping for local_port. + * Access: RW + */ +MLXSW_ITEM32(reg, plib, ib_port, 0x00, 0, 8); + /* PPTB - Port Prio To Buffer Register * ----------------------------------- * Configures the switch priority to buffer table. @@ -2768,10 +2916,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port, #define MLXSW_REG_PPTB_ID 0x500B #define MLXSW_REG_PPTB_LEN 0x10 -static const struct mlxsw_reg_info mlxsw_reg_pptb = { - .id = MLXSW_REG_PPTB_ID, - .len = MLXSW_REG_PPTB_LEN, -}; +MLXSW_REG_DEFINE(pptb, MLXSW_REG_PPTB_ID, MLXSW_REG_PPTB_LEN); enum { MLXSW_REG_PPTB_MM_UM, @@ -2865,10 +3010,7 @@ static inline void mlxsw_reg_pptb_prio_to_buff_pack(char *payload, u8 prio, #define MLXSW_REG_PBMC_ID 0x500C #define MLXSW_REG_PBMC_LEN 0x6C -static const struct mlxsw_reg_info mlxsw_reg_pbmc = { - .id = MLXSW_REG_PBMC_ID, - .len = MLXSW_REG_PBMC_LEN, -}; +MLXSW_REG_DEFINE(pbmc, MLXSW_REG_PBMC_ID, MLXSW_REG_PBMC_LEN); /* reg_pbmc_local_port * Local port number. @@ -2978,10 +3120,7 @@ static inline void mlxsw_reg_pbmc_lossless_buffer_pack(char *payload, #define MLXSW_REG_PSPA_ID 0x500D #define MLXSW_REG_PSPA_LEN 0x8 -static const struct mlxsw_reg_info mlxsw_reg_pspa = { - .id = MLXSW_REG_PSPA_ID, - .len = MLXSW_REG_PSPA_LEN, -}; +MLXSW_REG_DEFINE(pspa, MLXSW_REG_PSPA_ID, MLXSW_REG_PSPA_LEN); /* reg_pspa_swid * Switch partition ID. @@ -3017,10 +3156,7 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port) #define MLXSW_REG_HTGT_ID 0x7002 #define MLXSW_REG_HTGT_LEN 0x100 -static const struct mlxsw_reg_info mlxsw_reg_htgt = { - .id = MLXSW_REG_HTGT_ID, - .len = MLXSW_REG_HTGT_LEN, -}; +MLXSW_REG_DEFINE(htgt, MLXSW_REG_HTGT_ID, MLXSW_REG_HTGT_LEN); /* reg_htgt_swid * Switch partition ID. 
@@ -3038,8 +3174,21 @@ MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4); enum mlxsw_reg_htgt_trap_group { MLXSW_REG_HTGT_TRAP_GROUP_EMAD, - MLXSW_REG_HTGT_TRAP_GROUP_RX, - MLXSW_REG_HTGT_TRAP_GROUP_CTRL, + MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX, + MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL, + MLXSW_REG_HTGT_TRAP_GROUP_SP_STP, + MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP, + MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP, + MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP, + MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4, + MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF, + MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP, + MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS, + MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP, + MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE, + MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME, + MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP, + MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT, }; /* reg_htgt_trap_group @@ -3061,6 +3210,8 @@ enum { */ MLXSW_ITEM32(reg, htgt, pide, 0x04, 15, 1); +#define MLXSW_REG_HTGT_INVALID_POLICER 0xff + /* reg_htgt_pid * Policer ID for the trap group. * Access: RW @@ -3086,6 +3237,8 @@ MLXSW_ITEM32(reg, htgt, mirror_action, 0x08, 8, 2); */ MLXSW_ITEM32(reg, htgt, mirroring_agent, 0x08, 0, 3); +#define MLXSW_REG_HTGT_DEFAULT_PRIORITY 0 + /* reg_htgt_priority * Trap group priority. * In case a packet matches multiple classification rules, the packet will @@ -3099,52 +3252,47 @@ MLXSW_ITEM32(reg, htgt, mirroring_agent, 0x08, 0, 3); */ MLXSW_ITEM32(reg, htgt, priority, 0x0C, 0, 4); +#define MLXSW_REG_HTGT_DEFAULT_TC 7 + /* reg_htgt_local_path_cpu_tclass * CPU ingress traffic class for the trap group. * Access: RW */ MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6); -#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD 0x15 -#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX 0x14 -#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL 0x13 - +enum mlxsw_reg_htgt_local_path_rdq { + MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_CTRL = 0x13, + MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_RX = 0x14, + MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_EMAD = 0x15, + MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SIB_EMAD = 0x15, +}; /* reg_htgt_local_path_rdq * Receive descriptor queue (RDQ) to use for the trap group. 
* Access: RW */ MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6); -static inline void mlxsw_reg_htgt_pack(char *payload, - enum mlxsw_reg_htgt_trap_group group) +static inline void mlxsw_reg_htgt_pack(char *payload, u8 group, u8 policer_id, + u8 priority, u8 tc) { - u8 swid, rdq; - MLXSW_REG_ZERO(htgt, payload); - switch (group) { - case MLXSW_REG_HTGT_TRAP_GROUP_EMAD: - swid = MLXSW_PORT_SWID_ALL_SWIDS; - rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD; - break; - case MLXSW_REG_HTGT_TRAP_GROUP_RX: - swid = 0; - rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX; - break; - case MLXSW_REG_HTGT_TRAP_GROUP_CTRL: - swid = 0; - rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL; - break; + + if (policer_id == MLXSW_REG_HTGT_INVALID_POLICER) { + mlxsw_reg_htgt_pide_set(payload, + MLXSW_REG_HTGT_POLICER_DISABLE); + } else { + mlxsw_reg_htgt_pide_set(payload, + MLXSW_REG_HTGT_POLICER_ENABLE); + mlxsw_reg_htgt_pid_set(payload, policer_id); } - mlxsw_reg_htgt_swid_set(payload, swid); + mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL); mlxsw_reg_htgt_trap_group_set(payload, group); - mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE); - mlxsw_reg_htgt_pid_set(payload, 0); mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU); mlxsw_reg_htgt_mirroring_agent_set(payload, 0); - mlxsw_reg_htgt_priority_set(payload, 0); - mlxsw_reg_htgt_local_path_cpu_tclass_set(payload, 7); - mlxsw_reg_htgt_local_path_rdq_set(payload, rdq); + mlxsw_reg_htgt_priority_set(payload, priority); + mlxsw_reg_htgt_local_path_cpu_tclass_set(payload, tc); + mlxsw_reg_htgt_local_path_rdq_set(payload, group); } /* HPKT - Host Packet Trap @@ -3154,10 +3302,7 @@ static inline void mlxsw_reg_htgt_pack(char *payload, #define MLXSW_REG_HPKT_ID 0x7003 #define MLXSW_REG_HPKT_LEN 0x10 -static const struct mlxsw_reg_info mlxsw_reg_hpkt = { - .id = MLXSW_REG_HPKT_ID, - .len = MLXSW_REG_HPKT_LEN, -}; +MLXSW_REG_DEFINE(hpkt, MLXSW_REG_HPKT_ID, MLXSW_REG_HPKT_LEN); enum { MLXSW_REG_HPKT_ACK_NOT_REQUIRED, @@ -3221,6 +3366,7 @@ enum { /* reg_hpkt_ctrl * Configure dedicated buffer resources for control packets. + * Ignored by SwitchX-2. * 0 - Keep factory defaults. * 1 - Do not use control buffer for this trap ID. * 2 - Use control buffer for this trap ID. @@ -3228,25 +3374,18 @@ enum { */ MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2); -static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id) +static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id, + enum mlxsw_reg_htgt_trap_group trap_group, + bool is_ctrl) { - enum mlxsw_reg_htgt_trap_group trap_group; - MLXSW_REG_ZERO(hpkt, payload); mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED); mlxsw_reg_hpkt_action_set(payload, action); - switch (trap_id) { - case MLXSW_TRAP_ID_ETHEMAD: - case MLXSW_TRAP_ID_PUDE: - trap_group = MLXSW_REG_HTGT_TRAP_GROUP_EMAD; - break; - default: - trap_group = MLXSW_REG_HTGT_TRAP_GROUP_RX; - break; - } mlxsw_reg_hpkt_trap_group_set(payload, trap_group); mlxsw_reg_hpkt_trap_id_set(payload, trap_id); - mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT); + mlxsw_reg_hpkt_ctrl_set(payload, is_ctrl ? 
+ MLXSW_REG_HPKT_CTRL_PACKET_USE_BUFFER : + MLXSW_REG_HPKT_CTRL_PACKET_NO_BUFFER); } /* RGCR - Router General Configuration Register @@ -3256,10 +3395,7 @@ static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id) #define MLXSW_REG_RGCR_ID 0x8001 #define MLXSW_REG_RGCR_LEN 0x28 -static const struct mlxsw_reg_info mlxsw_reg_rgcr = { - .id = MLXSW_REG_RGCR_ID, - .len = MLXSW_REG_RGCR_LEN, -}; +MLXSW_REG_DEFINE(rgcr, MLXSW_REG_RGCR_ID, MLXSW_REG_RGCR_LEN); /* reg_rgcr_ipv4_en * IPv4 router enable. @@ -3330,10 +3466,7 @@ static inline void mlxsw_reg_rgcr_pack(char *payload, bool ipv4_en) #define MLXSW_REG_RITR_ID 0x8002 #define MLXSW_REG_RITR_LEN 0x40 -static const struct mlxsw_reg_info mlxsw_reg_ritr = { - .id = MLXSW_REG_RITR_ID, - .len = MLXSW_REG_RITR_LEN, -}; +MLXSW_REG_DEFINE(ritr, MLXSW_REG_RITR_ID, MLXSW_REG_RITR_LEN); /* reg_ritr_enable * Enables routing on the router interface. @@ -3533,10 +3666,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable, #define MLXSW_REG_RATR_ID 0x8008 #define MLXSW_REG_RATR_LEN 0x2C -static const struct mlxsw_reg_info mlxsw_reg_ratr = { - .id = MLXSW_REG_RATR_ID, - .len = MLXSW_REG_RATR_LEN, -}; +MLXSW_REG_DEFINE(ratr, MLXSW_REG_RATR_ID, MLXSW_REG_RATR_LEN); enum mlxsw_reg_ratr_op { /* Read */ @@ -3663,10 +3793,7 @@ static inline void mlxsw_reg_ratr_eth_entry_pack(char *payload, #define MLXSW_REG_RALTA_ID 0x8010 #define MLXSW_REG_RALTA_LEN 0x04 -static const struct mlxsw_reg_info mlxsw_reg_ralta = { - .id = MLXSW_REG_RALTA_ID, - .len = MLXSW_REG_RALTA_LEN, -}; +MLXSW_REG_DEFINE(ralta, MLXSW_REG_RALTA_ID, MLXSW_REG_RALTA_LEN); /* reg_ralta_op * opcode (valid for Write, must be 0 on Read) @@ -3718,10 +3845,7 @@ static inline void mlxsw_reg_ralta_pack(char *payload, bool alloc, #define MLXSW_REG_RALST_ID 0x8011 #define MLXSW_REG_RALST_LEN 0x104 -static const struct mlxsw_reg_info mlxsw_reg_ralst = { - .id = MLXSW_REG_RALST_ID, - .len = MLXSW_REG_RALST_LEN, -}; +MLXSW_REG_DEFINE(ralst, MLXSW_REG_RALST_ID, MLXSW_REG_RALST_LEN); /* reg_ralst_root_bin * The bin number of the root bin. @@ -3788,10 +3912,7 @@ static inline void mlxsw_reg_ralst_bin_pack(char *payload, u8 bin_number, #define MLXSW_REG_RALTB_ID 0x8012 #define MLXSW_REG_RALTB_LEN 0x04 -static const struct mlxsw_reg_info mlxsw_reg_raltb = { - .id = MLXSW_REG_RALTB_ID, - .len = MLXSW_REG_RALTB_LEN, -}; +MLXSW_REG_DEFINE(raltb, MLXSW_REG_RALTB_ID, MLXSW_REG_RALTB_LEN); /* reg_raltb_virtual_router * Virtual Router ID @@ -3832,10 +3953,7 @@ static inline void mlxsw_reg_raltb_pack(char *payload, u16 virtual_router, #define MLXSW_REG_RALUE_ID 0x8013 #define MLXSW_REG_RALUE_LEN 0x38 -static const struct mlxsw_reg_info mlxsw_reg_ralue = { - .id = MLXSW_REG_RALUE_ID, - .len = MLXSW_REG_RALUE_LEN, -}; +MLXSW_REG_DEFINE(ralue, MLXSW_REG_RALUE_ID, MLXSW_REG_RALUE_LEN); /* reg_ralue_protocol * Protocol. 
@@ -4095,10 +4213,7 @@ mlxsw_reg_ralue_act_ip2me_pack(char *payload) #define MLXSW_REG_RAUHT_ID 0x8014 #define MLXSW_REG_RAUHT_LEN 0x74 -static const struct mlxsw_reg_info mlxsw_reg_rauht = { - .id = MLXSW_REG_RAUHT_ID, - .len = MLXSW_REG_RAUHT_LEN, -}; +MLXSW_REG_DEFINE(rauht, MLXSW_REG_RAUHT_ID, MLXSW_REG_RAUHT_LEN); enum mlxsw_reg_rauht_type { MLXSW_REG_RAUHT_TYPE_IPV4, @@ -4234,10 +4349,7 @@ static inline void mlxsw_reg_rauht_pack4(char *payload, #define MLXSW_REG_RALEU_ID 0x8015 #define MLXSW_REG_RALEU_LEN 0x28 -static const struct mlxsw_reg_info mlxsw_reg_raleu = { - .id = MLXSW_REG_RALEU_ID, - .len = MLXSW_REG_RALEU_LEN, -}; +MLXSW_REG_DEFINE(raleu, MLXSW_REG_RALEU_ID, MLXSW_REG_RALEU_LEN); /* reg_raleu_protocol * Protocol. @@ -4309,10 +4421,7 @@ static inline void mlxsw_reg_raleu_pack(char *payload, MLXSW_REG_RAUHTD_REC_MAX_NUM * MLXSW_REG_RAUHTD_REC_LEN) #define MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC 4 -static const struct mlxsw_reg_info mlxsw_reg_rauhtd = { - .id = MLXSW_REG_RAUHTD_ID, - .len = MLXSW_REG_RAUHTD_LEN, -}; +MLXSW_REG_DEFINE(rauhtd, MLXSW_REG_RAUHTD_ID, MLXSW_REG_RAUHTD_LEN); #define MLXSW_REG_RAUHTD_FILTER_A BIT(0) #define MLXSW_REG_RAUHTD_FILTER_RIF BIT(3) @@ -4444,10 +4553,7 @@ static inline void mlxsw_reg_rauhtd_ent_ipv4_unpack(char *payload, #define MLXSW_REG_MFCR_ID 0x9001 #define MLXSW_REG_MFCR_LEN 0x08 -static const struct mlxsw_reg_info mlxsw_reg_mfcr = { - .id = MLXSW_REG_MFCR_ID, - .len = MLXSW_REG_MFCR_LEN, -}; +MLXSW_REG_DEFINE(mfcr, MLXSW_REG_MFCR_ID, MLXSW_REG_MFCR_LEN); enum mlxsw_reg_mfcr_pwm_frequency { MLXSW_REG_MFCR_PWM_FEQ_11HZ = 0x00, @@ -4464,7 +4570,7 @@ enum mlxsw_reg_mfcr_pwm_frequency { * Controls the frequency of the PWM signal. * Access: RW */ -MLXSW_ITEM32(reg, mfcr, pwm_frequency, 0x00, 0, 6); +MLXSW_ITEM32(reg, mfcr, pwm_frequency, 0x00, 0, 7); #define MLXSW_MFCR_TACHOS_MAX 10 @@ -4507,10 +4613,7 @@ mlxsw_reg_mfcr_unpack(char *payload, #define MLXSW_REG_MFSC_ID 0x9002 #define MLXSW_REG_MFSC_LEN 0x08 -static const struct mlxsw_reg_info mlxsw_reg_mfsc = { - .id = MLXSW_REG_MFSC_ID, - .len = MLXSW_REG_MFSC_LEN, -}; +MLXSW_REG_DEFINE(mfsc, MLXSW_REG_MFSC_ID, MLXSW_REG_MFSC_LEN); /* reg_mfsc_pwm * Fan pwm to control / monitor. @@ -4541,10 +4644,7 @@ static inline void mlxsw_reg_mfsc_pack(char *payload, u8 pwm, #define MLXSW_REG_MFSM_ID 0x9003 #define MLXSW_REG_MFSM_LEN 0x08 -static const struct mlxsw_reg_info mlxsw_reg_mfsm = { - .id = MLXSW_REG_MFSM_ID, - .len = MLXSW_REG_MFSM_LEN, -}; +MLXSW_REG_DEFINE(mfsm, MLXSW_REG_MFSM_ID, MLXSW_REG_MFSM_LEN); /* reg_mfsm_tacho * Fan tachometer index. @@ -4564,6 +4664,54 @@ static inline void mlxsw_reg_mfsm_pack(char *payload, u8 tacho) mlxsw_reg_mfsm_tacho_set(payload, tacho); } +/* MFSL - Management Fan Speed Limit Register + * ------------------------------------------ + * The Fan Speed Limit register is used to configure the fan speed + * event / interrupt notification mechanism. Fan speed threshold are + * defined for both under-speed and over-speed. + */ +#define MLXSW_REG_MFSL_ID 0x9004 +#define MLXSW_REG_MFSL_LEN 0x0C + +MLXSW_REG_DEFINE(mfsl, MLXSW_REG_MFSL_ID, MLXSW_REG_MFSL_LEN); + +/* reg_mfsl_tacho + * Fan tachometer index. + * Access: Index + */ +MLXSW_ITEM32(reg, mfsl, tacho, 0x00, 24, 4); + +/* reg_mfsl_tach_min + * Tachometer minimum value (minimum RPM). + * Access: RW + */ +MLXSW_ITEM32(reg, mfsl, tach_min, 0x04, 0, 16); + +/* reg_mfsl_tach_max + * Tachometer maximum value (maximum RPM). 
+ * Access: RW + */ +MLXSW_ITEM32(reg, mfsl, tach_max, 0x08, 0, 16); + +static inline void mlxsw_reg_mfsl_pack(char *payload, u8 tacho, + u16 tach_min, u16 tach_max) +{ + MLXSW_REG_ZERO(mfsl, payload); + mlxsw_reg_mfsl_tacho_set(payload, tacho); + mlxsw_reg_mfsl_tach_min_set(payload, tach_min); + mlxsw_reg_mfsl_tach_max_set(payload, tach_max); +} + +static inline void mlxsw_reg_mfsl_unpack(char *payload, u8 tacho, + u16 *p_tach_min, u16 *p_tach_max) +{ + if (p_tach_min) + *p_tach_min = mlxsw_reg_mfsl_tach_min_get(payload); + + if (p_tach_max) + *p_tach_max = mlxsw_reg_mfsl_tach_max_get(payload); +} + /* MTCAP - Management Temperature Capabilities * ------------------------------------------- * This register exposes the capabilities of the device and @@ -4572,10 +4720,7 @@ static inline void mlxsw_reg_mfsm_pack(char *payload, u8 tacho) #define MLXSW_REG_MTCAP_ID 0x9009 #define MLXSW_REG_MTCAP_LEN 0x08 -static const struct mlxsw_reg_info mlxsw_reg_mtcap = { - .id = MLXSW_REG_MTCAP_ID, - .len = MLXSW_REG_MTCAP_LEN, -}; +MLXSW_REG_DEFINE(mtcap, MLXSW_REG_MTCAP_ID, MLXSW_REG_MTCAP_LEN); /* reg_mtcap_sensor_count * Number of sensors supported by the device. @@ -4593,10 +4738,7 @@ MLXSW_ITEM32(reg, mtcap, sensor_count, 0x00, 0, 7); #define MLXSW_REG_MTMP_ID 0x900A #define MLXSW_REG_MTMP_LEN 0x20 -static const struct mlxsw_reg_info mlxsw_reg_mtmp = { - .id = MLXSW_REG_MTMP_ID, - .len = MLXSW_REG_MTMP_LEN, -}; +MLXSW_REG_DEFINE(mtmp, MLXSW_REG_MTMP_ID, MLXSW_REG_MTMP_LEN); /* reg_mtmp_sensor_index * Sensors index to access. @@ -4679,10 +4821,7 @@ static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp, #define MLXSW_REG_MPAT_ID 0x901A #define MLXSW_REG_MPAT_LEN 0x78 -static const struct mlxsw_reg_info mlxsw_reg_mpat = { - .id = MLXSW_REG_MPAT_ID, - .len = MLXSW_REG_MPAT_LEN, -}; +MLXSW_REG_DEFINE(mpat, MLXSW_REG_MPAT_ID, MLXSW_REG_MPAT_LEN); /* reg_mpat_pa_id * Port Analyzer ID. @@ -4742,10 +4881,7 @@ static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id, #define MLXSW_REG_MPAR_ID 0x901B #define MLXSW_REG_MPAR_LEN 0x08 -static const struct mlxsw_reg_info mlxsw_reg_mpar = { - .id = MLXSW_REG_MPAR_ID, - .len = MLXSW_REG_MPAR_LEN, -}; +MLXSW_REG_DEFINE(mpar, MLXSW_REG_MPAR_ID, MLXSW_REG_MPAR_LEN); /* reg_mpar_local_port * The local port to mirror the packets from. @@ -4795,10 +4931,7 @@ static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port, #define MLXSW_REG_MLCR_ID 0x902B #define MLXSW_REG_MLCR_LEN 0x0C -static const struct mlxsw_reg_info mlxsw_reg_mlcr = { - .id = MLXSW_REG_MLCR_ID, - .len = MLXSW_REG_MLCR_LEN, -}; +MLXSW_REG_DEFINE(mlcr, MLXSW_REG_MLCR_ID, MLXSW_REG_MLCR_LEN); /* reg_mlcr_local_port * Local port number. @@ -4839,10 +4972,7 @@ static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port, #define MLXSW_REG_SBPR_ID 0xB001 #define MLXSW_REG_SBPR_LEN 0x14 -static const struct mlxsw_reg_info mlxsw_reg_sbpr = { - .id = MLXSW_REG_SBPR_ID, - .len = MLXSW_REG_SBPR_LEN, -}; +MLXSW_REG_DEFINE(sbpr, MLXSW_REG_SBPR_ID, MLXSW_REG_SBPR_LEN); /* shared direstion enum for SBPR, SBCM, SBPM */ enum mlxsw_reg_sbxx_dir { @@ -4899,10 +5029,7 @@ static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool, #define MLXSW_REG_SBCM_ID 0xB002 #define MLXSW_REG_SBCM_LEN 0x28 -static const struct mlxsw_reg_info mlxsw_reg_sbcm = { - .id = MLXSW_REG_SBCM_ID, - .len = MLXSW_REG_SBCM_LEN, -}; +MLXSW_REG_DEFINE(sbcm, MLXSW_REG_SBCM_ID, MLXSW_REG_SBCM_LEN); /* reg_sbcm_local_port * Local port number. 
@@ -4979,10 +5106,7 @@ static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff, #define MLXSW_REG_SBPM_ID 0xB003 #define MLXSW_REG_SBPM_LEN 0x28 -static const struct mlxsw_reg_info mlxsw_reg_sbpm = { - .id = MLXSW_REG_SBPM_ID, - .len = MLXSW_REG_SBPM_LEN, -}; +MLXSW_REG_DEFINE(sbpm, MLXSW_REG_SBPM_ID, MLXSW_REG_SBPM_LEN); /* reg_sbpm_local_port * Local port number. @@ -5073,10 +5197,7 @@ static inline void mlxsw_reg_sbpm_unpack(char *payload, u32 *p_buff_occupancy, #define MLXSW_REG_SBMM_ID 0xB004 #define MLXSW_REG_SBMM_LEN 0x28 -static const struct mlxsw_reg_info mlxsw_reg_sbmm = { - .id = MLXSW_REG_SBMM_ID, - .len = MLXSW_REG_SBMM_LEN, -}; +MLXSW_REG_DEFINE(sbmm, MLXSW_REG_SBMM_ID, MLXSW_REG_SBMM_LEN); /* reg_sbmm_prio * Switch Priority. @@ -5135,10 +5256,7 @@ static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff, MLXSW_REG_SBSR_REC_LEN * \ MLXSW_REG_SBSR_REC_MAX_COUNT) -static const struct mlxsw_reg_info mlxsw_reg_sbsr = { - .id = MLXSW_REG_SBSR_ID, - .len = MLXSW_REG_SBSR_LEN, -}; +MLXSW_REG_DEFINE(sbsr, MLXSW_REG_SBSR_ID, MLXSW_REG_SBSR_LEN); /* reg_sbsr_clr * Clear Max Buffer Occupancy. When this bit is set, the max_buff_occupancy @@ -5228,10 +5346,7 @@ static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index, #define MLXSW_REG_SBIB_ID 0xB006 #define MLXSW_REG_SBIB_LEN 0x10 -static const struct mlxsw_reg_info mlxsw_reg_sbib = { - .id = MLXSW_REG_SBIB_ID, - .len = MLXSW_REG_SBIB_LEN, -}; +MLXSW_REG_DEFINE(sbib, MLXSW_REG_SBIB_ID, MLXSW_REG_SBIB_LEN); /* reg_sbib_local_port * Local port number @@ -5256,132 +5371,83 @@ static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port, mlxsw_reg_sbib_buff_size_set(payload, buff_size); } +static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { + MLXSW_REG(sgcr), + MLXSW_REG(spad), + MLXSW_REG(smid), + MLXSW_REG(sspr), + MLXSW_REG(sfdat), + MLXSW_REG(sfd), + MLXSW_REG(sfn), + MLXSW_REG(spms), + MLXSW_REG(spvid), + MLXSW_REG(spvm), + MLXSW_REG(spaft), + MLXSW_REG(sfgc), + MLXSW_REG(sftr), + MLXSW_REG(sfdf), + MLXSW_REG(sldr), + MLXSW_REG(slcr), + MLXSW_REG(slcor), + MLXSW_REG(spmlr), + MLXSW_REG(svfa), + MLXSW_REG(svpe), + MLXSW_REG(sfmr), + MLXSW_REG(spvmlr), + MLXSW_REG(qpcr), + MLXSW_REG(qtct), + MLXSW_REG(qeec), + MLXSW_REG(pmlp), + MLXSW_REG(pmtu), + MLXSW_REG(ptys), + MLXSW_REG(ppad), + MLXSW_REG(paos), + MLXSW_REG(pfcc), + MLXSW_REG(ppcnt), + MLXSW_REG(plib), + MLXSW_REG(pptb), + MLXSW_REG(pbmc), + MLXSW_REG(pspa), + MLXSW_REG(htgt), + MLXSW_REG(hpkt), + MLXSW_REG(rgcr), + MLXSW_REG(ritr), + MLXSW_REG(ratr), + MLXSW_REG(ralta), + MLXSW_REG(ralst), + MLXSW_REG(raltb), + MLXSW_REG(ralue), + MLXSW_REG(rauht), + MLXSW_REG(raleu), + MLXSW_REG(rauhtd), + MLXSW_REG(mfcr), + MLXSW_REG(mfsc), + MLXSW_REG(mfsm), + MLXSW_REG(mfsl), + MLXSW_REG(mtcap), + MLXSW_REG(mtmp), + MLXSW_REG(mpat), + MLXSW_REG(mpar), + MLXSW_REG(mlcr), + MLXSW_REG(sbpr), + MLXSW_REG(sbcm), + MLXSW_REG(sbpm), + MLXSW_REG(sbmm), + MLXSW_REG(sbsr), + MLXSW_REG(sbib), +}; + static inline const char *mlxsw_reg_id_str(u16 reg_id) { - switch (reg_id) { - case MLXSW_REG_SGCR_ID: - return "SGCR"; - case MLXSW_REG_SPAD_ID: - return "SPAD"; - case MLXSW_REG_SMID_ID: - return "SMID"; - case MLXSW_REG_SSPR_ID: - return "SSPR"; - case MLXSW_REG_SFDAT_ID: - return "SFDAT"; - case MLXSW_REG_SFD_ID: - return "SFD"; - case MLXSW_REG_SFN_ID: - return "SFN"; - case MLXSW_REG_SPMS_ID: - return "SPMS"; - case MLXSW_REG_SPVID_ID: - return "SPVID"; - case MLXSW_REG_SPVM_ID: - return "SPVM"; - case MLXSW_REG_SPAFT_ID: - return 
"SPAFT"; - case MLXSW_REG_SFGC_ID: - return "SFGC"; - case MLXSW_REG_SFTR_ID: - return "SFTR"; - case MLXSW_REG_SFDF_ID: - return "SFDF"; - case MLXSW_REG_SLDR_ID: - return "SLDR"; - case MLXSW_REG_SLCR_ID: - return "SLCR"; - case MLXSW_REG_SLCOR_ID: - return "SLCOR"; - case MLXSW_REG_SPMLR_ID: - return "SPMLR"; - case MLXSW_REG_SVFA_ID: - return "SVFA"; - case MLXSW_REG_SVPE_ID: - return "SVPE"; - case MLXSW_REG_SFMR_ID: - return "SFMR"; - case MLXSW_REG_SPVMLR_ID: - return "SPVMLR"; - case MLXSW_REG_QTCT_ID: - return "QTCT"; - case MLXSW_REG_QEEC_ID: - return "QEEC"; - case MLXSW_REG_PMLP_ID: - return "PMLP"; - case MLXSW_REG_PMTU_ID: - return "PMTU"; - case MLXSW_REG_PTYS_ID: - return "PTYS"; - case MLXSW_REG_PPAD_ID: - return "PPAD"; - case MLXSW_REG_PAOS_ID: - return "PAOS"; - case MLXSW_REG_PFCC_ID: - return "PFCC"; - case MLXSW_REG_PPCNT_ID: - return "PPCNT"; - case MLXSW_REG_PPTB_ID: - return "PPTB"; - case MLXSW_REG_PBMC_ID: - return "PBMC"; - case MLXSW_REG_PSPA_ID: - return "PSPA"; - case MLXSW_REG_HTGT_ID: - return "HTGT"; - case MLXSW_REG_HPKT_ID: - return "HPKT"; - case MLXSW_REG_RGCR_ID: - return "RGCR"; - case MLXSW_REG_RITR_ID: - return "RITR"; - case MLXSW_REG_RATR_ID: - return "RATR"; - case MLXSW_REG_RALTA_ID: - return "RALTA"; - case MLXSW_REG_RALST_ID: - return "RALST"; - case MLXSW_REG_RALTB_ID: - return "RALTB"; - case MLXSW_REG_RALUE_ID: - return "RALUE"; - case MLXSW_REG_RAUHT_ID: - return "RAUHT"; - case MLXSW_REG_RALEU_ID: - return "RALEU"; - case MLXSW_REG_RAUHTD_ID: - return "RAUHTD"; - case MLXSW_REG_MFCR_ID: - return "MFCR"; - case MLXSW_REG_MFSC_ID: - return "MFSC"; - case MLXSW_REG_MFSM_ID: - return "MFSM"; - case MLXSW_REG_MTCAP_ID: - return "MTCAP"; - case MLXSW_REG_MPAT_ID: - return "MPAT"; - case MLXSW_REG_MPAR_ID: - return "MPAR"; - case MLXSW_REG_MTMP_ID: - return "MTMP"; - case MLXSW_REG_MLCR_ID: - return "MLCR"; - case MLXSW_REG_SBPR_ID: - return "SBPR"; - case MLXSW_REG_SBCM_ID: - return "SBCM"; - case MLXSW_REG_SBPM_ID: - return "SBPM"; - case MLXSW_REG_SBMM_ID: - return "SBMM"; - case MLXSW_REG_SBSR_ID: - return "SBSR"; - case MLXSW_REG_SBIB_ID: - return "SBIB"; - default: - return "*UNKNOWN*"; + const struct mlxsw_reg_info *reg_info; + int i; + + for (i = 0; i < ARRAY_SIZE(mlxsw_reg_infos); i++) { + reg_info = mlxsw_reg_infos[i]; + if (reg_info->id == reg_id) + return reg_info->name; } + return "*UNKNOWN*"; } /* PUDE - Port Up / Down Event diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h new file mode 100644 index 000000000000..3c2171dbdba4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -0,0 +1,127 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/resources.h + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_RESOURCES_H +#define _MLXSW_RESOURCES_H + +#include <linux/kernel.h> +#include <linux/types.h> + +enum mlxsw_res_id { + MLXSW_RES_ID_KVD_SIZE, + MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE, + MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE, + MLXSW_RES_ID_MAX_TRAP_GROUPS, + MLXSW_RES_ID_MAX_SPAN, + MLXSW_RES_ID_MAX_SYSTEM_PORT, + MLXSW_RES_ID_MAX_LAG, + MLXSW_RES_ID_MAX_LAG_MEMBERS, + MLXSW_RES_ID_MAX_BUFFER_SIZE, + MLXSW_RES_ID_MAX_CPU_POLICERS, + MLXSW_RES_ID_MAX_VRS, + MLXSW_RES_ID_MAX_RIFS, + + /* Internal resources. + * Determined by the SW, not queried from the HW. + */ + MLXSW_RES_ID_KVD_SINGLE_SIZE, + MLXSW_RES_ID_KVD_DOUBLE_SIZE, + MLXSW_RES_ID_KVD_LINEAR_SIZE, + + __MLXSW_RES_ID_MAX, +}; + +static u16 mlxsw_res_ids[] = { + [MLXSW_RES_ID_KVD_SIZE] = 0x1001, + [MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002, + [MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003, + [MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201, + [MLXSW_RES_ID_MAX_SPAN] = 0x2420, + [MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502, + [MLXSW_RES_ID_MAX_LAG] = 0x2520, + [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521, + [MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */ + [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13, + [MLXSW_RES_ID_MAX_VRS] = 0x2C01, + [MLXSW_RES_ID_MAX_RIFS] = 0x2C02, +}; + +struct mlxsw_res { + bool valid[__MLXSW_RES_ID_MAX]; + u64 values[__MLXSW_RES_ID_MAX]; +}; + +static inline bool mlxsw_res_valid(struct mlxsw_res *res, + enum mlxsw_res_id res_id) +{ + return res->valid[res_id]; +} + +#define MLXSW_RES_VALID(res, short_res_id) \ + mlxsw_res_valid(res, MLXSW_RES_ID_##short_res_id) + +static inline u64 mlxsw_res_get(struct mlxsw_res *res, + enum mlxsw_res_id res_id) +{ + if (WARN_ON(!res->valid[res_id])) + return 0; + return res->values[res_id]; +} + +#define MLXSW_RES_GET(res, short_res_id) \ + mlxsw_res_get(res, MLXSW_RES_ID_##short_res_id) + +static inline void mlxsw_res_set(struct mlxsw_res *res, + enum mlxsw_res_id res_id, u64 value) +{ + res->valid[res_id] = true; + res->values[res_id] = value; +} + +#define MLXSW_RES_SET(res, short_res_id, value) \ + mlxsw_res_set(res, MLXSW_RES_ID_##short_res_id, value) + +static inline void mlxsw_res_parse(struct mlxsw_res *res, u16 id, u64 value) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mlxsw_res_ids); i++) { + if (mlxsw_res_ids[i] == id) { + mlxsw_res_set(res, i, value); + return; + } + } +} + +#endif diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index dda5761e91bc..d768c7b6c6d6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -37,6 +37,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> +#include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> @@ -53,12 +54,12 @@ #include <linux/dcbnl.h> #include <linux/inetdevice.h> #include <net/switchdev.h> -#include <generated/utsrelease.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_mirred.h> #include <net/netevent.h> #include "spectrum.h" +#include "pci.h" #include "core.h" #include "reg.h" #include "port.h" @@ -156,7 +157,7 @@ static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) { - char spad_pl[MLXSW_REG_SPAD_LEN]; + char spad_pl[MLXSW_REG_SPAD_LEN] = {0}; int err; err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl); @@ -168,14 +169,13 @@ static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp) { - struct mlxsw_resources *resources; int i; - resources = mlxsw_core_resources_get(mlxsw_sp->core); - if (!resources->max_span_valid) + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN)) return -EIO; - mlxsw_sp->span.entries_count = resources->max_span; + mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, + MAX_SPAN); mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count, sizeof(struct mlxsw_sp_span_entry), GFP_KERNEL); @@ -857,7 +857,7 @@ mlxsw_sp_port_get_sw_stats64(const struct net_device *dev, return 0; } -static bool mlxsw_sp_port_has_offload_stats(int attr_id) +static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id) { switch (attr_id) { case IFLA_OFFLOAD_XSTATS_CPU_HIT: @@ -1239,8 +1239,10 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, tcf_exts_to_list(cls->exts, &actions); list_for_each_entry(a, &actions, list) { - if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL)) + if (!is_tcf_mirred_egress_mirror(a) || + protocol != htons(ETH_P_ALL)) { return -ENOTSUPP; + } err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, cls, a, ingress); @@ -1413,7 +1415,7 @@ err_port_pause_configure: struct mlxsw_sp_port_hw_stats { char str[ETH_GSTRING_LEN]; - u64 (*getter)(char *payload); + u64 (*getter)(const char *payload); }; static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { @@ -1534,7 +1536,7 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) -static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(char *ppcnt_pl) +static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(const char *ppcnt_pl) { u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); @@ -2002,12 +2004,12 @@ static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, int err; autoneg = mlxsw_sp_port->link.autoneg; - mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0); + mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); if (err) return err; - mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, - &eth_proto_oper); + mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, + &eth_proto_oper);
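For reference, the MLXSW_REG_DEFINE() and MLXSW_REG() helpers that the reg.h hunks above convert every register to are defined earlier in reg.h and are not part of this excerpt. The following is a sketch reconstructed from their call sites (an assumption, not the verbatim upstream definition); the name field is what the new table-driven mlxsw_reg_id_str() returns:

struct mlxsw_reg_info {
	u16 id;
	u16 len;		/* payload length in bytes */
	const char *name;	/* returned by mlxsw_reg_id_str() */
};

#define MLXSW_REG_DEFINE(_name, _id, _len)			\
static const struct mlxsw_reg_info mlxsw_reg_##_name = {	\
	.id = _id,						\
	.len = _len,						\
	.name = #_name,						\
}

#define MLXSW_REG(type) (&mlxsw_reg_##type)

With this shape, adding a register takes one MLXSW_REG_DEFINE() line plus one MLXSW_REG() row in mlxsw_reg_infos[], and the hand-written id-to-string switch deleted above becomes unnecessary.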
mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd); @@ -2036,11 +2038,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev, bool autoneg; int err; - mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0); + mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); if (err) return err; - mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, NULL, NULL); + mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL); autoneg = cmd->base.autoneg == AUTONEG_ENABLE; eth_proto_new = autoneg ? @@ -2053,7 +2055,8 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev, return -EINVAL; } - mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new); + mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, + eth_proto_new); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); if (err) return err; @@ -2091,8 +2094,8 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) u32 eth_proto_admin; eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed); - mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, - eth_proto_admin); + mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, + eth_proto_admin); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); } @@ -2210,8 +2213,8 @@ static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port) return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); } -static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, - bool split, u8 module, u8 width, u8 lane) +static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, + bool split, u8 module, u8 width, u8 lane) { struct mlxsw_sp_port *mlxsw_sp_port; struct net_device *dev; @@ -2221,6 +2224,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); if (!dev) return -ENOMEM; + SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev); mlxsw_sp_port = netdev_priv(dev); mlxsw_sp_port->dev = dev; mlxsw_sp_port->mlxsw_sp = mlxsw_sp; @@ -2284,6 +2288,9 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; dev->hw_features |= NETIF_F_HW_TC; + dev->min_mtu = 0; + dev->max_mtu = ETH_MAX_MTU; + /* Each packet needs to have a Tx header (metadata) on top all other * headers. 
*/ @@ -2352,20 +2359,12 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_register_netdev; } - err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port, - mlxsw_sp_port->local_port, dev, - mlxsw_sp_port->split, module); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", - mlxsw_sp_port->local_port); - goto err_core_port_init; - } - + mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, + mlxsw_sp_port, dev, mlxsw_sp_port->split, + module); mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0); return 0; -err_core_port_init: - unregister_netdev(dev); err_register_netdev: mlxsw_sp->ports[local_port] = NULL; mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); @@ -2394,14 +2393,34 @@ err_port_active_vlans_alloc: return err; } -static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) +static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, + bool split, u8 module, u8 width, u8 lane) +{ + int err; + + err = mlxsw_core_port_init(mlxsw_sp->core, local_port); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", + local_port); + return err; + } + err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, + module, width, lane); + if (err) + goto err_port_create; + return 0; + +err_port_create: + mlxsw_core_port_fini(mlxsw_sp->core, local_port); + return err; +} + +static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; - if (!mlxsw_sp_port) - return; cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw); - mlxsw_core_port_fini(&mlxsw_sp_port->core_port); + mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ mlxsw_sp->ports[local_port] = NULL; mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); @@ -2417,12 +2436,24 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) free_netdev(mlxsw_sp_port->dev); } +static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) +{ + __mlxsw_sp_port_remove(mlxsw_sp, local_port); + mlxsw_core_port_fini(mlxsw_sp->core, local_port); +} + +static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) +{ + return mlxsw_sp->ports[local_port] != NULL; +} + static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) { int i; for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) - mlxsw_sp_port_remove(mlxsw_sp, i); + if (mlxsw_sp_port_created(mlxsw_sp, i)) + mlxsw_sp_port_remove(mlxsw_sp, i); kfree(mlxsw_sp->ports); } @@ -2446,8 +2477,8 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) if (!width) continue; mlxsw_sp->port_to_module[i] = module; - err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width, - lane); + err = mlxsw_sp_port_create(mlxsw_sp, i, false, + module, width, lane); if (err) goto err_port_create; } @@ -2456,7 +2487,8 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) err_port_create: err_port_module_info_get: for (i--; i >= 1; i--) - mlxsw_sp_port_remove(mlxsw_sp, i); + if (mlxsw_sp_port_created(mlxsw_sp, i)) + mlxsw_sp_port_remove(mlxsw_sp, i); kfree(mlxsw_sp->ports); return err; } @@ -2498,7 +2530,8 @@ static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, err_port_create: for (i--; i >= 0; i--) - mlxsw_sp_port_remove(mlxsw_sp, base_port + i); + if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) + mlxsw_sp_port_remove(mlxsw_sp, 
base_port + i); i = count; err_port_swid_set: for (i--; i >= 0; i--) @@ -2588,7 +2621,8 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, } for (i = 0; i < count; i++) - mlxsw_sp_port_remove(mlxsw_sp, base_port + i); + if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) + mlxsw_sp_port_remove(mlxsw_sp, base_port + i); err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count); if (err) { @@ -2633,7 +2667,8 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) base_port = base_port + 2; for (i = 0; i < count; i++) - mlxsw_sp_port_remove(mlxsw_sp, base_port + i); + if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) + mlxsw_sp_port_remove(mlxsw_sp, base_port + i); mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); @@ -2663,54 +2698,8 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, } } -static struct mlxsw_event_listener mlxsw_sp_pude_event = { - .func = mlxsw_sp_pude_event_func, - .trap_id = MLXSW_TRAP_ID_PUDE, -}; - -static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp, - enum mlxsw_event_trap_id trap_id) -{ - struct mlxsw_event_listener *el; - char hpkt_pl[MLXSW_REG_HPKT_LEN]; - int err; - - switch (trap_id) { - case MLXSW_TRAP_ID_PUDE: - el = &mlxsw_sp_pude_event; - break; - } - err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp); - if (err) - return err; - - mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); - if (err) - goto err_event_trap_set; - - return 0; - -err_event_trap_set: - mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp); - return err; -} - -static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp, - enum mlxsw_event_trap_id trap_id) -{ - struct mlxsw_event_listener *el; - - switch (trap_id) { - case MLXSW_TRAP_ID_PUDE: - el = &mlxsw_sp_pude_event; - break; - } - mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp); -} - -static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port, - void *priv) +static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, + u8 local_port, void *priv) { struct mlxsw_sp *mlxsw_sp = priv; struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; @@ -2738,107 +2727,212 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, void *priv) { skb->offload_fwd_mark = 1; - return mlxsw_sp_rx_listener_func(skb, local_port, priv); + return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); +} + +#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ + MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \ + _is_ctrl, SP_##_trap_group, DISCARD) + +#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ + MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ + _is_ctrl, SP_##_trap_group, DISCARD) + +#define MLXSW_SP_EVENTL(_func, _trap_id) \ + MLXSW_EVENTL(_func, _trap_id, SP_EVENT) + +static const struct mlxsw_listener mlxsw_sp_listener[] = { + /* Events */ + MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE), + /* L2 traps */ + MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true), + MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true), + MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true), + MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false), + MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false), + MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false), + 
MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false), + MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false), + MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), + MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), + MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), + /* L3 traps */ + MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false), + MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), + MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), + MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false), + MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false), +}; + +static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) +{ + char qpcr_pl[MLXSW_REG_QPCR_LEN]; + enum mlxsw_reg_qpcr_ir_units ir_units; + int max_cpu_policers; + bool is_bytes; + u8 burst_size; + u32 rate; + int i, err; + + if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) + return -EIO; + + max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); + + ir_units = MLXSW_REG_QPCR_IR_UNITS_M; + for (i = 0; i < max_cpu_policers; i++) { + is_bytes = false; + switch (i) { + case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: + rate = 128; + burst_size = 7; + break; + case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: + rate = 16 * 1024; + burst_size = 10; + break; + case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: + rate = 1024; + burst_size = 7; + break; + case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: + is_bytes = true; + rate = 4 * 1024; + burst_size = 4; + break; + default: + continue; + } + + mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate, + burst_size); + err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl); + if (err) + return err; + } + + return 0; } -#define MLXSW_SP_RXL(_func, _trap_id, _action) \ - { \ - .func = _func, \ - .local_port = MLXSW_PORT_DONT_CARE, \ - .trap_id = MLXSW_TRAP_ID_##_trap_id, \ - .action = MLXSW_REG_HPKT_ACTION_##_action, \ +static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) +{ + char htgt_pl[MLXSW_REG_HTGT_LEN]; + enum mlxsw_reg_htgt_trap_group i; + int max_cpu_policers; + int max_trap_groups; + u8 priority, tc; + u16 policer_id; + int err; + + if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS)) + return -EIO; + + max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS); + max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); + + for (i = 0; i < max_trap_groups; i++) { + policer_id = i; + switch (i) { + case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: + priority = 5; + tc = 5; + break; + case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: + priority = 4; + tc = 4; + break; + case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: + priority = 3; + tc = 3; + break; + case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: + priority = 2; + tc = 2; + break; + case 
MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: + priority = 1; + tc = 1; + break; + case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT: + priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY; + tc = MLXSW_REG_HTGT_DEFAULT_TC; + policer_id = MLXSW_REG_HTGT_INVALID_POLICER; + break; + default: + continue; + } + + if (max_cpu_policers <= policer_id && + policer_id != MLXSW_REG_HTGT_INVALID_POLICER) + return -EIO; + + mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc); + err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); + if (err) + return err; } -static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = { - MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, FDB_MC, TRAP_TO_CPU), - /* Traps for specific L2 packet types, not trapped as FDB MC */ - MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, STP, TRAP_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LACP, TRAP_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, EAPOL, TRAP_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LLDP, TRAP_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MMRP, TRAP_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MVRP, TRAP_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, RPVST, TRAP_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, DHCP, MIRROR_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, IGMP_QUERY, MIRROR_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V1_REPORT, TRAP_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V2_REPORT, TRAP_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V2_LEAVE, TRAP_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V3_REPORT, TRAP_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, ARPBC, MIRROR_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, ARPUC, MIRROR_TO_CPU), - /* L3 traps */ - MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MTUERROR, TRAP_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, TTLERROR, TRAP_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LBERROR, TRAP_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, OSPF, TRAP_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IP2ME, TRAP_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, RTR_INGRESS0, TRAP_TO_CPU), - MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, HOST_MISS_IPV4, TRAP_TO_CPU), -}; + return 0; +} static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) { - char htgt_pl[MLXSW_REG_HTGT_LEN]; - char hpkt_pl[MLXSW_REG_HPKT_LEN]; int i; int err; - mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl); + err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core); if (err) return err; - mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl); + err = mlxsw_sp_trap_groups_set(mlxsw_sp->core); if (err) return err; - for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) { - err = mlxsw_core_rx_listener_register(mlxsw_sp->core, - &mlxsw_sp_rx_listener[i], - mlxsw_sp); + for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { + err = mlxsw_core_trap_register(mlxsw_sp->core, + &mlxsw_sp_listener[i], + mlxsw_sp); if (err) - goto err_rx_listener_register; + goto err_listener_register; - mlxsw_reg_hpkt_pack(hpkt_pl, mlxsw_sp_rx_listener[i].action, - mlxsw_sp_rx_listener[i].trap_id); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); - if (err) - goto err_rx_trap_set; } return 0; -err_rx_trap_set: - mlxsw_core_rx_listener_unregister(mlxsw_sp->core, - 
&mlxsw_sp_rx_listener[i], - mlxsw_sp); -err_rx_listener_register: +err_listener_register: for (i--; i >= 0; i--) { - mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, - mlxsw_sp_rx_listener[i].trap_id); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); - - mlxsw_core_rx_listener_unregister(mlxsw_sp->core, - &mlxsw_sp_rx_listener[i], - mlxsw_sp); + mlxsw_core_trap_unregister(mlxsw_sp->core, + &mlxsw_sp_listener[i], + mlxsw_sp); } return err; } static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) { - char hpkt_pl[MLXSW_REG_HPKT_LEN]; int i; - for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) { - mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, - mlxsw_sp_rx_listener[i].trap_id); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); - - mlxsw_core_rx_listener_unregister(mlxsw_sp->core, - &mlxsw_sp_rx_listener[i], - mlxsw_sp); + for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) { + mlxsw_core_trap_unregister(mlxsw_sp->core, + &mlxsw_sp_listener[i], + mlxsw_sp); } } @@ -2889,7 +2983,6 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp) static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) { - struct mlxsw_resources *resources; char slcr_pl[MLXSW_REG_SLCR_LEN]; int err; @@ -2906,11 +2999,11 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) if (err) return err; - resources = mlxsw_core_resources_get(mlxsw_sp->core); - if (!(resources->max_lag_valid && resources->max_ports_in_lag_valid)) + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) || + !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) return -EIO; - mlxsw_sp->lags = kcalloc(resources->max_lag, + mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG), sizeof(struct mlxsw_sp_upper), GFP_KERNEL); if (!mlxsw_sp->lags) @@ -2924,6 +3017,17 @@ static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp->lags); } +static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) +{ + char htgt_pl[MLXSW_REG_HTGT_LEN]; + + mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, + MLXSW_REG_HTGT_INVALID_POLICER, + MLXSW_REG_HTGT_DEFAULT_PRIORITY, + MLXSW_REG_HTGT_DEFAULT_TC); + return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); +} + static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info) { @@ -2942,16 +3046,10 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, return err; } - err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n"); - return err; - } - err = mlxsw_sp_traps_init(mlxsw_sp); if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n"); - goto err_rx_listener_register; + dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); + return err; } err = mlxsw_sp_flood_init(mlxsw_sp); @@ -3011,8 +3109,6 @@ err_lag_init: err_buffers_init: err_flood_init: mlxsw_sp_traps_fini(mlxsw_sp); -err_rx_listener_register: - mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); return err; } @@ -3027,7 +3123,6 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_lag_fini(mlxsw_sp); mlxsw_sp_buffers_fini(mlxsw_sp); mlxsw_sp_traps_fini(mlxsw_sp); - mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); WARN_ON(!list_empty(&mlxsw_sp->vfids.list)); WARN_ON(!list_empty(&mlxsw_sp->fids)); } @@ -3065,11 +3160,11 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = { }; static struct mlxsw_driver mlxsw_sp_driver = { - .kind = 
MLXSW_DEVICE_KIND_SPECTRUM, - .owner = THIS_MODULE, + .kind = mlxsw_sp_driver_name, .priv_size = sizeof(struct mlxsw_sp), .init = mlxsw_sp_init, .fini = mlxsw_sp_fini, + .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, .port_split = mlxsw_sp_port_split, .port_unsplit = mlxsw_sp_port_unsplit, .sb_pool_get = mlxsw_sp_sb_pool_get, @@ -3092,19 +3187,30 @@ static bool mlxsw_sp_port_dev_check(const struct net_device *dev) return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; } +static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data) +{ + struct mlxsw_sp_port **port = data; + int ret = 0; + + if (mlxsw_sp_port_dev_check(lower_dev)) { + *port = netdev_priv(lower_dev); + ret = 1; + } + + return ret; +} + static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) { - struct net_device *lower_dev; - struct list_head *iter; + struct mlxsw_sp_port *port; if (mlxsw_sp_port_dev_check(dev)) return netdev_priv(dev); - netdev_for_each_all_lower_dev(dev, lower_dev, iter) { - if (mlxsw_sp_port_dev_check(lower_dev)) - return netdev_priv(lower_dev); - } - return NULL; + port = NULL; + netdev_walk_all_lower_dev(dev, mlxsw_lower_dev_walk, &port); + + return port; } static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) @@ -3117,17 +3223,15 @@ static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) { - struct net_device *lower_dev; - struct list_head *iter; + struct mlxsw_sp_port *port; if (mlxsw_sp_port_dev_check(dev)) return netdev_priv(dev); - netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) { - if (mlxsw_sp_port_dev_check(lower_dev)) - return netdev_priv(lower_dev); - } - return NULL; + port = NULL; + netdev_walk_all_lower_dev_rcu(dev, mlxsw_lower_dev_walk, &port); + + return port; } struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) @@ -3171,11 +3275,9 @@ static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r, static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp) { - struct mlxsw_resources *resources; int i; - resources = mlxsw_core_resources_get(mlxsw_sp->core); - for (i = 0; i < resources->max_rif; i++) + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) if (!mlxsw_sp->rifs[i]) return i; @@ -3698,14 +3800,15 @@ static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; u8 local_port = mlxsw_sp_port->local_port; u16 lag_id = mlxsw_sp_port->lag_id; - struct mlxsw_resources *resources; + u64 max_lag_members; int i, count = 0; if (!mlxsw_sp_port->lagged) return true; - resources = mlxsw_core_resources_get(mlxsw_sp->core); - for (i = 0; i < resources->max_ports_in_lag; i++) { + max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, + MAX_LAG_MEMBERS); + for (i = 0; i < max_lag_members; i++) { struct mlxsw_sp_port *lag_port; lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i); @@ -3911,13 +4014,13 @@ static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev, u16 *p_lag_id) { - struct mlxsw_resources *resources; struct mlxsw_sp_upper *lag; int free_lag_id = -1; + u64 max_lag; int i; - resources = mlxsw_core_resources_get(mlxsw_sp->core); - for (i = 0; i < resources->max_lag; i++) { + max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG); + for (i = 0; i < max_lag; i++) { lag = mlxsw_sp_lag_get(mlxsw_sp, i); if (lag->ref_count) { if (lag->dev == lag_dev) { @@ -3951,11 +4054,12 @@ 
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 *p_port_index) { - struct mlxsw_resources *resources; + u64 max_lag_members; int i; - resources = mlxsw_core_resources_get(mlxsw_sp->core); - for (i = 0; i < resources->max_ports_in_lag; i++) { + max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, + MAX_LAG_MEMBERS); + for (i = 0; i < max_lag_members; i++) { if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) { *p_port_index = i; return 0; @@ -4652,6 +4756,16 @@ static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = { .notifier_call = mlxsw_sp_router_netevent_event, }; +static const struct pci_device_id mlxsw_sp_pci_id_table[] = { + {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, + {0, }, +}; + +static struct pci_driver mlxsw_sp_pci_driver = { + .name = mlxsw_sp_driver_name, + .id_table = mlxsw_sp_pci_id_table, +}; + static int __init mlxsw_sp_module_init(void) { int err; @@ -4663,8 +4777,15 @@ static int __init mlxsw_sp_module_init(void) err = mlxsw_core_driver_register(&mlxsw_sp_driver); if (err) goto err_core_driver_register; + + err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver); + if (err) + goto err_pci_driver_register; + return 0; +err_pci_driver_register: + mlxsw_core_driver_unregister(&mlxsw_sp_driver); err_core_driver_register: unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); @@ -4674,6 +4795,7 @@ err_core_driver_register: static void __exit mlxsw_sp_module_exit(void) { + mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver); mlxsw_core_driver_unregister(&mlxsw_sp_driver); unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); @@ -4686,4 +4808,4 @@ module_exit(mlxsw_sp_module_exit); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); MODULE_DESCRIPTION("Mellanox Spectrum driver"); -MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM); +MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 97bbc1d21df8..cc1af19d699a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -316,7 +316,6 @@ struct mlxsw_sp_port_pcpu_stats { }; struct mlxsw_sp_port { - struct mlxsw_core_port core_port; /* must be first */ struct net_device *dev; struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats; struct mlxsw_sp *mlxsw_sp; @@ -479,12 +478,9 @@ static inline struct mlxsw_sp_rif * mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, const struct net_device *dev) { - struct mlxsw_resources *resources; int i; - resources = mlxsw_core_resources_get(mlxsw_sp->core); - - for (i = 0; i < resources->max_rif; i++) + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev) return mlxsw_sp->rifs[i]; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index bcaed8a38037..a7468262f118 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -611,6 +611,9 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core, u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size); enum mlxsw_reg_sbpr_mode mode; + if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) + return -EINVAL; 
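The MLXSW_CORE_RES_VALID() and MLXSW_CORE_RES_GET() macros that replace the old struct mlxsw_resources lookups throughout these spectrum hunks are core-level wrappers around the mlxsw_res helpers from the new resources.h. A minimal sketch, assuming they are declared in core.h (core.h itself is not shown in this diff):

bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
			  enum mlxsw_res_id res_id);
u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
		       enum mlxsw_res_id res_id);

/* Expand the short resource name to the full enum value, in the same
 * way as MLXSW_RES_VALID()/MLXSW_RES_GET() in resources.h.
 */
#define MLXSW_CORE_RES_VALID(mlxsw_core, short_res_id)		\
	mlxsw_core_res_valid(mlxsw_core, MLXSW_RES_ID_##short_res_id)
#define MLXSW_CORE_RES_GET(mlxsw_core, short_res_id)		\
	mlxsw_core_res_get(mlxsw_core, MLXSW_RES_ID_##short_res_id)

The payoff of the table-driven mlxsw_res_parse() is that a new firmware resource needs only an enum entry and one row in mlxsw_res_ids[], rather than a dedicated value field plus a _valid flag in struct mlxsw_resources.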
+ mode = (enum mlxsw_reg_sbpr_mode) threshold_type; return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index e83072da6272..01d0efa9c5c7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -382,12 +382,10 @@ static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp) static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp) { - struct mlxsw_resources *resources; struct mlxsw_sp_vr *vr; int i; - resources = mlxsw_core_resources_get(mlxsw_sp->core); - for (i = 0; i < resources->max_virtual_routers; i++) { + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { vr = &mlxsw_sp->router.vrs[i]; if (!vr->used) return vr; @@ -429,14 +427,12 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id, enum mlxsw_sp_l3proto proto) { - struct mlxsw_resources *resources; struct mlxsw_sp_vr *vr; int i; tb_id = mlxsw_sp_fix_tb_id(tb_id); - resources = mlxsw_core_resources_get(mlxsw_sp->core); - for (i = 0; i < resources->max_virtual_routers; i++) { + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { vr = &mlxsw_sp->router.vrs[i]; if (vr->used && vr->proto == proto && vr->tb_id == tb_id) return vr; @@ -572,21 +568,20 @@ static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr) static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp) { - struct mlxsw_resources *resources; struct mlxsw_sp_vr *vr; + u64 max_vrs; int i; - resources = mlxsw_core_resources_get(mlxsw_sp->core); - if (!resources->max_virtual_routers_valid) + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS)) return -EIO; - mlxsw_sp->router.vrs = kcalloc(resources->max_virtual_routers, - sizeof(struct mlxsw_sp_vr), + max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); + mlxsw_sp->router.vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr), GFP_KERNEL); if (!mlxsw_sp->router.vrs) return -ENOMEM; - for (i = 0; i < resources->max_virtual_routers; i++) { + for (i = 0; i < max_vrs; i++) { vr = &mlxsw_sp->router.vrs[i]; vr->id = i; } @@ -598,6 +593,14 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp); static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp) { + /* At this stage we're guaranteed not to have new incoming + * FIB notifications and the work queue is free from FIBs + * sitting on top of mlxsw netdevs. However, we can still + * have other FIBs queued. Flush the queue before flushing + * the device's tables. No need for locks, as we're the only + * writer. 
+ */ + mlxsw_core_flush_owq(); mlxsw_sp_router_fib_flush(mlxsw_sp); kfree(mlxsw_sp->router.vrs); } @@ -939,7 +942,7 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work) char rauht_pl[MLXSW_REG_RAUHT_LEN]; struct net_device *dev; bool entry_connected; - u8 nud_state; + u8 nud_state, dead; bool updating; bool removing; bool adding; @@ -950,10 +953,11 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work) dip = ntohl(*((__be32 *) n->primary_key)); memcpy(neigh_entry->ha, n->ha, sizeof(neigh_entry->ha)); nud_state = n->nud_state; + dead = n->dead; dev = n->dev; read_unlock_bh(&n->lock); - entry_connected = nud_state & NUD_VALID; + entry_connected = nud_state & NUD_VALID && !dead; adding = (!neigh_entry->offloaded) && entry_connected; updating = neigh_entry->offloaded && entry_connected; removing = neigh_entry->offloaded && !entry_connected; @@ -1348,7 +1352,7 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_neigh_entry *neigh_entry; struct net_device *dev = fib_nh->nh_dev; struct neighbour *n; - u8 nud_state; + u8 nud_state, dead; /* Take a reference of neigh here ensuring that neigh would * not be destructed before the nexthop entry is finished. @@ -1380,8 +1384,9 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list); read_lock_bh(&n->lock); nud_state = n->nud_state; + dead = n->dead; read_unlock_bh(&n->lock); - __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID)); + __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead)); return 0; } @@ -1391,6 +1396,7 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; + __mlxsw_sp_nexthop_neigh_update(nh, true); list_del(&nh->neigh_list_node); /* If that is the last nexthop connected to that neigh, remove from @@ -1449,6 +1455,8 @@ mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp, nh = &nh_grp->nexthops[i]; mlxsw_sp_nexthop_fini(mlxsw_sp, nh); } + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); + WARN_ON_ONCE(nh_grp->adj_index_valid); kfree(nh_grp); } @@ -1872,14 +1880,12 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) { - struct mlxsw_resources *resources; struct mlxsw_sp_fib_entry *fib_entry; struct mlxsw_sp_fib_entry *tmp; struct mlxsw_sp_vr *vr; int i; - resources = mlxsw_core_resources_get(mlxsw_sp->core); - for (i = 0; i < resources->max_virtual_routers; i++) { + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { vr = &mlxsw_sp->router.vrs[i]; if (!vr->used) @@ -1903,6 +1909,9 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) { int err; + if (mlxsw_sp->router.aborted) + return; + dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. 
Note that FIB entries are no longer being offloaded to this device.\n"); mlxsw_sp_router_fib_flush(mlxsw_sp); mlxsw_sp->router.aborted = true; err = mlxsw_sp_router_set_abort_trap(mlxsw_sp); @@ -1912,21 +1921,21 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) { - struct mlxsw_resources *resources; char rgcr_pl[MLXSW_REG_RGCR_LEN]; + u64 max_rifs; int err; - resources = mlxsw_core_resources_get(mlxsw_sp->core); - if (!resources->max_rif_valid) + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS)) return -EIO; - mlxsw_sp->rifs = kcalloc(resources->max_rif, - sizeof(struct mlxsw_sp_rif *), GFP_KERNEL); + max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); + mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *), + GFP_KERNEL); if (!mlxsw_sp->rifs) return -ENOMEM; mlxsw_reg_rgcr_pack(rgcr_pl, true); - mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, resources->max_rif); + mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); if (err) goto err_rgcr_fail; @@ -1940,47 +1949,101 @@ err_rgcr_fail: static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) { - struct mlxsw_resources *resources; char rgcr_pl[MLXSW_REG_RGCR_LEN]; int i; mlxsw_reg_rgcr_pack(rgcr_pl, false); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); - resources = mlxsw_core_resources_get(mlxsw_sp->core); - for (i = 0; i < resources->max_rif; i++) + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) WARN_ON_ONCE(mlxsw_sp->rifs[i]); kfree(mlxsw_sp->rifs); } -static int mlxsw_sp_router_fib_event(struct notifier_block *nb, - unsigned long event, void *ptr) +struct mlxsw_sp_fib_event_work { + struct delayed_work dw; + struct fib_entry_notifier_info fen_info; + struct mlxsw_sp *mlxsw_sp; + unsigned long event; +}; + +static void mlxsw_sp_router_fib_event_work(struct work_struct *work) { - struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb); - struct fib_entry_notifier_info *fen_info = ptr; + struct mlxsw_sp_fib_event_work *fib_work = + container_of(work, struct mlxsw_sp_fib_event_work, dw.work); + struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; int err; - if (!net_eq(fen_info->info.net, &init_net)) - return NOTIFY_DONE; - - switch (event) { + /* Protect internal structures from changes */ + rtnl_lock(); + switch (fib_work->event) { case FIB_EVENT_ENTRY_ADD: - err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info); + err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info); if (err) mlxsw_sp_router_fib4_abort(mlxsw_sp); + fib_info_put(fib_work->fen_info.fi); break; case FIB_EVENT_ENTRY_DEL: - mlxsw_sp_router_fib4_del(mlxsw_sp, fen_info); + mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info); + fib_info_put(fib_work->fen_info.fi); break; case FIB_EVENT_RULE_ADD: /* fall through */ case FIB_EVENT_RULE_DEL: mlxsw_sp_router_fib4_abort(mlxsw_sp); break; } + rtnl_unlock(); + kfree(fib_work); +} + +/* Called with rcu_read_lock() */ +static int mlxsw_sp_router_fib_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb); + struct mlxsw_sp_fib_event_work *fib_work; + struct fib_notifier_info *info = ptr; + + if (!net_eq(info->net, &init_net)) + return NOTIFY_DONE; + + fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); + if (WARN_ON(!fib_work)) + return NOTIFY_BAD; + + INIT_DELAYED_WORK(&fib_work->dw, mlxsw_sp_router_fib_event_work); + 
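/* [Editor's aside, not part of the patch: mlxsw_core_schedule_odw() and
 * mlxsw_core_flush_owq(), used by this notifier and by the flush paths
 * above, are assumed to wrap an ordered workqueue. An ordered workqueue
 * executes at most one work item at a time, in queueing order, so flushing
 * it guarantees every previously queued FIB event has completed. A minimal
 * sketch of such helpers, with hypothetical names:]
 */
static struct workqueue_struct *example_owq;

static int example_owq_init(void)
{
	example_owq = alloc_ordered_workqueue("example_ordered", 0);
	return example_owq ? 0 : -ENOMEM;
}

static bool example_schedule_odw(struct delayed_work *dwork,
				 unsigned long delay)
{
	return queue_delayed_work(example_owq, dwork, delay);
}

static void example_flush_owq(void)
{
	/* Waits until all work queued so far has finished executing. */
	flush_workqueue(example_owq);
}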
fib_work->mlxsw_sp = mlxsw_sp; + fib_work->event = event; + + switch (event) { + case FIB_EVENT_ENTRY_ADD: /* fall through */ + case FIB_EVENT_ENTRY_DEL: + memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info)); + /* Take reference on fib_info to prevent it from being + * freed while work is queued. Release it afterwards. + */ + fib_info_hold(fib_work->fen_info.fi); + break; + } + + mlxsw_core_schedule_odw(&fib_work->dw, 0); + return NOTIFY_DONE; } +static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb) +{ + struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb); + + /* Flush pending FIB notifications and then flush the device's + * table before requesting another dump. The FIB notification + * block is unregistered, so no need to take RTNL. + */ + mlxsw_core_flush_owq(); + mlxsw_sp_router_fib_flush(mlxsw_sp); +} + int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) { int err; @@ -1996,14 +2059,20 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) if (err) goto err_vrs_init; - err = mlxsw_sp_neigh_init(mlxsw_sp); + err = mlxsw_sp_neigh_init(mlxsw_sp); if (err) goto err_neigh_init; mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event; - register_fib_notifier(&mlxsw_sp->fib_nb); + err = register_fib_notifier(&mlxsw_sp->fib_nb, + mlxsw_sp_router_fib_dump_flush); + if (err) + goto err_register_fib_notifier; + return 0; +err_register_fib_notifier: + mlxsw_sp_neigh_fini(mlxsw_sp); err_neigh_init: mlxsw_sp_vrs_fini(mlxsw_sp); err_vrs_init: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 1e2c8eca3af1..b87ba7d36bc4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1196,11 +1196,12 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp, u16 lag_id) { struct mlxsw_sp_port *mlxsw_sp_port; - struct mlxsw_resources *resources; + u64 max_lag_members; int i; - resources = mlxsw_core_resources_get(mlxsw_sp->core); - for (i = 0; i < resources->max_ports_in_lag; i++) { + max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, + MAX_LAG_MEMBERS); + for (i = 0; i < max_lag_members; i++) { mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i); if (mlxsw_sp_port) return mlxsw_sp_port; diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c new file mode 100644 index 000000000000..74341fe0eb25 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c @@ -0,0 +1,605 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/switchib.c + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016 Elad Raz <eladr@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/skbuff.h> +#include <linux/if_vlan.h> +#include <net/switchdev.h> + +#include "pci.h" +#include "core.h" +#include "reg.h" +#include "port.h" +#include "trap.h" +#include "txheader.h" +#include "ib.h" + +static const char mlxsw_sib_driver_name[] = "mlxsw_switchib"; +static const char mlxsw_sib2_driver_name[] = "mlxsw_switchib2"; + +struct mlxsw_sib_port; + +struct mlxsw_sib { + struct mlxsw_sib_port **ports; + struct mlxsw_core *core; + const struct mlxsw_bus_info *bus_info; +}; + +struct mlxsw_sib_port { + struct mlxsw_sib *mlxsw_sib; + u8 local_port; + struct { + u8 module; + } mapping; +}; + +/* tx_v1_hdr_version + * Tx header version. + * Must be set to 1. + */ +MLXSW_ITEM32(tx_v1, hdr, version, 0x00, 28, 4); + +/* tx_v1_hdr_ctl + * Packet control type. + * 0 - Ethernet control (e.g. EMADs, LACP) + * 1 - Ethernet data + */ +MLXSW_ITEM32(tx_v1, hdr, ctl, 0x00, 26, 2); + +/* tx_v1_hdr_proto + * Packet protocol type. Must be set to 1 (Ethernet). + */ +MLXSW_ITEM32(tx_v1, hdr, proto, 0x00, 21, 3); + +/* tx_v1_hdr_swid + * Switch partition ID. Must be set to 0. + */ +MLXSW_ITEM32(tx_v1, hdr, swid, 0x00, 12, 3); + +/* tx_v1_hdr_control_tclass + * Indicates if the packet should use the control TClass and not one + * of the data TClasses. + */ +MLXSW_ITEM32(tx_v1, hdr, control_tclass, 0x00, 6, 1); + +/* tx_v1_hdr_port_mid + * Destination local port for unicast packets. + * Destination multicast ID for multicast packets. + * + * Control packets are directed to a specific egress port, while data + * packets are transmitted through the CPU port (0) into the switch partition, + * where forwarding rules are applied. 
+ */ +MLXSW_ITEM32(tx_v1, hdr, port_mid, 0x04, 16, 16); + +/* tx_v1_hdr_type + * 0 - Data packets + * 6 - Control packets + */ +MLXSW_ITEM32(tx_v1, hdr, type, 0x0C, 0, 4); + +static void +mlxsw_sib_tx_v1_hdr_construct(struct sk_buff *skb, + const struct mlxsw_tx_info *tx_info) +{ + char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); + + memset(txhdr, 0, MLXSW_TXHDR_LEN); + + mlxsw_tx_v1_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1); + mlxsw_tx_v1_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); + mlxsw_tx_v1_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); + mlxsw_tx_v1_hdr_swid_set(txhdr, 0); + mlxsw_tx_v1_hdr_control_tclass_set(txhdr, 1); + mlxsw_tx_v1_hdr_port_mid_set(txhdr, tx_info->local_port); + mlxsw_tx_v1_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); +} + +static int +mlxsw_sib_port_admin_status_set(struct mlxsw_sib_port *mlxsw_sib_port, + bool is_up) +{ + struct mlxsw_sib *mlxsw_sib = mlxsw_sib_port->mlxsw_sib; + char paos_pl[MLXSW_REG_PAOS_LEN]; + + mlxsw_reg_paos_pack(paos_pl, mlxsw_sib_port->local_port, + is_up ? MLXSW_PORT_ADMIN_STATUS_UP : + MLXSW_PORT_ADMIN_STATUS_DOWN); + return mlxsw_reg_write(mlxsw_sib->core, MLXSW_REG(paos), paos_pl); +} + +static int mlxsw_sib_port_mtu_set(struct mlxsw_sib_port *mlxsw_sib_port, + u16 mtu) +{ + struct mlxsw_sib *mlxsw_sib = mlxsw_sib_port->mlxsw_sib; + char pmtu_pl[MLXSW_REG_PMTU_LEN]; + int max_mtu; + int err; + + mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sib_port->local_port, 0); + err = mlxsw_reg_query(mlxsw_sib->core, MLXSW_REG(pmtu), pmtu_pl); + if (err) + return err; + max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); + + if (mtu > max_mtu) + return -EINVAL; + + mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sib_port->local_port, mtu); + return mlxsw_reg_write(mlxsw_sib->core, MLXSW_REG(pmtu), pmtu_pl); +} + +static int mlxsw_sib_port_set(struct mlxsw_sib_port *mlxsw_sib_port, u8 port) +{ + struct mlxsw_sib *mlxsw_sib = mlxsw_sib_port->mlxsw_sib; + char plib_pl[MLXSW_REG_PLIB_LEN] = {0}; + int err; + + mlxsw_reg_plib_local_port_set(plib_pl, mlxsw_sib_port->local_port); + mlxsw_reg_plib_ib_port_set(plib_pl, port); + err = mlxsw_reg_write(mlxsw_sib->core, MLXSW_REG(plib), plib_pl); + return err; +} + +static int mlxsw_sib_port_swid_set(struct mlxsw_sib_port *mlxsw_sib_port, + u8 swid) +{ + struct mlxsw_sib *mlxsw_sib = mlxsw_sib_port->mlxsw_sib; + char pspa_pl[MLXSW_REG_PSPA_LEN]; + + mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sib_port->local_port); + return mlxsw_reg_write(mlxsw_sib->core, MLXSW_REG(pspa), pspa_pl); +} + +static int mlxsw_sib_port_module_info_get(struct mlxsw_sib *mlxsw_sib, + u8 local_port, u8 *p_module, + u8 *p_width) +{ + char pmlp_pl[MLXSW_REG_PMLP_LEN]; + int err; + + mlxsw_reg_pmlp_pack(pmlp_pl, local_port); + err = mlxsw_reg_query(mlxsw_sib->core, MLXSW_REG(pmlp), pmlp_pl); + if (err) + return err; + *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); + *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl); + return 0; +} + +static int mlxsw_sib_port_speed_set(struct mlxsw_sib_port *mlxsw_sib_port, + u16 speed, u16 width) +{ + struct mlxsw_sib *mlxsw_sib = mlxsw_sib_port->mlxsw_sib; + char ptys_pl[MLXSW_REG_PTYS_LEN]; + + mlxsw_reg_ptys_ib_pack(ptys_pl, mlxsw_sib_port->local_port, speed, + width); + return mlxsw_reg_write(mlxsw_sib->core, MLXSW_REG(ptys), ptys_pl); +} + +static bool mlxsw_sib_port_created(struct mlxsw_sib *mlxsw_sib, u8 local_port) +{ + return mlxsw_sib->ports[local_port] != NULL; +} + +static int __mlxsw_sib_port_create(struct mlxsw_sib *mlxsw_sib, u8 local_port, + u8 module, u8 width) +{ + struct mlxsw_sib_port *mlxsw_sib_port; + int err; + 
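/* [Editor's aside, not part of the patch: each MLXSW_ITEM32() invocation
 * above generates typed accessors for one field of the Tx header. As a
 * rough illustration (simplified, not the real macro expansion),
 * MLXSW_ITEM32(tx_v1, hdr, swid, 0x00, 12, 3) provides a setter roughly
 * equivalent to:]
 */
static inline void example_tx_v1_hdr_swid_set(char *buf, u32 val)
{
	__be32 *p = (__be32 *)(buf + 0x00);	/* byte offset of the word */
	u32 w = be32_to_cpu(*p);

	w &= ~GENMASK(14, 12);			/* clear the 3-bit field */
	w |= (val & GENMASK(2, 0)) << 12;	/* insert at bit offset 12 */
	*p = cpu_to_be32(w);
}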
+ mlxsw_sib_port = kzalloc(sizeof(*mlxsw_sib_port), GFP_KERNEL); + if (!mlxsw_sib_port) + return -ENOMEM; + mlxsw_sib_port->mlxsw_sib = mlxsw_sib; + mlxsw_sib_port->local_port = local_port; + mlxsw_sib_port->mapping.module = module; + + err = mlxsw_sib_port_swid_set(mlxsw_sib_port, 0); + if (err) { + dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to set SWID\n", + mlxsw_sib_port->local_port); + goto err_port_swid_set; + } + + /* Expose the IB port number as its front panel name */ + err = mlxsw_sib_port_set(mlxsw_sib_port, module + 1); + if (err) { + dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to set IB port\n", + mlxsw_sib_port->local_port); + goto err_port_ib_set; + } + + /* Supports all speeds from SDR to FDR (bitmask) and bus widths + * of 1x, 2x and 4x (3-bit bitmask) + */ + err = mlxsw_sib_port_speed_set(mlxsw_sib_port, + MLXSW_REG_PTYS_IB_SPEED_EDR - 1, + BIT(3) - 1); + if (err) { + dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to set speed\n", + mlxsw_sib_port->local_port); + goto err_port_speed_set; + } + + /* Change to the maximum MTU the device supports; the SMA will take + * care of the active MTU + */ + err = mlxsw_sib_port_mtu_set(mlxsw_sib_port, MLXSW_IB_DEFAULT_MTU); + if (err) { + dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to set MTU\n", + mlxsw_sib_port->local_port); + goto err_port_mtu_set; + } + + err = mlxsw_sib_port_admin_status_set(mlxsw_sib_port, true); + if (err) { + dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to change admin state to UP\n", + mlxsw_sib_port->local_port); + goto err_port_admin_set; + } + + mlxsw_core_port_ib_set(mlxsw_sib->core, mlxsw_sib_port->local_port, + mlxsw_sib_port); + mlxsw_sib->ports[local_port] = mlxsw_sib_port; + return 0; + +err_port_admin_set: +err_port_mtu_set: +err_port_speed_set: +err_port_ib_set: + mlxsw_sib_port_swid_set(mlxsw_sib_port, MLXSW_PORT_SWID_DISABLED_PORT); +err_port_swid_set: + kfree(mlxsw_sib_port); + return err; +} + +static int mlxsw_sib_port_create(struct mlxsw_sib *mlxsw_sib, u8 local_port, + u8 module, u8 width) +{ + int err; + + err = mlxsw_core_port_init(mlxsw_sib->core, local_port); + if (err) { + dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to init core port\n", + local_port); + return err; + } + err = __mlxsw_sib_port_create(mlxsw_sib, local_port, module, width); + if (err) + goto err_port_create; + + return 0; + +err_port_create: + mlxsw_core_port_fini(mlxsw_sib->core, local_port); + return err; +} + +static void __mlxsw_sib_port_remove(struct mlxsw_sib *mlxsw_sib, u8 local_port) +{ + struct mlxsw_sib_port *mlxsw_sib_port = mlxsw_sib->ports[local_port]; + + mlxsw_core_port_clear(mlxsw_sib->core, local_port, mlxsw_sib); + mlxsw_sib->ports[local_port] = NULL; + mlxsw_sib_port_admin_status_set(mlxsw_sib_port, false); + mlxsw_sib_port_swid_set(mlxsw_sib_port, MLXSW_PORT_SWID_DISABLED_PORT); + kfree(mlxsw_sib_port); +} + +static void mlxsw_sib_port_remove(struct mlxsw_sib *mlxsw_sib, u8 local_port) +{ + __mlxsw_sib_port_remove(mlxsw_sib, local_port); + mlxsw_core_port_fini(mlxsw_sib->core, local_port); +} + +static void mlxsw_sib_ports_remove(struct mlxsw_sib *mlxsw_sib) +{ + int i; + + for (i = 1; i < MLXSW_PORT_MAX_IB_PORTS; i++) + if (mlxsw_sib_port_created(mlxsw_sib, i)) + mlxsw_sib_port_remove(mlxsw_sib, i); + kfree(mlxsw_sib->ports); +} + +static int mlxsw_sib_ports_create(struct mlxsw_sib *mlxsw_sib) +{ + size_t alloc_size; + u8 module, width; + int i; + int err; + + alloc_size = sizeof(struct mlxsw_sib_port *) * MLXSW_PORT_MAX_IB_PORTS; + mlxsw_sib->ports = 
kzalloc(alloc_size, GFP_KERNEL); + if (!mlxsw_sib->ports) + return -ENOMEM; + + for (i = 1; i < MLXSW_PORT_MAX_IB_PORTS; i++) { + err = mlxsw_sib_port_module_info_get(mlxsw_sib, i, &module, + &width); + if (err) + goto err_port_module_info_get; + if (!width) + continue; + err = mlxsw_sib_port_create(mlxsw_sib, i, module, width); + if (err) + goto err_port_create; + } + return 0; + +err_port_create: +err_port_module_info_get: + for (i--; i >= 1; i--) + if (mlxsw_sib_port_created(mlxsw_sib, i)) + mlxsw_sib_port_remove(mlxsw_sib, i); + kfree(mlxsw_sib->ports); + return err; +} + +static void +mlxsw_sib_pude_ib_event_func(struct mlxsw_sib_port *mlxsw_sib_port, + enum mlxsw_reg_pude_oper_status status) +{ + if (status == MLXSW_PORT_OPER_STATUS_UP) + pr_info("ib link for port %d - up\n", + mlxsw_sib_port->mapping.module + 1); + else + pr_info("ib link for port %d - down\n", + mlxsw_sib_port->mapping.module + 1); +} + +static void mlxsw_sib_pude_event_func(const struct mlxsw_reg_info *reg, + char *pude_pl, void *priv) +{ + struct mlxsw_sib *mlxsw_sib = priv; + struct mlxsw_sib_port *mlxsw_sib_port; + enum mlxsw_reg_pude_oper_status status; + u8 local_port; + + local_port = mlxsw_reg_pude_local_port_get(pude_pl); + mlxsw_sib_port = mlxsw_sib->ports[local_port]; + if (!mlxsw_sib_port) { + dev_warn(mlxsw_sib->bus_info->dev, "Port %d: Link event received for non-existent port\n", + local_port); + return; + } + + status = mlxsw_reg_pude_oper_status_get(pude_pl); + mlxsw_sib_pude_ib_event_func(mlxsw_sib_port, status); +} + +static const struct mlxsw_listener mlxsw_sib_listener[] = { + MLXSW_EVENTL(mlxsw_sib_pude_event_func, PUDE, EMAD), +}; + +static int mlxsw_sib_traps_init(struct mlxsw_sib *mlxsw_sib) +{ + int i; + int err; + + for (i = 0; i < ARRAY_SIZE(mlxsw_sib_listener); i++) { + err = mlxsw_core_trap_register(mlxsw_sib->core, + &mlxsw_sib_listener[i], + mlxsw_sib); + if (err) + goto err_rx_listener_register; + } + + return 0; + +err_rx_listener_register: + for (i--; i >= 0; i--) { + mlxsw_core_trap_unregister(mlxsw_sib->core, + &mlxsw_sib_listener[i], + mlxsw_sib); + } + + return err; +} + +static void mlxsw_sib_traps_fini(struct mlxsw_sib *mlxsw_sib) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mlxsw_sib_listener); i++) { + mlxsw_core_trap_unregister(mlxsw_sib->core, + &mlxsw_sib_listener[i], mlxsw_sib); + } +} + +static int mlxsw_sib_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) +{ + char htgt_pl[MLXSW_REG_HTGT_LEN]; + + mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, + MLXSW_REG_HTGT_INVALID_POLICER, + MLXSW_REG_HTGT_DEFAULT_PRIORITY, + MLXSW_REG_HTGT_DEFAULT_TC); + mlxsw_reg_htgt_swid_set(htgt_pl, MLXSW_PORT_SWID_ALL_SWIDS); + mlxsw_reg_htgt_local_path_rdq_set(htgt_pl, + MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SIB_EMAD); + return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); +} + +static int mlxsw_sib_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info) +{ + struct mlxsw_sib *mlxsw_sib = mlxsw_core_driver_priv(mlxsw_core); + int err; + + mlxsw_sib->core = mlxsw_core; + mlxsw_sib->bus_info = mlxsw_bus_info; + + err = mlxsw_sib_ports_create(mlxsw_sib); + if (err) { + dev_err(mlxsw_sib->bus_info->dev, "Failed to create ports\n"); + return err; + } + + err = mlxsw_sib_traps_init(mlxsw_sib); + if (err) { + dev_err(mlxsw_sib->bus_info->dev, "Failed to set traps\n"); + goto err_traps_init_err; + } + + return 0; + +err_traps_init_err: + mlxsw_sib_ports_remove(mlxsw_sib); + return err; +} + +static void mlxsw_sib_fini(struct mlxsw_core *mlxsw_core) +{ + 
struct mlxsw_sib *mlxsw_sib = mlxsw_core_driver_priv(mlxsw_core); + + mlxsw_sib_traps_fini(mlxsw_sib); + mlxsw_sib_ports_remove(mlxsw_sib); +} + +static struct mlxsw_config_profile mlxsw_sib_config_profile = { + .used_max_system_port = 1, + .max_system_port = 48000, + .used_max_ib_mc = 1, + .max_ib_mc = 27, + .used_max_pkey = 1, + .max_pkey = 32, + .swid_config = { + { + .used_type = 1, + .type = MLXSW_PORT_SWID_TYPE_IB, + } + }, + .resource_query_enable = 0, +}; + +static struct mlxsw_driver mlxsw_sib_driver = { + .kind = mlxsw_sib_driver_name, + .priv_size = sizeof(struct mlxsw_sib), + .init = mlxsw_sib_init, + .fini = mlxsw_sib_fini, + .basic_trap_groups_set = mlxsw_sib_basic_trap_groups_set, + .txhdr_construct = mlxsw_sib_tx_v1_hdr_construct, + .txhdr_len = MLXSW_TXHDR_LEN, + .profile = &mlxsw_sib_config_profile, +}; + +static struct mlxsw_driver mlxsw_sib2_driver = { + .kind = mlxsw_sib2_driver_name, + .priv_size = sizeof(struct mlxsw_sib), + .init = mlxsw_sib_init, + .fini = mlxsw_sib_fini, + .basic_trap_groups_set = mlxsw_sib_basic_trap_groups_set, + .txhdr_construct = mlxsw_sib_tx_v1_hdr_construct, + .txhdr_len = MLXSW_TXHDR_LEN, + .profile = &mlxsw_sib_config_profile, +}; + +static const struct pci_device_id mlxsw_sib_pci_id_table[] = { + {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHIB), 0}, + {0, }, +}; + +static struct pci_driver mlxsw_sib_pci_driver = { + .name = mlxsw_sib_driver_name, + .id_table = mlxsw_sib_pci_id_table, +}; + +static const struct pci_device_id mlxsw_sib2_pci_id_table[] = { + {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHIB2), 0}, + {0, }, +}; + +static struct pci_driver mlxsw_sib2_pci_driver = { + .name = mlxsw_sib2_driver_name, + .id_table = mlxsw_sib2_pci_id_table, +}; + +static int __init mlxsw_sib_module_init(void) +{ + int err; + + err = mlxsw_core_driver_register(&mlxsw_sib_driver); + if (err) + return err; + + err = mlxsw_core_driver_register(&mlxsw_sib2_driver); + if (err) + goto err_sib2_driver_register; + + err = mlxsw_pci_driver_register(&mlxsw_sib_pci_driver); + if (err) + goto err_sib_pci_driver_register; + + err = mlxsw_pci_driver_register(&mlxsw_sib2_pci_driver); + if (err) + goto err_sib2_pci_driver_register; + + return 0; + +err_sib2_pci_driver_register: + mlxsw_pci_driver_unregister(&mlxsw_sib_pci_driver); +err_sib_pci_driver_register: + mlxsw_core_driver_unregister(&mlxsw_sib2_driver); +err_sib2_driver_register: + mlxsw_core_driver_unregister(&mlxsw_sib_driver); + return err; +} + +static void __exit mlxsw_sib_module_exit(void) +{ + mlxsw_pci_driver_unregister(&mlxsw_sib2_pci_driver); + mlxsw_pci_driver_unregister(&mlxsw_sib_pci_driver); + mlxsw_core_driver_unregister(&mlxsw_sib2_driver); + mlxsw_core_driver_unregister(&mlxsw_sib_driver); +} + +module_init(mlxsw_sib_module_init); +module_exit(mlxsw_sib_module_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Elad Raz <eladr@mellanox.com>"); +MODULE_DESCRIPTION("Mellanox SwitchIB and SwitchIB-2 driver"); +MODULE_ALIAS("mlxsw_switchib2"); +MODULE_DEVICE_TABLE(pci, mlxsw_sib_pci_id_table); +MODULE_DEVICE_TABLE(pci, mlxsw_sib2_pci_id_table); diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 92bda8703f87..150ccf5192a9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -3,7 +3,7 @@ * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 
* Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> - * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> + * Copyright (c) 2015-2016 Elad Raz <eladr@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -37,6 +37,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> +#include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/slab.h> @@ -44,13 +45,14 @@ #include <linux/skbuff.h> #include <linux/if_vlan.h> #include <net/switchdev.h> -#include <generated/utsrelease.h> +#include "pci.h" #include "core.h" #include "reg.h" #include "port.h" #include "trap.h" #include "txheader.h" +#include "ib.h" static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2"; static const char mlxsw_sx_driver_version[] = "1.0"; @@ -74,11 +76,13 @@ struct mlxsw_sx_port_pcpu_stats { }; struct mlxsw_sx_port { - struct mlxsw_core_port core_port; /* must be first */ struct net_device *dev; struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats; struct mlxsw_sx *mlxsw_sx; u8 local_port; + struct { + u8 module; + } mapping; }; /* tx_hdr_version @@ -214,14 +218,14 @@ static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port, return 0; } -static int mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port, u16 mtu) +static int __mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port, + u16 mtu) { struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; char pmtu_pl[MLXSW_REG_PMTU_LEN]; int max_mtu; int err; - mtu += MLXSW_TXHDR_LEN + ETH_HLEN; mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0); err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl); if (err) @@ -235,6 +239,32 @@ static int mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port, u16 mtu) return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl); } +static int mlxsw_sx_port_mtu_eth_set(struct mlxsw_sx_port *mlxsw_sx_port, + u16 mtu) +{ + mtu += MLXSW_TXHDR_LEN + ETH_HLEN; + return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu); +} + +static int mlxsw_sx_port_mtu_ib_set(struct mlxsw_sx_port *mlxsw_sx_port, + u16 mtu) +{ + return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu); +} + +static int mlxsw_sx_port_ib_port_set(struct mlxsw_sx_port *mlxsw_sx_port, + u8 ib_port) +{ + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + char plib_pl[MLXSW_REG_PLIB_LEN] = {0}; + int err; + + mlxsw_reg_plib_local_port_set(plib_pl, mlxsw_sx_port->local_port); + mlxsw_reg_plib_ib_port_set(plib_pl, ib_port); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(plib), plib_pl); + return err; +} + static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid) { struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; @@ -254,18 +284,19 @@ mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port) return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl); } -static int mlxsw_sx_port_module_check(struct mlxsw_sx_port *mlxsw_sx_port, - bool *p_usable) +static int mlxsw_sx_port_module_info_get(struct mlxsw_sx *mlxsw_sx, + u8 local_port, u8 *p_module, + u8 *p_width) { - struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; char pmlp_pl[MLXSW_REG_PMLP_LEN]; int err; - mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sx_port->local_port); + mlxsw_reg_pmlp_pack(pmlp_pl, local_port); err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl); if (err) return err; - *p_usable = 
mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false; + *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); + *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl); return 0; } @@ -343,7 +374,7 @@ static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu) struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); int err; - err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu); + err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, mtu); if (err) return err; dev->mtu = mtu; @@ -382,12 +413,26 @@ mlxsw_sx_port_get_stats64(struct net_device *dev, return stats; } +static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name, + size_t len) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + int err; + + err = snprintf(name, len, "p%d", mlxsw_sx_port->mapping.module + 1); + if (err >= len) + return -EINVAL; + + return 0; +} + static const struct net_device_ops mlxsw_sx_port_netdev_ops = { .ndo_open = mlxsw_sx_port_open, .ndo_stop = mlxsw_sx_port_stop, .ndo_start_xmit = mlxsw_sx_port_xmit, .ndo_change_mtu = mlxsw_sx_port_change_mtu, .ndo_get_stats64 = mlxsw_sx_port_get_stats64, + .ndo_get_phys_port_name = mlxsw_sx_port_get_phys_port_name, }; static void mlxsw_sx_port_get_drvinfo(struct net_device *dev, @@ -410,7 +455,7 @@ static void mlxsw_sx_port_get_drvinfo(struct net_device *dev, struct mlxsw_sx_port_hw_stats { char str[ETH_GSTRING_LEN]; - u64 (*getter)(char *payload); + u64 (*getter)(const char *payload); }; static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = { @@ -642,6 +687,7 @@ static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = { }; #define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode) +#define MLXSW_SX_PORT_BASE_SPEED 10000 /* Mb/s */ static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto) { @@ -741,14 +787,14 @@ static int mlxsw_sx_port_get_settings(struct net_device *dev, u32 eth_proto_oper; int err; - mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0); + mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0); err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl); if (err) { netdev_err(dev, "Failed to get proto"); return err; } - mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, - &eth_proto_admin, &eth_proto_oper); + mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, + &eth_proto_admin, &eth_proto_oper); cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) | mlxsw_sx_from_ptys_supported_link(eth_proto_cap) | @@ -789,6 +835,18 @@ static u32 mlxsw_sx_to_ptys_speed(u32 speed) return ptys_proto; } +static u32 mlxsw_sx_to_ptys_upper_speed(u32 upper_speed) +{ + u32 ptys_proto = 0; + int i; + + for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) { + if (mlxsw_sx_port_link_mode[i].speed <= upper_speed) + ptys_proto |= mlxsw_sx_port_link_mode[i].mask; + } + return ptys_proto; +} + static int mlxsw_sx_port_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { @@ -808,13 +866,14 @@ static int mlxsw_sx_port_set_settings(struct net_device *dev, mlxsw_sx_to_ptys_advert_link(cmd->advertising) : mlxsw_sx_to_ptys_speed(speed); - mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0); + mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0); err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl); if (err) { netdev_err(dev, "Failed to get proto"); return err; } - mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL); + mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, + NULL); eth_proto_new = eth_proto_new & eth_proto_cap; if (!eth_proto_new) { @@ -824,7 
+883,8 @@ static int mlxsw_sx_port_set_settings(struct net_device *dev, if (eth_proto_new == eth_proto_admin) return 0; - mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, eth_proto_new); + mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, + eth_proto_new); err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl); if (err) { netdev_err(dev, "Failed to set proto admin"); @@ -888,7 +948,7 @@ static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = { static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx) { - char spad_pl[MLXSW_REG_SPAD_LEN]; + char spad_pl[MLXSW_REG_SPAD_LEN] = {0}; int err; err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl); @@ -935,13 +995,28 @@ static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port, return err; } -static int mlxsw_sx_port_speed_set(struct mlxsw_sx_port *mlxsw_sx_port, - u32 speed) +static int mlxsw_sx_port_ib_speed_set(struct mlxsw_sx_port *mlxsw_sx_port, + u16 speed, u16 width) { struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; char ptys_pl[MLXSW_REG_PTYS_LEN]; - mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, speed); + mlxsw_reg_ptys_ib_pack(ptys_pl, mlxsw_sx_port->local_port, speed, + width); + return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl); +} + +static int +mlxsw_sx_port_speed_by_width_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 width) +{ + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + u32 upper_speed = MLXSW_SX_PORT_BASE_SPEED * width; + char ptys_pl[MLXSW_REG_PTYS_LEN]; + u32 eth_proto_admin; + + eth_proto_admin = mlxsw_sx_to_ptys_upper_speed(upper_speed); + mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, + eth_proto_admin); return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl); } @@ -956,20 +1031,22 @@ mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port, return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl); } -static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) +static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port, + u8 module, u8 width) { struct mlxsw_sx_port *mlxsw_sx_port; struct net_device *dev; - bool usable; int err; dev = alloc_etherdev(sizeof(struct mlxsw_sx_port)); if (!dev) return -ENOMEM; + SET_NETDEV_DEV(dev, mlxsw_sx->bus_info->dev); mlxsw_sx_port = netdev_priv(dev); mlxsw_sx_port->dev = dev; mlxsw_sx_port->mlxsw_sx = mlxsw_sx; mlxsw_sx_port->local_port = local_port; + mlxsw_sx_port->mapping.module = module; mlxsw_sx_port->pcpu_stats = netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats); @@ -994,24 +1071,14 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | NETIF_F_VLAN_CHALLENGED; + dev->min_mtu = 0; + dev->max_mtu = ETH_MAX_MTU; + /* Each packet needs to have a Tx header (metadata) on top of all other * headers. 
*/ dev->needed_headroom = MLXSW_TXHDR_LEN; - err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable); - if (err) { - dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to check module\n", - mlxsw_sx_port->local_port); - goto err_port_module_check; - } - - if (!usable) { - dev_dbg(mlxsw_sx->bus_info->dev, "Port %d: Not usable, skipping initialization\n", - mlxsw_sx_port->local_port); - goto port_not_usable; - } - err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port); if (err) { dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n", @@ -1026,15 +1093,14 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) goto err_port_swid_set; } - err = mlxsw_sx_port_speed_set(mlxsw_sx_port, - MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4); + err = mlxsw_sx_port_speed_by_width_set(mlxsw_sx_port, width); if (err) { dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n", mlxsw_sx_port->local_port); goto err_port_speed_set; } - err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, ETH_DATA_LEN); + err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, ETH_DATA_LEN); if (err) { dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n", mlxsw_sx_port->local_port); @@ -1069,19 +1135,11 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) goto err_register_netdev; } - err = mlxsw_core_port_init(mlxsw_sx->core, &mlxsw_sx_port->core_port, - mlxsw_sx_port->local_port, dev, false, 0); - if (err) { - dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n", - mlxsw_sx_port->local_port); - goto err_core_port_init; - } - + mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port, + mlxsw_sx_port, dev, false, 0); mlxsw_sx->ports[local_port] = mlxsw_sx_port; return 0; -err_core_port_init: - unregister_netdev(dev); err_register_netdev: err_port_mac_learning_mode_set: err_port_stp_state_set: @@ -1091,8 +1149,6 @@ err_port_speed_set: mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT); err_port_swid_set: err_port_system_port_mapping_set: -port_not_usable: -err_port_module_check: err_dev_addr_get: free_percpu(mlxsw_sx_port->pcpu_stats); err_alloc_stats: @@ -1100,31 +1156,168 @@ err_alloc_stats: return err; } -static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port) +static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port, + u8 module, u8 width) +{ + int err; + + err = mlxsw_core_port_init(mlxsw_sx->core, local_port); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n", + local_port); + return err; + } + err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module, width); + if (err) + goto err_port_create; + + return 0; + +err_port_create: + mlxsw_core_port_fini(mlxsw_sx->core, local_port); + return err; +} + +static void __mlxsw_sx_port_eth_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port) { struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port]; - if (!mlxsw_sx_port) - return; - mlxsw_core_port_fini(&mlxsw_sx_port->core_port); + mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx); unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */ + mlxsw_sx->ports[local_port] = NULL; mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT); free_percpu(mlxsw_sx_port->pcpu_stats); free_netdev(mlxsw_sx_port->dev); } +static bool mlxsw_sx_port_created(struct mlxsw_sx *mlxsw_sx, u8 local_port) +{ + return mlxsw_sx->ports[local_port] != NULL; +} + +static int __mlxsw_sx_port_ib_create(struct mlxsw_sx *mlxsw_sx, u8 
local_port, + u8 module, u8 width) +{ + struct mlxsw_sx_port *mlxsw_sx_port; + int err; + + mlxsw_sx_port = kzalloc(sizeof(*mlxsw_sx_port), GFP_KERNEL); + if (!mlxsw_sx_port) + return -ENOMEM; + mlxsw_sx_port->mlxsw_sx = mlxsw_sx; + mlxsw_sx_port->local_port = local_port; + mlxsw_sx_port->mapping.module = module; + + err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n", + mlxsw_sx_port->local_port); + goto err_port_system_port_mapping_set; + } + + /* Adding port to InfiniBand swid (1) */ + err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 1); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n", + mlxsw_sx_port->local_port); + goto err_port_swid_set; + } + + /* Expose the IB port number as its front panel name */ + err = mlxsw_sx_port_ib_port_set(mlxsw_sx_port, module + 1); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set IB port\n", + mlxsw_sx_port->local_port); + goto err_port_ib_set; + } + + /* Supports all speeds from SDR to FDR (bitmask) and bus widths + * of 1x, 2x and 4x (3-bit bitmask) + */ + err = mlxsw_sx_port_ib_speed_set(mlxsw_sx_port, + MLXSW_REG_PTYS_IB_SPEED_EDR - 1, + BIT(3) - 1); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n", + mlxsw_sx_port->local_port); + goto err_port_speed_set; + } + + /* Change to the maximum MTU the device supports; the SMA will take + * care of the active MTU + */ + err = mlxsw_sx_port_mtu_ib_set(mlxsw_sx_port, MLXSW_IB_DEFAULT_MTU); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n", + mlxsw_sx_port->local_port); + goto err_port_mtu_set; + } + + err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to change admin state to UP\n", + mlxsw_sx_port->local_port); + goto err_port_admin_set; + } + + mlxsw_core_port_ib_set(mlxsw_sx->core, mlxsw_sx_port->local_port, + mlxsw_sx_port); + mlxsw_sx->ports[local_port] = mlxsw_sx_port; + return 0; + +err_port_admin_set: +err_port_mtu_set: +err_port_speed_set: +err_port_ib_set: + mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT); +err_port_swid_set: +err_port_system_port_mapping_set: + kfree(mlxsw_sx_port); + return err; +} + +static void __mlxsw_sx_port_ib_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port) +{ + struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port]; + + mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx); + mlxsw_sx->ports[local_port] = NULL; + mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false); + mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT); + kfree(mlxsw_sx_port); +} + +static void __mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port) +{ + enum devlink_port_type port_type = + mlxsw_core_port_type_get(mlxsw_sx->core, local_port); + + if (port_type == DEVLINK_PORT_TYPE_ETH) + __mlxsw_sx_port_eth_remove(mlxsw_sx, local_port); + else if (port_type == DEVLINK_PORT_TYPE_IB) + __mlxsw_sx_port_ib_remove(mlxsw_sx, local_port); +} + +static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port) +{ + __mlxsw_sx_port_remove(mlxsw_sx, local_port); + mlxsw_core_port_fini(mlxsw_sx->core, local_port); +} + static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx) { int i; for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) - mlxsw_sx_port_remove(mlxsw_sx, i); + if (mlxsw_sx_port_created(mlxsw_sx, i)) + mlxsw_sx_port_remove(mlxsw_sx, i); 
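/* [Editor's aside, not part of the patch: both IB port-create paths above
 * program the speed as "MLXSW_REG_PTYS_IB_SPEED_EDR - 1" and the width as
 * "BIT(3) - 1". Assuming the PTYS IB speed bits are assigned in ascending
 * order (SDR = BIT(0) up to EDR = BIT(5)), subtracting 1 from a single bit
 * yields a mask with every lower bit set:
 *
 *	MLXSW_REG_PTYS_IB_SPEED_EDR - 1 == SDR | DDR | QDR | FDR10 | FDR
 *	BIT(3) - 1 == 0x7               == the 1x, 2x and 4x width bits]
 */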
kfree(mlxsw_sx->ports); } static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx) { size_t alloc_size; + u8 module, width; int i; int err; @@ -1134,25 +1327,57 @@ static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx) return -ENOMEM; for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { - err = mlxsw_sx_port_create(mlxsw_sx, i); + err = mlxsw_sx_port_module_info_get(mlxsw_sx, i, &module, + &width); + if (err) + goto err_port_module_info_get; + if (!width) + continue; + err = mlxsw_sx_port_eth_create(mlxsw_sx, i, module, width); if (err) goto err_port_create; } return 0; err_port_create: +err_port_module_info_get: for (i--; i >= 1; i--) - mlxsw_sx_port_remove(mlxsw_sx, i); + if (mlxsw_sx_port_created(mlxsw_sx, i)) + mlxsw_sx_port_remove(mlxsw_sx, i); kfree(mlxsw_sx->ports); return err; } +static void mlxsw_sx_pude_eth_event_func(struct mlxsw_sx_port *mlxsw_sx_port, + enum mlxsw_reg_pude_oper_status status) +{ + if (status == MLXSW_PORT_OPER_STATUS_UP) { + netdev_info(mlxsw_sx_port->dev, "link up\n"); + netif_carrier_on(mlxsw_sx_port->dev); + } else { + netdev_info(mlxsw_sx_port->dev, "link down\n"); + netif_carrier_off(mlxsw_sx_port->dev); + } +} + +static void mlxsw_sx_pude_ib_event_func(struct mlxsw_sx_port *mlxsw_sx_port, + enum mlxsw_reg_pude_oper_status status) +{ + if (status == MLXSW_PORT_OPER_STATUS_UP) + pr_info("ib link for port %d - up\n", + mlxsw_sx_port->mapping.module + 1); + else + pr_info("ib link for port %d - down\n", + mlxsw_sx_port->mapping.module + 1); +} + static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg, char *pude_pl, void *priv) { struct mlxsw_sx *mlxsw_sx = priv; struct mlxsw_sx_port *mlxsw_sx_port; enum mlxsw_reg_pude_oper_status status; + enum devlink_port_type port_type; u8 local_port; local_port = mlxsw_reg_pude_local_port_get(pude_pl); @@ -1164,59 +1389,11 @@ static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg, } status = mlxsw_reg_pude_oper_status_get(pude_pl); - if (status == MLXSW_PORT_OPER_STATUS_UP) { - netdev_info(mlxsw_sx_port->dev, "link up\n"); - netif_carrier_on(mlxsw_sx_port->dev); - } else { - netdev_info(mlxsw_sx_port->dev, "link down\n"); - netif_carrier_off(mlxsw_sx_port->dev); - } -} - -static struct mlxsw_event_listener mlxsw_sx_pude_event = { - .func = mlxsw_sx_pude_event_func, - .trap_id = MLXSW_TRAP_ID_PUDE, -}; - -static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx, - enum mlxsw_event_trap_id trap_id) -{ - struct mlxsw_event_listener *el; - char hpkt_pl[MLXSW_REG_HPKT_LEN]; - int err; - - switch (trap_id) { - case MLXSW_TRAP_ID_PUDE: - el = &mlxsw_sx_pude_event; - break; - } - err = mlxsw_core_event_listener_register(mlxsw_sx->core, el, mlxsw_sx); - if (err) - return err; - - mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id); - err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); - if (err) - goto err_event_trap_set; - - return 0; - -err_event_trap_set: - mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx); - return err; -} - -static void mlxsw_sx_event_unregister(struct mlxsw_sx *mlxsw_sx, - enum mlxsw_event_trap_id trap_id) -{ - struct mlxsw_event_listener *el; - - switch (trap_id) { - case MLXSW_TRAP_ID_PUDE: - el = &mlxsw_sx_pude_event; - break; - } - mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx); + port_type = mlxsw_core_port_type_get(mlxsw_sx->core, local_port); + if (port_type == DEVLINK_PORT_TYPE_ETH) + mlxsw_sx_pude_eth_event_func(mlxsw_sx_port, status); + else if (port_type == DEVLINK_PORT_TYPE_IB) + 
mlxsw_sx_pude_ib_event_func(mlxsw_sx_port, status); } static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port, @@ -1244,142 +1421,110 @@ static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port, netif_receive_skb(skb); } -static const struct mlxsw_rx_listener mlxsw_sx_rx_listener[] = { - { - .func = mlxsw_sx_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_FDB_MC, - }, - /* Traps for specific L2 packet types, not trapped as FDB MC */ - { - .func = mlxsw_sx_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_STP, - }, - { - .func = mlxsw_sx_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_LACP, - }, - { - .func = mlxsw_sx_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_EAPOL, - }, - { - .func = mlxsw_sx_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_LLDP, - }, - { - .func = mlxsw_sx_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_MMRP, - }, - { - .func = mlxsw_sx_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_MVRP, - }, - { - .func = mlxsw_sx_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_RPVST, - }, - { - .func = mlxsw_sx_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_DHCP, - }, - { - .func = mlxsw_sx_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_IGMP_QUERY, - }, - { - .func = mlxsw_sx_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT, - }, - { - .func = mlxsw_sx_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT, - }, - { - .func = mlxsw_sx_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE, - }, - { - .func = mlxsw_sx_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT, - }, +static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port, + enum devlink_port_type new_type) +{ + struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core); + u8 module, width; + int err; + + if (new_type == DEVLINK_PORT_TYPE_AUTO) + return -EOPNOTSUPP; + + __mlxsw_sx_port_remove(mlxsw_sx, local_port); + err = mlxsw_sx_port_module_info_get(mlxsw_sx, local_port, &module, + &width); + if (err) + goto err_port_module_info_get; + + if (new_type == DEVLINK_PORT_TYPE_ETH) + err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module, + width); + else if (new_type == DEVLINK_PORT_TYPE_IB) + err = __mlxsw_sx_port_ib_create(mlxsw_sx, local_port, module, + width); + +err_port_module_info_get: + return err; +} + +#define MLXSW_SX_RXL(_trap_id) \ + MLXSW_RXL(mlxsw_sx_rx_listener_func, _trap_id, TRAP_TO_CPU, \ + false, SX2_RX, FORWARD) + +static const struct mlxsw_listener mlxsw_sx_listener[] = { + MLXSW_EVENTL(mlxsw_sx_pude_event_func, PUDE, EMAD), + MLXSW_SX_RXL(FDB_MC), + MLXSW_SX_RXL(STP), + MLXSW_SX_RXL(LACP), + MLXSW_SX_RXL(EAPOL), + MLXSW_SX_RXL(LLDP), + MLXSW_SX_RXL(MMRP), + MLXSW_SX_RXL(MVRP), + MLXSW_SX_RXL(RPVST), + MLXSW_SX_RXL(DHCP), + MLXSW_SX_RXL(IGMP_QUERY), + MLXSW_SX_RXL(IGMP_V1_REPORT), + MLXSW_SX_RXL(IGMP_V2_REPORT), + MLXSW_SX_RXL(IGMP_V2_LEAVE), + MLXSW_SX_RXL(IGMP_V3_REPORT), }; static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx) { char htgt_pl[MLXSW_REG_HTGT_LEN]; - char hpkt_pl[MLXSW_REG_HPKT_LEN]; int i; int 
err; - mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX); + mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX, + MLXSW_REG_HTGT_INVALID_POLICER, + MLXSW_REG_HTGT_DEFAULT_PRIORITY, + MLXSW_REG_HTGT_DEFAULT_TC); + mlxsw_reg_htgt_local_path_rdq_set(htgt_pl, + MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_RX); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl); if (err) return err; - mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL); + mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL, + MLXSW_REG_HTGT_INVALID_POLICER, + MLXSW_REG_HTGT_DEFAULT_PRIORITY, + MLXSW_REG_HTGT_DEFAULT_TC); + mlxsw_reg_htgt_local_path_rdq_set(htgt_pl, + MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_CTRL); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl); if (err) return err; - for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) { - err = mlxsw_core_rx_listener_register(mlxsw_sx->core, - &mlxsw_sx_rx_listener[i], - mlxsw_sx); + for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) { + err = mlxsw_core_trap_register(mlxsw_sx->core, + &mlxsw_sx_listener[i], + mlxsw_sx); if (err) - goto err_rx_listener_register; + goto err_listener_register; - mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, - mlxsw_sx_rx_listener[i].trap_id); - err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); - if (err) - goto err_rx_trap_set; } return 0; -err_rx_trap_set: - mlxsw_core_rx_listener_unregister(mlxsw_sx->core, - &mlxsw_sx_rx_listener[i], - mlxsw_sx); -err_rx_listener_register: +err_listener_register: for (i--; i >= 0; i--) { - mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, - mlxsw_sx_rx_listener[i].trap_id); - mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); - - mlxsw_core_rx_listener_unregister(mlxsw_sx->core, - &mlxsw_sx_rx_listener[i], - mlxsw_sx); + mlxsw_core_trap_unregister(mlxsw_sx->core, + &mlxsw_sx_listener[i], + mlxsw_sx); } return err; } static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx) { - char hpkt_pl[MLXSW_REG_HPKT_LEN]; int i; - for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) { - mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, - mlxsw_sx_rx_listener[i].trap_id); - mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); - - mlxsw_core_rx_listener_unregister(mlxsw_sx->core, - &mlxsw_sx_rx_listener[i], - mlxsw_sx); + for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) { + mlxsw_core_trap_unregister(mlxsw_sx->core, + &mlxsw_sx_listener[i], + mlxsw_sx); } } @@ -1451,6 +1596,20 @@ static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx) return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl); } +static int mlxsw_sx_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) +{ + char htgt_pl[MLXSW_REG_HTGT_LEN]; + + mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD, + MLXSW_REG_HTGT_INVALID_POLICER, + MLXSW_REG_HTGT_DEFAULT_PRIORITY, + MLXSW_REG_HTGT_DEFAULT_TC); + mlxsw_reg_htgt_swid_set(htgt_pl, MLXSW_PORT_SWID_ALL_SWIDS); + mlxsw_reg_htgt_local_path_rdq_set(htgt_pl, + MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_EMAD); + return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); +} + static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info) { @@ -1472,16 +1631,10 @@ static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core, return err; } - err = mlxsw_sx_event_register(mlxsw_sx, MLXSW_TRAP_ID_PUDE); - if (err) { - dev_err(mlxsw_sx->bus_info->dev, "Failed to register for PUDE events\n"); - goto err_event_register; - } - err = 
mlxsw_sx_traps_init(mlxsw_sx); if (err) { - dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps for RX\n"); - goto err_rx_listener_register; + dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps\n"); + goto err_listener_register; } err = mlxsw_sx_flood_init(mlxsw_sx); @@ -1494,9 +1647,7 @@ static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core, err_flood_init: mlxsw_sx_traps_fini(mlxsw_sx); -err_rx_listener_register: - mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE); -err_event_register: +err_listener_register: mlxsw_sx_ports_remove(mlxsw_sx); return err; } @@ -1506,7 +1657,6 @@ static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core) struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core); mlxsw_sx_traps_fini(mlxsw_sx); - mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE); mlxsw_sx_ports_remove(mlxsw_sx); } @@ -1529,36 +1679,66 @@ static struct mlxsw_config_profile mlxsw_sx_config_profile = { .used_flood_mode = 1, .flood_mode = 3, .used_max_ib_mc = 1, - .max_ib_mc = 0, + .max_ib_mc = 6, .used_max_pkey = 1, .max_pkey = 0, .swid_config = { { .used_type = 1, .type = MLXSW_PORT_SWID_TYPE_ETH, + }, + { + .used_type = 1, + .type = MLXSW_PORT_SWID_TYPE_IB, } }, .resource_query_enable = 0, }; static struct mlxsw_driver mlxsw_sx_driver = { - .kind = MLXSW_DEVICE_KIND_SWITCHX2, - .owner = THIS_MODULE, + .kind = mlxsw_sx_driver_name, .priv_size = sizeof(struct mlxsw_sx), .init = mlxsw_sx_init, .fini = mlxsw_sx_fini, + .basic_trap_groups_set = mlxsw_sx_basic_trap_groups_set, .txhdr_construct = mlxsw_sx_txhdr_construct, .txhdr_len = MLXSW_TXHDR_LEN, .profile = &mlxsw_sx_config_profile, + .port_type_set = mlxsw_sx_port_type_set, +}; + +static const struct pci_device_id mlxsw_sx_pci_id_table[] = { + {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0}, + {0, }, +}; + +static struct pci_driver mlxsw_sx_pci_driver = { + .name = mlxsw_sx_driver_name, + .id_table = mlxsw_sx_pci_id_table, }; static int __init mlxsw_sx_module_init(void) { - return mlxsw_core_driver_register(&mlxsw_sx_driver); + int err; + + err = mlxsw_core_driver_register(&mlxsw_sx_driver); + if (err) + return err; + + err = mlxsw_pci_driver_register(&mlxsw_sx_pci_driver); + if (err) + goto err_pci_driver_register; + + return 0; + +err_pci_driver_register: + mlxsw_core_driver_unregister(&mlxsw_sx_driver); + return err; } static void __exit mlxsw_sx_module_exit(void) { + mlxsw_pci_driver_unregister(&mlxsw_sx_pci_driver); mlxsw_core_driver_unregister(&mlxsw_sx_driver); } @@ -1568,4 +1748,4 @@ module_exit(mlxsw_sx_module_exit); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); MODULE_DESCRIPTION("Mellanox SwitchX-2 driver"); -MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2); +MODULE_DEVICE_TABLE(pci, mlxsw_sx_pci_id_table); diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index ed8e30186400..7ab275deacac 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -62,6 +62,7 @@ enum { MLXSW_TRAP_ID_OSPF = 0x55, MLXSW_TRAP_ID_IP2ME = 0x5F, MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, + MLXSW_TRAP_ID_BGP_IPV4 = 0x88, MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, MLXSW_TRAP_ID_MAX = 0x1FF |
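Editor's note: the new port_type_set op, together with the second swid_config entry (MLXSW_PORT_SWID_TYPE_IB) added to the SwitchX-2 profile above, is what allows a port to be torn down and recreated as either an Ethernet or an InfiniBand port at runtime; DEVLINK_PORT_TYPE_AUTO is rejected with -EOPNOTSUPP because the driver cannot infer the desired type. From user space this would be driven through the standard devlink tool, e.g. something like "devlink port set pci/0000:03:00.0/1 type ib" (the PCI address and port index here are illustrative).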