28 files changed, 227 insertions, 124 deletions
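The functional core of this series is a new force-polling path through the mlx5 command interface: cmd_exec() gains a force_polling argument, mlx5_cmd_work_ent grows a polling flag, and both cmd_work_handler() and wait_func() honor it even when the interface is in event mode. As a minimal sketch (assumed usage, not part of the patch itself), a caller that must issue a command after interrupts may already be gone — the force-teardown path added in fw.c below — would use the new exported entry point roughly like this, with dev an mlx5_core_dev already in scope:

	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
	int err;

	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
	MLX5_SET(teardown_hca_in, in, profile,
		 MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE);

	/* Polls the command descriptor for completion instead of waiting
	 * for an EQE, so it works even when command-EQ interrupts are
	 * unavailable (e.g. during shutdown).
	 */
	err = mlx5_cmd_exec_polling(dev, in, sizeof(in), out, sizeof(out));

The firmware reports the outcome in teardown_hca_out.force_state, which mlx5_cmd_force_teardown_hca() in the fw.c hunk below converts to -EIO on failure.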
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 852a6a75db98..2ab505d1e8e3 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -439,7 +439,7 @@ static void get_atomic_caps(struct mlx5_ib_dev *dev, u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations); u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp); u8 atomic_req_8B_endianness_mode = - MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianess_mode); + MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode); /* Check if HW supports 8 bytes standard atomic operations and capable * of host endianness respond diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index 66bd213f35ce..3c95f7f53802 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -274,7 +274,6 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) } EXPORT_SYMBOL_GPL(mlx5_db_free); - void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas) { u64 addr; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 10d282841f5b..4d5bd01f1ebb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -217,7 +217,6 @@ static void free_cmd(struct mlx5_cmd_work_ent *ent) kfree(ent); } - static int verify_signature(struct mlx5_cmd_work_ent *ent) { struct mlx5_cmd_mailbox *next = ent->out->next; @@ -786,6 +785,8 @@ static void cmd_work_handler(struct work_struct *work) struct mlx5_cmd_layout *lay; struct semaphore *sem; unsigned long flags; + bool poll_cmd = ent->polling; + sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; down(sem); @@ -846,7 +847,7 @@ static void cmd_work_handler(struct work_struct *work) iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); mmiowb(); /* if not in polling don't use ent after this point */ - if (cmd->mode == CMD_MODE_POLLING) { + if (cmd->mode == CMD_MODE_POLLING || poll_cmd) { poll_timeout(ent); /* make sure we read the descriptor after ownership is SW */ rmb(); @@ -874,7 +875,7 @@ static const char *deliv_status_to_str(u8 status) case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: return "command input length error"; case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: - return "command ouput length error"; + return "command output length error"; case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: return "reserved fields not cleared"; case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: @@ -890,7 +891,7 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) struct mlx5_cmd *cmd = &dev->cmd; int err; - if (cmd->mode == CMD_MODE_POLLING) { + if (cmd->mode == CMD_MODE_POLLING || ent->polling) { wait_for_completion(&ent->done); } else if (!wait_for_completion_timeout(&ent->done, timeout)) { ent->ret = -ETIMEDOUT; @@ -918,7 +919,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, struct mlx5_cmd_msg *out, void *uout, int uout_size, mlx5_cmd_cbk_t callback, void *context, int page_queue, u8 *status, - u8 token) + u8 token, bool force_polling) { struct mlx5_cmd *cmd = &dev->cmd; struct mlx5_cmd_work_ent *ent; @@ -936,6 +937,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, return PTR_ERR(ent); ent->token = token; + ent->polling = force_polling; if (!callback) init_completion(&ent->done); @@ -1001,7 +1003,6 @@ static ssize_t dbg_write(struct file *filp, 
const char __user *buf, return err ? err : count; } - static const struct file_operations fops = { .owner = THIS_MODULE, .open = simple_open, @@ -1153,7 +1154,7 @@ err_alloc: } static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, - struct mlx5_cmd_msg *msg) + struct mlx5_cmd_msg *msg) { struct mlx5_cmd_mailbox *head = msg->next; struct mlx5_cmd_mailbox *next; @@ -1537,7 +1538,8 @@ static int is_manage_pages(void *in) } static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, - int out_size, mlx5_cmd_cbk_t callback, void *context) + int out_size, mlx5_cmd_cbk_t callback, void *context, + bool force_polling) { struct mlx5_cmd_msg *inb; struct mlx5_cmd_msg *outb; @@ -1582,7 +1584,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, } err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, - pages_queue, &status, token); + pages_queue, &status, token, force_polling); if (err) goto out_out; @@ -1610,7 +1612,7 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, { int err; - err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL); + err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false); return err ? : mlx5_cmd_check(dev, in, out); } EXPORT_SYMBOL(mlx5_cmd_exec); @@ -1619,10 +1621,22 @@ int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size, mlx5_cmd_cbk_t callback, void *context) { - return cmd_exec(dev, in, in_size, out, out_size, callback, context); + return cmd_exec(dev, in, in_size, out, out_size, callback, context, + false); } EXPORT_SYMBOL(mlx5_cmd_exec_cb); +int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, + void *out, int out_size) +{ + int err; + + err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true); + + return err ? 
: mlx5_cmd_check(dev, in, out); +} +EXPORT_SYMBOL(mlx5_cmd_exec_polling); + static void destroy_msg_cache(struct mlx5_core_dev *dev) { struct cmd_msg_cache *ch; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index de40b6cfee95..7ecadb501743 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -168,7 +168,6 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count, return ret; } - static ssize_t average_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { @@ -466,7 +465,6 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count, return -EINVAL; } - if (is_str) ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field); else @@ -562,7 +560,6 @@ void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) rem_res_tree(qp->dbg); } - int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq) { int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index a0516b0a5273..8094e78292de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -822,7 +822,7 @@ void mlx5e_rx_am(struct mlx5e_rq *rq); void mlx5e_rx_am_work(struct work_struct *work); struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode); -void mlx5e_update_stats(struct mlx5e_priv *priv); +void mlx5e_update_stats(struct mlx5e_priv *priv, bool full); int mlx5e_create_flow_steering(struct mlx5e_priv *priv); void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index f4017c06ddd2..12d3ced61114 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -178,6 +178,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv, struct mlx5_flow_destination dest; MLX5_DECLARE_FLOW_ACT(flow_act); struct mlx5_flow_spec *spec; + enum mlx5e_traffic_types tt; int err = 0; spec = kvzalloc(sizeof(*spec), GFP_KERNEL); @@ -187,24 +188,16 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv, } dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; - switch (type) { - case ARFS_IPV4_TCP: - dest.tir_num = tir[MLX5E_TT_IPV4_TCP].tirn; - break; - case ARFS_IPV4_UDP: - dest.tir_num = tir[MLX5E_TT_IPV4_UDP].tirn; - break; - case ARFS_IPV6_TCP: - dest.tir_num = tir[MLX5E_TT_IPV6_TCP].tirn; - break; - case ARFS_IPV6_UDP: - dest.tir_num = tir[MLX5E_TT_IPV6_UDP].tirn; - break; - default: + tt = arfs_get_tt(type); + if (tt == -EINVAL) { + netdev_err(priv->netdev, "%s: bad arfs_type: %d\n", + __func__, type); err = -EINVAL; goto out; } + dest.tir_num = tir[tt].tirn; + arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec, &flow_act, &dest, 1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 46e56ec4c26f..ece3fb147e3e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -145,7 +145,6 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) int inlen; void *in; - inlen = MLX5_ST_SZ_BYTES(modify_tir_in); in = kvzalloc(inlen, GFP_KERNEL); if (!in) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index b4514f247402..216752070391 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -311,7 +311,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, mutex_lock(&priv->state_lock); if (test_bit(MLX5E_STATE_OPENED, &priv->state)) - mlx5e_update_stats(priv); + mlx5e_update_stats(priv, true); channels = &priv->channels; mutex_unlock(&priv->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 7acc4fba7ece..dfccb5305e9c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -170,7 +170,6 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - switch (rule_type) { case MLX5E_VLAN_RULE_TYPE_UNTAGGED: rule_p = &priv->fs.vlan.untagged_rule; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index c8f3aefe735d..06eb7a8b487c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -124,7 +124,8 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv) u8 port_state; port_state = mlx5_query_vport_state(mdev, - MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0); + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, + 0); if (port_state == VPORT_STATE_UP) { netdev_info(priv->netdev, "Link up\n"); @@ -243,18 +244,14 @@ static void mlx5e_update_vport_counters(struct mlx5e_priv *priv) mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); } -static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) +static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full) { struct mlx5e_pport_stats *pstats = &priv->stats.pport; struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0}; int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); int prio; void *out; - u32 *in; - - in = kvzalloc(sz, GFP_KERNEL); - if (!in) - return; MLX5_SET(ppcnt_reg, in, local_port, 1); @@ -262,6 +259,9 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + if (!full) + return; + out = pstats->RFC_2863_counters; MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); @@ -287,52 +287,55 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); } - - kvfree(in); } static void mlx5e_update_q_counter(struct mlx5e_priv *priv) { struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt; + u32 out[MLX5_ST_SZ_DW(query_q_counter_out)]; + int err; if (!priv->q_counter) return; - mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter, - &qcnt->rx_out_of_buffer); + err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out)); + if (err) + return; + + qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer); } static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv) { struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie; struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0}; int sz = MLX5_ST_SZ_BYTES(mpcnt_reg); void *out; - u32 *in; if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group)) return; - in = 
kvzalloc(sz, GFP_KERNEL); - if (!in) - return; - out = pcie_stats->pcie_perf_counters; MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); - - kvfree(in); } -void mlx5e_update_stats(struct mlx5e_priv *priv) +void mlx5e_update_stats(struct mlx5e_priv *priv, bool full) { - mlx5e_update_pcie_counters(priv); - mlx5e_update_pport_counters(priv); + if (full) + mlx5e_update_pcie_counters(priv); + mlx5e_update_pport_counters(priv, full); mlx5e_update_vport_counters(priv); mlx5e_update_q_counter(priv); mlx5e_update_sw_counters(priv); } +static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv) +{ + mlx5e_update_stats(priv, false); +} + void mlx5e_update_stats_work(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); @@ -3067,7 +3070,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) */ stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets); - } static void mlx5e_set_rx_mode(struct net_device *dev) @@ -3727,7 +3729,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable)) mlx5_core_warn(mdev, "Self loop back prevention is not supported\n"); if (!MLX5_CAP_GEN(mdev, cq_moderation)) - mlx5_core_warn(mdev, "CQ modiration is not supported\n"); + mlx5_core_warn(mdev, "CQ moderation is not supported\n"); return 0; } @@ -3860,7 +3862,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, /* set CQE compression */ params->rx_cqe_compress_def = false; if (MLX5_CAP_GEN(mdev, cqe_compression) && - MLX5_CAP_GEN(mdev, vport_group_manager)) + MLX5_CAP_GEN(mdev, vport_group_manager)) params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw); MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def); @@ -4211,7 +4213,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = { .cleanup_tx = mlx5e_cleanup_nic_tx, .enable = mlx5e_nic_enable, .disable = mlx5e_nic_disable, - .update_stats = mlx5e_update_stats, + .update_stats = mlx5e_update_ndo_stats, .max_nch = mlx5e_get_max_num_channels, .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe, .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 70c2b8d020bd..01798e1ab667 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1019,7 +1019,6 @@ err_destroy_netdev: mlx5e_destroy_netdev(netdev_priv(netdev)); kfree(rpriv); return err; - } static void diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index f81c3aa60b46..fda247587ff6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -268,7 +268,7 @@ static const struct counter_desc pport_2819_stats_desc[] = { }; static const struct counter_desc pport_phy_statistical_stats_desc[] = { - { "rx_symbol_errors_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) }, + { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) }, { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) }, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index ab3bb026ff9e..ef3e1918d8cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -245,7 +245,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int fsz = skb_frag_size(frag); dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, - DMA_TO_DEVICE); + DMA_TO_DEVICE); if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 0ed8e90ba54f..af51a5d2b912 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -157,6 +157,8 @@ static const char *eqe_type_str(u8 type) return "MLX5_EVENT_TYPE_PAGE_FAULT"; case MLX5_EVENT_TYPE_PPS_EVENT: return "MLX5_EVENT_TYPE_PPS_EVENT"; + case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE: + return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE"; case MLX5_EVENT_TYPE_FPGA_ERROR: return "MLX5_EVENT_TYPE_FPGA_ERROR"; default: @@ -189,7 +191,7 @@ static void eq_update_ci(struct mlx5_eq *eq, int arm) { __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2); u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24); - __raw_writel((__force u32) cpu_to_be32(val), addr); + __raw_writel((__force u32)cpu_to_be32(val), addr); /* We still want ordering, just not swabbing, so add a barrier */ mb(); } @@ -675,7 +677,6 @@ int mlx5_eq_init(struct mlx5_core_dev *dev) return err; } - void mlx5_eq_cleanup(struct mlx5_core_dev *dev) { mlx5_eq_debugfs_cleanup(dev); @@ -687,7 +688,6 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) u64 async_event_mask = MLX5_ASYNC_EVENT_MASK; int err; - if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH && MLX5_CAP_GEN(dev, vport_group_manager) && mlx5_core_is_pf(dev)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 37927156f258..89bfda419efe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1217,7 +1217,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n", vport->vport); return -EPERM; - } esw_vport_cleanup_ingress_rules(esw, vport); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 3795943ef2d1..b8030b5707a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -691,7 +691,7 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn) flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec, - &flow_act, &dest, 1); + &flow_act, &dest, 1); if (IS_ERR(flow_rule)) { esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule)); goto out; @@ -1093,7 +1093,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap) if (err) { esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err); esw->offloads.encap = !encap; - (void) esw_create_offloads_fast_fdb_table(esw); + (void)esw_create_offloads_fast_fdb_table(esw); } return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 6380c2db355a..e8690fe46bf2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -104,6 +104,7 @@ struct node_caps { size_t arr_sz; long *caps; }; + static 
struct init_tree_node { enum fs_node_type type; struct init_tree_node *children; @@ -1858,7 +1859,6 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering) static int init_root_ns(struct mlx5_flow_steering *steering) { - steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); if (!steering->root_ns) goto cleanup; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 1bc14d0fded8..e9489e8d08bb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -195,3 +195,31 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev) MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA); return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } + +int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev) +{ + u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0}; + int force_state; + int ret; + + if (!MLX5_CAP_GEN(dev, force_teardown)) { + mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n"); + return -EOPNOTSUPP; + } + + MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA); + MLX5_SET(teardown_hca_in, in, profile, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE); + + ret = mlx5_cmd_exec_polling(dev, in, sizeof(in), out, sizeof(out)); + if (ret) + return ret; + + force_state = MLX5_GET(teardown_hca_out, out, force_state); + if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) { + mlx5_core_err(dev, "teardown with force mode failed\n"); + return -EIO; + } + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index c6679b21884e..0648a659b21d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -111,14 +111,14 @@ static int in_fatal(struct mlx5_core_dev *dev) return 0; } -void mlx5_enter_error_state(struct mlx5_core_dev *dev) +void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) { mutex_lock(&dev->intf_state_mutex); if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) goto unlock; mlx5_core_err(dev, "start\n"); - if (pci_channel_offline(dev->pdev) || in_fatal(dev)) { + if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) { dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; trigger_cmd_completions(dev); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index b5d5519542e8..a3a836bdcfd2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -61,6 +61,11 @@ struct mlx5_lag { struct lag_tracker tracker; struct delayed_work bond_work; struct notifier_block nb; + + /* Admin state. Allow lag only if allowed is true + * even if network conditions for lag were met + */ + bool allowed; }; /* General purpose, use for short periods of time. 
@@ -214,6 +219,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) struct lag_tracker tracker; u8 v2p_port1, v2p_port2; int i, err; + bool do_bond; if (!dev0 || !dev1) return; @@ -222,13 +228,9 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) tracker = ldev->tracker; mutex_unlock(&lag_mutex); - if (tracker.is_bonded && !mlx5_lag_is_bonded(ldev)) { - if (mlx5_sriov_is_enabled(dev0) || - mlx5_sriov_is_enabled(dev1)) { - mlx5_core_warn(dev0, "LAG is not supported with SRIOV"); - return; - } + do_bond = tracker.is_bonded && ldev->allowed; + if (do_bond && !mlx5_lag_is_bonded(ldev)) { for (i = 0; i < MLX5_MAX_PORTS; i++) mlx5_remove_dev_by_protocol(ldev->pf[i].dev, MLX5_INTERFACE_PROTOCOL_IB); @@ -237,7 +239,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB); mlx5_nic_vport_enable_roce(dev1); - } else if (tracker.is_bonded && mlx5_lag_is_bonded(ldev)) { + } else if (do_bond && mlx5_lag_is_bonded(ldev)) { mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1, &v2p_port2); @@ -252,7 +254,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) "Failed to modify LAG (%d)\n", err); } - } else if (!tracker.is_bonded && mlx5_lag_is_bonded(ldev)) { + } else if (!do_bond && mlx5_lag_is_bonded(ldev)) { mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB); mlx5_nic_vport_disable_roce(dev1); @@ -411,6 +413,15 @@ static int mlx5_lag_netdev_event(struct notifier_block *this, return NOTIFY_DONE; } +static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) +{ + if ((ldev->pf[0].dev && mlx5_sriov_is_enabled(ldev->pf[0].dev)) || + (ldev->pf[1].dev && mlx5_sriov_is_enabled(ldev->pf[1].dev))) + return false; + else + return true; +} + static struct mlx5_lag *mlx5_lag_dev_alloc(void) { struct mlx5_lag *ldev; @@ -420,6 +431,7 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(void) return NULL; INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work); + ldev->allowed = mlx5_lag_check_prereq(ldev); return ldev; } @@ -444,7 +456,9 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev, ldev->tracker.netdev_state[fn].link_up = 0; ldev->tracker.netdev_state[fn].tx_enabled = 0; + ldev->allowed = mlx5_lag_check_prereq(ldev); dev->priv.lag = ldev; + mutex_unlock(&lag_mutex); } @@ -464,10 +478,10 @@ static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev, memset(&ldev->pf[i], 0, sizeof(*ldev->pf)); dev->priv.lag = NULL; + ldev->allowed = mlx5_lag_check_prereq(ldev); mutex_unlock(&lag_mutex); } - /* Must be called with intf_mutex held */ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev) { @@ -543,6 +557,44 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev) } EXPORT_SYMBOL(mlx5_lag_is_active); +static int mlx5_lag_set_state(struct mlx5_core_dev *dev, bool allow) +{ + struct mlx5_lag *ldev; + int ret = 0; + bool lag_active; + + mlx5_dev_list_lock(); + + ldev = mlx5_lag_dev_get(dev); + if (!ldev) { + ret = -ENODEV; + goto unlock; + } + lag_active = mlx5_lag_is_bonded(ldev); + if (!mlx5_lag_check_prereq(ldev) && allow) { + ret = -EINVAL; + goto unlock; + } + if (ldev->allowed == allow) + goto unlock; + ldev->allowed = allow; + if ((lag_active && !allow) || allow) + mlx5_do_bond(ldev); +unlock: + mlx5_dev_list_unlock(); + return ret; +} + +int mlx5_lag_forbid(struct mlx5_core_dev *dev) +{ + return mlx5_lag_set_state(dev, false); +} + +int mlx5_lag_allow(struct mlx5_core_dev *dev) +{ + return mlx5_lag_set_state(dev, true); +} + struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev) { struct net_device *ndev = NULL; @@ -586,4 
+638,3 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv) /* If bonded, we do not add an IB device for PF1. */ return false; } - diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index dc890944c4ea..715eeab59999 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -356,12 +356,11 @@ static void mlx5_disable_msix(struct mlx5_core_dev *dev) kfree(priv->msix_arr); } -struct mlx5_reg_host_endianess { +struct mlx5_reg_host_endianness { u8 he; u8 rsvd[15]; }; - #define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos)) enum { @@ -475,7 +474,7 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev) req_endianness = MLX5_CAP_ATOMIC(dev, - supported_atomic_req_8B_endianess_mode_1); + supported_atomic_req_8B_endianness_mode_1); if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS) return 0; @@ -487,7 +486,7 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev) set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); /* Set requestor to host endianness */ - MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode, + MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianness_mode, MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS); err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC); @@ -562,8 +561,8 @@ query_ex: static int set_hca_ctrl(struct mlx5_core_dev *dev) { - struct mlx5_reg_host_endianess he_in; - struct mlx5_reg_host_endianess he_out; + struct mlx5_reg_host_endianness he_in; + struct mlx5_reg_host_endianness he_out; int err; if (!mlx5_core_is_pf(dev)) @@ -1419,7 +1418,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, dev_info(&pdev->dev, "%s was called\n", __func__); - mlx5_enter_error_state(dev); + mlx5_enter_error_state(dev, false); mlx5_unload_one(dev, priv, false); /* In case of kernel call drain the health wq */ if (state) { @@ -1506,15 +1505,43 @@ static const struct pci_error_handlers mlx5_err_handler = { .resume = mlx5_pci_resume }; +static int mlx5_try_fast_unload(struct mlx5_core_dev *dev) +{ + int ret; + + if (!MLX5_CAP_GEN(dev, force_teardown)) { + mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n"); + return -EOPNOTSUPP; + } + + if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { + mlx5_core_dbg(dev, "Device in internal error state, giving up\n"); + return -EAGAIN; + } + + ret = mlx5_cmd_force_teardown_hca(dev); + if (ret) { + mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret); + return ret; + } + + mlx5_enter_error_state(dev, true); + + return 0; +} + static void shutdown(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); struct mlx5_priv *priv = &dev->priv; + int err; dev_info(&pdev->dev, "Shutdown was called\n"); /* Notify mlx5 clients that the kernel is being shut down */ set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state); - mlx5_unload_one(dev, priv, false); + err = mlx5_try_fast_unload(dev); + if (err) + mlx5_unload_one(dev, priv, false); mlx5_pci_disable_device(dev); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index cf69b42278df..5ccdf43e58a6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -83,12 +83,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev); int mlx5_query_board_id(struct mlx5_core_dev *dev); int 
mlx5_cmd_init_hca(struct mlx5_core_dev *dev); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); +int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev); void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); void mlx5_core_page_fault(struct mlx5_core_dev *dev, struct mlx5_pagefault *pfault); void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); -void mlx5_enter_error_state(struct mlx5_core_dev *dev); +void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force); void mlx5_disable_device(struct mlx5_core_dev *dev); void mlx5_recover_device(struct mlx5_core_dev *dev); int mlx5_sriov_init(struct mlx5_core_dev *dev); @@ -167,4 +168,7 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) MLX5_CAP_GEN(dev, lag_master); } +int mlx5_lag_allow(struct mlx5_core_dev *dev); +int mlx5_lag_forbid(struct mlx5_core_dev *dev); + #endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index efcded7ca27a..e36d3e3675f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -403,7 +403,6 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, for (i = 0; i < num_claimed; i++) free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i])); - if (nclaimed) *nclaimed = num_claimed; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index 573a6b27fed8..340f281c9801 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -30,7 +30,6 @@ * SOFTWARE. */ - #include <linux/gfp.h> #include <linux/export.h> #include <linux/mlx5/cmd.h> @@ -519,23 +518,3 @@ int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size); } EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter); - -int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id, - u32 *out_of_buffer) -{ - int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out); - void *out; - int err; - - out = kvzalloc(outlen, GFP_KERNEL); - if (!out) - return -ENOMEM; - - err = mlx5_core_query_q_counter(dev, counter_id, 0, out, outlen); - if (!err) - *out_of_buffer = MLX5_GET(query_q_counter_out, out, - out_of_buffer); - - kfree(out); - return err; -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index e08627785590..bcdf7779c48d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -175,15 +175,20 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs) if (!mlx5_core_is_pf(dev)) return -EPERM; - if (num_vfs && mlx5_lag_is_active(dev)) { - mlx5_core_warn(dev, "can't turn sriov on while LAG is active"); - return -EINVAL; + if (num_vfs) { + int ret; + + ret = mlx5_lag_forbid(dev); + if (ret && (ret != -ENODEV)) + return ret; } - if (num_vfs) + if (num_vfs) { err = mlx5_sriov_enable(pdev, num_vfs); - else + } else { mlx5_sriov_disable(pdev); + mlx5_lag_allow(dev); + } return err ? 
err : num_vfs; } diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 6ea2f5734e37..bf15e87da8fa 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -817,6 +817,7 @@ struct mlx5_cmd_work_ent { u64 ts1; u64 ts2; u16 op; + bool polling; }; struct mlx5_pas { @@ -915,6 +916,8 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size, mlx5_cmd_cbk_t callback, void *context); +int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, + void *out, int out_size); void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 32b044e953d2..e86ef880a149 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -661,9 +661,9 @@ enum { struct mlx5_ifc_atomic_caps_bits { u8 reserved_at_0[0x40]; - u8 atomic_req_8B_endianess_mode[0x2]; + u8 atomic_req_8B_endianness_mode[0x2]; u8 reserved_at_42[0x4]; - u8 supported_atomic_req_8B_endianess_mode_1[0x1]; + u8 supported_atomic_req_8B_endianness_mode_1[0x1]; u8 reserved_at_47[0x19]; @@ -801,7 +801,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 max_indirection[0x8]; u8 fixed_buffer_size[0x1]; u8 log_max_mrw_sz[0x7]; - u8 reserved_at_110[0x2]; + u8 force_teardown[0x1]; + u8 reserved_at_111[0x1]; u8 log_max_bsf_list_size[0x6]; u8 umr_extended_translation_offset[0x1]; u8 null_mkey[0x1]; @@ -3094,18 +3095,25 @@ struct mlx5_ifc_tsar_element_bits { u8 reserved_at_10[0x10]; }; +enum { + MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_SUCCESS = 0x0, + MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL = 0x1, +}; + struct mlx5_ifc_teardown_hca_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x3f]; + + u8 force_state[0x1]; }; enum { MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0, - MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE = 0x1, + MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1, }; struct mlx5_ifc_teardown_hca_in_bits { diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index bef80d0a0e30..1f637f4d1265 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -569,8 +569,6 @@ int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id); int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id); int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, int reset, void *out, int out_size); -int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id, - u32 *out_of_buffer); static inline const char *mlx5_qp_type_str(int type) {
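One cross-cutting interaction worth noting: SRIOV and LAG are now reconciled through the new mlx5_lag_forbid()/mlx5_lag_allow() admin state instead of sriov_configure rejecting the request outright when a bond is active. A condensed sketch of the resulting interlock (simplified from the sriov.c and lag.c hunks above; not verbatim kernel code):

	/* Enabling VFs first revokes the LAG admin state; an active bond
	 * is torn down by mlx5_do_bond() once ldev->allowed is false.
	 */
	if (num_vfs) {
		ret = mlx5_lag_forbid(dev);
		if (ret && ret != -ENODEV)	/* -ENODEV: no lag device */
			return ret;
		err = mlx5_sriov_enable(pdev, num_vfs);
	} else {
		mlx5_sriov_disable(pdev);
		mlx5_lag_allow(dev);		/* may re-trigger bonding */
	}

In the other direction, mlx5_lag_set_state(dev, true) fails with -EINVAL while mlx5_lag_check_prereq() sees SRIOV enabled on either PF, so the two features stay mutually exclusive both ways.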