author     Jason Gunthorpe <jgg@mellanox.com>    2019-02-21 20:40:18 +0100
committer  Jason Gunthorpe <jgg@mellanox.com>    2019-02-21 20:40:18 +0100
commit     815f7480373e2993e0d6b0ee53b62fc7d28af6f5 (patch)
tree       4569ac428623b645433205b5361f804ad9368ea7 /drivers/infiniband/hw
parent     drivers/IB,qib: Fix pinned/locked limit check in qib_get_user_pages() (diff)
parent     net/mlx5: Factor out HCA capabilities functions (diff)
Merge branch 'mlx5-next' into rdma.git for-next
From git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux
To resolve conflicts with net-next and pick up the first patch.
* branch 'mlx5-next':
net/mlx5: Factor out HCA capabilities functions
IB/mlx5: Add support for 50Gbps per lane link modes
net/mlx5: Add support to ext_* fields introduced in Port Type and Speed register
net/mlx5: Add new fields to Port Type and Speed register
net/mlx5: Refactor queries to speed fields in Port Type and Speed register
net/mlx5: E-Switch, Avoid magic numbers when initializing offloads mode
net/mlx5: Relocate vport macros to the vport header file
net/mlx5: E-Switch, Normalize the name of uplink vport number
net/mlx5: Provide an alternative VF upper bound for ECPF
net/mlx5: Add host params change event
net/mlx5: Add query host params command
net/mlx5: Update enable HCA dependency
net/mlx5: Introduce Mellanox SmartNIC and modify page management logic
IB/mlx5: Use unified register/load function for uplink and VF vports
net/mlx5: Use consistent vport num argument type
net/mlx5: Use void pointer as the type in address_of macro
net/mlx5: Align ODP capability function with netdev coding style
mlx5: use RCU lock in mlx5_eq_cq_get()
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Diffstat (limited to 'drivers/infiniband/hw')
 drivers/infiniband/hw/mlx5/ib_rep.c | 73
 drivers/infiniband/hw/mlx5/ib_rep.h | 10
 drivers/infiniband/hw/mlx5/main.c   | 95
 3 files changed, 101 insertions(+), 77 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 95ac97af6166..0b5bf264cf1c 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -3,10 +3,11 @@
  * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
  */
 
+#include <linux/mlx5/vport.h>
 #include "ib_rep.h"
 #include "srq.h"
 
-static const struct mlx5_ib_profile rep_profile = {
+static const struct mlx5_ib_profile vf_rep_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
 		     mlx5_ib_stage_init_init,
 		     mlx5_ib_stage_init_cleanup),
@@ -46,30 +47,16 @@ static const struct mlx5_ib_profile rep_profile = {
 };
 
 static int
-mlx5_ib_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
-{
-	struct mlx5_ib_dev *ibdev;
-
-	ibdev = mlx5_ib_rep_to_dev(rep);
-	if (!__mlx5_ib_add(ibdev, ibdev->profile))
-		return -EINVAL;
-	return 0;
-}
-
-static void
-mlx5_ib_nic_rep_unload(struct mlx5_eswitch_rep *rep)
-{
-	struct mlx5_ib_dev *ibdev;
-
-	ibdev = mlx5_ib_rep_to_dev(rep);
-	__mlx5_ib_remove(ibdev, ibdev->profile, MLX5_IB_STAGE_MAX);
-}
-
-static int
 mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
+	const struct mlx5_ib_profile *profile;
 	struct mlx5_ib_dev *ibdev;
 
+	if (rep->vport == MLX5_VPORT_UPLINK)
+		profile = &uplink_rep_profile;
+	else
+		profile = &vf_rep_profile;
+
 	ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
 	if (!ibdev)
 		return -ENOMEM;
@@ -78,7 +65,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 	ibdev->mdev = dev;
 	ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports),
 			       MLX5_CAP_GEN(dev, num_vhca_ports));
-	if (!__mlx5_ib_add(ibdev, &rep_profile)) {
+	if (!__mlx5_ib_add(ibdev, profile)) {
 		ib_dealloc_device(&ibdev->ib_dev);
 		return -EINVAL;
 	}
@@ -107,15 +94,14 @@ static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
 	return mlx5_ib_rep_to_dev(rep);
 }
 
-static void mlx5_ib_rep_register_vf_vports(struct mlx5_ib_dev *dev)
+void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
 {
-	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
-	int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
+	int total_vports = MLX5_TOTAL_VPORTS(mdev);
+	struct mlx5_eswitch_rep_if rep_if = {};
 	int vport;
 
-	for (vport = 1; vport < total_vfs; vport++) {
-		struct mlx5_eswitch_rep_if rep_if = {};
-
+	for (vport = 0; vport < total_vports; vport++) {
 		rep_if.load = mlx5_ib_vport_rep_load;
 		rep_if.unload = mlx5_ib_vport_rep_unload;
 		rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
@@ -123,39 +109,16 @@ static void mlx5_ib_rep_register_vf_vports(struct mlx5_ib_dev *dev)
 	}
 }
 
-static void mlx5_ib_rep_unregister_vf_vports(struct mlx5_ib_dev *dev)
+void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev)
 {
-	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
-	int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
+	int total_vports = MLX5_TOTAL_VPORTS(mdev);
 	int vport;
 
-	for (vport = 1; vport < total_vfs; vport++)
+	for (vport = total_vports - 1; vport >= 0; vport--)
 		mlx5_eswitch_unregister_vport_rep(esw, vport, REP_IB);
 }
 
-void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev)
-{
-	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
-	struct mlx5_eswitch_rep_if rep_if = {};
-
-	rep_if.load = mlx5_ib_nic_rep_load;
-	rep_if.unload = mlx5_ib_nic_rep_unload;
-	rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
-	rep_if.priv = dev;
-
-	mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_IB);
-
-	mlx5_ib_rep_register_vf_vports(dev);
-}
-
-void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev)
-{
-	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
-
-	mlx5_ib_rep_unregister_vf_vports(dev); /* VFs vports */
-	mlx5_eswitch_unregister_vport_rep(esw, 0, REP_IB); /* UPLINK PF*/
-}
-
 u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
 {
 	return mlx5_eswitch_mode(esw);
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h
index 2ba73636a2fb..798d41e61fb4 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.h
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -10,14 +10,16 @@
 #include "mlx5_ib.h"
 
 #ifdef CONFIG_MLX5_ESWITCH
+extern const struct mlx5_ib_profile uplink_rep_profile;
+
 u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw);
 struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
 					  int vport_index);
 struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw);
 struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
 					   int vport_index);
-void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev);
-void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev);
+void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev);
+void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev);
 int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
 			      struct mlx5_ib_sq *sq);
 struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
@@ -48,8 +50,8 @@ struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
 	return NULL;
 }
 
-static inline void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev) {}
-static inline void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev) {}
+static inline void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev) {}
+static inline void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev) {}
 static inline int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
 					    struct mlx5_ib_sq *sq)
 {
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index f9cddc6f2ab6..45afe1d9e125 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -331,8 +331,8 @@ out:
 	spin_unlock(&port->mp.mpi_lock);
 }
 
-static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
-				    u8 *active_width)
+static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed,
+					   u8 *active_width)
 {
 	switch (eth_proto_oper) {
 	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
@@ -389,10 +389,66 @@ static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
 	return 0;
 }
 
+static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
+					u8 *active_width)
+{
+	switch (eth_proto_oper) {
+	case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
+	case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
+		*active_width = IB_WIDTH_1X;
+		*active_speed = IB_SPEED_SDR;
+		break;
+	case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
+		*active_width = IB_WIDTH_1X;
+		*active_speed = IB_SPEED_DDR;
+		break;
+	case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
+		*active_width = IB_WIDTH_1X;
+		*active_speed = IB_SPEED_QDR;
+		break;
+	case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
+		*active_width = IB_WIDTH_4X;
+		*active_speed = IB_SPEED_QDR;
+		break;
+	case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
+		*active_width = IB_WIDTH_1X;
+		*active_speed = IB_SPEED_EDR;
+		break;
+	case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
+	case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
+		*active_width = IB_WIDTH_1X;
+		*active_speed = IB_SPEED_HDR;
+		break;
+	case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
+		*active_width = IB_WIDTH_2X;
+		*active_speed = IB_SPEED_HDR;
+		break;
+	case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
+		*active_width = IB_WIDTH_4X;
+		*active_speed = IB_SPEED_HDR;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
+				    u8 *active_width, bool ext)
+{
+	return ext ?
+		translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
+					     active_width) :
+		translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
+						active_width);
+}
+
 static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 				struct ib_port_attr *props)
 {
 	struct mlx5_ib_dev *dev = to_mdev(device);
+	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
 	struct mlx5_core_dev *mdev;
 	struct net_device *ndev, *upper;
 	enum ib_mtu ndev_ib_mtu;
@@ -400,6 +456,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 	u16 qkey_viol_cntr;
 	u32 eth_prot_oper;
 	u8 mdev_port_num;
+	bool ext;
 	int err;
 
 	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
@@ -416,16 +473,18 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 	/* Possible bad flows are checked before filling out props so in case
 	 * of an error it will still be zeroed out.
 	 */
-	err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper,
-					     mdev_port_num);
+	err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
+				   mdev_port_num);
 	if (err)
 		goto out;
+	ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
+	eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
 
 	props->active_width     = IB_WIDTH_4X;
 	props->active_speed     = IB_SPEED_QDR;
 
 	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
-				 &props->active_width);
+				 &props->active_width, ext);
 
 	props->port_cap_flags |= IB_PORT_CM_SUP;
 	props->ip_gids = true;
@@ -6435,7 +6494,7 @@ static const struct mlx5_ib_profile pf_profile = {
 		     mlx5_ib_stage_delay_drop_cleanup),
 };
 
-static const struct mlx5_ib_profile nic_rep_profile = {
+const struct mlx5_ib_profile uplink_rep_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
 		     mlx5_ib_stage_init_init,
 		     mlx5_ib_stage_init_cleanup),
@@ -6528,6 +6587,12 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 
 	printk_once(KERN_INFO "%s", mlx5_version);
 
+	if (MLX5_ESWITCH_MANAGER(mdev) &&
+	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
+		mlx5_ib_register_vport_reps(mdev);
+		return mdev;
+	}
+
 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
 
@@ -6542,14 +6607,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
 			     MLX5_CAP_GEN(mdev, num_vhca_ports));
 
-	if (MLX5_ESWITCH_MANAGER(mdev) &&
-	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
-		dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
-		dev->profile = &nic_rep_profile;
-		mlx5_ib_register_vport_reps(dev);
-		return dev;
-	}
-
 	return __mlx5_ib_add(dev, &pf_profile);
 }
 
@@ -6558,6 +6615,11 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
 	struct mlx5_ib_multiport_info *mpi;
 	struct mlx5_ib_dev *dev;
 
+	if (MLX5_ESWITCH_MANAGER(mdev) && context == mdev) {
+		mlx5_ib_unregister_vport_reps(mdev);
+		return;
+	}
+
 	if (mlx5_core_is_mp_slave(mdev)) {
 		mpi = context;
 		mutex_lock(&mlx5_ib_multiport_mutex);
@@ -6569,10 +6631,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
 	}
 
 	dev = context;
-	if (dev->profile == &nic_rep_profile)
-		mlx5_ib_unregister_vport_reps(dev);
-	else
-		__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
 
 	ib_dealloc_device((struct ib_device *)dev);
 }