author     David S. Miller <davem@davemloft.net>  2015-01-28 02:13:01 +0100
committer  David S. Miller <davem@davemloft.net>  2015-01-28 02:13:01 +0100
commit     5cce1cf718b02c8ea89576230db2bc2fc2e2a35a
tree       2e6934bb871f78f669e996c91bfd35f766bfabc2
parent     vxlan: advertise link netns in fdb messages
parent     net/mlx4_core: Update the HCA core clock frequency after INIT_PORT
Merge branch 'mlx4-next'
Amir Vadai says:
====================
Mellanox ethernet driver updates Jan-27-2015
This patchset introduces some bug fixes, code cleanups, and support for a new
firmware event type called recoverable error events.
Patches were applied and tested against commit b8665c6 ("net: dsa/mv88e6352:
make mv88e6352_wait generic")
Changes from V0:
- Patch 6/11 ("net/mlx4_core: Fix struct mlx4_vhcr_cmd to make implicit padding
explicit"):
- Removed __packed
- Rephrased commit message
- Added a new patch by Majd ("net/mlx4_core: Update the HCA core clock frequency
after INIT_PORT")
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
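
Patch 6/11 makes the implicit padding in struct mlx4_vhcr_cmd explicit (the new
u32 reserved1 field in the mlx4.h hunk below) rather than forcing the layout
with __packed, which was dropped from the V0 version of the patch. The
following is a minimal standalone sketch of that pattern; the struct, its field
names and the asserted offsets are hypothetical, not the driver's actual
command layout.

/*
 * Sketch only: make the compiler's implicit padding an explicit reserved
 * field and lock the layout with compile-time asserts, instead of marking
 * the struct __packed.  Hypothetical fields, not struct mlx4_vhcr_cmd.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct cmd_desc {
	uint64_t in_param;
	uint32_t in_modifier;
	uint32_t reserved1;	/* was implicit 4-byte padding before a u64 */
	uint64_t out_param;
	uint16_t token;
	uint16_t reserved2;
	uint32_t flags;
};

/* The layout is now the same with or without a packing attribute. */
static_assert(sizeof(struct cmd_desc) == 32, "cmd_desc layout changed");
static_assert(offsetof(struct cmd_desc, out_param) == 16,
	      "out_param must start at byte 16");

int main(void)
{
	return 0;
}

In kernel code the same compile-time guarantee is usually expressed with
BUILD_BUG_ON(); the point of the patch is simply that the command layout no
longer depends on a packing attribute.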
 drivers/net/ethernet/mellanox/mlx4/cmd.c              |  6
 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c       | 18
 drivers/net/ethernet/mellanox/mlx4/eq.c               | 48
 drivers/net/ethernet/mellanox/mlx4/fw.c               | 88
 drivers/net/ethernet/mellanox/mlx4/fw.h               |  1
 drivers/net/ethernet/mellanox/mlx4/main.c             |  5
 drivers/net/ethernet/mellanox/mlx4/mlx4.h             |  1
 drivers/net/ethernet/mellanox/mlx4/mr.c               |  4
 drivers/net/ethernet/mellanox/mlx4/pd.c               |  1
 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 16
 include/linux/mlx4/cmd.h                              |  6
 include/linux/mlx4/device.h                           | 14
 12 files changed, 136 insertions(+), 72 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 2b48932855e7..154effbfd8be 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -901,7 +901,9 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
 		index = be32_to_cpu(smp->attr_mod);
 		if (port < 1 || port > dev->caps.num_ports)
 			return -EINVAL;
-		table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL);
+		table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
+				sizeof(*table) * 32, GFP_KERNEL);
+
 		if (!table)
 			return -ENOMEM;
 		/* need to get the full pkey table because the paravirtualized
@@ -1221,7 +1223,7 @@ static struct mlx4_cmd_info cmd_info[] = {
 	{
 		.opcode = MLX4_CMD_HW2SW_EQ,
 		.has_inbox = false,
-		.has_outbox = true,
+		.has_outbox = false,
 		.out_is_imm = false,
 		.encode_slave_id = true,
 		.verify = NULL,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 569eda9e83d6..a7b58ba8492b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -770,22 +770,20 @@ static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		return 0;
 	}
 
-	proto_admin = cpu_to_be32(ptys_adv);
-	if (speed >= 0 && speed != priv->port_state.link_speed)
-		/* If speed was set then speed decides :-) */
-		proto_admin = speed_set_ptys_admin(priv, speed,
-						   ptys_reg.eth_proto_cap);
+	proto_admin = cmd->autoneg == AUTONEG_ENABLE ?
+		cpu_to_be32(ptys_adv) :
+		speed_set_ptys_admin(priv, speed,
+				     ptys_reg.eth_proto_cap);
 
 	proto_admin &= ptys_reg.eth_proto_cap;
-
-	if (proto_admin == ptys_reg.eth_proto_admin)
-		return 0; /* Nothing to change */
-
 	if (!proto_admin) {
 		en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
 		return -EINVAL; /* nothing to change due to bad input */
 	}
 
+	if (proto_admin == ptys_reg.eth_proto_admin)
+		return 0; /* Nothing to change */
+
 	en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
 	       be32_to_cpu(proto_admin));
 
@@ -798,9 +796,9 @@ static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		return ret;
 	}
 
-	en_warn(priv, "Port link mode changed, restarting port...\n");
 	mutex_lock(&priv->mdev->state_lock);
 	if (priv->port_up) {
+		en_warn(priv, "Port link mode changed, restarting port...\n");
 		mlx4_en_stop_port(dev, 1);
 		if (mlx4_en_start_port(dev))
 			en_err(priv, "Failed restarting port %d\n", priv->port);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 2f2e6067426d..264bc15c1ff2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -88,6 +88,8 @@ static u64 get_async_ev_mask(struct mlx4_dev *dev)
 	u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
 		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
+	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
+		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT);
 
 	return async_ev_mask;
 }
@@ -736,6 +738,26 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 				(unsigned long) eqe);
 			break;
 
+		case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
+			switch (eqe->subtype) {
+			case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE:
+				mlx4_warn(dev, "Bad cable detected on port %u\n",
+					  eqe->event.bad_cable.port);
+				break;
+			case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE:
+				mlx4_warn(dev, "Unsupported cable detected\n");
+				break;
+			default:
+				mlx4_dbg(dev,
+					 "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n",
+					 eqe->type, eqe->subtype, eq->eqn,
+					 eq->cons_index, eqe->owner, eq->nent,
+					 !!(eqe->owner & 0x80) ^
+					 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
+				break;
+			}
+			break;
+
 		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
 		case MLX4_EVENT_TYPE_ECC_DETECT:
 		default:
@@ -846,12 +868,10 @@ static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			MLX4_CMD_WRAPPED);
 }
 
-static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
-			 int eq_num)
+static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num)
 {
-	return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num,
-			    0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
-			    MLX4_CMD_WRAPPED);
+	return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ,
+			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_num_eq_uar(struct mlx4_dev *dev)
@@ -1024,7 +1044,6 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
 			  struct mlx4_eq *eq)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	struct mlx4_cmd_mailbox *mailbox;
 	int err;
 	int i;
 	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
@@ -1032,24 +1051,10 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
 	 */
 	int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE;
 
-	mailbox = mlx4_alloc_cmd_mailbox(dev);
-	if (IS_ERR(mailbox))
-		return;
-
-	err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
+	err = mlx4_HW2SW_EQ(dev, eq->eqn);
 	if (err)
 		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);
 
-	if (0) {
-		mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
-		for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
-			if (i % 4 == 0)
-				pr_cont("[%02x] ", i * 4);
-			pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
-			if ((i + 1) % 4 == 0)
-				pr_cont("\n");
-		}
-	}
 	synchronize_irq(eq->irq);
 	tasklet_disable(&eq->tasklet_ctx.task);
@@ -1061,7 +1066,6 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
 
 	kfree(eq->page_list);
 	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
-	mlx4_free_cmd_mailbox(dev, mailbox);
 }
 
 static void mlx4_free_irqs(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 982861d1df44..dbabfae3a3de 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -84,13 +84,10 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
 		[ 1] = "UC transport",
 		[ 2] = "UD transport",
 		[ 3] = "XRC transport",
-		[ 4] = "reliable multicast",
-		[ 5] = "FCoIB support",
 		[ 6] = "SRQ support",
 		[ 7] = "IPoIB checksum offload",
 		[ 8] = "P_Key violation counter",
 		[ 9] = "Q_Key violation counter",
-		[10] = "VMM",
 		[12] = "Dual Port Different Protocol (DPDP) support",
 		[15] = "Big LSO headers",
 		[16] = "MW support",
@@ -99,12 +96,11 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
 		[19] = "Raw multicast support",
 		[20] = "Address vector port checking support",
 		[21] = "UD multicast support",
-		[24] = "Demand paging support",
-		[25] = "Router support",
 		[30] = "IBoE support",
 		[32] = "Unicast loopback support",
 		[34] = "FCS header control",
-		[38] = "Wake On LAN support",
+		[37] = "Wake On LAN (port1) support",
+		[38] = "Wake On LAN (port2) support",
 		[40] = "UDP RSS support",
 		[41] = "Unicast VEP steering support",
 		[42] = "Multicast VEP steering support",
@@ -145,7 +141,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
 		[16] = "CONFIG DEV support",
 		[17] = "Asymmetric EQs support",
 		[18] = "More than 80 VFs support",
-		[19] = "Performance optimized for limited rule configuration flow steering support"
+		[19] = "Performance optimized for limited rule configuration flow steering support",
+		[20] = "Recoverable error events support"
 	};
 	int i;
 
@@ -259,6 +256,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP	0x28
 #define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
 #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30
+#define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET	0x48
 
 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x50
 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x54
@@ -273,6 +271,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 #define QUERY_FUNC_CAP_FLAG_RDMA		0x40
 #define QUERY_FUNC_CAP_FLAG_ETH			0x80
 #define QUERY_FUNC_CAP_FLAG_QUOTAS		0x10
+#define QUERY_FUNC_CAP_FLAG_RESD_LKEY		0x08
 #define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX	0x04
 
 #define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG	(1UL << 31)
@@ -344,9 +343,12 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 	} else if (vhcr->op_modifier == 0) {
 		struct mlx4_active_ports actv_ports =
 			mlx4_get_active_ports(dev, slave);
-		/* enable rdma and ethernet interfaces, and new quota locations */
+		/* enable rdma and ethernet interfaces, new quota locations,
+		 * and reserved lkey
+		 */
 		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
-			 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX);
+			 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX |
+			 QUERY_FUNC_CAP_FLAG_RESD_LKEY);
 		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
 
 		field = min(
@@ -411,6 +413,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 		size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
 			QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
+
+		size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
+		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
 	} else
 		err = -EINVAL;
 
@@ -503,6 +508,13 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
 	MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
 	func_cap->reserved_eq = size & 0xFFFFFF;
 
+	if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) {
+		MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
+		func_cap->reserved_lkey = size;
+	} else {
+		func_cap->reserved_lkey = 0;
+	}
+
 	func_cap->extra_flags = 0;
 
 	/* Mailbox data from 0x6c and onward should only be treated if
@@ -859,6 +871,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
 	if (field32 & (1 << 0))
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
+	if (field32 & (1 << 7))
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
 	if (field & 1<<6)
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
@@ -1562,6 +1576,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 #define INIT_HCA_VXLAN_OFFSET		 0x0c
 #define INIT_HCA_CACHELINE_SZ_OFFSET	 0x0e
 #define INIT_HCA_FLAGS_OFFSET		 0x014
+#define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
 #define INIT_HCA_QPC_OFFSET		 0x020
 #define INIT_HCA_QPC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x10)
 #define INIT_HCA_LOG_QP_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x17)
@@ -1668,6 +1683,9 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 			dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
 	}
 
+	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
+		*(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);
+
 	/* QPC/EEC/CQC/EQC/RDMARC attributes */
 
 	MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
@@ -1752,8 +1770,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 		MLX4_PUT(inbox, parser_params,	INIT_HCA_VXLAN_OFFSET);
 	}
 
-	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
-		       MLX4_CMD_NATIVE);
+	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA,
+		       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
 
 	if (err)
 		mlx4_err(dev, "INIT_HCA returns %d\n", err);
@@ -1879,6 +1897,36 @@ out:
 	return err;
 }
 
+static int mlx4_hca_core_clock_update(struct mlx4_dev *dev)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	__be32 *outbox;
+	int err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox)) {
+		mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n");
+		return PTR_ERR(mailbox);
+	}
+	outbox = mailbox->buf;
+
+	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
+			   MLX4_CMD_QUERY_HCA,
+			   MLX4_CMD_TIME_CLASS_B,
+			   !mlx4_is_slave(dev));
+	if (err) {
+		mlx4_warn(dev, "hca_core_clock update failed\n");
+		goto out;
+	}
+
+	MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
+
+out:
+	mlx4_free_cmd_mailbox(dev, mailbox);
+
+	return err;
+}
+
 /* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
  * and real QP0 are active, so that the paravirtualized QP0 is ready
  * to operate */
@@ -1983,6 +2031,9 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
 		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
 			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 
+	if (!err)
+		mlx4_hca_core_clock_update(dev);
+
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
@@ -2007,7 +2058,7 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
 	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
 		if (priv->mfunc.master.init_port_ref[port] == 1) {
 			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
-				       1000, MLX4_CMD_NATIVE);
+				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 			if (err)
 				return err;
 		}
@@ -2018,7 +2069,7 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
 		if (!priv->mfunc.master.qp0_state[port].qp0_active &&
 		    priv->mfunc.master.qp0_state[port].port_active) {
 			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
-				       1000, MLX4_CMD_NATIVE);
+				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 			if (err)
 				return err;
 			priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
@@ -2033,15 +2084,15 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
 
 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
 {
-	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
-			MLX4_CMD_WRAPPED);
+	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
+			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
 
 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
 {
-	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
-			MLX4_CMD_NATIVE);
+	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA,
+			MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
 }
 
 struct mlx4_config_dev {
@@ -2180,7 +2231,8 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
 int mlx4_NOP(struct mlx4_dev *dev)
 {
 	/* Input modifier of 0x1f means "finish as soon as possible." */
-	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
+	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A,
+			MLX4_CMD_NATIVE);
 }
 
 int mlx4_get_phys_port_id(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 62562b60fa87..f44f7f6017ed 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -147,6 +147,7 @@ struct mlx4_func_cap {
 	u32	qp0_proxy_qpn;
 	u32	qp1_tunnel_qpn;
 	u32	qp1_proxy_qpn;
+	u32	reserved_lkey;
 	u8	physical_port;
 	u8	port_flags;
 	u8	flags1;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index ff2fffeab4c7..cc9f48439244 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -797,6 +797,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 	dev->caps.num_mpts		= 1 << hca_param.log_mpt_sz;
 	dev->caps.num_eqs		= func_cap.max_eq;
 	dev->caps.reserved_eqs		= func_cap.reserved_eq;
+	dev->caps.reserved_lkey		= func_cap.reserved_lkey;
 	dev->caps.num_pds               = MLX4_NUM_PDS;
 	dev->caps.num_mgms              = 0;
 	dev->caps.num_amgms             = 0;
@@ -2978,8 +2979,10 @@ err_free_eq:
 	mlx4_free_eq_table(dev);
 
 err_master_mfunc:
-	if (mlx4_is_master(dev))
+	if (mlx4_is_master(dev)) {
+		mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
 		mlx4_multi_func_cleanup(dev);
+	}
 
 	if (mlx4_is_slave(dev)) {
 		kfree(dev->caps.qp0_qkey);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 096a81c16a9b..148dc0945aab 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -196,6 +196,7 @@ struct mlx4_vhcr {
 struct mlx4_vhcr_cmd {
 	__be64 in_param;
 	__be32 in_modifier;
+	u32 reserved1;
 	__be64 out_param;
 	__be16 token;
 	u16 reserved;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 8dbdf1d29357..d21e884a0838 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -1155,7 +1155,7 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_free);
 
 int mlx4_SYNC_TPT(struct mlx4_dev *dev)
 {
-	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
-			MLX4_CMD_NATIVE);
+	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT,
+			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index a42b4c0a9ed9..609c59dc854e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -214,7 +214,6 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
 		list_add(&uar->bf_list, &priv->bf_list);
 	}
 
-	bf->uar = uar;
 	idx = ffz(uar->free_bf_bmap);
 	uar->free_bf_bmap |= 1 << idx;
 	bf->uar = uar;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 3e93879bccce..79feeb6b0d87 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -4677,7 +4677,6 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
 	int state;
 	LIST_HEAD(tlist);
 	int eqn;
-	struct mlx4_cmd_mailbox *mailbox;
 
 	err = move_all_busy(dev, slave, RES_EQ);
 	if (err)
@@ -4703,20 +4702,13 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
 					break;
 
 				case RES_EQ_HW:
-					mailbox = mlx4_alloc_cmd_mailbox(dev);
-					if (IS_ERR(mailbox)) {
-						cond_resched();
-						continue;
-					}
-					err = mlx4_cmd_box(dev, slave, 0,
-							   eqn & 0xff, 0,
-							   MLX4_CMD_HW2SW_EQ,
-							   MLX4_CMD_TIME_CLASS_A,
-							   MLX4_CMD_NATIVE);
+					err = mlx4_cmd(dev, slave, eqn & 0xff,
+						       1, MLX4_CMD_HW2SW_EQ,
+						       MLX4_CMD_TIME_CLASS_A,
+						       MLX4_CMD_NATIVE);
 					if (err)
 						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
 							 slave, eqn);
-					mlx4_free_cmd_mailbox(dev, mailbox);
 					atomic_dec(&eq->mtt->ref_count);
 					state = RES_EQ_RESERVED;
 					break;
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index c989442ffc6a..ae95adc78509 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -165,9 +165,9 @@ enum {
 };
 
 enum {
-	MLX4_CMD_TIME_CLASS_A	= 10000,
-	MLX4_CMD_TIME_CLASS_B	= 10000,
-	MLX4_CMD_TIME_CLASS_C	= 10000,
+	MLX4_CMD_TIME_CLASS_A	= 60000,
+	MLX4_CMD_TIME_CLASS_B	= 60000,
+	MLX4_CMD_TIME_CLASS_C	= 60000,
 };
 
 enum {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 5ef54e145e4d..c95d659a39f2 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -200,7 +200,8 @@ enum {
 	MLX4_DEV_CAP_FLAG2_CONFIG_DEV		= 1LL << 16,
 	MLX4_DEV_CAP_FLAG2_SYS_EQS		= 1LL << 17,
 	MLX4_DEV_CAP_FLAG2_80_VFS		= 1LL << 18,
-	MLX4_DEV_CAP_FLAG2_FS_A0		= 1LL << 19
+	MLX4_DEV_CAP_FLAG2_FS_A0		= 1LL << 19,
+	MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20
 };
 
 enum {
@@ -280,6 +281,7 @@ enum mlx4_event {
 	MLX4_EVENT_TYPE_FATAL_WARNING	   = 0x1b,
 	MLX4_EVENT_TYPE_FLR_EVENT	   = 0x1c,
 	MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
+	MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT = 0x3e,
 	MLX4_EVENT_TYPE_NONE		   = 0xff,
 };
 
@@ -289,6 +291,11 @@ enum {
 };
 
 enum {
+	MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE		= 1,
+	MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE = 2,
+};
+
+enum {
 	MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0,
 };
 
@@ -860,6 +867,11 @@ struct mlx4_eqe {
 			} __packed tbl_change_info;
 		} params;
 	} __packed port_mgmt_change;
+	struct {
+		u8 reserved[3];
+		u8 port;
+		u32 reserved1[5];
+	} __packed bad_cable;
 } event;
 	u8			slave_id;
 	u8			reserved3[2];
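
The first cmd.c hunk above over-allocates the paravirtualized P_Key table to
(pkey_table_len / 32) + 1 groups of 32 entries rather than exactly
pkey_table_len entries, presumably so that code filling the table in 32-entry
blocks cannot write past the end of the buffer. The following is a standalone
sketch of that arithmetic only; the 80-entry table and the 2-byte entry size
are illustrative values, not read from a device.

#include <stdio.h>

int main(void)
{
	/* Illustrative values only; not taken from device capabilities. */
	unsigned int pkey_table_len = 80;   /* entries reported for the port */
	unsigned int entry_size     = 2;    /* a P_Key is 16 bits wide */

	/* (len / 32) + 1 whole groups of 32 entries, as in the kcalloc call */
	unsigned int groups = (pkey_table_len / 32) + 1;
	unsigned int bytes  = groups * 32 * entry_size;

	/* 80 entries -> 3 groups of 32 -> 192 bytes allocated */
	printf("%u entries -> %u groups of 32 -> %u bytes allocated\n",
	       pkey_table_len, groups, bytes);
	return 0;
}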