author    | Jakub Kicinski <kuba@kernel.org> | 2022-09-21 23:32:23 +0200
committer | Jakub Kicinski <kuba@kernel.org> | 2022-09-21 23:32:24 +0200
commit    | 6287e55cc080bab4b02b57fde51260ce8b23bd6e (patch)
tree      | 9e2722bb63e6512e65e671df8fcb63ae8e7ef2d6
parent    | docs: net: add an explanation of VF (and other) Representors (diff)
parent    | net: hns3: add judge fd ability for sync and clear process of flow director (diff)
Merge branch 'net-hns3-updates-for-next'
Guangbin Huang says:
====================
net: hns3: updates for -next
This series includes some updates for the HNS3 ethernet driver: ethtool self-test support for external (cable) loopback, moving the DSCP-to-priority mapping into the common kinfo structure so transmit queue selection can use a direct table lookup, a table-driven rewrite of the PF mailbox message handling, and flow-director ability checks before the FD sync and clear paths.
====================
Link: https://lore.kernel.org/r/20220916023803.23756-1-huangguangbin2@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
11 files changed, 454 insertions(+), 219 deletions(-)
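The largest structural change in the series replaces the long `switch` in `hclge_mbx_handler()` with an opcode-indexed table of handler callbacks (`hclge_mbx_ops_list[]` in hclge_mbx.c below). The following is a minimal, self-contained sketch of that pattern only; the `demo_*` names are hypothetical and not part of the driver.

```c
#include <stdio.h>

#define DEMO_OPCODE_MAX 256

struct demo_mbx_param {
	unsigned int vf_id;
	unsigned int opcode;
};

typedef int (*demo_mbx_handler)(struct demo_mbx_param *param);

static int demo_handle_reset(struct demo_mbx_param *param)
{
	printf("reset request from VF %u\n", param->vf_id);
	return 0;
}

static int demo_handle_keep_alive(struct demo_mbx_param *param)
{
	printf("keep-alive from VF %u\n", param->vf_id);
	return 0;
}

/* Opcodes without a handler stay NULL, mirroring hclge_mbx_ops_list[]. */
static const demo_mbx_handler demo_mbx_ops[DEMO_OPCODE_MAX] = {
	[1] = demo_handle_reset,
	[2] = demo_handle_keep_alive,
};

static int demo_mbx_dispatch(struct demo_mbx_param *param)
{
	demo_mbx_handler fn;

	if (param->opcode >= DEMO_OPCODE_MAX)
		return -1;

	fn = demo_mbx_ops[param->opcode];
	if (!fn) {
		fprintf(stderr, "unsupported opcode %u\n", param->opcode);
		return -1;
	}

	return fn(param);
}

int main(void)
{
	struct demo_mbx_param p = { .vf_id = 3, .opcode = 2 };

	return demo_mbx_dispatch(&p);
}
```

Keeping the handlers in a `const` array keyed by opcode means adding a new mailbox message only touches the table and its handler, instead of growing a single large switch statement.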
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 7d4ae467f3ad..abcd7877f7d2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -233,6 +233,17 @@ struct hclgevf_mbx_arq_ring { __le16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE]; }; +struct hclge_dev; + +#define HCLGE_MBX_OPCODE_MAX 256 +struct hclge_mbx_ops_param { + struct hclge_vport *vport; + struct hclge_mbx_vf_to_pf_cmd *req; + struct hclge_respond_to_vf_msg *resp_msg; +}; + +typedef int (*hclge_mbx_ops_fn)(struct hclge_mbx_ops_param *param); + #define hclge_mbx_ring_ptr_move_crq(crq) \ (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num) #define hclge_mbx_tail_ptr_move_arq(arq) \ diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 9fb4cc303301..0179fc288f5f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -195,6 +195,7 @@ struct hns3_mac_stats { /* hnae3 loop mode */ enum hnae3_loop { + HNAE3_LOOP_EXTERNAL, HNAE3_LOOP_APP, HNAE3_LOOP_SERIAL_SERDES, HNAE3_LOOP_PARALLEL_SERDES, @@ -797,6 +798,8 @@ struct hnae3_tc_info { bool mqprio_active; }; +#define HNAE3_MAX_DSCP 64 +#define HNAE3_PRIO_ID_INVALID 0xff struct hnae3_knic_private_info { struct net_device *netdev; /* Set by KNIC client when init instance */ u16 rss_size; /* Allocated RSS queues */ @@ -808,6 +811,8 @@ struct hnae3_knic_private_info { struct hnae3_tc_info tc_info; u8 tc_map_mode; + u8 dscp_app_cnt; + u8 dscp_prio[HNAE3_MAX_DSCP]; u16 num_tqps; /* total number of TQPs in this handle */ struct hnae3_queue **tqp; /* array base of all TQPs in this instance */ @@ -839,6 +844,7 @@ struct hnae3_roce_private_info { #define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2) #define HNAE3_SUPPORT_VF BIT(3) #define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4) +#define HNAE3_SUPPORT_EXTERNAL_LOOPBACK BIT(5) #define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */ #define HNAE3_USER_MPE BIT(1) /* mulitcast promisc enabled by user */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 82f83e3f8162..39b75b68474c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2987,22 +2987,19 @@ static u16 hns3_nic_select_queue(struct net_device *netdev, struct net_device *sb_dev) { struct hnae3_handle *h = hns3_get_handle(netdev); - u8 dscp, priority; - int ret; + u8 dscp; if (h->kinfo.tc_map_mode != HNAE3_TC_MAP_MODE_DSCP || !h->ae_algo->ops->get_dscp_prio) goto out; dscp = hns3_get_skb_dscp(skb); - if (unlikely(dscp == HNS3_INVALID_DSCP)) - goto out; - - ret = h->ae_algo->ops->get_dscp_prio(h, dscp, NULL, &priority); - if (ret) + if (unlikely(dscp >= HNAE3_MAX_DSCP)) goto out; - skb->priority = priority; + skb->priority = h->kinfo.dscp_prio[dscp]; + if (skb->priority == HNAE3_PRIO_ID_INVALID) + skb->priority = 0; out: return netdev_pick_tx(netdev, skb, sb_dev); @@ -5827,6 +5824,57 @@ int hns3_set_channels(struct net_device *netdev, return 0; } +void hns3_external_lb_prepare(struct net_device *ndev, bool if_running) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + int i; + + if (!if_running) + return; + + netif_carrier_off(ndev); + netif_tx_disable(ndev); + + for (i = 0; i < priv->vector_num; i++) + hns3_vector_disable(&priv->tqp_vector[i]); + 
+ for (i = 0; i < h->kinfo.num_tqps; i++) + hns3_tqp_disable(h->kinfo.tqp[i]); + + /* delay ring buffer clearing to hns3_reset_notify_uninit_enet + * during reset process, because driver may not be able + * to disable the ring through firmware when downing the netdev. + */ + if (!hns3_nic_resetting(ndev)) + hns3_nic_reset_all_ring(priv->ae_handle); + + hns3_reset_tx_queue(priv->ae_handle); +} + +void hns3_external_lb_restore(struct net_device *ndev, bool if_running) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + int i; + + if (!if_running) + return; + + hns3_nic_reset_all_ring(priv->ae_handle); + + for (i = 0; i < priv->vector_num; i++) + hns3_vector_enable(&priv->tqp_vector[i]); + + for (i = 0; i < h->kinfo.num_tqps; i++) + hns3_tqp_enable(h->kinfo.tqp[i]); + + netif_tx_wake_all_queues(ndev); + + if (h->ae_algo->ops->get_status(h)) + netif_carrier_on(ndev); +} + static const struct hns3_hw_error_info hns3_hw_err[] = { { .type = HNAE3_PPU_POISON_ERROR, .msg = "PPU poison" }, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 4a3253692dcc..133a054af6b7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -744,4 +744,7 @@ u16 hns3_get_max_available_channels(struct hnae3_handle *h); void hns3_cq_period_mode_init(struct hns3_nic_priv *priv, enum dim_cq_period_mode tx_mode, enum dim_cq_period_mode rx_mode); + +void hns3_external_lb_prepare(struct net_device *ndev, bool if_running); +void hns3_external_lb_restore(struct net_device *ndev, bool if_running); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 45cd19ef3c5b..cdf76fb58d45 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -69,7 +69,6 @@ static const struct hns3_stats hns3_rxq_stats[] = { #define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) -#define HNS3_SELF_TEST_TYPE_NUM 4 #define HNS3_NIC_LB_TEST_PKT_NUM 1 #define HNS3_NIC_LB_TEST_RING_ID 0 #define HNS3_NIC_LB_TEST_PACKET_SIZE 128 @@ -95,6 +94,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) case HNAE3_LOOP_PARALLEL_SERDES: case HNAE3_LOOP_APP: case HNAE3_LOOP_PHY: + case HNAE3_LOOP_EXTERNAL: ret = h->ae_algo->ops->set_loopback(h, loop, en); break; default: @@ -304,6 +304,10 @@ out: static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2]) { + st_param[HNAE3_LOOP_EXTERNAL][0] = HNAE3_LOOP_EXTERNAL; + st_param[HNAE3_LOOP_EXTERNAL][1] = + h->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK; + st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP; st_param[HNAE3_LOOP_APP][1] = h->flags & HNAE3_SUPPORT_APP_LOOPBACK; @@ -322,17 +326,11 @@ static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2]) h->flags & HNAE3_SUPPORT_PHY_LOOPBACK; } -static void hns3_selftest_prepare(struct net_device *ndev, - bool if_running, int (*st_param)[2]) +static void hns3_selftest_prepare(struct net_device *ndev, bool if_running) { struct hns3_nic_priv *priv = netdev_priv(ndev); struct hnae3_handle *h = priv->ae_handle; - if (netif_msg_ifdown(h)) - netdev_info(ndev, "self test start\n"); - - hns3_set_selftest_param(h, st_param); - if (if_running) ndev->netdev_ops->ndo_stop(ndev); @@ -371,18 +369,15 @@ static void hns3_selftest_restore(struct net_device *ndev, bool if_running) if (if_running) 
ndev->netdev_ops->ndo_open(ndev); - - if (netif_msg_ifdown(h)) - netdev_info(ndev, "self test end\n"); } static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2], struct ethtool_test *eth_test, u64 *data) { - int test_index = 0; + int test_index = HNAE3_LOOP_APP; u32 i; - for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) { + for (i = HNAE3_LOOP_APP; i < HNAE3_LOOP_NONE; i++) { enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0]; if (!st_param[i][1]) @@ -401,6 +396,20 @@ static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2], } } +static void hns3_do_external_lb(struct net_device *ndev, + struct ethtool_test *eth_test, u64 *data) +{ + data[HNAE3_LOOP_EXTERNAL] = hns3_lp_up(ndev, HNAE3_LOOP_EXTERNAL); + if (!data[HNAE3_LOOP_EXTERNAL]) + data[HNAE3_LOOP_EXTERNAL] = hns3_lp_run_test(ndev, HNAE3_LOOP_EXTERNAL); + hns3_lp_down(ndev, HNAE3_LOOP_EXTERNAL); + + if (data[HNAE3_LOOP_EXTERNAL]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; +} + /** * hns3_self_test - self test * @ndev: net device @@ -410,7 +419,9 @@ static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2], static void hns3_self_test(struct net_device *ndev, struct ethtool_test *eth_test, u64 *data) { - int st_param[HNS3_SELF_TEST_TYPE_NUM][2]; + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + int st_param[HNAE3_LOOP_NONE][2]; bool if_running = netif_running(ndev); if (hns3_nic_resetting(ndev)) { @@ -418,13 +429,29 @@ static void hns3_self_test(struct net_device *ndev, return; } - /* Only do offline selftest, or pass by default */ - if (eth_test->flags != ETH_TEST_FL_OFFLINE) + if (!(eth_test->flags & ETH_TEST_FL_OFFLINE)) return; - hns3_selftest_prepare(ndev, if_running, st_param); + if (netif_msg_ifdown(h)) + netdev_info(ndev, "self test start\n"); + + hns3_set_selftest_param(h, st_param); + + /* external loopback test requires that the link is up and the duplex is + * full, do external test first to reduce the whole test time + */ + if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) { + hns3_external_lb_prepare(ndev, if_running); + hns3_do_external_lb(ndev, eth_test, data); + hns3_external_lb_restore(ndev, if_running); + } + + hns3_selftest_prepare(ndev, if_running); hns3_do_selftest(ndev, st_param, eth_test, data); hns3_selftest_restore(ndev, if_running); + + if (netif_msg_ifdown(h)) + netdev_info(ndev, "self test end\n"); } static void hns3_update_limit_promisc_mode(struct net_device *netdev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index fbb159f48b6c..c4aded65e848 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -368,14 +368,14 @@ static int hclge_ieee_setapp(struct hnae3_handle *h, struct dcb_app *app) int ret; if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || - app->protocol >= HCLGE_MAX_DSCP || + app->protocol >= HNAE3_MAX_DSCP || app->priority >= HNAE3_MAX_USER_PRIO) return -EINVAL; dev_info(&hdev->pdev->dev, "setapp dscp=%u priority=%u\n", app->protocol, app->priority); - if (app->priority == hdev->tm_info.dscp_prio[app->protocol]) + if (app->priority == h->kinfo.dscp_prio[app->protocol]) return 0; ret = dcb_ieee_setapp(netdev, app); @@ -384,21 +384,21 @@ static int hclge_ieee_setapp(struct hnae3_handle *h, struct dcb_app *app) old_app.selector = IEEE_8021QAZ_APP_SEL_DSCP; old_app.protocol = app->protocol; - 
old_app.priority = hdev->tm_info.dscp_prio[app->protocol]; + old_app.priority = h->kinfo.dscp_prio[app->protocol]; - hdev->tm_info.dscp_prio[app->protocol] = app->priority; + h->kinfo.dscp_prio[app->protocol] = app->priority; ret = hclge_dscp_to_tc_map(hdev); if (ret) { dev_err(&hdev->pdev->dev, "failed to set dscp to tc map, ret = %d\n", ret); - hdev->tm_info.dscp_prio[app->protocol] = old_app.priority; + h->kinfo.dscp_prio[app->protocol] = old_app.priority; (void)dcb_ieee_delapp(netdev, app); return ret; } vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_DSCP; - if (old_app.priority == HCLGE_PRIO_ID_INVALID) - hdev->tm_info.dscp_app_cnt++; + if (old_app.priority == HNAE3_PRIO_ID_INVALID) + h->kinfo.dscp_app_cnt++; else ret = dcb_ieee_delapp(netdev, &old_app); @@ -413,9 +413,9 @@ static int hclge_ieee_delapp(struct hnae3_handle *h, struct dcb_app *app) int ret; if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || - app->protocol >= HCLGE_MAX_DSCP || + app->protocol >= HNAE3_MAX_DSCP || app->priority >= HNAE3_MAX_USER_PRIO || - app->priority != hdev->tm_info.dscp_prio[app->protocol]) + app->priority != h->kinfo.dscp_prio[app->protocol]) return -EINVAL; dev_info(&hdev->pdev->dev, "delapp dscp=%u priority=%u\n", @@ -425,20 +425,20 @@ static int hclge_ieee_delapp(struct hnae3_handle *h, struct dcb_app *app) if (ret) return ret; - hdev->tm_info.dscp_prio[app->protocol] = HCLGE_PRIO_ID_INVALID; + h->kinfo.dscp_prio[app->protocol] = HNAE3_PRIO_ID_INVALID; ret = hclge_dscp_to_tc_map(hdev); if (ret) { dev_err(&hdev->pdev->dev, "failed to del dscp to tc map, ret = %d\n", ret); - hdev->tm_info.dscp_prio[app->protocol] = app->priority; + h->kinfo.dscp_prio[app->protocol] = app->priority; (void)dcb_ieee_setapp(netdev, app); return ret; } - if (hdev->tm_info.dscp_app_cnt) - hdev->tm_info.dscp_app_cnt--; + if (h->kinfo.dscp_app_cnt) + h->kinfo.dscp_app_cnt--; - if (!hdev->tm_info.dscp_app_cnt) { + if (!h->kinfo.dscp_app_cnt) { vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO; ret = hclge_up_to_tc_map(hdev); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 55f696d071e5..142415c84c6b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -1158,17 +1158,18 @@ static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf, static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf, int len) { + struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo; struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM]; u8 *req0 = (u8 *)desc[0].data; u8 *req1 = (u8 *)desc[1].data; - u8 dscp_tc[HCLGE_MAX_DSCP]; + u8 dscp_tc[HNAE3_MAX_DSCP]; int pos, ret; u8 i, j; pos = scnprintf(buf, len, "tc map mode: %s\n", - tc_map_mode_str[hdev->vport[0].nic.kinfo.tc_map_mode]); + tc_map_mode_str[kinfo->tc_map_mode]); - if (hdev->vport[0].nic.kinfo.tc_map_mode != HNAE3_TC_MAP_MODE_DSCP) + if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP) return 0; hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, true); @@ -1184,8 +1185,8 @@ static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf, pos += scnprintf(buf + pos, len - pos, "\nDSCP PRIO TC\n"); /* The low 32 dscp setting use bd0, high 32 dscp setting use bd1 */ - for (i = 0; i < HCLGE_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) { - j = i + HCLGE_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; + for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) { + j = i + 
HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; /* Each dscp setting has 4 bits, so each byte saves two dscp * setting */ @@ -1195,12 +1196,12 @@ static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf, dscp_tc[j] &= HCLGE_DBG_TC_MASK; } - for (i = 0; i < HCLGE_MAX_DSCP; i++) { - if (hdev->tm_info.dscp_prio[i] == HCLGE_PRIO_ID_INVALID) + for (i = 0; i < HNAE3_MAX_DSCP; i++) { + if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID) continue; pos += scnprintf(buf + pos, len - pos, " %2u %u %u\n", - i, hdev->tm_info.dscp_prio[i], dscp_tc[i]); + i, kinfo->dscp_prio[i], dscp_tc[i]); } return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index c760fed50da2..7b25d8f89427 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -149,10 +149,11 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG, HCLGE_TQP_INTR_RL_REG}; static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { - "App Loopback test", - "Serdes serial Loopback test", - "Serdes parallel Loopback test", - "Phy Loopback test" + "External Loopback test", + "App Loopback test", + "Serdes serial Loopback test", + "Serdes parallel Loopback test", + "Phy Loopback test" }; static const struct hclge_comm_stats_str g_mac_stats_string[] = { @@ -718,7 +719,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \ HNAE3_SUPPORT_PHY_LOOPBACK | \ HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \ - HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) + HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \ + HNAE3_SUPPORT_EXTERNAL_LOOPBACK) struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; @@ -740,9 +742,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK; } - count += 2; + count += 1; handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; + count += 1; handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; + count += 1; + handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK; if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && hdev->hw.mac.phydev->drv->set_loopback) || @@ -773,6 +778,11 @@ static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset, size, p); p = hclge_comm_tqps_get_strings(handle, p); } else if (stringset == ETH_SS_TEST) { + if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) { + memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP], ETH_GSTRING_LEN); @@ -6618,9 +6628,6 @@ static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev, struct hlist_node *node; u16 location; - if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) - return; - spin_lock_bh(&hdev->fd_rule_lock); for_each_set_bit(location, hdev->fd_bmap, @@ -6645,6 +6652,9 @@ static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev, static void hclge_del_all_fd_entries(struct hclge_dev *hdev) { + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return; + hclge_clear_fd_rules_in_list(hdev, true); hclge_fd_disable_user_def(hdev); } @@ -7478,6 +7488,9 @@ out: static void hclge_sync_fd_table(struct hclge_dev *hdev) { + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) + return; + if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) { bool clear_list = 
hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; @@ -7901,7 +7914,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle, { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - int ret; + int ret = 0; /* Loopback can be enabled in three places: SSU, MAC, and serdes. By * default, SSU loopback is enabled, so if the SMAC and the DMAC are @@ -7928,6 +7941,8 @@ static int hclge_set_loopback(struct hnae3_handle *handle, case HNAE3_LOOP_PHY: ret = hclge_set_phy_loopback(hdev, en); break; + case HNAE3_LOOP_EXTERNAL: + break; default: ret = -ENOTSUPP; dev_err(&hdev->pdev->dev, @@ -12969,17 +12984,14 @@ static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs) static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode, u8 *priority) { - struct hclge_vport *vport = hclge_get_vport(h); - struct hclge_dev *hdev = vport->back; - - if (dscp >= HCLGE_MAX_DSCP) + if (dscp >= HNAE3_MAX_DSCP) return -EINVAL; if (tc_mode) - *tc_mode = vport->nic.kinfo.tc_map_mode; + *tc_mode = h->kinfo.tc_map_mode; if (priority) - *priority = hdev->tm_info.dscp_prio[dscp] == HCLGE_PRIO_ID_INVALID ? 0 : - hdev->tm_info.dscp_prio[dscp]; + *priority = h->kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 : + h->kinfo.dscp_prio[dscp]; return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 163240adbcce..495b639b0dc2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -351,15 +351,11 @@ struct hclge_cfg { u16 umv_space; }; -#define HCLGE_MAX_DSCP 64 -#define HCLGE_PRIO_ID_INVALID 0xff struct hclge_tm_info { u8 num_tc; u8 num_pg; /* It must be 1 if vNET-Base schd */ - u8 dscp_app_cnt; u8 pg_dwrr[HCLGE_PG_NUM]; u8 prio_tc[HNAE3_MAX_USER_PRIO]; - u8 dscp_prio[HCLGE_MAX_DSCP]; struct hclge_pg_info pg_info[HCLGE_PG_NUM]; struct hclge_tc_info tc_info[HNAE3_MAX_TC]; enum hclge_fc_mode fc_mode; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index e1012f7f9b73..a7b06c63143c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -779,17 +779,284 @@ static void hclge_handle_vf_tbl(struct hclge_vport *vport, } } +static int +hclge_mbx_map_ring_to_vector_handler(struct hclge_mbx_ops_param *param) +{ + return hclge_map_unmap_ring_to_vf_vector(param->vport, true, + param->req); +} + +static int +hclge_mbx_unmap_ring_to_vector_handler(struct hclge_mbx_ops_param *param) +{ + return hclge_map_unmap_ring_to_vf_vector(param->vport, false, + param->req); +} + +static int +hclge_mbx_get_ring_vector_map_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_get_vf_ring_vector_map(param->vport, param->req, + param->resp_msg); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "PF fail(%d) to get VF ring vector map\n", + ret); + return ret; +} + +static int hclge_mbx_set_promisc_mode_handler(struct hclge_mbx_ops_param *param) +{ + hclge_set_vf_promisc_mode(param->vport, param->req); + return 0; +} + +static int hclge_mbx_set_unicast_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_set_vf_uc_mac_addr(param->vport, param->req); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "PF fail(%d) to set VF UC MAC Addr\n", + ret); + return ret; +} + +static int hclge_mbx_set_multicast_handler(struct hclge_mbx_ops_param 
*param) +{ + int ret; + + ret = hclge_set_vf_mc_mac_addr(param->vport, param->req); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "PF fail(%d) to set VF MC MAC Addr\n", + ret); + return ret; +} + +static int hclge_mbx_set_vlan_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_set_vf_vlan_cfg(param->vport, param->req, param->resp_msg); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "PF failed(%d) to config VF's VLAN\n", + ret); + return ret; +} + +static int hclge_mbx_set_alive_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_set_vf_alive(param->vport, param->req); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "PF failed(%d) to set VF's ALIVE\n", + ret); + return ret; +} + +static int hclge_mbx_get_qinfo_handler(struct hclge_mbx_ops_param *param) +{ + hclge_get_vf_queue_info(param->vport, param->resp_msg); + return 0; +} + +static int hclge_mbx_get_qdepth_handler(struct hclge_mbx_ops_param *param) +{ + hclge_get_vf_queue_depth(param->vport, param->resp_msg); + return 0; +} + +static int hclge_mbx_get_basic_info_handler(struct hclge_mbx_ops_param *param) +{ + hclge_get_basic_info(param->vport, param->resp_msg); + return 0; +} + +static int hclge_mbx_get_link_status_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_push_vf_link_status(param->vport); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "failed to inform link stat to VF, ret = %d\n", + ret); + return ret; +} + +static int hclge_mbx_queue_reset_handler(struct hclge_mbx_ops_param *param) +{ + return hclge_mbx_reset_vf_queue(param->vport, param->req, + param->resp_msg); +} + +static int hclge_mbx_reset_handler(struct hclge_mbx_ops_param *param) +{ + return hclge_reset_vf(param->vport); +} + +static int hclge_mbx_keep_alive_handler(struct hclge_mbx_ops_param *param) +{ + hclge_vf_keep_alive(param->vport); + return 0; +} + +static int hclge_mbx_set_mtu_handler(struct hclge_mbx_ops_param *param) +{ + int ret; + + ret = hclge_set_vf_mtu(param->vport, param->req); + if (ret) + dev_err(¶m->vport->back->pdev->dev, + "VF fail(%d) to set mtu\n", ret); + return ret; +} + +static int hclge_mbx_get_qid_in_pf_handler(struct hclge_mbx_ops_param *param) +{ + return hclge_get_queue_id_in_pf(param->vport, param->req, + param->resp_msg); +} + +static int hclge_mbx_get_rss_key_handler(struct hclge_mbx_ops_param *param) +{ + return hclge_get_rss_key(param->vport, param->req, param->resp_msg); +} + +static int hclge_mbx_get_link_mode_handler(struct hclge_mbx_ops_param *param) +{ + hclge_get_link_mode(param->vport, param->req); + return 0; +} + +static int +hclge_mbx_get_vf_flr_status_handler(struct hclge_mbx_ops_param *param) +{ + hclge_rm_vport_all_mac_table(param->vport, false, + HCLGE_MAC_ADDR_UC); + hclge_rm_vport_all_mac_table(param->vport, false, + HCLGE_MAC_ADDR_MC); + hclge_rm_vport_all_vlan_table(param->vport, false); + return 0; +} + +static int hclge_mbx_vf_uninit_handler(struct hclge_mbx_ops_param *param) +{ + hclge_rm_vport_all_mac_table(param->vport, true, + HCLGE_MAC_ADDR_UC); + hclge_rm_vport_all_mac_table(param->vport, true, + HCLGE_MAC_ADDR_MC); + hclge_rm_vport_all_vlan_table(param->vport, true); + return 0; +} + +static int hclge_mbx_get_media_type_handler(struct hclge_mbx_ops_param *param) +{ + hclge_get_vf_media_type(param->vport, param->resp_msg); + return 0; +} + +static int hclge_mbx_push_link_status_handler(struct hclge_mbx_ops_param *param) +{ + hclge_handle_link_change_event(param->vport->back, param->req); + return 0; +} + +static int 
hclge_mbx_get_mac_addr_handler(struct hclge_mbx_ops_param *param) +{ + hclge_get_vf_mac_addr(param->vport, param->resp_msg); + return 0; +} + +static int hclge_mbx_ncsi_error_handler(struct hclge_mbx_ops_param *param) +{ + hclge_handle_ncsi_error(param->vport->back); + return 0; +} + +static int hclge_mbx_handle_vf_tbl_handler(struct hclge_mbx_ops_param *param) +{ + hclge_handle_vf_tbl(param->vport, param->req); + return 0; +} + +static const hclge_mbx_ops_fn hclge_mbx_ops_list[HCLGE_MBX_OPCODE_MAX] = { + [HCLGE_MBX_RESET] = hclge_mbx_reset_handler, + [HCLGE_MBX_SET_UNICAST] = hclge_mbx_set_unicast_handler, + [HCLGE_MBX_SET_MULTICAST] = hclge_mbx_set_multicast_handler, + [HCLGE_MBX_SET_VLAN] = hclge_mbx_set_vlan_handler, + [HCLGE_MBX_MAP_RING_TO_VECTOR] = hclge_mbx_map_ring_to_vector_handler, + [HCLGE_MBX_UNMAP_RING_TO_VECTOR] = hclge_mbx_unmap_ring_to_vector_handler, + [HCLGE_MBX_SET_PROMISC_MODE] = hclge_mbx_set_promisc_mode_handler, + [HCLGE_MBX_GET_QINFO] = hclge_mbx_get_qinfo_handler, + [HCLGE_MBX_GET_QDEPTH] = hclge_mbx_get_qdepth_handler, + [HCLGE_MBX_GET_BASIC_INFO] = hclge_mbx_get_basic_info_handler, + [HCLGE_MBX_GET_RSS_KEY] = hclge_mbx_get_rss_key_handler, + [HCLGE_MBX_GET_MAC_ADDR] = hclge_mbx_get_mac_addr_handler, + [HCLGE_MBX_GET_LINK_STATUS] = hclge_mbx_get_link_status_handler, + [HCLGE_MBX_QUEUE_RESET] = hclge_mbx_queue_reset_handler, + [HCLGE_MBX_KEEP_ALIVE] = hclge_mbx_keep_alive_handler, + [HCLGE_MBX_SET_ALIVE] = hclge_mbx_set_alive_handler, + [HCLGE_MBX_SET_MTU] = hclge_mbx_set_mtu_handler, + [HCLGE_MBX_GET_QID_IN_PF] = hclge_mbx_get_qid_in_pf_handler, + [HCLGE_MBX_GET_LINK_MODE] = hclge_mbx_get_link_mode_handler, + [HCLGE_MBX_GET_MEDIA_TYPE] = hclge_mbx_get_media_type_handler, + [HCLGE_MBX_VF_UNINIT] = hclge_mbx_vf_uninit_handler, + [HCLGE_MBX_HANDLE_VF_TBL] = hclge_mbx_handle_vf_tbl_handler, + [HCLGE_MBX_GET_RING_VECTOR_MAP] = hclge_mbx_get_ring_vector_map_handler, + [HCLGE_MBX_GET_VF_FLR_STATUS] = hclge_mbx_get_vf_flr_status_handler, + [HCLGE_MBX_PUSH_LINK_STATUS] = hclge_mbx_push_link_status_handler, + [HCLGE_MBX_NCSI_ERROR] = hclge_mbx_ncsi_error_handler, +}; + +static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param) +{ + hclge_mbx_ops_fn cmd_func = NULL; + struct hclge_dev *hdev; + int ret = 0; + + hdev = param->vport->back; + cmd_func = hclge_mbx_ops_list[param->req->msg.code]; + if (cmd_func) + ret = cmd_func(param); + else + dev_err(&hdev->pdev->dev, + "un-supported mailbox message, code = %u\n", + param->req->msg.code); + + /* PF driver should not reply IMP */ + if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) && + param->req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) { + param->resp_msg->status = ret; + if (time_is_before_jiffies(hdev->last_mbx_scheduled + + HCLGE_MBX_SCHED_TIMEOUT)) + dev_warn(&hdev->pdev->dev, + "resp vport%u mbx(%u,%u) late\n", + param->req->mbx_src_vfid, + param->req->msg.code, + param->req->msg.subcode); + + hclge_gen_resp_to_vf(param->vport, param->req, param->resp_msg); + } +} + void hclge_mbx_handler(struct hclge_dev *hdev) { struct hclge_comm_cmq_ring *crq = &hdev->hw.hw.cmq.crq; struct hclge_respond_to_vf_msg resp_msg; struct hclge_mbx_vf_to_pf_cmd *req; - struct hclge_vport *vport; + struct hclge_mbx_ops_param param; struct hclge_desc *desc; - bool is_del = false; unsigned int flag; - int ret = 0; + param.resp_msg = &resp_msg; /* handle all the mailbox requests in the queue */ while (!hclge_cmd_crq_empty(&hdev->hw)) { if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, @@ -814,152 +1081,16 @@ void 
hclge_mbx_handler(struct hclge_dev *hdev) continue; } - vport = &hdev->vport[req->mbx_src_vfid]; - trace_hclge_pf_mbx_get(hdev, req); /* clear the resp_msg before processing every mailbox message */ memset(&resp_msg, 0, sizeof(resp_msg)); - - switch (req->msg.code) { - case HCLGE_MBX_MAP_RING_TO_VECTOR: - ret = hclge_map_unmap_ring_to_vf_vector(vport, true, - req); - break; - case HCLGE_MBX_UNMAP_RING_TO_VECTOR: - ret = hclge_map_unmap_ring_to_vf_vector(vport, false, - req); - break; - case HCLGE_MBX_GET_RING_VECTOR_MAP: - ret = hclge_get_vf_ring_vector_map(vport, req, - &resp_msg); - if (ret) - dev_err(&hdev->pdev->dev, - "PF fail(%d) to get VF ring vector map\n", - ret); - break; - case HCLGE_MBX_SET_PROMISC_MODE: - hclge_set_vf_promisc_mode(vport, req); - break; - case HCLGE_MBX_SET_UNICAST: - ret = hclge_set_vf_uc_mac_addr(vport, req); - if (ret) - dev_err(&hdev->pdev->dev, - "PF fail(%d) to set VF UC MAC Addr\n", - ret); - break; - case HCLGE_MBX_SET_MULTICAST: - ret = hclge_set_vf_mc_mac_addr(vport, req); - if (ret) - dev_err(&hdev->pdev->dev, - "PF fail(%d) to set VF MC MAC Addr\n", - ret); - break; - case HCLGE_MBX_SET_VLAN: - ret = hclge_set_vf_vlan_cfg(vport, req, &resp_msg); - if (ret) - dev_err(&hdev->pdev->dev, - "PF failed(%d) to config VF's VLAN\n", - ret); - break; - case HCLGE_MBX_SET_ALIVE: - ret = hclge_set_vf_alive(vport, req); - if (ret) - dev_err(&hdev->pdev->dev, - "PF failed(%d) to set VF's ALIVE\n", - ret); - break; - case HCLGE_MBX_GET_QINFO: - hclge_get_vf_queue_info(vport, &resp_msg); - break; - case HCLGE_MBX_GET_QDEPTH: - hclge_get_vf_queue_depth(vport, &resp_msg); - break; - case HCLGE_MBX_GET_BASIC_INFO: - hclge_get_basic_info(vport, &resp_msg); - break; - case HCLGE_MBX_GET_LINK_STATUS: - ret = hclge_push_vf_link_status(vport); - if (ret) - dev_err(&hdev->pdev->dev, - "failed to inform link stat to VF, ret = %d\n", - ret); - break; - case HCLGE_MBX_QUEUE_RESET: - ret = hclge_mbx_reset_vf_queue(vport, req, &resp_msg); - break; - case HCLGE_MBX_RESET: - ret = hclge_reset_vf(vport); - break; - case HCLGE_MBX_KEEP_ALIVE: - hclge_vf_keep_alive(vport); - break; - case HCLGE_MBX_SET_MTU: - ret = hclge_set_vf_mtu(vport, req); - if (ret) - dev_err(&hdev->pdev->dev, - "VF fail(%d) to set mtu\n", ret); - break; - case HCLGE_MBX_GET_QID_IN_PF: - ret = hclge_get_queue_id_in_pf(vport, req, &resp_msg); - break; - case HCLGE_MBX_GET_RSS_KEY: - ret = hclge_get_rss_key(vport, req, &resp_msg); - break; - case HCLGE_MBX_GET_LINK_MODE: - hclge_get_link_mode(vport, req); - break; - case HCLGE_MBX_GET_VF_FLR_STATUS: - case HCLGE_MBX_VF_UNINIT: - is_del = req->msg.code == HCLGE_MBX_VF_UNINIT; - hclge_rm_vport_all_mac_table(vport, is_del, - HCLGE_MAC_ADDR_UC); - hclge_rm_vport_all_mac_table(vport, is_del, - HCLGE_MAC_ADDR_MC); - hclge_rm_vport_all_vlan_table(vport, is_del); - break; - case HCLGE_MBX_GET_MEDIA_TYPE: - hclge_get_vf_media_type(vport, &resp_msg); - break; - case HCLGE_MBX_PUSH_LINK_STATUS: - hclge_handle_link_change_event(hdev, req); - break; - case HCLGE_MBX_GET_MAC_ADDR: - hclge_get_vf_mac_addr(vport, &resp_msg); - break; - case HCLGE_MBX_NCSI_ERROR: - hclge_handle_ncsi_error(hdev); - break; - case HCLGE_MBX_HANDLE_VF_TBL: - hclge_handle_vf_tbl(vport, req); - break; - default: - dev_err(&hdev->pdev->dev, - "un-supported mailbox message, code = %u\n", - req->msg.code); - break; - } - - /* PF driver should not reply IMP */ - if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) && - req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) { - resp_msg.status = ret; - if 
(time_is_before_jiffies(hdev->last_mbx_scheduled + - HCLGE_MBX_SCHED_TIMEOUT)) - dev_warn(&hdev->pdev->dev, - "resp vport%u mbx(%u,%u) late\n", - req->mbx_src_vfid, - req->msg.code, - req->msg.subcode); - - hclge_gen_resp_to_vf(vport, req, &resp_msg); - } + param.vport = &hdev->vport[req->mbx_src_vfid]; + param.req = req; + hclge_mbx_request_handling(¶m); crq->desc[crq->next_to_use].flag = 0; hclge_mbx_ring_ptr_move_crq(crq); - - /* reinitialize ret after complete the mbx message processing */ - ret = 0; } /* Write back CMDQ_RQ header pointer, M7 need this pointer */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 7630d1f01e04..4a33f65190e2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -271,9 +271,9 @@ static void hclge_dscp_to_prio_map_init(struct hclge_dev *hdev) u8 i; hdev->vport[0].nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO; - hdev->tm_info.dscp_app_cnt = 0; - for (i = 0; i < HCLGE_MAX_DSCP; i++) - hdev->tm_info.dscp_prio[i] = HCLGE_PRIO_ID_INVALID; + hdev->vport[0].nic.kinfo.dscp_app_cnt = 0; + for (i = 0; i < HNAE3_MAX_DSCP; i++) + hdev->vport[0].nic.kinfo.dscp_prio[i] = HNAE3_PRIO_ID_INVALID; } int hclge_dscp_to_tc_map(struct hclge_dev *hdev) @@ -288,18 +288,18 @@ int hclge_dscp_to_tc_map(struct hclge_dev *hdev) hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, false); /* The low 32 dscp setting use bd0, high 32 dscp setting use bd1 */ - for (i = 0; i < HCLGE_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) { - pri_id = hdev->tm_info.dscp_prio[i]; - pri_id = pri_id == HCLGE_PRIO_ID_INVALID ? 0 : pri_id; + for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) { + pri_id = hdev->vport[0].nic.kinfo.dscp_prio[i]; + pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id; tc_id = hdev->tm_info.prio_tc[pri_id]; /* Each dscp setting has 4 bits, so each byte saves two dscp * setting */ req0[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i); - j = i + HCLGE_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; - pri_id = hdev->tm_info.dscp_prio[j]; - pri_id = pri_id == HCLGE_PRIO_ID_INVALID ? 0 : pri_id; + j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; + pri_id = hdev->vport[0].nic.kinfo.dscp_prio[j]; + pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id; tc_id = hdev->tm_info.prio_tc[pri_id]; req1[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i); } |
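The ethtool side of the new external loopback support is driven by the standard `ETH_TEST_FL_EXTERNAL_LB` request flag and reported back through `ETH_TEST_FL_EXTERNAL_LB_DONE`, as handled in `hns3_self_test()` above. Below is a minimal userspace sketch, not part of this series, showing one way to request that test leg through the `ETHTOOL_TEST` ioctl (roughly what `ethtool -t <dev> external_lb` does); the interface name, the `max_tests` bound, and the error handling are illustrative assumptions.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "eth0"; /* assumed device name */
	unsigned int max_tests = 64; /* assumed bound; real tools query ETH_SS_TEST first */
	struct ethtool_test *test;
	struct ifreq ifr;
	unsigned int i;
	int fd, ret = 1;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	test = calloc(1, sizeof(*test) + max_tests * sizeof(__u64));
	if (!test)
		goto out_close;

	test->cmd = ETHTOOL_TEST;
	/* Offline self-test with the cable (external) loopback leg enabled. */
	test->flags = ETH_TEST_FL_OFFLINE | ETH_TEST_FL_EXTERNAL_LB;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)test;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		goto out_free;
	}

	printf("self-test %s\n",
	       (test->flags & ETH_TEST_FL_FAILED) ? "FAILED" : "PASSED");
	printf("external loopback executed: %s\n",
	       (test->flags & ETH_TEST_FL_EXTERNAL_LB_DONE) ? "yes" : "no");

	/* Per-test results; names would come from the ETH_SS_TEST string set. */
	for (i = 0; i < test->len && i < max_tests; i++)
		printf("  result[%u] = %llu\n", i,
		       (unsigned long long)test->data[i]);
	ret = 0;

out_free:
	free(test);
out_close:
	close(fd);
	return ret;
}
```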