Diffstat (limited to 'drivers/net/ethernet')
95 files changed, 1838 insertions, 881 deletions
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
index a3c79848a69a..2babea110991 100644
--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -160,23 +160,19 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
 	if (err < 0) {
 		dev_warn(cf->dev, "auxiliary_device_init of %s failed: %pe\n",
 			 name, ERR_PTR(err));
-		goto err_out;
+		kfree(padev);
+		return ERR_PTR(err);
 	}
 
 	err = auxiliary_device_add(aux_dev);
 	if (err) {
 		dev_warn(cf->dev, "auxiliary_device_add of %s failed: %pe\n",
 			 name, ERR_PTR(err));
-		goto err_out_uninit;
+		auxiliary_device_uninit(aux_dev);
+		return ERR_PTR(err);
 	}
 
 	return padev;
-
-err_out_uninit:
-	auxiliary_device_uninit(aux_dev);
-err_out:
-	kfree(padev);
-	return ERR_PTR(err);
 }
 
 int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index a15e6d31fc22..493b724848c8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -14523,6 +14523,70 @@ static const struct net_device_ops bnxt_netdev_ops = {
 	.ndo_bridge_setlink	= bnxt_bridge_setlink,
 };
 
+static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
+				    struct netdev_queue_stats_rx *stats)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_cp_ring_info *cpr;
+	u64 *sw;
+
+	cpr = &bp->bnapi[i]->cp_ring;
+	sw = cpr->stats.sw_stats;
+
+	stats->packets = 0;
+	stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
+	stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
+	stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
+
+	stats->bytes = 0;
+	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
+	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
+	stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
+
+	stats->alloc_fail = cpr->sw_stats.rx.rx_oom_discards;
+}
+
+static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
+				    struct netdev_queue_stats_tx *stats)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_napi *bnapi;
+	u64 *sw;
+
+	bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
+	sw = bnapi->cp_ring.stats.sw_stats;
+
+	stats->packets = 0;
+	stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
+	stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
+	stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
+
+	stats->bytes = 0;
+	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
+	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
+	stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
+}
+
+static void bnxt_get_base_stats(struct net_device *dev,
+				struct netdev_queue_stats_rx *rx,
+				struct netdev_queue_stats_tx *tx)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	rx->packets = bp->net_stats_prev.rx_packets;
+	rx->bytes = bp->net_stats_prev.rx_bytes;
+	rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
+
+	tx->packets = bp->net_stats_prev.tx_packets;
+	tx->bytes = bp->net_stats_prev.tx_bytes;
+}
+
+static const struct netdev_stat_ops bnxt_stat_ops = {
+	.get_queue_stats_rx	= bnxt_get_queue_stats_rx,
+	.get_queue_stats_tx	= bnxt_get_queue_stats_tx,
+	.get_base_stats		= bnxt_get_base_stats,
+};
+
 static void bnxt_remove_one(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
@@ -14970,6 +15034,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto init_err_free;
 
 	dev->netdev_ops = &bnxt_netdev_ops;
+	dev->stat_ops = &bnxt_stat_ops;
 	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
 	dev->ethtool_ops = &bnxt_ethtool_ops;
 	pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b5ff2e1a9975..49d5808b7d11 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -804,20 +804,6 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
 }
 
 /**
- * calc_tx_descs - calculate the number of Tx descriptors for a packet
- * @skb: the packet
- * @chip_ver: chip version
- *
- * Returns the number of Tx descriptors needed for the given Ethernet
- * packet, including the needed WR and CPL headers.
- */
-static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
-					 unsigned int chip_ver)
-{
-	return flits_to_desc(calc_tx_flits(skb, chip_ver));
-}
-
-/**
  * cxgb4_write_sgl - populate a scatter/gather list for a packet
  * @skb: the packet
  * @q: the Tx queue we are writing into
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index d7e175a9cb49..f19f1e1d1f9f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -388,6 +388,7 @@ struct hnae3_dev_specs {
 	u16 mc_mac_size;
 	u32 mac_stats_num;
 	u8 tnl_num;
+	u8 hilink_version;
 };
 
 struct hnae3_client_ops {
@@ -819,6 +820,7 @@ struct hnae3_tc_info {
 	u8 max_tc; /* Total number of TCs */
 	u8 num_tc; /* Total number of enabled TCs */
 	bool mqprio_active;
+	bool mqprio_destroy;
 	bool dcb_ets_active;
 };
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index d92ad6082d8e..652d71326231 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -351,7 +351,7 @@ static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
 static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
 {
 	static const struct hclge_cmdq_tx_timeout_map cmdq_tx_timeout_map[] = {
-		{HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS},
+		{HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_CFG_RST_TIMEOUT},
 	};
 	u32 i;
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
index 533c19d25e4f..552396518e08 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -55,7 +55,7 @@
 #define HCLGE_COMM_NIC_CMQ_DESC_NUM_S		3
 #define HCLGE_COMM_NIC_CMQ_DESC_NUM		1024
 #define HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT	30000
-#define HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS	500000
+#define HCLGE_COMM_CMDQ_CFG_RST_TIMEOUT	1000000
 
 enum hclge_opcode_type {
 	/* Generic commands */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index 3b6dbf158b98..f72dc0cee30e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -76,7 +76,7 @@ static int hns3_dcbnl_ieee_delapp(struct net_device *ndev, struct dcb_app *app)
 	if (hns3_nic_resetting(ndev))
 		return -EBUSY;
 
-	if (h->kinfo.dcb_ops->ieee_setapp)
+	if (h->kinfo.dcb_ops->ieee_delapp)
 		return h->kinfo.dcb_ops->ieee_delapp(h, app);
 
 	return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index c083d1d10767..807eb3bbb11c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1097,6 +1097,8 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
 	*pos += scnprintf(buf + *pos, len - *pos,
 			  "TX timeout threshold: %d seconds\n",
 			  dev->watchdog_timeo / HZ);
+	*pos += scnprintf(buf + *pos, len - *pos, "Hilink Version: %u\n",
+			  dev_specs->hilink_version);
 }
 
 static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 4d15eb73b972..9bb708fa42f2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -828,7 +828,8 @@ struct hclge_dev_specs_1_cmd {
 	__le16 mc_mac_size;
 	u8 rsv1[6];
 	u8 tnl_num;
-	u8 rsv2[5];
+	u8 hilink_version;
+	u8 rsv2[4];
 };
 
 /* mac speed type defined in firmware command */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index b98301e205f7..eabbacb1c714 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -619,6 +619,8 @@ static int hclge_setup_tc(struct hnae3_handle *h,
 		return ret;
 	}
 
+	kinfo->tc_info.mqprio_destroy = !tc;
+
 	ret = hclge_notify_down_uinit(hdev);
 	if (ret)
 		return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 5ea9e59569ef..b4afb66efe5c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -645,8 +645,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
 		}
 
-		count += 1;
-		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+		if (hdev->ae_dev->dev_specs.hilink_version !=
+		    HCLGE_HILINK_H60) {
+			count += 1;
+			handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+		}
+
 		count += 1;
 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
 		count += 1;
@@ -884,7 +888,7 @@ static const struct hclge_speed_bit_map speed_bit_map[] = {
 	{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
 	{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS},
 	{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS},
-	{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
+	{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BITS},
 };
 
 static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
@@ -940,7 +944,7 @@ static void hclge_update_fec_support(struct hclge_mac *mac)
 			 mac->supported);
 }
 
-static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[8] = {
+static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[] = {
 	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT},
 	{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT},
 	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT},
@@ -948,10 +952,12 @@ static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[8] = {
 	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT},
 	{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT},
 	{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT},
-	{HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
+	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
+	 ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
+	{HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
 };
 
-static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[6] = {
+static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[] = {
 	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT},
 	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
 	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT},
@@ -959,11 +965,13 @@ static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[6] = {
 	 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT},
 	{HCLGE_SUPPORT_100G_R2_BIT,
 	 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT},
-	{HCLGE_SUPPORT_200G_BIT,
+	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
+	 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
+	{HCLGE_SUPPORT_200G_R4_BIT,
 	 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
 };
 
-static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[8] = {
+static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[] = {
 	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT},
 	{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT},
 	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT},
@@ -971,10 +979,12 @@ static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[8] = {
 	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT},
 	{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT},
 	{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT},
-	{HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
+	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
+	 ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
+	{HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
 };
 
-static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[9] = {
+static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[] = {
 	{HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
 	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
 	{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
@@ -983,7 +993,9 @@ static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[9] = {
 	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT},
 	{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
 	{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT},
-	{HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
+	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
+	 ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
+	{HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
 };
 
 static void hclge_convert_setting_sr(u16 speed_ability,
@@ -1154,7 +1166,7 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
 
 static u32 hclge_get_max_speed(u16 speed_ability)
 {
-	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+	if (speed_ability & HCLGE_SUPPORT_200G_BITS)
 		return HCLGE_MAC_SPEED_200G;
 
 	if (speed_ability & HCLGE_SUPPORT_100G_BITS)
@@ -1350,6 +1362,7 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
 	ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
 	ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
 	ae_dev->dev_specs.tnl_num = req1->tnl_num;
+	ae_dev->dev_specs.hilink_version = req1->hilink_version;
 }
 
 static void hclge_check_dev_specs(struct hclge_dev *hdev)
@@ -2890,7 +2903,10 @@ static int hclge_mac_init(struct hclge_dev *hdev)
 	int ret;
 
 	hdev->support_sfp_query = true;
-	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
+
+	if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+		hdev->hw.mac.duplex = HCLGE_MAC_FULL;
+
 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
 					 hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
 	if (ret)
@@ -12092,6 +12108,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}
 
+	hclge_reset_tc_config(hdev);
+
 	ret = hclge_tm_init_hw(hdev, true);
 	if (ret) {
 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 51979cf71262..e821dd2f1528 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -191,9 +191,10 @@ enum HLCGE_PORT_TYPE {
 #define HCLGE_SUPPORT_40G_BIT BIT(5)
 #define HCLGE_SUPPORT_100M_BIT BIT(6)
 #define HCLGE_SUPPORT_10M_BIT BIT(7)
-#define HCLGE_SUPPORT_200G_BIT BIT(8)
+#define HCLGE_SUPPORT_200G_R4_EXT_BIT BIT(8)
 #define HCLGE_SUPPORT_50G_R1_BIT BIT(9)
 #define HCLGE_SUPPORT_100G_R2_BIT BIT(10)
+#define HCLGE_SUPPORT_200G_R4_BIT BIT(11)
 
 #define HCLGE_SUPPORT_GE \
 	(HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)
@@ -201,6 +202,8 @@ enum HLCGE_PORT_TYPE {
 	(HCLGE_SUPPORT_50G_R2_BIT | HCLGE_SUPPORT_50G_R1_BIT)
 #define HCLGE_SUPPORT_100G_BITS \
 	(HCLGE_SUPPORT_100G_R4_BIT | HCLGE_SUPPORT_100G_R2_BIT)
+#define HCLGE_SUPPORT_200G_BITS \
+	(HCLGE_SUPPORT_200G_R4_EXT_BIT | HCLGE_SUPPORT_200G_R4_BIT)
 
 enum HCLGE_DEV_STATE {
 	HCLGE_STATE_REINITING,
@@ -253,6 +256,12 @@ enum HCLGE_MAC_DUPLEX {
 	HCLGE_MAC_FULL
 };
 
+/* hilink version */
+enum hclge_hilink_version {
+	HCLGE_HILINK_H32 = 0,
+	HCLGE_HILINK_H60 = 1,
+};
+
 #define QUERY_SFP_SPEED		0
 #define QUERY_ACTIVE_SPEED	1
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 4b0d07ca2505..d4a0e0be7a72 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -1123,10 +1123,11 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 		req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
 
 		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
-		if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
+		if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B) ||
+			     req->mbx_src_vfid > hdev->num_req_vfs)) {
 			dev_warn(&hdev->pdev->dev,
-				 "dropped invalid mailbox message, code = %u\n",
-				 req->msg.code);
+				 "dropped invalid mailbox message, code = %u, vfid = %u\n",
+				 req->msg.code, req->mbx_src_vfid);
 
 			/* dropping/not processing this invalid message */
 			crq->desc[crq->next_to_use].flag = 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 80a2a0073d97..507d7ce26d83 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -108,7 +108,7 @@ void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
 	u64 ns = nsec;
 	u32 sec_h;
 
-	if (!test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
+	if (!hdev->ptp || !test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
 		return;
 
 	/* Since the BD does not have enough space for the higher 16 bits of
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index c58c31221762..00c3f2548bf6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -2143,3 +2143,19 @@ int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable)
 
 	return ret;
 }
+
+void hclge_reset_tc_config(struct hclge_dev *hdev)
+{
+	struct hclge_vport *vport = &hdev->vport[0];
+	struct hnae3_knic_private_info *kinfo;
+
+	kinfo = &vport->nic.kinfo;
+
+	if (!kinfo->tc_info.mqprio_destroy)
+		return;
+
+	/* clear tc info, including mqprio_destroy and mqprio_active */
+	memset(&kinfo->tc_info, 0, sizeof(kinfo->tc_info));
+	hclge_tm_schd_info_update(hdev, 0);
+	hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 53eec6df5194..0985916629d3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -277,4 +277,5 @@ int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
 int hclge_up_to_tc_map(struct hclge_dev *hdev);
 int hclge_dscp_to_tc_map(struct hclge_dev *hdev);
 int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable);
+void hclge_reset_tc_config(struct hclge_dev *hdev);
 #endif
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index a2788fd5f8bb..19e450a5bd31 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -2559,7 +2559,7 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
 					   (u16)(mac_reg & 0xFFFF));
 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
-					   FIELD_GET(E1000_RAH_AV, mac_reg));
+					   (u16)((mac_reg & E1000_RAH_AV) >> 16));
 	}
 
 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 306758428aef..b32071ee84af 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -148,8 +148,6 @@ static void i40e_client_release_qvlist(struct i40e_info *ldev)
 		u32 reg_idx;
 
 		qv_info = &qvlist_info->qv_info[i];
-		if (!qv_info)
-			continue;
 		reg_idx = I40E_PFINT_LNKLSTN(qv_info->v_idx - 1);
 		wr32(&pf->hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
 	}
@@ -576,8 +574,6 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev,
 
 	for (i = 0; i < qvlist_info->num_vectors; i++) {
 		qv_info = &qvlist_info->qv_info[i];
-		if (!qv_info)
-			continue;
 		v_idx = qv_info->v_idx;
 
 		/* Validate vector id belongs to this client */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 3fada49b8ae2..f86578857e8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -13523,9 +13523,9 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
 		return err;
 
 	i40e_queue_pair_disable_irq(vsi, queue_pair);
+	i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
 	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
 	i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
-	i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
 	i40e_queue_pair_clean_rings(vsi, queue_pair);
 	i40e_queue_pair_reset_stats(vsi, queue_pair);
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index af4269330581..ce1f11b8ad65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -567,8 +567,7 @@ static inline bool i40e_is_fw_ver_lt(struct i40e_hw *hw, u16 maj, u16 min)
 **/
 static inline bool i40e_is_fw_ver_eq(struct i40e_hw *hw, u16 maj, u16 min)
 {
-	return (hw->aq.fw_maj_ver > maj ||
-		(hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min));
+	return (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min);
 }
 
 #endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index b34c71770887..83a34e98bdc7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -491,8 +491,6 @@ static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
 		u32 v_idx, reg_idx, reg;
 
 		qv_info = &qvlist_info->qv_info[i];
-		if (!qv_info)
-			continue;
 		v_idx = qv_info->v_idx;
 		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
 			/* Figure out the queue after CEQ and make that the
@@ -562,8 +560,6 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf,
 	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 	for (i = 0; i < qvlist_info->num_vectors; i++) {
 		qv_info = &qvlist_info->qv_info[i];
-		if (!qv_info)
-			continue;
 
 		/* Validate vector id belongs to this vf */
 		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index aefec6bd3b67..ef2440f3abf8 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2170,19 +2170,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
 		iavf_add_cloud_filter(adapter);
 		return 0;
 	}
-
-	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
-		iavf_del_cloud_filter(adapter);
-		return 0;
-	}
 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
 		iavf_del_cloud_filter(adapter);
 		return 0;
 	}
-	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
-		iavf_add_cloud_filter(adapter);
-		return 0;
-	}
 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
 		iavf_add_fdir_filter(adapter);
 		return IAVF_SUCCESS;
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index cca0e753f38f..7cee365cc7d1 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -2,6 +2,7 @@
 /* Copyright (C) 2018-2020, Intel Corporation. */
 
 #include "ice.h"
+#include <net/rps.h>
 
 /**
  * ice_is_arfs_active - helper to check is aRFS is active
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index 2bceee6d30f3..e92be6f130a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -1599,7 +1599,7 @@ static void ice_dpll_deinit_rclk_pin(struct ice_pf *pf)
 	}
 	if (WARN_ON_ONCE(!vsi || !vsi->netdev))
 		return;
-	netdev_dpll_pin_clear(vsi->netdev);
+	dpll_netdev_pin_clear(vsi->netdev);
 	dpll_pin_put(rclk->pin);
 }
 
@@ -1643,7 +1643,7 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin,
 	}
 	if (WARN_ON((!vsi || !vsi->netdev)))
 		return -EINVAL;
-	netdev_dpll_pin_set(vsi->netdev, pf->dplls.rclk.pin);
+	dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin);
 
 	return 0;
 
@@ -2122,6 +2122,7 @@ void ice_dpll_init(struct ice_pf *pf)
 	struct ice_dplls *d = &pf->dplls;
 	int err = 0;
 
+	mutex_init(&d->lock);
 	err = ice_dpll_init_info(pf, cgu);
 	if (err)
 		goto err_exit;
@@ -2134,7 +2135,6 @@ void ice_dpll_init(struct ice_pf *pf)
 	err = ice_dpll_init_pins(pf, cgu);
 	if (err)
 		goto deinit_pps;
-	mutex_init(&d->lock);
 	if (cgu) {
 		err = ice_dpll_init_worker(pf);
 		if (err)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index be32ace96da9..ee3f0d3e3f6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -3045,7 +3045,7 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
 		}
 	}
 
-	tx_ring_stats = vsi_stat->rx_ring_stats;
+	tx_ring_stats = vsi_stat->tx_ring_stats;
 	vsi_stat->tx_ring_stats =
 		krealloc_array(vsi_stat->tx_ring_stats, req_txq,
 			       sizeof(*vsi_stat->tx_ring_stats),
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 062b4ea8e2c3..5d7660bc8846 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -7998,6 +7998,8 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
 	pf_sw = pf->first_sw;
 	/* find the attribute in the netlink message */
 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+	if (!br_spec)
+		return -EINVAL;
 
 	nla_for_each_nested(attr, br_spec, rem) {
 		__u16 mode;
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index a2f8dbe3cb82..a958fcf3e6be 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -1067,6 +1067,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
 	struct ice_pf *pf = pci_get_drvdata(pdev);
 	u16 prev_msix, prev_queues, queues;
 	bool needs_rebuild = false;
+	struct ice_vsi *vsi;
 	struct ice_vf *vf;
 	int id;
 
@@ -1101,6 +1102,10 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
 	if (!vf)
 		return -ENOENT;
 
+	vsi = ice_get_vf_vsi(vf);
+	if (!vsi)
+		return -ENOENT;
+
 	prev_msix = vf->num_msix;
 	prev_queues = vf->num_vf_qs;
 
@@ -1121,7 +1126,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
 	if (vf->first_vector_idx < 0)
 		goto unroll;
 
-	if (ice_vf_reconfig_vsi(vf)) {
+	if (ice_vf_reconfig_vsi(vf) || ice_vf_init_host_cfg(vf, vsi)) {
 		/* Try to rebuild with previous values */
 		needs_rebuild = true;
 		goto unroll;
@@ -1147,8 +1152,10 @@ unroll:
 	if (vf->first_vector_idx < 0)
 		return -EINVAL;
 
-	if (needs_rebuild)
+	if (needs_rebuild) {
 		ice_vf_reconfig_vsi(vf);
+		ice_vf_init_host_cfg(vf, vsi);
+	}
 
 	ice_ena_vf_mappings(vf);
 	ice_put_vf(vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 6b88b9bfb51e..1ff9818b4c84 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -440,7 +440,6 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
 		vf->driver_caps = *(u32 *)msg;
 	else
 		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
-				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
 				  VIRTCHNL_VF_OFFLOAD_VLAN;
 
 	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
@@ -453,14 +452,8 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
 	vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
 						    vf->driver_caps);
 
-	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF)
 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
-	} else {
-		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
-			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
-		else
-			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
-	}
 
 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index 5e19d48a05b4..d796dbd2a440 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -13,8 +13,6 @@
  * - opcodes needed by VF when caps are activated
  *
  * Caps that don't use new opcodes (no opcodes should be allowed):
- * - VIRTCHNL_VF_OFFLOAD_RSS_AQ
- * - VIRTCHNL_VF_OFFLOAD_RSS_REG
  * - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR
 * - VIRTCHNL_VF_OFFLOAD_CRC
 * - VIRTCHNL_VF_OFFLOAD_RX_POLLING
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 8a051420fa19..1857220d27fe 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -179,6 +179,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
 			return -EBUSY;
 		usleep_range(1000, 2000);
 	}
+
+	ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+	ice_qvec_toggle_napi(vsi, q_vector, false);
+
 	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
 
 	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
@@ -195,13 +199,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
 		if (err)
 			return err;
 	}
-	ice_qvec_dis_irq(vsi, rx_ring, q_vector);
-
 	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
 	if (err)
 		return err;
 
-	ice_qvec_toggle_napi(vsi, q_vector, false);
 	ice_qp_clean_rings(vsi, q_idx);
 	ice_qp_reset_stats(vsi, q_idx);
 
@@ -245,11 +246,11 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
 	if (err)
 		return err;
 
-	clear_bit(ICE_CFG_BUSY, vsi->state);
 	ice_qvec_toggle_napi(vsi, q_vector, true);
 	ice_qvec_ena_irq(vsi, q_vector);
 
 	netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+	clear_bit(ICE_CFG_BUSY, vsi->state);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index a602ff8d74e0..a5f9b7a5effe 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -1978,8 +1978,10 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
 		set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
 
 	/* schedule the napi to receive all the marker packets */
+	local_bh_disable();
 	for (i = 0; i < vport->num_q_vectors; i++)
 		napi_schedule(&vport->q_vectors[i].napi);
+	local_bh_enable();
 
 	return idpf_wait_for_marker_event(vport);
 }
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 34820f6a78b9..14924dd8a4b9 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -6488,7 +6488,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
 	int cpu = smp_processor_id();
 	struct netdev_queue *nq;
 	struct igc_ring *ring;
-	int i, drops;
+	int i, nxmit;
 
 	if (unlikely(!netif_carrier_ok(dev)))
 		return -ENETDOWN;
@@ -6504,16 +6504,15 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
 	/* Avoid transmit queue timeout since we share it with the slow path */
 	txq_trans_cond_update(nq);
 
-	drops = 0;
+	nxmit = 0;
 	for (i = 0; i < num_frames; i++) {
 		int err;
 		struct xdp_frame *xdpf = frames[i];
 
 		err = igc_xdp_init_tx_descriptor(ring, xdpf);
-		if (err) {
-			xdp_return_frame_rx_napi(xdpf);
-			drops++;
-		}
+		if (err)
+			break;
+		nxmit++;
 	}
 
 	if (flags & XDP_XMIT_FLUSH)
@@ -6521,7 +6520,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
 
 	__netif_tx_unlock(nq);
 
-	return num_frames - drops;
+	return nxmit;
 }
 
 static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 595098a4c488..f985252c8c8d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1106,6 +1106,44 @@ static int ixgbe_tx_maxrate(struct net_device *netdev,
 }
 
 /**
+ * ixgbe_update_tx_ring_stats - Update Tx ring specific counters
+ * @tx_ring: ring to update
+ * @q_vector: queue vector ring belongs to
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ixgbe_update_tx_ring_stats(struct ixgbe_ring *tx_ring,
+				struct ixgbe_q_vector *q_vector, u64 pkts,
+				u64 bytes)
+{
+	u64_stats_update_begin(&tx_ring->syncp);
+	tx_ring->stats.bytes += bytes;
+	tx_ring->stats.packets += pkts;
+	u64_stats_update_end(&tx_ring->syncp);
+	q_vector->tx.total_bytes += bytes;
+	q_vector->tx.total_packets += pkts;
+}
+
+/**
+ * ixgbe_update_rx_ring_stats - Update Rx ring specific counters
+ * @rx_ring: ring to update
+ * @q_vector: queue vector ring belongs to
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ixgbe_update_rx_ring_stats(struct ixgbe_ring *rx_ring,
+				struct ixgbe_q_vector *q_vector, u64 pkts,
+				u64 bytes)
+{
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.bytes += bytes;
+	rx_ring->stats.packets += pkts;
+	u64_stats_update_end(&rx_ring->syncp);
+	q_vector->rx.total_bytes += bytes;
+	q_vector->rx.total_packets += pkts;
+}
+
+/**
  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
  * @q_vector: structure containing interrupt and ring information
  * @tx_ring: tx ring to clean
@@ -1207,12 +1245,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		i += tx_ring->count;
 
 	tx_ring->next_to_clean = i;
-	u64_stats_update_begin(&tx_ring->syncp);
-	tx_ring->stats.bytes += total_bytes;
-	tx_ring->stats.packets += total_packets;
-	u64_stats_update_end(&tx_ring->syncp);
-	q_vector->tx.total_bytes += total_bytes;
-	q_vector->tx.total_packets += total_packets;
+	ixgbe_update_tx_ring_stats(tx_ring, q_vector, total_packets,
+				   total_bytes);
 	adapter->tx_ipsec += total_ipsec;
 
 	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
@@ -2429,12 +2463,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		ixgbe_xdp_ring_update_tail_locked(ring);
 	}
 
-	u64_stats_update_begin(&rx_ring->syncp);
-	rx_ring->stats.packets += total_rx_packets;
-	rx_ring->stats.bytes += total_rx_bytes;
-	u64_stats_update_end(&rx_ring->syncp);
-	q_vector->rx.total_packets += total_rx_packets;
-	q_vector->rx.total_bytes += total_rx_bytes;
+	ixgbe_update_rx_ring_stats(rx_ring, q_vector, total_rx_packets,
+				   total_rx_bytes);
 
 	return total_rx_packets;
 }
@@ -2939,8 +2969,8 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
 static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
 					   u64 qmask)
 {
-	u32 mask;
 	struct ixgbe_hw *hw = &adapter->hw;
+	u32 mask;
 
 	switch (hw->mac.type) {
 	case ixgbe_mac_82598EB:
@@ -10525,6 +10555,44 @@ static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
 }
 
 /**
+ * ixgbe_irq_disable_single - Disable single IRQ vector
+ * @adapter: adapter structure
+ * @ring: ring index
+ **/
+static void ixgbe_irq_disable_single(struct ixgbe_adapter *adapter, u32 ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u64 qmask = BIT_ULL(ring);
+	u32 mask;
+
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82598EB:
+		mask = qmask & IXGBE_EIMC_RTX_QUEUE;
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
+		mask = (qmask & 0xFFFFFFFF);
+		if (mask)
+			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+		mask = (qmask >> 32);
+		if (mask)
+			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+		break;
+	default:
+		break;
+	}
+	IXGBE_WRITE_FLUSH(&adapter->hw);
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+		synchronize_irq(adapter->msix_entries[ring].vector);
+	else
+		synchronize_irq(adapter->pdev->irq);
+}
+
+/**
  * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
  * @adapter: adapter structure
  * @ring: ring index
@@ -10540,6 +10608,11 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
 	tx_ring = adapter->tx_ring[ring];
 	xdp_ring = adapter->xdp_ring[ring];
 
+	ixgbe_irq_disable_single(adapter, ring);
+
+	/* Rx/Tx/XDP Tx share the same napi context. */
+	napi_disable(&rx_ring->q_vector->napi);
+
 	ixgbe_disable_txr(adapter, tx_ring);
 	if (xdp_ring)
 		ixgbe_disable_txr(adapter, xdp_ring);
@@ -10548,9 +10621,6 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
 	if (xdp_ring)
 		synchronize_rcu();
 
-	/* Rx/Tx/XDP Tx share the same napi context. */
-	napi_disable(&rx_ring->q_vector->napi);
-
 	ixgbe_clean_tx_ring(tx_ring);
 	if (xdp_ring)
 		ixgbe_clean_tx_ring(xdp_ring);
@@ -10578,9 +10648,6 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
 	tx_ring = adapter->tx_ring[ring];
 	xdp_ring = adapter->xdp_ring[ring];
 
-	/* Rx/Tx/XDP Tx share the same napi context. */
-	napi_enable(&rx_ring->q_vector->napi);
-
 	ixgbe_configure_tx_ring(adapter, tx_ring);
 	if (xdp_ring)
 		ixgbe_configure_tx_ring(adapter, xdp_ring);
@@ -10589,6 +10656,11 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
 	clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
 	if (xdp_ring)
 		clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+
+	/* Rx/Tx/XDP Tx share the same napi context. */
+	napi_enable(&rx_ring->q_vector->napi);
+	ixgbe_irq_enable_queues(adapter, BIT_ULL(ring));
+	IXGBE_WRITE_FLUSH(&adapter->hw);
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index f1f69ce67420..78deea5ec536 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -46,4 +46,11 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
 int ixgbe_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
 void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring);
 
+void ixgbe_update_tx_ring_stats(struct ixgbe_ring *tx_ring,
+				struct ixgbe_q_vector *q_vector, u64 pkts,
+				u64 bytes);
+void ixgbe_update_rx_ring_stats(struct ixgbe_ring *rx_ring,
+				struct ixgbe_q_vector *q_vector, u64 pkts,
+				u64 bytes);
+
 #endif /* #define _IXGBE_TXRX_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 59798bc33298..d34d715c59eb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -359,12 +359,8 @@ construct_skb:
 		ixgbe_xdp_ring_update_tail_locked(ring);
 	}
 
-	u64_stats_update_begin(&rx_ring->syncp);
-	rx_ring->stats.packets += total_rx_packets;
-	rx_ring->stats.bytes += total_rx_bytes;
-	u64_stats_update_end(&rx_ring->syncp);
-	q_vector->rx.total_packets += total_rx_packets;
-	q_vector->rx.total_bytes += total_rx_bytes;
+	ixgbe_update_rx_ring_stats(rx_ring, q_vector, total_rx_packets,
+				   total_rx_bytes);
 
 	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
 		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
@@ -499,13 +495,8 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
 	}
 
 	tx_ring->next_to_clean = ntc;
-
-	u64_stats_update_begin(&tx_ring->syncp);
-	tx_ring->stats.bytes += total_bytes;
-	tx_ring->stats.packets += total_packets;
-	u64_stats_update_end(&tx_ring->syncp);
-	q_vector->tx.total_bytes += total_bytes;
-	q_vector->tx.total_packets += total_packets;
+	ixgbe_update_tx_ring_stats(tx_ring, q_vector, total_packets,
+				   total_bytes);
 
 	if (xsk_frames)
 		xsk_tx_completed(pool, xsk_frames);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index d5c4f810da61..61ab7f66f053 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -1556,6 +1556,7 @@ struct flow_msg {
 	u32 mpls_lse[4];
 	u8 icmp_type;
 	u8 icmp_code;
+	__be16 tcp_flags;
 };
 
 struct npc_install_flow_req {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 3e6de9d7dde3..d883157393ea 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -217,6 +217,7 @@ enum key_fields {
 	NPC_MPLS4_TTL,
 	NPC_TYPE_ICMP,
 	NPC_CODE_ICMP,
+	NPC_TCP_FLAGS,
 	NPC_HEADER_FIELDS_MAX,
 	NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
 	NPC_PF_FUNC, /* Valid when Tx */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index e6d7914ce61c..2500f5ba4f5a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -2870,6 +2870,10 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
 			seq_printf(s, "%d ", ntohs(rule->packet.dport));
 			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
 			break;
+		case NPC_TCP_FLAGS:
+			seq_printf(s, "%d ", rule->packet.tcp_flags);
+			seq_printf(s, "mask 0x%x\n", rule->mask.tcp_flags);
+			break;
 		case NPC_IPSEC_SPI:
 			seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
 			seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index c75669c8fde7..c181e7aa9eb6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -53,6 +53,7 @@ static const char * const npc_flow_names[] = {
 	[NPC_MPLS4_TTL]	= "lse depth 4",
 	[NPC_TYPE_ICMP] = "icmp type",
 	[NPC_CODE_ICMP] = "icmp code",
+	[NPC_TCP_FLAGS] = "tcp flags",
 	[NPC_UNKNOWN]	= "unknown",
 };
 
@@ -530,6 +531,7 @@ do { \
 	NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
 	NPC_SCAN_HDR(NPC_TYPE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 0, 1);
 	NPC_SCAN_HDR(NPC_CODE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 1, 1);
+	NPC_SCAN_HDR(NPC_TCP_FLAGS, NPC_LID_LD, NPC_LT_LD_TCP, 12, 2);
 	NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
 	NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
 	NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
@@ -574,7 +576,8 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
 		       BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
 		       BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) |
 		       BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) |
-		       BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP);
+		       BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP) |
+		       BIT_ULL(NPC_TCP_FLAGS);
 
 	/* for tcp/udp/sctp corresponding layer type should be in the key */
 	if (*features & proto_flags) {
@@ -982,7 +985,8 @@ do { \
 			      mask->icmp_type, 0);
 	NPC_WRITE_FLOW(NPC_CODE_ICMP, icmp_code, pkt->icmp_code, 0,
 		       mask->icmp_code, 0);
-
+	NPC_WRITE_FLOW(NPC_TCP_FLAGS, tcp_flags, ntohs(pkt->tcp_flags), 0,
+		       ntohs(mask->tcp_flags), 0);
 	NPC_WRITE_FLOW(NPC_IPSEC_SPI, spi, ntohl(pkt->spi), 0,
 		       ntohl(mask->spi), 0);
 
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 4fd44b6eecea..87bdb93cb066 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -638,6 +638,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
 	      BIT(FLOW_DISSECTOR_KEY_IPSEC) |
 	      BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
 	      BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
 	      BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
 		netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
 			    dissector->used_keys);
@@ -857,6 +858,16 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
 		}
 	}
 
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
+		struct flow_match_tcp match;
+
+		flow_rule_match_tcp(rule, &match);
+
+		flow_spec->tcp_flags = match.key->flags;
+		flow_mask->tcp_flags = match.mask->flags;
+		req->features |= BIT_ULL(NPC_TCP_FLAGS);
+	}
+
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
 		struct flow_match_mpls match;
 		u8 bit;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index d7da62cda821..5d3fde63b273 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -42,6 +42,7 @@
 #include <net/ip.h>
 #include <net/vxlan.h>
 #include <net/devlink.h>
+#include <net/rps.h>
 
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/device.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index c44870b175f9..76dc5a9b9648 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -29,7 +29,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
 		en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
 		en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
 		en/qos.o en/htb.o en/trap.o en/fs_tt_redirect.o en/selq.o \
-		lib/crypto.o
+		lib/crypto.o lib/sd.o
 
 #
 # Netdev extra
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 3e064234f6fe..98d4306929f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -157,6 +157,12 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
 		return -EOPNOTSUPP;
 	}
 
+	if (action == DEVLINK_RELOAD_ACTION_FW_ACTIVATE &&
+	    !dev->priv.fw_reset) {
+		NL_SET_ERR_MSG_MOD(extack, "FW activate is unsupported for this function");
+		return -EOPNOTSUPP;
+	}
+
 	if (mlx5_core_is_pf(dev) && pci_num_vf(pdev))
 		NL_SET_ERR_MSG_MOD(extack,
 				   "reload while VFs are present is unfavorable");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
index c9c7fddb246f..904e08de852e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -285,7 +285,7 @@ static void mlx5_dpll_netdev_dpll_pin_set(struct mlx5_dpll *mdpll,
 {
 	if (mdpll->tracking_netdev)
 		return;
-	netdev_dpll_pin_set(netdev, mdpll->dpll_pin);
+	dpll_netdev_pin_set(netdev, mdpll->dpll_pin);
 	mdpll->tracking_netdev = netdev;
 }
 
@@ -293,7 +293,7 @@ static void mlx5_dpll_netdev_dpll_pin_clear(struct mlx5_dpll *mdpll)
 {
 	if (!mdpll->tracking_netdev)
 		return;
-	netdev_dpll_pin_clear(mdpll->tracking_netdev);
+	dpll_netdev_pin_clear(mdpll->tracking_netdev);
 	mdpll->tracking_netdev = NULL;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 55c6ace0acd5..84db05fb9389 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -60,6 +60,7 @@
 #include "lib/clock.h"
 #include "en/rx_res.h"
 #include "en/selq.h"
+#include "lib/sd.h"
 
 extern const struct net_device_ops mlx5e_netdev_ops;
 struct page_pool;
@@ -791,6 +792,8 @@ struct mlx5e_channel {
 	struct hwtstamp_config    *tstamp;
 	DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
 	int                        ix;
+	int                        vec_ix;
+	int                        sd_ix;
 	int                        cpu;
 	/* Sync between icosq recovery and XSK enable/disable. */
 	struct mutex               icosq_recovery_lock;
@@ -914,7 +917,7 @@ struct mlx5e_priv {
 	bool                       tx_ptp_opened;
 	bool                       rx_ptp_opened;
 	struct hwtstamp_config     tstamp;
-	u16                        q_counter;
+	u16                        q_counter[MLX5_SD_MAX_GROUP_SZ];
 	u16                        drop_rq_q_counter;
 	struct notifier_block      events_nb;
 	struct notifier_block      blocking_events_nb;
@@ -1029,12 +1032,12 @@ struct mlx5e_xsk_param;
 struct mlx5e_rq_param;
 int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
-		  struct mlx5e_xsk_param *xsk, int node,
+		  struct mlx5e_xsk_param *xsk, int node, u16 q_counter,
 		  struct mlx5e_rq *rq);
 #define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
 int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
 void mlx5e_close_rq(struct mlx5e_rq *rq);
-int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
+int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter);
 void mlx5e_destroy_rq(struct mlx5e_rq *rq);
 
 struct mlx5e_sq_param;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
index 48581ea3adcb..874a1016623c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
@@ -23,20 +23,26 @@ bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix)
 	return test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
 }
 
-void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+				    u32 *vhca_id)
 {
 	struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
 
 	*rqn = c->rq.rqn;
+	if (vhca_id)
+		*vhca_id = MLX5_CAP_GEN(c->mdev, vhca_id);
 }
 
-void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+				u32 *vhca_id)
 {
 	struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
 
 	WARN_ON_ONCE(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state));
 
 	*rqn = c->xskrq.rqn;
+	if (vhca_id)
+		*vhca_id = MLX5_CAP_GEN(c->mdev, vhca_id);
 }
 
 bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
index 637ca90daaa8..6715aa9383b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
@@ -10,8 +10,10 @@ struct mlx5e_channels;
 
 unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs);
 bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix);
-void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
-void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
+void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+				    u32 *vhca_id);
+void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+				u32 *vhca_id);
 bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn);
 
 #endif /* __MLX5_EN_CHANNELS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
index 40c8df111754..e2d8d2754be0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
@@ -20,10 +20,8 @@
 #define NUM_REQ_PPCNT_COUNTER_S1 MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1
 #define NUM_REQ_Q_COUNTERS_S1    MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1
 
-int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
+static int mlx5e_monitor_counter_cap(struct mlx5_core_dev *mdev)
 {
-	struct mlx5_core_dev *mdev = priv->mdev;
-
 	if (!MLX5_CAP_GEN(mdev, max_num_of_monitor_counters))
 		return false;
 	if (MLX5_CAP_PCAM_REG(mdev, ppcnt) &&
@@ -36,24 +34,38 @@ int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
 	return true;
 }
 
-static void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv)
+int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *pos;
+	int i;
+
+	mlx5_sd_for_each_dev(i, priv->mdev, pos)
+		if (!mlx5e_monitor_counter_cap(pos))
+			return false;
+	return true;
+}
+
+static void mlx5e_monitor_counter_arm(struct mlx5_core_dev *mdev)
 {
 	u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {};
 
 	MLX5_SET(arm_monitor_counter_in, in, opcode,
 		 MLX5_CMD_OP_ARM_MONITOR_COUNTER);
-	mlx5_cmd_exec_in(priv->mdev, arm_monitor_counter, in);
+	mlx5_cmd_exec_in(mdev, arm_monitor_counter, in);
 }
 
 static void mlx5e_monitor_counters_work(struct work_struct *work)
 {
 	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
 					       monitor_counters_work);
+	struct mlx5_core_dev *pos;
+	int i;
 
 	mutex_lock(&priv->state_lock);
 	mlx5e_stats_update_ndo_stats(priv);
 	mutex_unlock(&priv->state_lock);
-	mlx5e_monitor_counter_arm(priv);
+	mlx5_sd_for_each_dev(i, priv->mdev, pos)
+		mlx5e_monitor_counter_arm(pos);
 }
 
 static int mlx5e_monitor_event_handler(struct notifier_block *nb,
@@ -97,15 +109,13 @@ static int fill_monitor_counter_q_counter_set1(int cnt, int q_counter, u32 *in)
 }
 
 /* check if mlx5e_monitor_counter_supported before calling this function*/
-static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
+static void mlx5e_set_monitor_counter(struct mlx5_core_dev *mdev, int q_counter)
 {
-	struct mlx5_core_dev *mdev = priv->mdev;
 	int max_num_of_counters = MLX5_CAP_GEN(mdev, max_num_of_monitor_counters);
 	int num_q_counters      = MLX5_CAP_GEN(mdev, num_q_monitor_counters);
 	int num_ppcnt_counters  = !MLX5_CAP_PCAM_REG(mdev, ppcnt) ? 0 :
 				  MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters);
 	u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
-	int q_counter = priv->q_counter;
 	int cnt	= 0;
 
 	if (num_ppcnt_counters  >=  NUM_REQ_PPCNT_COUNTER_S1 &&
@@ -127,13 +137,17 @@ static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
 /* check if mlx5e_monitor_counter_supported before calling this function*/
 void mlx5e_monitor_counter_init(struct mlx5e_priv *priv)
 {
+	struct mlx5_core_dev *pos;
+	int i;
+
 	INIT_WORK(&priv->monitor_counters_work, mlx5e_monitor_counters_work);
 	MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler,
 		     MONITOR_COUNTER);
-	mlx5_eq_notifier_register(priv->mdev, &priv->monitor_counters_nb);
-
-	mlx5e_set_monitor_counter(priv);
-	mlx5e_monitor_counter_arm(priv);
+	mlx5_sd_for_each_dev(i, priv->mdev, pos) {
+		mlx5_eq_notifier_register(pos, &priv->monitor_counters_nb);
+		mlx5e_set_monitor_counter(pos, priv->q_counter[i]);
+		mlx5e_monitor_counter_arm(pos);
+	}
 	queue_work(priv->wq, &priv->update_stats_work);
 }
 
@@ -141,11 +155,15 @@ void mlx5e_monitor_counter_init(struct mlx5e_priv *priv)
 void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv)
 {
 	u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
+	struct mlx5_core_dev *pos;
+	int i;
 
 	MLX5_SET(set_monitor_counter_in, in, opcode,
 		 MLX5_CMD_OP_SET_MONITOR_COUNTER);
 
-	mlx5_cmd_exec_in(priv->mdev, set_monitor_counter, in);
-	mlx5_eq_notifier_unregister(priv->mdev, &priv->monitor_counters_nb);
+	mlx5_sd_for_each_dev(i, priv->mdev, pos) {
+		mlx5_cmd_exec_in(pos, set_monitor_counter, in);
+		mlx5_eq_notifier_unregister(pos, &priv->monitor_counters_nb);
+	}
 	cancel_work_sync(&priv->monitor_counters_work);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 5757f4f10c12..a3f31d9d527e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -688,7 +688,7 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
 		.napi = &c->napi,
 		.ch_stats = c->stats,
 		.node = cpu_to_node(c->cpu),
-		.ix = c->ix,
+		.ix = c->vec_ix,
 	};
 }
 
@@ -959,7 +959,6 @@ static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *param
 int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
 			 struct mlx5e_params *params,
 			 struct mlx5e_xsk_param *xsk,
-			 u16 q_counter,
 			 struct mlx5e_rq_param *param)
 {
 	void *rqc = param->rqc;
@@ -1021,7 +1020,6 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
 	MLX5_SET(wq, wq, log_wq_stride,
 		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
 	MLX5_SET(wq, wq, pd,               mdev->mlx5e_res.hw_objs.pdn);
-	MLX5_SET(rqc, rqc, counter_set_id, q_counter);
 	MLX5_SET(rqc, rqc, vsd,            params->vlan_strip_disable);
 	MLX5_SET(rqc, rqc, scatter_fcs,    params->scatter_fcs_en);
 
@@ -1032,7 +1030,6 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
 }
 
 void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
-			       u16 q_counter,
 			       struct mlx5e_rq_param *param)
 {
 	void *rqc = param->rqc;
@@ -1041,7 +1038,6 @@ void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
 	MLX5_SET(wq, wq, log_wq_stride,
 		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
-	MLX5_SET(rqc, rqc, counter_set_id, q_counter);
 
 	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
 }
@@ -1306,13 +1302,12 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
 
 int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
 			      struct mlx5e_params *params,
-			      u16 q_counter,
 			      struct mlx5e_channel_param *cparam)
 {
 	u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
 	int err;
 
-	err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
+	err = mlx5e_build_rq_param(mdev, params, NULL, &cparam->rq);
 	if (err)
 		return err;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 6800949dafbc..9a781f18b57f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -130,10 +130,8 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
 int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
 			 struct mlx5e_params *params,
 			 struct mlx5e_xsk_param *xsk,
-			 u16 q_counter,
 			 struct mlx5e_rq_param *param);
 void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
-			       u16 q_counter,
 			       struct mlx5e_rq_param *param);
 void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
 				 struct mlx5e_sq_param *param);
@@ -149,7 +147,6 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
 			     struct mlx5e_sq_param *param);
 int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
 			      struct mlx5e_params *params,
-			      u16 q_counter,
 			      struct mlx5e_channel_param *cparam);
 u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index fd4ef6431142..d0af7271da34 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -42,9 +42,9 @@ mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metad
 
 	WARN_ON_ONCE(tracker->inuse);
 	tracker->inuse = true;
-	spin_lock(&list->tracker_list_lock);
+	spin_lock_bh(&list->tracker_list_lock);
 	list_add_tail(&tracker->entry, &list->tracker_list_head);
-	spin_unlock(&list->tracker_list_lock);
+	spin_unlock_bh(&list->tracker_list_lock);
 }
 
 static void
@@ -54,9 +54,9 @@ mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 me
 
 	WARN_ON_ONCE(!tracker->inuse);
 	tracker->inuse = false;
-	spin_lock(&list->tracker_list_lock);
+	spin_lock_bh(&list->tracker_list_lock);
 	list_del(&tracker->entry);
-	spin_unlock(&list->tracker_list_lock);
+	spin_unlock_bh(&list->tracker_list_lock);
 }
 
 void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata)
@@ -155,7 +155,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
 	struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
 	struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;
 
-	spin_lock(&cqe_list->tracker_list_lock);
+	spin_lock_bh(&cqe_list->tracker_list_lock);
 	list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
 		struct sk_buff *skb =
 			mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id);
@@ -170,7 +170,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
 		pos->inuse = false;
 		list_del(&pos->entry);
 	}
-	spin_unlock(&cqe_list->tracker_list_lock);
+	spin_unlock_bh(&cqe_list->tracker_list_lock);
 }
 
 #define PTP_WQE_CTR2IDX(val)	((val) & ptpsq->ts_cqe_ctr_mask)
@@ -646,7 +646,6 @@ static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
 
 static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
 				     struct net_device *netdev,
-				     u16 q_counter,
 				     struct mlx5e_ptp_params *ptp_params)
 {
 	struct mlx5e_rq_param *rq_params = &ptp_params->rq_param;
@@ -655,7 +654,7 @@ static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
 	params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
 	mlx5e_init_rq_type_params(mdev, params);
 	params->sw_mtu = netdev->max_mtu;
-	mlx5e_build_rq_param(mdev, params, NULL, q_counter, rq_params);
+	mlx5e_build_rq_param(mdev, params, NULL, rq_params);
 }
 
 static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
@@ -681,7 +680,7 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
 	/* RQ */
 	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
 		params->vlan_strip_disable = orig->vlan_strip_disable;
-		mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
+		mlx5e_ptp_build_rq_param(c->mdev, c->netdev, cparams);
 	}
 }
 
@@ -714,13 +713,16 @@ static int mlx5e_ptp_open_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
 			     struct mlx5e_rq_param *rq_param)
 {
 	int node = dev_to_node(c->mdev->device);
-	int err;
+	int err, sd_ix;
+	u16 q_counter;
 
 	err = mlx5e_init_ptp_rq(c, params, &c->rq);
 	if (err)
 		return err;
 
-	return mlx5e_open_rq(params, rq_param, NULL, node, &c->rq);
+	sd_ix = mlx5_sd_ch_ix_get_dev_ix(c->mdev, MLX5E_PTP_CHANNEL_IX);
+	q_counter = c->priv->q_counter[sd_ix];
+	return mlx5e_open_rq(params, rq_param, NULL, node, q_counter, &c->rq);
 }
 
 static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 34adf8c3f81a..e87e26f2c669 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -122,8 +122,8 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
 
 	memset(&param_sq, 0, sizeof(param_sq));
 	memset(&param_cq, 0, sizeof(param_cq));
-	mlx5e_build_sq_param(priv->mdev, params, &param_sq);
-	mlx5e_build_tx_cq_param(priv->mdev, params, &param_cq);
+	mlx5e_build_sq_param(c->mdev, params, &param_sq);
+	mlx5e_build_tx_cq_param(c->mdev, params, &param_cq);
 	err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
 	if (err)
 		goto err_free_sq;
@@ -176,7 +176,7 @@ int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
 	 */
 	smp_wmb();
 
-	qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node_qid);
+	qos_dbg(sq->mdev, "Activate QoS SQ qid %u\n", node_qid);
 	mlx5e_activate_txqsq(sq);
 
 	return 0;
@@ -190,7 +190,7 @@ void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
 	if (!sq) /* Handle the case when the SQ failed to open. */
 		return;
 
-	qos_dbg(priv->mdev, "Deactivate QoS SQ qid %u\n", qid);
+	qos_dbg(sq->mdev, "Deactivate QoS SQ qid %u\n", qid);
 	mlx5e_deactivate_txqsq(sq);
 
 	priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 4358798d6ce1..25d751eba99b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -294,8 +294,8 @@ static void mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq,
 
 	params = &priv->channels.params;
 	rq_sz = mlx5e_rqwq_get_size(rq);
-	real_time = mlx5_is_real_time_rq(priv->mdev);
-	rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL));
+	real_time = mlx5_is_real_time_rq(rq->mdev);
+	rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(rq->mdev, params, NULL));
 
 	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
 	devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 6b44ddce14e9..0ab9db319530 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -219,7 +219,6 @@ mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
 						  struct mlx5e_txqsq *sq, int tc)
 {
 	bool stopped = netif_xmit_stopped(sq->txq);
-	struct mlx5e_priv *priv = sq->priv;
 	u8 state;
 	int err;
 
@@ -227,7 +226,7 @@ mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
 	devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix);
 	devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
 
-	err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
+	err = mlx5_core_query_sq_state(sq->mdev, sq->sqn, &state);
 	if (!err)
 		devlink_fmsg_u8_pair_put(fmsg, "HW state", state);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
index 7b8ff7a71003..bcafb4bf9415 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
@@ -4,6 +4,33 @@
 #include "rqt.h"
 #include <linux/mlx5/transobj.h>
 
+static bool verify_num_vhca_ids(struct mlx5_core_dev *mdev, u32 *vhca_ids,
+				unsigned int size)
+{
+	unsigned int max_num_vhca_id = MLX5_CAP_GEN_2(mdev, max_rqt_vhca_id);
+	int i;
+
+	/* Verify that all vhca_ids are in range [0, max_num_vhca_ids - 1] */
+	for (i = 0; i < size; i++)
+		if (vhca_ids[i] >= max_num_vhca_id)
+			return false;
+	return true;
+}
+
+static bool rqt_verify_vhca_ids(struct mlx5_core_dev *mdev, u32 *vhca_ids,
+				unsigned int size)
+{
+	if (!vhca_ids)
+		return true;
+
+	if (!MLX5_CAP_GEN(mdev, cross_vhca_rqt))
+		return false;
+	if (!verify_num_vhca_ids(mdev, vhca_ids, size))
+		return false;
+
+	return true;
+}
+
 void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
 					 unsigned int num_channels)
 {
@@ -13,19 +40,38 @@ void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
 		indir->table[i] = i % num_channels;
 }
 
+static void fill_rqn_list(void *rqtc, u32 *rqns, u32 *vhca_ids, unsigned int size)
+{
+	unsigned int i;
+
+	if (vhca_ids) {
+		MLX5_SET(rqtc, rqtc, rq_vhca_id_format, 1);
+		for (i = 0; i < size; i++) {
+			MLX5_SET(rqtc, rqtc, rq_vhca[i].rq_num, rqns[i]);
+			MLX5_SET(rqtc, rqtc, rq_vhca[i].rq_vhca_id, vhca_ids[i]);
+		}
+	} else {
+		for (i = 0; i < size; i++)
+			MLX5_SET(rqtc, rqtc, rq_num[i],
mlx5e_init_rq_type_params(mdev, params); params->sw_mtu = netdev->max_mtu; - mlx5e_build_rq_param(mdev, params, NULL, q_counter, rq_params); + mlx5e_build_rq_param(mdev, params, NULL, rq_params); } static void mlx5e_ptp_build_params(struct mlx5e_ptp *c, @@ -681,7 +680,7 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c, /* RQ */ if (test_bit(MLX5E_PTP_STATE_RX, c->state)) { params->vlan_strip_disable = orig->vlan_strip_disable; - mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams); + mlx5e_ptp_build_rq_param(c->mdev, c->netdev, cparams); } } @@ -714,13 +713,16 @@ static int mlx5e_ptp_open_rq(struct mlx5e_ptp *c, struct mlx5e_params *params, struct mlx5e_rq_param *rq_param) { int node = dev_to_node(c->mdev->device); - int err; + int err, sd_ix; + u16 q_counter; err = mlx5e_init_ptp_rq(c, params, &c->rq); if (err) return err; - return mlx5e_open_rq(params, rq_param, NULL, node, &c->rq); + sd_ix = mlx5_sd_ch_ix_get_dev_ix(c->mdev, MLX5E_PTP_CHANNEL_IX); + q_counter = c->priv->q_counter[sd_ix]; + return mlx5e_open_rq(params, rq_param, NULL, node, q_counter, &c->rq); } static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c index 34adf8c3f81a..e87e26f2c669 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c @@ -122,8 +122,8 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs, memset(¶m_sq, 0, sizeof(param_sq)); memset(¶m_cq, 0, sizeof(param_cq)); - mlx5e_build_sq_param(priv->mdev, params, ¶m_sq); - mlx5e_build_tx_cq_param(priv->mdev, params, ¶m_cq); + mlx5e_build_sq_param(c->mdev, params, ¶m_sq); + mlx5e_build_tx_cq_param(c->mdev, params, ¶m_cq); err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, ¶m_cq, &ccp, &sq->cq); if (err) goto err_free_sq; @@ -176,7 +176,7 @@ int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id) */ smp_wmb(); - qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node_qid); + qos_dbg(sq->mdev, "Activate QoS SQ qid %u\n", node_qid); mlx5e_activate_txqsq(sq); return 0; @@ -190,7 +190,7 @@ void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid) if (!sq) /* Handle the case when the SQ failed to open. 
*/ return; - qos_dbg(priv->mdev, "Deactivate QoS SQ qid %u\n", qid); + qos_dbg(sq->mdev, "Deactivate QoS SQ qid %u\n", qid); mlx5e_deactivate_txqsq(sq); priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index 4358798d6ce1..25d751eba99b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -294,8 +294,8 @@ static void mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq, params = &priv->channels.params; rq_sz = mlx5e_rqwq_get_size(rq); - real_time = mlx5_is_real_time_rq(priv->mdev); - rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL)); + real_time = mlx5_is_real_time_rq(rq->mdev); + rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(rq->mdev, params, NULL)); mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ"); devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 6b44ddce14e9..0ab9db319530 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -219,7 +219,6 @@ mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq, int tc) { bool stopped = netif_xmit_stopped(sq->txq); - struct mlx5e_priv *priv = sq->priv; u8 state; int err; @@ -227,7 +226,7 @@ mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg, devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix); devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn); - err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state); + err = mlx5_core_query_sq_state(sq->mdev, sq->sqn, &state); if (!err) devlink_fmsg_u8_pair_put(fmsg, "HW state", state); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c index 7b8ff7a71003..bcafb4bf9415 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c @@ -4,6 +4,33 @@ #include "rqt.h" #include <linux/mlx5/transobj.h> +static bool verify_num_vhca_ids(struct mlx5_core_dev *mdev, u32 *vhca_ids, + unsigned int size) +{ + unsigned int max_num_vhca_id = MLX5_CAP_GEN_2(mdev, max_rqt_vhca_id); + int i; + + /* Verify that all vhca_ids are in range [0, max_num_vhca_ids - 1] */ + for (i = 0; i < size; i++) + if (vhca_ids[i] >= max_num_vhca_id) + return false; + return true; +} + +static bool rqt_verify_vhca_ids(struct mlx5_core_dev *mdev, u32 *vhca_ids, + unsigned int size) +{ + if (!vhca_ids) + return true; + + if (!MLX5_CAP_GEN(mdev, cross_vhca_rqt)) + return false; + if (!verify_num_vhca_ids(mdev, vhca_ids, size)) + return false; + + return true; +} + void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir, unsigned int num_channels) { @@ -13,19 +40,38 @@ void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir, indir->table[i] = i % num_channels; } +static void fill_rqn_list(void *rqtc, u32 *rqns, u32 *vhca_ids, unsigned int size) +{ + unsigned int i; + + if (vhca_ids) { + MLX5_SET(rqtc, rqtc, rq_vhca_id_format, 1); + for (i = 0; i < size; i++) { + MLX5_SET(rqtc, rqtc, rq_vhca[i].rq_num, rqns[i]); + MLX5_SET(rqtc, rqtc, rq_vhca[i].rq_vhca_id, vhca_ids[i]); + } + } else { + for (i = 0; i < size; i++) + MLX5_SET(rqtc, rqtc, rq_num[i], 
rqns[i]); + } +} static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, - u16 max_size, u32 *init_rqns, u16 init_size) + u16 max_size, u32 *init_rqns, u32 *init_vhca_ids, u16 init_size) { + int entry_sz; void *rqtc; int inlen; int err; u32 *in; - int i; + + if (!rqt_verify_vhca_ids(mdev, init_vhca_ids, init_size)) + return -EOPNOTSUPP; rqt->mdev = mdev; rqt->size = max_size; - inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * init_size; + entry_sz = init_vhca_ids ? MLX5_ST_SZ_BYTES(rq_vhca) : MLX5_ST_SZ_BYTES(rq_num); + inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + entry_sz * init_size; in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -33,10 +79,9 @@ static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); MLX5_SET(rqtc, rqtc, rqt_max_size, rqt->size); - MLX5_SET(rqtc, rqtc, rqt_actual_size, init_size); - for (i = 0; i < init_size; i++) - MLX5_SET(rqtc, rqtc, rq_num[i], init_rqns[i]); + + fill_rqn_list(rqtc, init_rqns, init_vhca_ids, init_size); err = mlx5_core_create_rqt(rqt->mdev, in, inlen, &rqt->rqtn); @@ -49,7 +94,7 @@ int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, { u16 max_size = indir_enabled ? indir_table_size : 1; - return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, 1); + return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, NULL, 1); } static int mlx5e_bits_invert(unsigned long a, int size) @@ -63,7 +108,8 @@ static int mlx5e_bits_invert(unsigned long a, int size) return inv; } -static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns, +static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, u32 *rss_vhca_ids, u32 *vhca_ids, + unsigned int num_rqns, u8 hfunc, struct mlx5e_rss_params_indir *indir) { unsigned int i; @@ -82,30 +128,42 @@ static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns */ return -EINVAL; rss_rqns[i] = rqns[ix]; + if (vhca_ids) + rss_vhca_ids[i] = vhca_ids[ix]; } return 0; } int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, - u32 *rqns, unsigned int num_rqns, + u32 *rqns, u32 *vhca_ids, unsigned int num_rqns, u8 hfunc, struct mlx5e_rss_params_indir *indir) { - u32 *rss_rqns; + u32 *rss_rqns, *rss_vhca_ids = NULL; int err; rss_rqns = kvmalloc_array(indir->actual_table_size, sizeof(*rss_rqns), GFP_KERNEL); if (!rss_rqns) return -ENOMEM; - err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir); + if (vhca_ids) { + rss_vhca_ids = kvmalloc_array(indir->actual_table_size, sizeof(*rss_vhca_ids), + GFP_KERNEL); + if (!rss_vhca_ids) { + kvfree(rss_rqns); + return -ENOMEM; + } + } + + err = mlx5e_calc_indir_rqns(rss_rqns, rqns, rss_vhca_ids, vhca_ids, num_rqns, hfunc, indir); if (err) goto out; - err = mlx5e_rqt_init(rqt, mdev, indir->max_table_size, rss_rqns, + err = mlx5e_rqt_init(rqt, mdev, indir->max_table_size, rss_rqns, rss_vhca_ids, indir->actual_table_size); out: + kvfree(rss_vhca_ids); kvfree(rss_rqns); return err; } @@ -126,15 +184,20 @@ void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt) mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn); } -static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int size) +static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids, + unsigned int size) { - unsigned int i; + int entry_sz; void *rqtc; int inlen; u32 *in; int err; - inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * size; + if (!rqt_verify_vhca_ids(rqt->mdev, vhca_ids, size)) + return -EINVAL; + + 
entry_sz = vhca_ids ? MLX5_ST_SZ_BYTES(rq_vhca) : MLX5_ST_SZ_BYTES(rq_num); + inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + entry_sz * size; in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -143,8 +206,8 @@ static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int siz MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1); MLX5_SET(rqtc, rqtc, rqt_actual_size, size); - for (i = 0; i < size; i++) - MLX5_SET(rqtc, rqtc, rq_num[i], rqns[i]); + + fill_rqn_list(rqtc, rqns, vhca_ids, size); err = mlx5_core_modify_rqt(rqt->mdev, rqt->rqtn, in, inlen); @@ -152,17 +215,21 @@ static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int siz return err; } -int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn) +int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn, u32 *vhca_id) { - return mlx5e_rqt_redirect(rqt, &rqn, 1); + return mlx5e_rqt_redirect(rqt, &rqn, vhca_id, 1); } -int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns, +int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids, + unsigned int num_rqns, u8 hfunc, struct mlx5e_rss_params_indir *indir) { - u32 *rss_rqns; + u32 *rss_rqns, *rss_vhca_ids = NULL; int err; + if (!rqt_verify_vhca_ids(rqt->mdev, vhca_ids, num_rqns)) + return -EINVAL; + if (WARN_ON(rqt->size != indir->max_table_size)) return -EINVAL; @@ -170,13 +237,23 @@ int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_ if (!rss_rqns) return -ENOMEM; - err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir); + if (vhca_ids) { + rss_vhca_ids = kvmalloc_array(indir->actual_table_size, sizeof(*rss_vhca_ids), + GFP_KERNEL); + if (!rss_vhca_ids) { + kvfree(rss_rqns); + return -ENOMEM; + } + } + + err = mlx5e_calc_indir_rqns(rss_rqns, rqns, rss_vhca_ids, vhca_ids, num_rqns, hfunc, indir); if (err) goto out; - err = mlx5e_rqt_redirect(rqt, rss_rqns, indir->actual_table_size); + err = mlx5e_rqt_redirect(rqt, rss_rqns, rss_vhca_ids, indir->actual_table_size); out: + kvfree(rss_vhca_ids); kvfree(rss_rqns); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h index 77fba3ebd18d..e0bc30308c77 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h @@ -20,7 +20,7 @@ void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir, unsigned int num_channels); struct mlx5e_rqt { - struct mlx5_core_dev *mdev; + struct mlx5_core_dev *mdev; /* primary */ u32 rqtn; u16 size; }; @@ -28,7 +28,7 @@ struct mlx5e_rqt { int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, bool indir_enabled, u32 init_rqn, u32 indir_table_size); int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, - u32 *rqns, unsigned int num_rqns, + u32 *rqns, u32 *vhca_ids, unsigned int num_rqns, u8 hfunc, struct mlx5e_rss_params_indir *indir); void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt); @@ -38,8 +38,9 @@ static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt) } u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels); -int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn); -int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns, +int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn, u32 *vhca_id); +int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids, + unsigned int num_rqns, u8 hfunc, struct 
mlx5e_rss_params_indir *indir); #endif /* __MLX5_EN_RQT_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c index c1545a2e8d6d..5f742f896600 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c @@ -74,7 +74,7 @@ struct mlx5e_rss { struct mlx5e_tir *tir[MLX5E_NUM_INDIR_TIRS]; struct mlx5e_tir *inner_tir[MLX5E_NUM_INDIR_TIRS]; struct mlx5e_rqt rqt; - struct mlx5_core_dev *mdev; + struct mlx5_core_dev *mdev; /* primary */ u32 drop_rqn; bool inner_ft_support; bool enabled; @@ -473,21 +473,22 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, return 0; } -static int mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns) +static int mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns) { int err; - err = mlx5e_rqt_redirect_indir(&rss->rqt, rqns, num_rqns, rss->hash.hfunc, &rss->indir); + err = mlx5e_rqt_redirect_indir(&rss->rqt, rqns, vhca_ids, num_rqns, rss->hash.hfunc, + &rss->indir); if (err) mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to channels: err = %d\n", mlx5e_rqt_get_rqtn(&rss->rqt), err); return err; } -void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns) +void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns) { rss->enabled = true; - mlx5e_rss_apply(rss, rqns, num_rqns); + mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns); } void mlx5e_rss_disable(struct mlx5e_rss *rss) @@ -495,7 +496,7 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss) int err; rss->enabled = false; - err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn); + err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn, NULL); if (err) mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to drop RQ %#x: err = %d\n", mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err); @@ -568,7 +569,7 @@ int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc) int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, const u8 *key, const u8 *hfunc, - u32 *rqns, unsigned int num_rqns) + u32 *rqns, u32 *vhca_ids, unsigned int num_rqns) { bool changed_indir = false; bool changed_hash = false; @@ -608,7 +609,7 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, } if (changed_indir && rss->enabled) { - err = mlx5e_rss_apply(rss, rqns, num_rqns); + err = mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns); if (err) { mlx5e_rss_copy(rss, old_rss); goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h index d1d0bc350e92..d0df98963c8d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h @@ -39,7 +39,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, const struct mlx5e_packet_merge_param *init_pkt_merge_param, bool inner, u32 *tirn); -void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns); +void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns); void mlx5e_rss_disable(struct mlx5e_rss *rss); int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss, @@ -47,7 +47,7 @@ int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss, int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc); int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, const u8 *key, const u8 *hfunc, - u32 *rqns, unsigned int num_rqns); + u32 *rqns, u32 *vhca_ids, 
unsigned int num_rqns); struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss); u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt); int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c index b23e224e3763..a86eade9a9e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c @@ -8,7 +8,7 @@ #define MLX5E_MAX_NUM_RSS 16 struct mlx5e_rx_res { - struct mlx5_core_dev *mdev; + struct mlx5_core_dev *mdev; /* primary */ enum mlx5e_rx_res_features features; unsigned int max_nch; u32 drop_rqn; @@ -19,6 +19,7 @@ struct mlx5e_rx_res { struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS]; bool rss_active; u32 *rss_rqns; + u32 *rss_vhca_ids; unsigned int rss_nch; struct { @@ -34,6 +35,13 @@ struct mlx5e_rx_res { /* API for rx_res_rss_* */ +static u32 *get_vhca_ids(struct mlx5e_rx_res *res, int offset) +{ + bool multi_vhca = res->features & MLX5E_RX_RES_FEATURE_MULTI_VHCA; + + return multi_vhca ? res->rss_vhca_ids + offset : NULL; +} + void mlx5e_rx_res_rss_update_num_channels(struct mlx5e_rx_res *res, u32 nch) { int i; @@ -85,8 +93,11 @@ int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int i return PTR_ERR(rss); mlx5e_rss_set_indir_uniform(rss, init_nch); - if (res->rss_active) - mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch); + if (res->rss_active) { + u32 *vhca_ids = get_vhca_ids(res, 0); + + mlx5e_rss_enable(rss, res->rss_rqns, vhca_ids, res->rss_nch); + } res->rss[i] = rss; *rss_idx = i; @@ -153,10 +164,12 @@ static void mlx5e_rx_res_rss_enable(struct mlx5e_rx_res *res) for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) { struct mlx5e_rss *rss = res->rss[i]; + u32 *vhca_ids; if (!rss) continue; - mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch); + vhca_ids = get_vhca_ids(res, 0); + mlx5e_rss_enable(rss, res->rss_rqns, vhca_ids, res->rss_nch); } } @@ -200,6 +213,7 @@ int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, const u32 *indir, const u8 *key, const u8 *hfunc) { + u32 *vhca_ids = get_vhca_ids(res, 0); struct mlx5e_rss *rss; if (rss_idx >= MLX5E_MAX_NUM_RSS) @@ -209,7 +223,8 @@ int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, if (!rss) return -ENOENT; - return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, res->rss_nch); + return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, vhca_ids, + res->rss_nch); } int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx, @@ -280,11 +295,13 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx) static void mlx5e_rx_res_free(struct mlx5e_rx_res *res) { + kvfree(res->rss_vhca_ids); kvfree(res->rss_rqns); kvfree(res); } -static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsigned int max_nch) +static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsigned int max_nch, + bool multi_vhca) { struct mlx5e_rx_res *rx_res; @@ -298,6 +315,15 @@ static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsig return NULL; } + if (multi_vhca) { + rx_res->rss_vhca_ids = kvcalloc(max_nch, sizeof(*rx_res->rss_vhca_ids), GFP_KERNEL); + if (!rx_res->rss_vhca_ids) { + kvfree(rx_res->rss_rqns); + kvfree(rx_res); + return NULL; + } + } + return rx_res; } @@ -424,10 +450,11 @@ 
mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features featu const struct mlx5e_packet_merge_param *init_pkt_merge_param, unsigned int init_nch) { + bool multi_vhca = features & MLX5E_RX_RES_FEATURE_MULTI_VHCA; struct mlx5e_rx_res *res; int err; - res = mlx5e_rx_res_alloc(mdev, max_nch); + res = mlx5e_rx_res_alloc(mdev, max_nch, multi_vhca); if (!res) return ERR_PTR(-ENOMEM); @@ -504,10 +531,11 @@ static void mlx5e_rx_res_channel_activate_direct(struct mlx5e_rx_res *res, struct mlx5e_channels *chs, unsigned int ix) { + u32 *vhca_id = get_vhca_ids(res, ix); u32 rqn = res->rss_rqns[ix]; int err; - err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn); + err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn, vhca_id); if (err) mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n", mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt), @@ -519,7 +547,7 @@ static void mlx5e_rx_res_channel_deactivate_direct(struct mlx5e_rx_res *res, { int err; - err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn); + err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn, NULL); if (err) mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n", mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt), @@ -534,10 +562,12 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann nch = mlx5e_channels_get_num(chs); for (ix = 0; ix < chs->num; ix++) { + u32 *vhca_id = get_vhca_ids(res, ix); + if (mlx5e_channels_is_xsk(chs, ix)) - mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix]); + mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix], vhca_id); else - mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]); + mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix], vhca_id); } res->rss_nch = chs->num; @@ -554,7 +584,7 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann if (!mlx5e_channels_get_ptp_rqn(chs, &rqn)) rqn = res->drop_rqn; - err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn); + err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn, NULL); if (err) mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (PTP): err = %d\n", mlx5e_rqt_get_rqtn(&res->ptp.rqt), @@ -573,7 +603,7 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res) mlx5e_rx_res_channel_deactivate_direct(res, ix); if (res->features & MLX5E_RX_RES_FEATURE_PTP) { - err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn); + err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn, NULL); if (err) mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (PTP): err = %d\n", mlx5e_rqt_get_rqtn(&res->ptp.rqt), @@ -584,10 +614,12 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res) void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *chs, unsigned int ix, bool xsk) { + u32 *vhca_id = get_vhca_ids(res, ix); + if (xsk) - mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix]); + mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix], vhca_id); else - mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]); + mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix], vhca_id); mlx5e_rx_res_rss_enable(res); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h index 82aaba8a82b3..7b1a9f0f1874 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h 
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h @@ -18,6 +18,7 @@ struct mlx5e_rss_params_hash; enum mlx5e_rx_res_features { MLX5E_RX_RES_FEATURE_INNER_FT = BIT(0), MLX5E_RX_RES_FEATURE_PTP = BIT(1), + MLX5E_RX_RES_FEATURE_MULTI_VHCA = BIT(2), }; /* Setup */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c index 86bf007fd05b..b500cc2c9689 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c @@ -37,7 +37,7 @@ mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) { if (priv->mdev->coredev_type == MLX5_COREDEV_PF) - mlx5_core_warn(priv->mdev, "firmware level support is missing\n"); + mlx5_core_dbg(priv->mdev, "firmware flow level support is missing\n"); err = -EOPNOTSUPP; goto err_check; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c index ac458a8d10e0..53ca16cb9c41 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c @@ -63,10 +63,12 @@ static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t) struct mlx5e_create_cq_param ccp = {}; struct dim_cq_moder trap_moder = {}; struct mlx5e_rq *rq = &t->rq; + u16 q_counter; int node; int err; node = dev_to_node(mdev->device); + q_counter = priv->q_counter[0]; ccp.netdev = priv->netdev; ccp.wq = priv->wq; @@ -79,7 +81,7 @@ static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t) return err; mlx5e_init_trap_rq(t, &t->params, rq); - err = mlx5e_open_rq(&t->params, rq_param, NULL, node, rq); + err = mlx5e_open_rq(&t->params, rq_param, NULL, node, q_counter, rq); if (err) goto err_destroy_cq; @@ -116,15 +118,14 @@ static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct ml } static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev, - int max_mtu, u16 q_counter, - struct mlx5e_trap *t) + int max_mtu, struct mlx5e_trap *t) { struct mlx5e_params *params = &t->params; params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC; mlx5e_init_rq_type_params(mdev, params); params->sw_mtu = max_mtu; - mlx5e_build_rq_param(mdev, params, NULL, q_counter, &t->rq_param); + mlx5e_build_rq_param(mdev, params, NULL, &t->rq_param); } static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv) @@ -138,7 +139,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv) if (!t) return ERR_PTR(-ENOMEM); - mlx5e_build_trap_params(priv->mdev, netdev->max_mtu, priv->q_counter, t); + mlx5e_build_trap_params(priv->mdev, netdev->max_mtu, t); t->priv = priv; t->mdev = priv->mdev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c index ebada0c5af3c..db776e515b6a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c @@ -6,10 +6,10 @@ #include "setup.h" #include "en/params.h" -static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv, +static int mlx5e_xsk_map_pool(struct mlx5_core_dev *mdev, struct xsk_buff_pool *pool) { - struct device *dev = mlx5_core_dma_dev(priv->mdev); + struct device *dev = mlx5_core_dma_dev(mdev); return xsk_pool_dma_map(pool, dev, DMA_ATTR_SKIP_CPU_SYNC); } @@ -89,7 +89,7 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv, if 
(unlikely(!mlx5e_xsk_is_pool_sane(pool))) return -EINVAL; - err = mlx5e_xsk_map_pool(priv, pool); + err = mlx5e_xsk_map_pool(mlx5_sd_ch_ix_get_dev(priv->mdev, ix), pool); if (unlikely(err)) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index 82e6abbc1734..06592b9f0424 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -49,10 +49,9 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params, static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, - u16 q_counter, struct mlx5e_channel_param *cparam) { - mlx5e_build_rq_param(mdev, params, xsk, q_counter, &cparam->rq); + mlx5e_build_rq_param(mdev, params, xsk, &cparam->rq); mlx5e_build_xdpsq_param(mdev, params, xsk, &cparam->xdp_sq); } @@ -93,6 +92,7 @@ static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *param struct mlx5e_rq_param *rq_params, struct xsk_buff_pool *pool, struct mlx5e_xsk_param *xsk) { + u16 q_counter = c->priv->q_counter[c->sd_ix]; struct mlx5e_rq *xskrq = &c->xskrq; int err; @@ -100,7 +100,7 @@ static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *param if (err) return err; - err = mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), xskrq); + err = mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), q_counter, xskrq); if (err) return err; @@ -125,7 +125,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, if (!cparam) return -ENOMEM; - mlx5e_build_xsk_cparam(priv->mdev, params, xsk, priv->q_counter, cparam); + mlx5e_build_xsk_cparam(priv->mdev, params, xsk, cparam); err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp, &c->xskrq.cq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c index 984fa04bd331..e3e57c849436 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c @@ -96,7 +96,7 @@ bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev) { u8 max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev); - if (is_kdump_kernel() || !MLX5_CAP_GEN(mdev, tls_rx)) + if (is_kdump_kernel() || !MLX5_CAP_GEN(mdev, tls_rx) || mlx5_get_sd(mdev)) return false; /* Check the possibility to post the required ICOSQ WQEs. 
*/ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index f11075e67658..adc6d8ea0960 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -11,6 +11,7 @@ #ifdef CONFIG_MLX5_EN_TLS #include "lib/crypto.h" +#include "lib/mlx5.h" struct mlx5_crypto_dek *mlx5_ktls_create_key(struct mlx5_crypto_dek_pool *dek_pool, struct tls_crypto_info *crypto_info); @@ -61,7 +62,8 @@ void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_ static inline bool mlx5e_is_ktls_tx(struct mlx5_core_dev *mdev) { - return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx); + return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx) && + !mlx5_get_sd(mdev); } bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index 9b597cb24598..65ccb33edafb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -267,7 +267,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq, goto err_out; } - pdev = mlx5_core_dma_dev(sq->channel->priv->mdev); + pdev = mlx5_core_dma_dev(sq->channel->mdev); buf->dma_addr = dma_map_single(pdev, &buf->progress, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) { @@ -425,14 +425,12 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi, { struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf; struct mlx5e_ktls_offload_context_rx *priv_rx; - struct mlx5e_ktls_rx_resync_ctx *resync; u8 tracker_state, auth_state, *ctx; struct device *dev; u32 hw_seq; priv_rx = buf->priv_rx; - resync = &priv_rx->resync; - dev = mlx5_core_dma_dev(resync->priv->mdev); + dev = mlx5_core_dma_dev(sq->channel->mdev); if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c index d4ebd8743114..b2cabd6ab86c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c @@ -310,9 +310,9 @@ static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_o mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); } -static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec, - struct mlx5e_macsec_sa *sa, - bool is_tx, struct net_device *netdev, u32 fs_id) +static void mlx5e_macsec_cleanup_sa_fs(struct mlx5e_macsec *macsec, + struct mlx5e_macsec_sa *sa, bool is_tx, + struct net_device *netdev, u32 fs_id) { int action = (is_tx) ? 
MLX5_ACCEL_MACSEC_ACTION_ENCRYPT : MLX5_ACCEL_MACSEC_ACTION_DECRYPT; @@ -322,20 +322,49 @@ static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec, mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action, netdev, fs_id); - mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id); sa->macsec_rule = NULL; } +static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec, + struct mlx5e_macsec_sa *sa, bool is_tx, + struct net_device *netdev, u32 fs_id) +{ + mlx5e_macsec_cleanup_sa_fs(macsec, sa, is_tx, netdev, fs_id); + mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id); +} + +static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx, + struct mlx5e_macsec_sa *sa, bool encrypt, + bool is_tx, u32 *fs_id) +{ + struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev); + struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs; + struct mlx5_macsec_rule_attrs rule_attrs; + union mlx5_macsec_rule *macsec_rule; + + rule_attrs.macsec_obj_id = sa->macsec_obj_id; + rule_attrs.sci = sa->sci; + rule_attrs.assoc_num = sa->assoc_num; + rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT : + MLX5_ACCEL_MACSEC_ACTION_DECRYPT; + + macsec_rule = mlx5_macsec_fs_add_rule(macsec_fs, ctx, &rule_attrs, fs_id); + if (!macsec_rule) + return -ENOMEM; + + sa->macsec_rule = macsec_rule; + + return 0; +} + static int mlx5e_macsec_init_sa(struct macsec_context *ctx, struct mlx5e_macsec_sa *sa, bool encrypt, bool is_tx, u32 *fs_id) { struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev); struct mlx5e_macsec *macsec = priv->macsec; - struct mlx5_macsec_rule_attrs rule_attrs; struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_macsec_obj_attrs obj_attrs; - union mlx5_macsec_rule *macsec_rule; int err; obj_attrs.next_pn = sa->next_pn; @@ -357,20 +386,12 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx, if (err) return err; - rule_attrs.macsec_obj_id = sa->macsec_obj_id; - rule_attrs.sci = sa->sci; - rule_attrs.assoc_num = sa->assoc_num; - rule_attrs.action = (is_tx) ? 
MLX5_ACCEL_MACSEC_ACTION_ENCRYPT : - MLX5_ACCEL_MACSEC_ACTION_DECRYPT; - - macsec_rule = mlx5_macsec_fs_add_rule(mdev->macsec_fs, ctx, &rule_attrs, fs_id); - if (!macsec_rule) { - err = -ENOMEM; - goto destroy_macsec_object; + if (sa->active) { + err = mlx5e_macsec_init_sa_fs(ctx, sa, encrypt, is_tx, fs_id); + if (err) + goto destroy_macsec_object; } - sa->macsec_rule = macsec_rule; - return 0; destroy_macsec_object: @@ -526,9 +547,7 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx) goto destroy_sa; macsec_device->tx_sa[assoc_num] = tx_sa; - if (!secy->operational || - assoc_num != tx_sc->encoding_sa || - !tx_sa->active) + if (!secy->operational) goto out; err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL); @@ -595,7 +614,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx) goto out; if (ctx_tx_sa->active) { - err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL); + err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL); if (err) goto out; } else { @@ -604,7 +623,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx) goto out; } - mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0); + mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0); } out: mutex_unlock(&macsec->lock); @@ -1030,8 +1049,9 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx) goto out; } - mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev, - rx_sc->sc_xarray_element->fs_id); + if (rx_sa->active) + mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev, + rx_sc->sc_xarray_element->fs_id); mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id); kfree(rx_sa); rx_sc->rx_sa[assoc_num] = NULL; @@ -1112,8 +1132,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx, if (!rx_sa || !rx_sa->macsec_rule) continue; - mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev, - rx_sc->sc_xarray_element->fs_id); + mlx5e_macsec_cleanup_sa_fs(macsec, rx_sa, false, ctx->secy->netdev, + rx_sc->sc_xarray_element->fs_id); } } @@ -1124,8 +1144,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx, continue; if (rx_sa->active) { - err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, - &rx_sc->sc_xarray_element->fs_id); + err = mlx5e_macsec_init_sa_fs(ctx, rx_sa, true, false, + &rx_sc->sc_xarray_element->fs_id); if (err) goto out; } @@ -1178,7 +1198,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx) if (!tx_sa) continue; - mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0); + mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0); } for (i = 0; i < MACSEC_NUM_AN; ++i) { @@ -1187,7 +1207,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx) continue; if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) { - err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL); + err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL); if (err) goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index e66f486faafe..c7f542d0b8f0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -34,6 +34,7 @@ #include <linux/mlx5/fs.h> #include <linux/ip.h> #include <linux/ipv6.h> +#include <net/rps.h> #include "en.h" #define ARFS_HASH_SHIFT BITS_PER_BYTE diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index be809556b2e1..91848eae4565 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -70,6 +70,7 @@ #include "qos.h" #include "en/trap.h" #include "lib/devcom.h" +#include "lib/sd.h" bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift, enum mlx5e_mpwrq_umr_mode umr_mode) @@ -1024,7 +1025,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) mlx5_wq_destroy(&rq->wq_ctrl); } -int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) +int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter) { struct mlx5_core_dev *mdev = rq->mdev; u8 ts_format; @@ -1051,6 +1052,7 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn); MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); MLX5_SET(rqc, rqc, ts_format, ts_format); + MLX5_SET(rqc, rqc, counter_set_id, q_counter); MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); @@ -1274,7 +1276,7 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq) } int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param, - struct mlx5e_xsk_param *xsk, int node, + struct mlx5e_xsk_param *xsk, int node, u16 q_counter, struct mlx5e_rq *rq) { struct mlx5_core_dev *mdev = rq->mdev; @@ -1287,7 +1289,7 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param, if (err) return err; - err = mlx5e_create_rq(rq, param); + err = mlx5e_create_rq(rq, param, q_counter); if (err) goto err_free_rq; @@ -2335,13 +2337,14 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_rq_param *rq_params) { + u16 q_counter = c->priv->q_counter[c->sd_ix]; int err; err = mlx5e_init_rxq_rq(c, params, rq_params->xdp_frag_size, &c->rq); if (err) return err; - return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), &c->rq); + return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), q_counter, &c->rq); } static int mlx5e_open_queues(struct mlx5e_channel *c, @@ -2528,14 +2531,20 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct xsk_buff_pool *xsk_pool, struct mlx5e_channel **cp) { - int cpu = mlx5_comp_vector_get_cpu(priv->mdev, ix); struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev; struct mlx5e_xsk_param xsk; struct mlx5e_channel *c; unsigned int irq; + int vec_ix; + int cpu; int err; - err = mlx5_comp_irqn_get(priv->mdev, ix, &irq); + mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix); + vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix); + cpu = mlx5_comp_vector_get_cpu(mdev, vec_ix); + + err = mlx5_comp_irqn_get(mdev, vec_ix, &irq); if (err) return err; @@ -2548,18 +2557,20 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, return -ENOMEM; c->priv = priv; - c->mdev = priv->mdev; + c->mdev = mdev; c->tstamp = &priv->tstamp; c->ix = ix; + c->vec_ix = vec_ix; + c->sd_ix = mlx5_sd_ch_ix_get_dev_ix(mdev, ix); c->cpu = cpu; - c->pdev = mlx5_core_dma_dev(priv->mdev); + c->pdev = mlx5_core_dma_dev(mdev); c->netdev = priv->netdev; - c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey); + c->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey); c->num_tc = mlx5e_get_dcb_num_tc(params); c->xdp = !!params->xdp_prog; c->stats = &priv->channel_stats[ix]->ch; 
c->aff_mask = irq_get_effective_affinity_mask(irq); - c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix); + c->lag_port = mlx5e_enumerate_lag_port(mdev, ix); netif_napi_add(netdev, &c->napi, mlx5e_napi_poll); netif_napi_set_irq(&c->napi, irq); @@ -2654,7 +2665,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, if (!chs->c || !cparam) goto err_free; - err = mlx5e_build_channel_param(priv->mdev, &chs->params, priv->q_counter, cparam); + err = mlx5e_build_channel_param(priv->mdev, &chs->params, cparam); if (err) goto err_free; @@ -2942,15 +2953,18 @@ static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues); static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv, struct mlx5e_params *params) { - struct mlx5_core_dev *mdev = priv->mdev; - int num_comp_vectors, ix, irq; - - num_comp_vectors = mlx5_comp_vectors_max(mdev); + int ix; for (ix = 0; ix < params->num_channels; ix++) { + int num_comp_vectors, irq, vec_ix; + struct mlx5_core_dev *mdev; + + mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix); + num_comp_vectors = mlx5_comp_vectors_max(mdev); cpumask_clear(priv->scratchpad.cpumask); + vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix); - for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) { + for (irq = vec_ix; irq < num_comp_vectors; irq += params->num_channels) { int cpu = mlx5_comp_vector_get_cpu(mdev, irq); cpumask_set_cpu(cpu, priv->scratchpad.cpumask); @@ -3342,7 +3356,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv, struct mlx5e_cq *cq = &drop_rq->cq; int err; - mlx5e_build_drop_rq_param(mdev, priv->drop_rq_q_counter, &rq_param); + mlx5e_build_drop_rq_param(mdev, &rq_param); err = mlx5e_alloc_drop_cq(priv, cq, &cq_param); if (err) @@ -3356,7 +3370,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv, if (err) goto err_destroy_cq; - err = mlx5e_create_rq(drop_rq, &rq_param); + err = mlx5e_create_rq(drop_rq, &rq_param, priv->drop_rq_q_counter); if (err) goto err_free_rq; @@ -5271,13 +5285,17 @@ void mlx5e_create_q_counters(struct mlx5e_priv *priv) u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {}; u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {}; struct mlx5_core_dev *mdev = priv->mdev; - int err; + struct mlx5_core_dev *pos; + int err, i; MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER); - err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out); - if (!err) - priv->q_counter = - MLX5_GET(alloc_q_counter_out, out, counter_set_id); + + mlx5_sd_for_each_dev(i, mdev, pos) { + err = mlx5_cmd_exec_inout(pos, alloc_q_counter, in, out); + if (!err) + priv->q_counter[i] = + MLX5_GET(alloc_q_counter_out, out, counter_set_id); + } err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out); if (!err) @@ -5288,13 +5306,17 @@ void mlx5e_create_q_counters(struct mlx5e_priv *priv) void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) { u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {}; + struct mlx5_core_dev *pos; + int i; MLX5_SET(dealloc_q_counter_in, in, opcode, MLX5_CMD_OP_DEALLOC_Q_COUNTER); - if (priv->q_counter) { - MLX5_SET(dealloc_q_counter_in, in, counter_set_id, - priv->q_counter); - mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in); + mlx5_sd_for_each_dev(i, priv->mdev, pos) { + if (priv->q_counter[i]) { + MLX5_SET(dealloc_q_counter_in, in, counter_set_id, + priv->q_counter[i]); + mlx5_cmd_exec_in(pos, dealloc_q_counter, in); + } } if (priv->drop_rq_q_counter) { @@ -5378,6 +5400,8 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) features = MLX5E_RX_RES_FEATURE_PTP; if (mlx5_tunnel_inner_ft_supported(mdev)) features |= 
MLX5E_RX_RES_FEATURE_INNER_FT; + if (mlx5_get_sd(priv->mdev)) + features |= MLX5E_RX_RES_FEATURE_MULTI_VHCA; priv->rx_res = mlx5e_rx_res_create(priv->mdev, features, priv->max_nch, priv->drop_rq.rqn, &priv->channels.params.packet_merge, @@ -5987,28 +6011,52 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv) free_netdev(netdev); } -static int mlx5e_resume(struct auxiliary_device *adev) +static int _mlx5e_resume(struct auxiliary_device *adev) { struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev); struct mlx5e_priv *priv = mlx5e_dev->priv; struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = edev->mdev; - int err; + struct mlx5_core_dev *pos, *to; + int err, i; if (netif_device_present(netdev)) return 0; - err = mlx5e_create_mdev_resources(mdev, true); - if (err) - return err; + mlx5_sd_for_each_dev(i, mdev, pos) { + err = mlx5e_create_mdev_resources(pos, true); + if (err) + goto err_destroy_mdev_res; + } err = mlx5e_attach_netdev(priv); - if (err) { - mlx5e_destroy_mdev_resources(mdev); + if (err) + goto err_destroy_mdev_res; + + return 0; + +err_destroy_mdev_res: + to = pos; + mlx5_sd_for_each_dev_to(i, mdev, to, pos) + mlx5e_destroy_mdev_resources(pos); + return err; +} + +static int mlx5e_resume(struct auxiliary_device *adev) +{ + struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); + struct mlx5_core_dev *mdev = edev->mdev; + struct auxiliary_device *actual_adev; + int err; + + err = mlx5_sd_init(mdev); + if (err) return err; - } + actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx); + if (actual_adev) + return _mlx5e_resume(actual_adev); return 0; } @@ -6018,21 +6066,36 @@ static int _mlx5e_suspend(struct auxiliary_device *adev) struct mlx5e_priv *priv = mlx5e_dev->priv; struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_core_dev *pos; + int i; if (!netif_device_present(netdev)) { if (test_bit(MLX5E_STATE_DESTROYING, &priv->state)) - mlx5e_destroy_mdev_resources(mdev); + mlx5_sd_for_each_dev(i, mdev, pos) + mlx5e_destroy_mdev_resources(pos); return -ENODEV; } mlx5e_detach_netdev(priv); - mlx5e_destroy_mdev_resources(mdev); + mlx5_sd_for_each_dev(i, mdev, pos) + mlx5e_destroy_mdev_resources(pos); + return 0; } static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state) { - return _mlx5e_suspend(adev); + struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); + struct mlx5_core_dev *mdev = edev->mdev; + struct auxiliary_device *actual_adev; + int err = 0; + + actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx); + if (actual_adev) + err = _mlx5e_suspend(actual_adev); + + mlx5_sd_cleanup(mdev); + return err; } static int _mlx5e_probe(struct auxiliary_device *adev) @@ -6078,9 +6141,9 @@ static int _mlx5e_probe(struct auxiliary_device *adev) goto err_destroy_netdev; } - err = mlx5e_resume(adev); + err = _mlx5e_resume(adev); if (err) { - mlx5_core_err(mdev, "mlx5e_resume failed, %d\n", err); + mlx5_core_err(mdev, "_mlx5e_resume failed, %d\n", err); goto err_profile_cleanup; } @@ -6111,15 +6174,29 @@ err_devlink_unregister: static int mlx5e_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) { - return _mlx5e_probe(adev); + struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); + struct mlx5_core_dev *mdev = edev->mdev; + struct auxiliary_device *actual_adev; + int err; + + err = mlx5_sd_init(mdev); + if (err) + return err; + + actual_adev = mlx5_sd_get_adev(mdev, adev, 
edev->idx); + if (actual_adev) + return _mlx5e_probe(actual_adev); + return 0; } -static void mlx5e_remove(struct auxiliary_device *adev) +static void _mlx5e_remove(struct auxiliary_device *adev) { + struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev); struct mlx5e_priv *priv = mlx5e_dev->priv; + struct mlx5_core_dev *mdev = edev->mdev; - mlx5_core_uplink_netdev_set(priv->mdev, NULL); + mlx5_core_uplink_netdev_set(mdev, NULL); mlx5e_dcbnl_delete_app(priv); unregister_netdev(priv->netdev); _mlx5e_suspend(adev); @@ -6129,6 +6206,19 @@ static void mlx5e_remove(struct auxiliary_device *adev) mlx5e_destroy_devlink(mlx5e_dev); } +static void mlx5e_remove(struct auxiliary_device *adev) +{ + struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); + struct mlx5_core_dev *mdev = edev->mdev; + struct auxiliary_device *actual_adev; + + actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx); + if (actual_adev) + _mlx5e_remove(actual_adev); + + mlx5_sd_cleanup(mdev); +} + static const struct auxiliary_device_id mlx5e_id_table[] = { { .name = MLX5_ADEV_NAME ".eth", }, {}, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 4b96ad657145..f3d0898bdbc6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -561,11 +561,23 @@ static const struct counter_desc drop_rq_stats_desc[] = { #define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc) #define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc) +static bool q_counter_any(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *pos; + int i; + + mlx5_sd_for_each_dev(i, priv->mdev, pos) + if (priv->q_counter[i++]) + return true; + + return false; +} + static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt) { int num_stats = 0; - if (priv->q_counter) + if (q_counter_any(priv)) num_stats += NUM_Q_COUNTERS; if (priv->drop_rq_q_counter) @@ -578,7 +590,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt) { int i; - for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++) + for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format); @@ -593,7 +605,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt) { int i; - for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++) + for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++) data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, q_stats_desc, i); for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++) @@ -607,18 +619,23 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt) struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt; u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {}; u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {}; - int ret; + struct mlx5_core_dev *pos; + u32 rx_out_of_buffer = 0; + int ret, i; MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER); - if (priv->q_counter) { - MLX5_SET(query_q_counter_in, in, counter_set_id, - priv->q_counter); - ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out); - if (!ret) - qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, - out, out_of_buffer); + mlx5_sd_for_each_dev(i, priv->mdev, pos) { + if (priv->q_counter[i]) { + MLX5_SET(query_q_counter_in, in, counter_set_id, + priv->q_counter[i]); + ret = mlx5_cmd_exec_inout(pos, query_q_counter, in, out); + if (!ret) + rx_out_of_buffer += MLX5_GET(query_q_counter_out, + out, out_of_buffer); + } } + 
qcnt->rx_out_of_buffer = rx_out_of_buffer; if (priv->drop_rq_q_counter) { MLX5_SET(query_q_counter_in, in, counter_set_id, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 9fb2c057bd78..31ed26cac9bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -766,7 +766,7 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp) return err; mlx5e_rss_params_indir_init_uniform(&indir, hp->num_channels); - err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels, + err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, NULL, hp->num_channels, mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc, &indir); @@ -1169,7 +1169,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz), MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz)); - params.q_counter = priv->q_counter; + params.q_counter = priv->q_counter[0]; err = devl_param_driverinit_value_get( devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES, &val); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 5c166d9d2dca..2fa076b23fbe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -401,6 +401,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, mlx5e_skb_cb_hwtstamp_init(skb); mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb, metadata_index); + /* ensure skb is put on metadata_map before tracking the index */ + wmb(); mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index); if (!netif_tx_queue_stopped(sq->txq) && mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c index 190f10aba170..5a0047bdcb51 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c @@ -152,7 +152,7 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) xa_for_each(&esw->offloads.vport_reps, i, rep) { rpriv = rep->rep_data[REP_ETH].priv; - if (!rpriv || !rpriv->netdev || !atomic_read(&rpriv->tc_ht.nelems)) + if (!rpriv || !rpriv->netdev) continue; rhashtable_walk_enter(&rpriv->tc_ht, &iter); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index b0455134c98e..baaae628b0a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -535,21 +535,26 @@ esw_src_port_rewrite_supported(struct mlx5_eswitch *esw) } static bool -esw_dests_to_vf_pf_vports(struct mlx5_flow_destination *dests, int max_dest) +esw_dests_to_int_external(struct mlx5_flow_destination *dests, int max_dest) { - bool vf_dest = false, pf_dest = false; + bool internal_dest = false, external_dest = false; int i; for (i = 0; i < max_dest; i++) { - if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT) + if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT && + dests[i].type != MLX5_FLOW_DESTINATION_TYPE_UPLINK) continue; - if (dests[i].vport.num == MLX5_VPORT_UPLINK) - pf_dest = true; + /* Uplink dest is external, but considered as internal + * if there is reformat because firmware uses LB+hairpin to support it. 
+ */ + if (dests[i].vport.num == MLX5_VPORT_UPLINK && + !(dests[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) + external_dest = true; else - vf_dest = true; + internal_dest = true; - if (vf_dest && pf_dest) + if (internal_dest && external_dest) return true; } @@ -695,9 +700,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, /* Header rewrite with combined wire+loopback in FDB is not allowed */ if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) && - esw_dests_to_vf_pf_vports(dest, i)) { + esw_dests_to_int_external(dest, i)) { esw_warn(esw->dev, - "FDB: Header rewrite with forwarding to both PF and VF is not allowed\n"); + "FDB: Header rewrite with forwarding to both internal and external dests is not allowed\n"); rule = ERR_PTR(-EINVAL); goto err_esw_get; } @@ -3658,22 +3663,6 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode) return 0; } -static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink) -{ - struct mlx5_core_dev *dev = devlink_priv(devlink); - struct net *devl_net, *netdev_net; - bool ret = false; - - mutex_lock(&dev->mlx5e_res.uplink_netdev_lock); - if (dev->mlx5e_res.uplink_netdev) { - netdev_net = dev_net(dev->mlx5e_res.uplink_netdev); - devl_net = devlink_net(devlink); - ret = net_eq(devl_net, netdev_net); - } - mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock); - return ret; -} - int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) { struct mlx5_eswitch *esw = dev->priv.eswitch; @@ -3718,13 +3707,6 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, if (esw_mode_from_devlink(mode, &mlx5_mode)) return -EINVAL; - if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && - !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) { - NL_SET_ERR_MSG_MOD(extack, - "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's."); - return -EPERM; - } - mlx5_lag_disable_change(esw->dev); err = mlx5_esw_try_lock(esw); if (err < 0) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index f27eab6e4929..2911aa34a5be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -703,19 +703,30 @@ void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev) { struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + if (!fw_reset) + return; + MLX5_NB_INIT(&fw_reset->nb, fw_reset_event_notifier, GENERAL_EVENT); mlx5_eq_notifier_register(dev, &fw_reset->nb); } void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev) { - mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb); + struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + + if (!fw_reset) + return; + + mlx5_eq_notifier_unregister(dev, &fw_reset->nb); } void mlx5_drain_fw_reset(struct mlx5_core_dev *dev) { struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + if (!fw_reset) + return; + set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags); cancel_work_sync(&fw_reset->fw_live_patch_work); cancel_work_sync(&fw_reset->reset_request_work); @@ -733,9 +744,13 @@ static const struct devlink_param mlx5_fw_reset_devlink_params[] = { int mlx5_fw_reset_init(struct mlx5_core_dev *dev) { - struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL); + struct mlx5_fw_reset *fw_reset; int err; + if (!MLX5_CAP_MCAM_REG(dev, mfrl)) + return 0; + + fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL); if (!fw_reset) return -ENOMEM; fw_reset->wq = create_singlethread_workqueue("mlx5_fw_reset_events"); @@ 
-771,6 +786,9 @@ void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev) { struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + if (!fw_reset) + return; + devl_params_unregister(priv_to_devlink(dev), mlx5_fw_reset_devlink_params, ARRAY_SIZE(mlx5_fw_reset_devlink_params)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 9463ede84d8d..ad38e31822df 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -452,10 +452,10 @@ mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct health_buffer __iomem *h = health->health; u8 synd = ioread8(&h->synd); + devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd); if (!synd) return 0; - devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd); devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd)); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h index ec32b686f586..d58032dd0df7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h @@ -10,6 +10,7 @@ enum mlx5_devcom_component { MLX5_DEVCOM_ESW_OFFLOADS, MLX5_DEVCOM_MPV, MLX5_DEVCOM_HCA_PORTS, + MLX5_DEVCOM_SD_GROUP, MLX5_DEVCOM_NUM_COMPONENTS, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h index 2b5826a785c4..37d5f445598c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h @@ -54,4 +54,16 @@ static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *md { return mdev->mlx5e_res.uplink_netdev; } + +struct mlx5_sd; + +static inline struct mlx5_sd *mlx5_get_sd(struct mlx5_core_dev *dev) +{ + return dev->sd; +} + +static inline void mlx5_set_sd(struct mlx5_core_dev *dev, struct mlx5_sd *sd) +{ + dev->sd = sd; +} #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c new file mode 100644 index 000000000000..5b28084e8a03 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c @@ -0,0 +1,524 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#include "lib/sd.h" +#include "mlx5_core.h" +#include "lib/mlx5.h" +#include "fs_cmd.h" +#include <linux/mlx5/vport.h> +#include <linux/debugfs.h> + +#define sd_info(__dev, format, ...) \ + dev_info((__dev)->device, "Socket-Direct: " format, ##__VA_ARGS__) +#define sd_warn(__dev, format, ...) \ + dev_warn((__dev)->device, "Socket-Direct: " format, ##__VA_ARGS__) + +struct mlx5_sd { + u32 group_id; + u8 host_buses; + struct mlx5_devcom_comp_dev *devcom; + struct dentry *dfs; + bool primary; + union { + struct { /* primary */ + struct mlx5_core_dev *secondaries[MLX5_SD_MAX_GROUP_SZ - 1]; + struct mlx5_flow_table *tx_ft; + }; + struct { /* secondary */ + struct mlx5_core_dev *primary_dev; + u32 alias_obj_id; + }; + }; +}; + +static int mlx5_sd_get_host_buses(struct mlx5_core_dev *dev) +{ + struct mlx5_sd *sd = mlx5_get_sd(dev); + + if (!sd) + return 1; + + return sd->host_buses; +} + +static struct mlx5_core_dev *mlx5_sd_get_primary(struct mlx5_core_dev *dev) +{ + struct mlx5_sd *sd = mlx5_get_sd(dev); + + if (!sd) + return dev; + + return sd->primary ? 
dev : sd->primary_dev; +} + +struct mlx5_core_dev * +mlx5_sd_primary_get_peer(struct mlx5_core_dev *primary, int idx) +{ + struct mlx5_sd *sd; + + if (idx == 0) + return primary; + + if (idx >= mlx5_sd_get_host_buses(primary)) + return NULL; + + sd = mlx5_get_sd(primary); + return sd->secondaries[idx - 1]; +} + +int mlx5_sd_ch_ix_get_dev_ix(struct mlx5_core_dev *dev, int ch_ix) +{ + return ch_ix % mlx5_sd_get_host_buses(dev); +} + +int mlx5_sd_ch_ix_get_vec_ix(struct mlx5_core_dev *dev, int ch_ix) +{ + return ch_ix / mlx5_sd_get_host_buses(dev); +} + +struct mlx5_core_dev *mlx5_sd_ch_ix_get_dev(struct mlx5_core_dev *primary, int ch_ix) +{ + int mdev_idx = mlx5_sd_ch_ix_get_dev_ix(primary, ch_ix); + + return mlx5_sd_primary_get_peer(primary, mdev_idx); +} + +static bool ft_create_alias_supported(struct mlx5_core_dev *dev) +{ + u64 obj_allowed = MLX5_CAP_GEN_2_64(dev, allowed_object_for_other_vhca_access); + u32 obj_supp = MLX5_CAP_GEN_2(dev, cross_vhca_object_to_object_supported); + + if (!(obj_supp & + MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_ROOT_TO_REMOTE_FLOW_TABLE)) + return false; + + if (!(obj_allowed & MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE)) + return false; + + return true; +} + +static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses) +{ + /* Feature is currently implemented for PFs only */ + if (!mlx5_core_is_pf(dev)) + return false; + + /* Honor the SW implementation limit */ + if (host_buses > MLX5_SD_MAX_GROUP_SZ) + return false; + + /* Disconnect secondaries from the network */ + if (!MLX5_CAP_GEN(dev, eswitch_manager)) + return false; + if (!MLX5_CAP_GEN(dev, silent_mode)) + return false; + + /* RX steering from primary to secondaries */ + if (!MLX5_CAP_GEN(dev, cross_vhca_rqt)) + return false; + if (host_buses > MLX5_CAP_GEN_2(dev, max_rqt_vhca_id)) + return false; + + /* TX steering from secondaries to primary */ + if (!ft_create_alias_supported(dev)) + return false; + if (!MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default)) + return false; + + return true; +} + +static int mlx5_query_sd(struct mlx5_core_dev *dev, bool *sdm, + u8 *host_buses, u8 *sd_group) +{ + u32 out[MLX5_ST_SZ_DW(mpir_reg)]; + int err; + + err = mlx5_query_mpir_reg(dev, out); + if (err) + return err; + + err = mlx5_query_nic_vport_sd_group(dev, sd_group); + if (err) + return err; + + *sdm = MLX5_GET(mpir_reg, out, sdm); + *host_buses = MLX5_GET(mpir_reg, out, host_buses); + + return 0; +} + +static u32 mlx5_sd_group_id(struct mlx5_core_dev *dev, u8 sd_group) +{ + return (u32)((MLX5_CAP_GEN(dev, native_port_num) << 8) | sd_group); +} + +static int sd_init(struct mlx5_core_dev *dev) +{ + u8 host_buses, sd_group; + struct mlx5_sd *sd; + u32 group_id; + bool sdm; + int err; + + if (!MLX5_CAP_MCAM_REG(dev, mpir)) + return 0; + + err = mlx5_query_sd(dev, &sdm, &host_buses, &sd_group); + if (err) + return err; + + if (!sdm) + return 0; + + if (!sd_group) + return 0; + + group_id = mlx5_sd_group_id(dev, sd_group); + + if (!mlx5_sd_is_supported(dev, host_buses)) { + sd_warn(dev, "can't support requested netdev combining for group id 0x%x), skipping\n", + group_id); + return 0; + } + + sd = kzalloc(sizeof(*sd), GFP_KERNEL); + if (!sd) + return -ENOMEM; + + sd->host_buses = host_buses; + sd->group_id = group_id; + + mlx5_set_sd(dev, sd); + + return 0; +} + +static void sd_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_sd *sd = mlx5_get_sd(dev); + + mlx5_set_sd(dev, NULL); + kfree(sd); +} + +static int sd_register(struct mlx5_core_dev *dev) +{ + struct 
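mlx5_sd_ch_ix_get_dev_ix() and mlx5_sd_ch_ix_get_vec_ix() above stripe channels round-robin across the PFs of a Socket-Direct group: modulo picks the device, division picks the per-device vector. A standalone sketch of that mapping (host_buses of 2 mirrors MLX5_SD_MAX_GROUP_SZ; the printed table is illustrative):

/* Round-robin striping of channels over a Socket-Direct group,
 * mirroring mlx5_sd_ch_ix_get_dev_ix()/_vec_ix() above. */
#include <stdio.h>

static int ch_to_dev(int ch_ix, int host_buses) { return ch_ix % host_buses; }
static int ch_to_vec(int ch_ix, int host_buses) { return ch_ix / host_buses; }

int main(void)
{
	int host_buses = 2;	/* MLX5_SD_MAX_GROUP_SZ in the patch */

	for (int ch = 0; ch < 8; ch++)
		printf("channel %d -> dev %d, vector %d\n",
		       ch, ch_to_dev(ch, host_buses), ch_to_vec(ch, host_buses));
	/* channels 0,2,4,6 land on dev 0 and 1,3,5,7 on dev 1,
	 * each device seeing a dense vector range 0..3 */
	return 0;
}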
mlx5_devcom_comp_dev *devcom, *pos; + struct mlx5_core_dev *peer, *primary; + struct mlx5_sd *sd, *primary_sd; + int err, i; + + sd = mlx5_get_sd(dev); + devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP, + sd->group_id, NULL, dev); + if (!devcom) + return -ENOMEM; + + sd->devcom = devcom; + + if (mlx5_devcom_comp_get_size(devcom) != sd->host_buses) + return 0; + + mlx5_devcom_comp_lock(devcom); + mlx5_devcom_comp_set_ready(devcom, true); + mlx5_devcom_comp_unlock(devcom); + + if (!mlx5_devcom_for_each_peer_begin(devcom)) { + err = -ENODEV; + goto err_devcom_unreg; + } + + primary = dev; + mlx5_devcom_for_each_peer_entry(devcom, peer, pos) + if (peer->pdev->bus->number < primary->pdev->bus->number) + primary = peer; + + primary_sd = mlx5_get_sd(primary); + primary_sd->primary = true; + i = 0; + /* loop the secondaries */ + mlx5_devcom_for_each_peer_entry(primary_sd->devcom, peer, pos) { + struct mlx5_sd *peer_sd = mlx5_get_sd(peer); + + primary_sd->secondaries[i++] = peer; + peer_sd->primary = false; + peer_sd->primary_dev = primary; + } + + mlx5_devcom_for_each_peer_end(devcom); + return 0; + +err_devcom_unreg: + mlx5_devcom_comp_lock(sd->devcom); + mlx5_devcom_comp_set_ready(sd->devcom, false); + mlx5_devcom_comp_unlock(sd->devcom); + mlx5_devcom_unregister_component(sd->devcom); + return err; +} + +static void sd_unregister(struct mlx5_core_dev *dev) +{ + struct mlx5_sd *sd = mlx5_get_sd(dev); + + mlx5_devcom_comp_lock(sd->devcom); + mlx5_devcom_comp_set_ready(sd->devcom, false); + mlx5_devcom_comp_unlock(sd->devcom); + mlx5_devcom_unregister_component(sd->devcom); +} + +static int sd_cmd_set_primary(struct mlx5_core_dev *primary, u8 *alias_key) +{ + struct mlx5_cmd_allow_other_vhca_access_attr allow_attr = {}; + struct mlx5_sd *sd = mlx5_get_sd(primary); + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_namespace *nic_ns; + struct mlx5_flow_table *ft; + int err; + + nic_ns = mlx5_get_flow_namespace(primary, MLX5_FLOW_NAMESPACE_EGRESS); + if (!nic_ns) + return -EOPNOTSUPP; + + ft = mlx5_create_flow_table(nic_ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + return err; + } + sd->tx_ft = ft; + memcpy(allow_attr.access_key, alias_key, ACCESS_KEY_LEN); + allow_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS; + allow_attr.obj_id = (ft->type << FT_ID_FT_TYPE_OFFSET) | ft->id; + + err = mlx5_cmd_allow_other_vhca_access(primary, &allow_attr); + if (err) { + mlx5_core_err(primary, "Failed to allow other vhca access err=%d\n", + err); + mlx5_destroy_flow_table(ft); + return err; + } + + return 0; +} + +static void sd_cmd_unset_primary(struct mlx5_core_dev *primary) +{ + struct mlx5_sd *sd = mlx5_get_sd(primary); + + mlx5_destroy_flow_table(sd->tx_ft); +} + +static int sd_secondary_create_alias_ft(struct mlx5_core_dev *secondary, + struct mlx5_core_dev *primary, + struct mlx5_flow_table *ft, + u32 *obj_id, u8 *alias_key) +{ + u32 aliased_object_id = (ft->type << FT_ID_FT_TYPE_OFFSET) | ft->id; + u16 vhca_id_to_be_accessed = MLX5_CAP_GEN(primary, vhca_id); + struct mlx5_cmd_alias_obj_create_attr alias_attr = {}; + int ret; + + memcpy(alias_attr.access_key, alias_key, ACCESS_KEY_LEN); + alias_attr.obj_id = aliased_object_id; + alias_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS; + alias_attr.vhca_id = vhca_id_to_be_accessed; + ret = mlx5_cmd_alias_obj_create(secondary, &alias_attr, obj_id); + if (ret) { + mlx5_core_err(secondary, "Failed to create alias object err=%d\n", + ret); + return ret; + } + + return 0; +} + +static void 
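sd_register() above elects the primary only once the devcom component reaches the expected group size, picking the member with the lowest PCI bus number. The same election over a plain array, with stand-in types:

/* Electing the Socket-Direct primary: lowest PCI bus number wins,
 * as in the sd_register() peer loop above. Types are stand-ins. */
#include <stdio.h>

struct dev { const char *name; int bus; };

static struct dev *elect_primary(struct dev *devs, int n)
{
	struct dev *primary = &devs[0];

	for (int i = 1; i < n; i++)
		if (devs[i].bus < primary->bus)
			primary = &devs[i];
	return primary;
}

int main(void)
{
	struct dev group[] = {
		{ "0000:21:00.0", 0x21 },
		{ "0000:01:00.0", 0x01 },
	};

	printf("primary: %s\n", elect_primary(group, 2)->name);
	return 0;
}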
sd_secondary_destroy_alias_ft(struct mlx5_core_dev *secondary) +{ + struct mlx5_sd *sd = mlx5_get_sd(secondary); + + mlx5_cmd_alias_obj_destroy(secondary, sd->alias_obj_id, + MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS); +} + +static int sd_cmd_set_secondary(struct mlx5_core_dev *secondary, + struct mlx5_core_dev *primary, + u8 *alias_key) +{ + struct mlx5_sd *primary_sd = mlx5_get_sd(primary); + struct mlx5_sd *sd = mlx5_get_sd(secondary); + int err; + + err = mlx5_fs_cmd_set_l2table_entry_silent(secondary, 1); + if (err) + return err; + + err = sd_secondary_create_alias_ft(secondary, primary, primary_sd->tx_ft, + &sd->alias_obj_id, alias_key); + if (err) + goto err_unset_silent; + + err = mlx5_fs_cmd_set_tx_flow_table_root(secondary, sd->alias_obj_id, false); + if (err) + goto err_destroy_alias_ft; + + return 0; + +err_destroy_alias_ft: + sd_secondary_destroy_alias_ft(secondary); +err_unset_silent: + mlx5_fs_cmd_set_l2table_entry_silent(secondary, 0); + return err; +} + +static void sd_cmd_unset_secondary(struct mlx5_core_dev *secondary) +{ + mlx5_fs_cmd_set_tx_flow_table_root(secondary, 0, true); + sd_secondary_destroy_alias_ft(secondary); + mlx5_fs_cmd_set_l2table_entry_silent(secondary, 0); +} + +static void sd_print_group(struct mlx5_core_dev *primary) +{ + struct mlx5_sd *sd = mlx5_get_sd(primary); + struct mlx5_core_dev *pos; + int i; + + sd_info(primary, "group id %#x, primary %s, vhca %#x\n", + sd->group_id, pci_name(primary->pdev), + MLX5_CAP_GEN(primary, vhca_id)); + mlx5_sd_for_each_secondary(i, primary, pos) + sd_info(primary, "group id %#x, secondary_%d %s, vhca %#x\n", + sd->group_id, i - 1, pci_name(pos->pdev), + MLX5_CAP_GEN(pos, vhca_id)); +} + +static ssize_t dev_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_core_dev *dev; + char tbuf[32]; + int ret; + + dev = filp->private_data; + ret = snprintf(tbuf, sizeof(tbuf), "%s vhca %#x\n", pci_name(dev->pdev), + MLX5_CAP_GEN(dev, vhca_id)); + + return simple_read_from_buffer(buf, count, pos, tbuf, ret); +} + +static const struct file_operations dev_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = dev_read, +}; + +int mlx5_sd_init(struct mlx5_core_dev *dev) +{ + struct mlx5_core_dev *primary, *pos, *to; + struct mlx5_sd *sd = mlx5_get_sd(dev); + u8 alias_key[ACCESS_KEY_LEN]; + int err, i; + + err = sd_init(dev); + if (err) + return err; + + sd = mlx5_get_sd(dev); + if (!sd) + return 0; + + err = sd_register(dev); + if (err) + goto err_sd_cleanup; + + if (!mlx5_devcom_comp_is_ready(sd->devcom)) + return 0; + + primary = mlx5_sd_get_primary(dev); + + for (i = 0; i < ACCESS_KEY_LEN; i++) + alias_key[i] = get_random_u8(); + + err = sd_cmd_set_primary(primary, alias_key); + if (err) + goto err_sd_unregister; + + sd->dfs = debugfs_create_dir("multi-pf", mlx5_debugfs_get_dev_root(primary)); + debugfs_create_x32("group_id", 0400, sd->dfs, &sd->group_id); + debugfs_create_file("primary", 0400, sd->dfs, primary, &dev_fops); + + mlx5_sd_for_each_secondary(i, primary, pos) { + char name[32]; + + err = sd_cmd_set_secondary(pos, primary, alias_key); + if (err) + goto err_unset_secondaries; + + snprintf(name, sizeof(name), "secondary_%d", i - 1); + debugfs_create_file(name, 0400, sd->dfs, pos, &dev_fops); + + } + + sd_info(primary, "group id %#x, size %d, combined\n", + sd->group_id, mlx5_devcom_comp_get_size(sd->devcom)); + sd_print_group(primary); + + return 0; + +err_unset_secondaries: + to = pos; + mlx5_sd_for_each_secondary_to(i, primary, to, pos) + sd_cmd_unset_secondary(pos); + 
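sd_cmd_set_secondary() above acquires state in a fixed order (silent L2 entry, alias flow table, TX root) and unwinds in exactly the reverse order on failure. A compact sketch of that goto-ladder shape; the three steps are placeholders:

/* Reverse-order unwind on failure, as in sd_cmd_set_secondary() above.
 * The steps are placeholders for silent-L2 / alias-FT / TX-root. */
#include <stdio.h>

static int  step_a(void) { return 0; }
static void undo_a(void) { puts("undo a"); }
static int  step_b(void) { return 0; }
static void undo_b(void) { puts("undo b"); }
static int  step_c(void) { return -1; }	/* pretend the last step fails */

static int setup(void)
{
	int err;

	err = step_a();
	if (err)
		return err;

	err = step_b();
	if (err)
		goto err_undo_a;

	err = step_c();
	if (err)
		goto err_undo_b;

	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
	return err;
}

int main(void)
{
	printf("setup: %d\n", setup());	/* undoes b, then a */
	return 0;
}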
sd_cmd_unset_primary(primary); + debugfs_remove_recursive(sd->dfs); +err_sd_unregister: + sd_unregister(dev); +err_sd_cleanup: + sd_cleanup(dev); + return err; +} + +void mlx5_sd_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_sd *sd = mlx5_get_sd(dev); + struct mlx5_core_dev *primary, *pos; + int i; + + if (!sd) + return; + + if (!mlx5_devcom_comp_is_ready(sd->devcom)) + goto out; + + primary = mlx5_sd_get_primary(dev); + mlx5_sd_for_each_secondary(i, primary, pos) + sd_cmd_unset_secondary(pos); + sd_cmd_unset_primary(primary); + debugfs_remove_recursive(sd->dfs); + + sd_info(primary, "group id %#x, uncombined\n", sd->group_id); +out: + sd_unregister(dev); + sd_cleanup(dev); +} + +struct auxiliary_device *mlx5_sd_get_adev(struct mlx5_core_dev *dev, + struct auxiliary_device *adev, + int idx) +{ + struct mlx5_sd *sd = mlx5_get_sd(dev); + struct mlx5_core_dev *primary; + + if (!sd) + return adev; + + if (!mlx5_devcom_comp_is_ready(sd->devcom)) + return NULL; + + primary = mlx5_sd_get_primary(dev); + if (dev == primary) + return adev; + + return &primary->priv.adev[idx]->adev; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h new file mode 100644 index 000000000000..137efaf9aabc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#ifndef __MLX5_LIB_SD_H__ +#define __MLX5_LIB_SD_H__ + +#define MLX5_SD_MAX_GROUP_SZ 2 + +struct mlx5_sd; + +struct mlx5_core_dev *mlx5_sd_primary_get_peer(struct mlx5_core_dev *primary, int idx); +int mlx5_sd_ch_ix_get_dev_ix(struct mlx5_core_dev *dev, int ch_ix); +int mlx5_sd_ch_ix_get_vec_ix(struct mlx5_core_dev *dev, int ch_ix); +struct mlx5_core_dev *mlx5_sd_ch_ix_get_dev(struct mlx5_core_dev *primary, int ch_ix); +struct auxiliary_device *mlx5_sd_get_adev(struct mlx5_core_dev *dev, + struct auxiliary_device *adev, + int idx); + +int mlx5_sd_init(struct mlx5_core_dev *dev); +void mlx5_sd_cleanup(struct mlx5_core_dev *dev); + +#define mlx5_sd_for_each_dev_from_to(i, primary, ix_from, to, pos) \ + for (i = ix_from; \ + (pos = mlx5_sd_primary_get_peer(primary, i)) && pos != (to); i++) + +#define mlx5_sd_for_each_dev(i, primary, pos) \ + mlx5_sd_for_each_dev_from_to(i, primary, 0, NULL, pos) + +#define mlx5_sd_for_each_dev_to(i, primary, to, pos) \ + mlx5_sd_for_each_dev_from_to(i, primary, 0, to, pos) + +#define mlx5_sd_for_each_secondary(i, primary, pos) \ + mlx5_sd_for_each_dev_from_to(i, primary, 1, NULL, pos) + +#define mlx5_sd_for_each_secondary_to(i, primary, to, pos) \ + mlx5_sd_for_each_dev_from_to(i, primary, 1, to, pos) + +#endif /* __MLX5_LIB_SD_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c index 253d7ad9b809..8b63968bbee9 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c @@ -124,6 +124,41 @@ static void mlxbf_gige_get_pauseparam(struct net_device *netdev, pause->tx_pause = 1; } +static bool mlxbf_gige_llu_counters_enabled(struct mlxbf_gige *priv) +{ + u32 data; + + if (priv->hw_version == MLXBF_GIGE_VERSION_BF2) { + data = readl(priv->llu_base + MLXBF_GIGE_BF2_LLU_GENERAL_CONFIG); + if (data & MLXBF_GIGE_BF2_LLU_COUNTERS_EN) + return true; + } else { + data = readl(priv->llu_base + MLXBF_GIGE_BF3_LLU_GENERAL_CONFIG); + if 
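The mlx5_sd_for_each_*() macros in sd.h above are all specializations of one from/to loop driven by mlx5_sd_primary_get_peer(), with index 0 reserved for the primary so secondaries iterate from 1. A reduced model with a stand-in get_peer():

/* Model of the sd.h iterator macros: one from/to loop, specialized
 * by start index and stop element. get_peer() is a stand-in. */
#include <stddef.h>
#include <stdio.h>

static const char *devs[] = { "primary", "secondary_0", "secondary_1" };
#define NDEVS 3

static const char *get_peer(int i)
{
	return i < NDEVS ? devs[i] : NULL;	/* NULL ends the walk */
}

#define for_each_dev_from_to(i, from, to, pos) \
	for ((i) = (from); ((pos) = get_peer(i)) && (pos) != (to); (i)++)

#define for_each_secondary(i, pos) for_each_dev_from_to(i, 1, NULL, pos)

int main(void)
{
	const char *pos;
	int i;

	for_each_secondary(i, pos)
		printf("visit %s (i=%d)\n", pos, i);
	return 0;
}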
(data & MLXBF_GIGE_BF3_LLU_COUNTERS_EN) + return true; + } + + return false; +} + +static void mlxbf_gige_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *pause_stats) +{ + struct mlxbf_gige *priv = netdev_priv(netdev); + u64 data_lo, data_hi; + + /* Read LLU counters to provide stats only if counters are enabled */ + if (mlxbf_gige_llu_counters_enabled(priv)) { + data_lo = readl(priv->llu_base + MLXBF_GIGE_TX_PAUSE_CNT_LO); + data_hi = readl(priv->llu_base + MLXBF_GIGE_TX_PAUSE_CNT_HI); + pause_stats->tx_pause_frames = (data_hi << 32) | data_lo; + + data_lo = readl(priv->llu_base + MLXBF_GIGE_RX_PAUSE_CNT_LO); + data_hi = readl(priv->llu_base + MLXBF_GIGE_RX_PAUSE_CNT_HI); + pause_stats->rx_pause_frames = (data_hi << 32) | data_lo; + } +} + const struct ethtool_ops mlxbf_gige_ethtool_ops = { .get_link = ethtool_op_get_link, .get_ringparam = mlxbf_gige_get_ringparam, @@ -134,6 +169,7 @@ const struct ethtool_ops mlxbf_gige_ethtool_ops = { .get_ethtool_stats = mlxbf_gige_get_ethtool_stats, .nway_reset = phy_ethtool_nway_reset, .get_pauseparam = mlxbf_gige_get_pauseparam, + .get_pause_stats = mlxbf_gige_get_pause_stats, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, }; diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h index cd0973229c9b..98a8681c21b9 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h @@ -99,4 +99,34 @@ #define MLXBF_GIGE_100M_IPG_SIZE 119 #define MLXBF_GIGE_10M_IPG_SIZE 1199 +/* Offsets into OOB LLU block for pause frame counters */ +#define MLXBF_GIGE_BF2_TX_PAUSE_CNT_HI 0x33d8 +#define MLXBF_GIGE_BF2_TX_PAUSE_CNT_LO 0x33dc +#define MLXBF_GIGE_BF2_RX_PAUSE_CNT_HI 0x3210 +#define MLXBF_GIGE_BF2_RX_PAUSE_CNT_LO 0x3214 + +#define MLXBF_GIGE_BF3_TX_PAUSE_CNT_HI 0x3a88 +#define MLXBF_GIGE_BF3_TX_PAUSE_CNT_LO 0x3a8c +#define MLXBF_GIGE_BF3_RX_PAUSE_CNT_HI 0x38c0 +#define MLXBF_GIGE_BF3_RX_PAUSE_CNT_LO 0x38c4 + +#define MLXBF_GIGE_TX_PAUSE_CNT_HI ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? \ + MLXBF_GIGE_BF2_TX_PAUSE_CNT_HI : \ + MLXBF_GIGE_BF3_TX_PAUSE_CNT_HI) +#define MLXBF_GIGE_TX_PAUSE_CNT_LO ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? \ + MLXBF_GIGE_BF2_TX_PAUSE_CNT_LO : \ + MLXBF_GIGE_BF3_TX_PAUSE_CNT_LO) +#define MLXBF_GIGE_RX_PAUSE_CNT_HI ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? \ + MLXBF_GIGE_BF2_RX_PAUSE_CNT_HI : \ + MLXBF_GIGE_BF3_RX_PAUSE_CNT_HI) +#define MLXBF_GIGE_RX_PAUSE_CNT_LO ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? 
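mlxbf_gige_get_pause_stats() above widens two 32-bit LLU registers into one 64-bit count via (data_hi << 32) | data_lo, with both halves held in u64 so the shift is done at full width. The sketch below adds a hi/lo/hi re-read loop as a hedge against a carry landing between the two reads; whether the hardware latches the pair (making the loop unnecessary, as the driver assumes) is outside this patch:

/* Composing a 64-bit counter from split hi/lo 32-bit registers,
 * as in mlxbf_gige_get_pause_stats() above. reg32() stands in for
 * readl(); the re-read loop guards against a mid-read carry. */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[2] = { 0x00000001, 0xffffffff };	/* hi, lo */

static uint32_t reg32(int idx) { return fake_regs[idx]; }

static uint64_t read_split64(int hi_idx, int lo_idx)
{
	uint32_t hi, lo, hi2;

	do {
		hi  = reg32(hi_idx);
		lo  = reg32(lo_idx);
		hi2 = reg32(hi_idx);
	} while (hi != hi2);	/* retry if the high word moved */

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("pause frames: %llu\n",
	       (unsigned long long)read_split64(0, 1));
	return 0;
}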
\ + MLXBF_GIGE_BF2_RX_PAUSE_CNT_LO : \ + MLXBF_GIGE_BF3_RX_PAUSE_CNT_LO) + +#define MLXBF_GIGE_BF2_LLU_GENERAL_CONFIG 0x2110 +#define MLXBF_GIGE_BF3_LLU_GENERAL_CONFIG 0x2030 + +#define MLXBF_GIGE_BF2_LLU_COUNTERS_EN BIT(0) +#define MLXBF_GIGE_BF3_LLU_COUNTERS_EN BIT(4) + #endif /* !defined(__MLXBF_GIGE_REGS_H__) */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c index 4af285918ea2..75868b3f548e 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c @@ -347,10 +347,10 @@ int sparx5_del_mact_entry(struct sparx5 *sparx5, list) { if ((vid == 0 || mact_entry->vid == vid) && ether_addr_equal(addr, mact_entry->mac)) { + sparx5_mact_forget(sparx5, addr, mact_entry->vid); + list_del(&mact_entry->list); devm_kfree(sparx5->dev, mact_entry); - - sparx5_mact_forget(sparx5, addr, mact_entry->vid); } } mutex_unlock(&sparx5->mact_lock); diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h index 9ffef2e06885..2ccc2c2a06e3 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic.h +++ b/drivers/net/ethernet/pensando/ionic/ionic.h @@ -76,6 +76,8 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode, u8 status, int err); +bool ionic_notifyq_service(struct ionic_cq *cq); +bool ionic_adminq_service(struct ionic_cq *cq); int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait); int ionic_dev_cmd_wait_nomsg(struct ionic *ionic, unsigned long max_wait); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index 746072b4dbd0..874499337132 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -629,43 +629,25 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq, cq->desc_size = desc_size; cq->tail_idx = 0; cq->done_color = 1; + cq->idev = &lif->ionic->idev; return 0; } -void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa) -{ - struct ionic_cq_info *cur; - unsigned int i; - - cq->base = base; - cq->base_pa = base_pa; - - for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++) - cur->cq_desc = base + (i * cq->desc_size); -} - -void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q) -{ - cq->bound_q = q; -} - unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do, ionic_cq_cb cb, ionic_cq_done_cb done_cb, void *done_arg) { - struct ionic_cq_info *cq_info; unsigned int work_done = 0; if (work_to_do == 0) return 0; - cq_info = &cq->info[cq->tail_idx]; - while (cb(cq, cq_info)) { + while (cb(cq)) { if (cq->tail_idx == cq->num_descs - 1) cq->done_color = !cq->done_color; + cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1); - cq_info = &cq->info[cq->tail_idx]; if (++work_done >= work_to_do) break; @@ -692,7 +674,6 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev, return -EINVAL; q->lif = lif; - q->idev = idev; q->index = index; q->num_descs = num_descs; q->desc_size = desc_size; @@ -706,53 +687,11 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev, return 0; } -void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa) +void ionic_q_post(struct ionic_queue *q, bool ring_doorbell) { - struct ionic_desc_info 
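The sparx5_del_mact_entry() hunk above reorders the body so mact_entry->vid is consumed by sparx5_mact_forget() and the node unlinked before devm_kfree() releases it; the old order read the VID out of freed memory. The same consume-unlink-free shape in miniature, with simplified list types:

/* Use the entry, unlink it, then free it -- the order the sparx5
 * hunk above restores. List types are simplified stand-ins. */
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int vid;
	struct entry *next;
};

static void forget_in_hw(int vid) { printf("forget vid %d\n", vid); }

static void del_entry(struct entry **head, struct entry *e)
{
	forget_in_hw(e->vid);		/* 1: consume fields while valid */

	for (struct entry **p = head; *p; p = &(*p)->next) {
		if (*p == e) {
			*p = e->next;	/* 2: unlink */
			break;
		}
	}
	free(e);			/* 3: only now release the memory */
}

int main(void)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return 1;
	e->vid = 7;
	e->next = NULL;
	del_entry(&e, e);
	return 0;
}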
*cur; - unsigned int i; - - q->base = base; - q->base_pa = base_pa; - - for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) - cur->desc = base + (i * q->desc_size); -} - -void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa) -{ - struct ionic_desc_info *cur; - unsigned int i; - - q->cmb_base = base; - q->cmb_base_pa = base_pa; - - for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) - cur->cmb_desc = base + (i * q->desc_size); -} - -void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa) -{ - struct ionic_desc_info *cur; - unsigned int i; - - q->sg_base = base; - q->sg_base_pa = base_pa; - - for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) - cur->sg_desc = base + (i * q->sg_desc_size); -} - -void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb, - void *cb_arg) -{ - struct ionic_desc_info *desc_info; struct ionic_lif *lif = q->lif; struct device *dev = q->dev; - desc_info = &q->info[q->head_idx]; - desc_info->cb = cb; - desc_info->cb_arg = cb_arg; - q->head_idx = (q->head_idx + 1) & (q->num_descs - 1); dev_dbg(dev, "lif=%d qname=%s qid=%d qtype=%d p_index=%d ringdb=%d\n", @@ -771,7 +710,7 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb, } } -static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos) +bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos) { unsigned int mask, tail, head; @@ -781,37 +720,3 @@ static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos) return ((pos - tail) & mask) < ((head - tail) & mask); } - -void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info, - unsigned int stop_index) -{ - struct ionic_desc_info *desc_info; - ionic_desc_cb cb; - void *cb_arg; - u16 index; - - /* check for empty queue */ - if (q->tail_idx == q->head_idx) - return; - - /* stop index must be for a descriptor that is not yet completed */ - if (unlikely(!ionic_q_is_posted(q, stop_index))) - dev_err(q->dev, - "ionic stop is not posted %s stop %u tail %u head %u\n", - q->name, stop_index, q->tail_idx, q->head_idx); - - do { - desc_info = &q->info[q->tail_idx]; - index = q->tail_idx; - q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); - - cb = desc_info->cb; - cb_arg = desc_info->cb_arg; - - desc_info->cb = NULL; - desc_info->cb_arg = NULL; - - if (cb) - cb(q, desc_info, cq_info, cb_arg); - } while (index != stop_index); -} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index 516db910e8e8..f30eee4a5a80 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -122,11 +122,13 @@ static_assert(sizeof(struct ionic_log_event) == 64); /* I/O */ static_assert(sizeof(struct ionic_txq_desc) == 16); static_assert(sizeof(struct ionic_txq_sg_desc) == 128); +static_assert(sizeof(struct ionic_txq_sg_desc_v1) == 256); static_assert(sizeof(struct ionic_txq_comp) == 16); static_assert(sizeof(struct ionic_rxq_desc) == 16); static_assert(sizeof(struct ionic_rxq_sg_desc) == 128); static_assert(sizeof(struct ionic_rxq_comp) == 16); +static_assert(sizeof(struct ionic_rxq_comp) == sizeof(struct ionic_txq_comp)); /* SR/IOV */ static_assert(sizeof(struct ionic_vf_setattr_cmd) == 64); @@ -175,21 +177,8 @@ struct ionic_dev { struct ionic_devinfo dev_info; }; -struct ionic_cq_info { - union { - void *cq_desc; - struct ionic_admin_comp *admincq; - struct ionic_notifyq_event *notifyq; - }; -}; - struct ionic_queue; struct 
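ionic_q_is_posted(), made non-static above, is power-of-two ring arithmetic: with mask = num_descs - 1, a descriptor is in flight iff its distance past the tail is smaller than the head's. A self-checking sketch:

/* Power-of-two ring test from ionic_q_is_posted() above:
 * pos is in flight iff it lies in [tail, head) modulo the ring size. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_DESCS 16u		/* must be a power of two */

static bool is_posted(unsigned head, unsigned tail, unsigned pos)
{
	unsigned mask = NUM_DESCS - 1;

	return ((pos - tail) & mask) < ((head - tail) & mask);
}

int main(void)
{
	/* wrapped ring: producer at 2, consumer at 14 -> 14,15,0,1 in flight */
	unsigned head = 2, tail = 14;

	for (unsigned pos = 0; pos < NUM_DESCS; pos++)
		if (is_posted(head, tail, pos))
			printf("desc %u is posted\n", pos);
	return 0;
}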
ionic_qcq; -struct ionic_desc_info; - -typedef void (*ionic_desc_cb)(struct ionic_queue *q, - struct ionic_desc_info *desc_info, - struct ionic_cq_info *cq_info, void *cb_arg); #define IONIC_MAX_BUF_LEN ((u16)-1) #define IONIC_PAGE_SIZE PAGE_SIZE @@ -209,28 +198,25 @@ struct ionic_buf_info { u32 len; }; -#define IONIC_MAX_FRAGS (1 + IONIC_TX_MAX_SG_ELEMS_V1) +#define IONIC_TX_MAX_FRAGS (1 + IONIC_TX_MAX_SG_ELEMS_V1) +#define IONIC_RX_MAX_FRAGS (1 + IONIC_RX_MAX_SG_ELEMS) -struct ionic_desc_info { - union { - void *desc; - struct ionic_txq_desc *txq_desc; - struct ionic_rxq_desc *rxq_desc; - struct ionic_admin_cmd *adminq_desc; - }; - void __iomem *cmb_desc; - union { - void *sg_desc; - struct ionic_txq_sg_desc *txq_sg_desc; - struct ionic_rxq_sg_desc *rxq_sgl_desc; - }; +struct ionic_tx_desc_info { unsigned int bytes; unsigned int nbufs; - struct ionic_buf_info bufs[MAX_SKB_FRAGS + 1]; - ionic_desc_cb cb; - void *cb_arg; + struct sk_buff *skb; struct xdp_frame *xdpf; enum xdp_action act; + struct ionic_buf_info bufs[MAX_SKB_FRAGS + 1]; +}; + +struct ionic_rx_desc_info { + unsigned int nbufs; + struct ionic_buf_info bufs[IONIC_RX_MAX_FRAGS]; +}; + +struct ionic_admin_desc_info { + void *ctx; }; #define IONIC_QUEUE_NAME_MAX_SZ 16 @@ -238,7 +224,12 @@ struct ionic_desc_info { struct ionic_queue { struct device *dev; struct ionic_lif *lif; - struct ionic_desc_info *info; + union { + void *info; + struct ionic_tx_desc_info *tx_info; + struct ionic_rx_desc_info *rx_info; + struct ionic_admin_desc_info *admin_info; + }; u64 dbval; unsigned long dbell_deadline; unsigned long dbell_jiffies; @@ -248,29 +239,33 @@ struct ionic_queue { unsigned int num_descs; unsigned int max_sg_elems; u64 features; - u64 drop; - struct ionic_dev *idev; unsigned int type; unsigned int hw_index; unsigned int hw_type; + bool xdp_flush; union { void *base; struct ionic_txq_desc *txq; struct ionic_rxq_desc *rxq; struct ionic_admin_cmd *adminq; }; - void __iomem *cmb_base; + union { + void __iomem *cmb_base; + struct ionic_txq_desc __iomem *cmb_txq; + struct ionic_rxq_desc __iomem *cmb_rxq; + }; union { void *sg_base; struct ionic_txq_sg_desc *txq_sgl; + struct ionic_txq_sg_desc_v1 *txq_sgl_v1; struct ionic_rxq_sg_desc *rxq_sgl; }; struct xdp_rxq_info *xdp_rxq_info; struct ionic_queue *partner; - bool xdp_flush; dma_addr_t base_pa; dma_addr_t cmb_base_pa; dma_addr_t sg_base_pa; + u64 drop; unsigned int desc_size; unsigned int sg_desc_size; unsigned int pid; @@ -292,7 +287,6 @@ struct ionic_intr_info { struct ionic_cq { struct ionic_lif *lif; - struct ionic_cq_info *info; struct ionic_queue *bound_q; struct ionic_intr_info *bound_intr; u16 tail_idx; @@ -301,6 +295,7 @@ struct ionic_cq { unsigned int desc_size; void *base; dma_addr_t base_pa; + struct ionic_dev *idev; } ____cacheline_aligned_in_smp; struct ionic; @@ -375,7 +370,7 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq, unsigned int num_descs, size_t desc_size); void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa); void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q); -typedef bool (*ionic_cq_cb)(struct ionic_cq *cq, struct ionic_cq_info *cq_info); +typedef bool (*ionic_cq_cb)(struct ionic_cq *cq); typedef void (*ionic_cq_done_cb)(void *done_arg); unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do, ionic_cq_cb cb, ionic_cq_done_cb done_cb, @@ -386,13 +381,9 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev, struct ionic_queue *q, unsigned int index, const char *name, unsigned int 
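The ionic_queue rework above swaps one generic ionic_desc_info array (carrying a cb/cb_arg pair on every slot) for a union of purpose-built per-queue-type arrays, sized by a new desc_info_size argument at allocation time, so each queue type only pays for the state it uses. A reduced model of that layout, fields trimmed:

/* One info allocation, viewed through a per-queue-type union,
 * as in the ionic_queue rework above. Field sets are trimmed. */
#include <stdio.h>
#include <stdlib.h>

struct tx_info  { void *skb;  unsigned bytes; };
struct rx_info  { unsigned nbufs; };
struct adm_info { void *ctx; };

struct queue {
	union {			/* the active view depends on queue type */
		void *info;
		struct tx_info  *tx_info;
		struct rx_info  *rx_info;
		struct adm_info *admin_info;
	};
	unsigned num_descs;
};

static int queue_alloc(struct queue *q, unsigned n, size_t info_size)
{
	q->info = calloc(n, info_size);	/* vcalloc() in the driver */
	if (!q->info)
		return -1;
	q->num_descs = n;
	return 0;
}

int main(void)
{
	struct queue txq;

	if (queue_alloc(&txq, 8, sizeof(struct tx_info)))
		return 1;
	txq.tx_info[0].bytes = 1500;	/* typed access, no casts */
	printf("slot 0: %u bytes\n", txq.tx_info[0].bytes);
	free(txq.info);
	return 0;
}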
num_descs, size_t desc_size, size_t sg_desc_size, unsigned int pid); -void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa); -void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa); -void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa); -void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb, - void *cb_arg); -void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info, - unsigned int stop_index); +void ionic_q_post(struct ionic_queue *q, bool ring_doorbell); +bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos); + int ionic_heartbeat_check(struct ionic *ionic); bool ionic_is_fw_running(struct ionic_dev *idev); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 33b1691a4ee5..7f0c6cdc375e 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -433,8 +433,6 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq) ionic_xdp_unregister_rxq_info(&qcq->q); ionic_qcq_intr_free(lif, qcq); - vfree(qcq->cq.info); - qcq->cq.info = NULL; vfree(qcq->q.info); qcq->q.info = NULL; } @@ -538,14 +536,11 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, unsigned int num_descs, unsigned int desc_size, unsigned int cq_desc_size, unsigned int sg_desc_size, + unsigned int desc_info_size, unsigned int pid, struct ionic_qcq **qcq) { struct ionic_dev *idev = &lif->ionic->idev; struct device *dev = lif->ionic->dev; - void *q_base, *cq_base, *sg_base; - dma_addr_t cq_base_pa = 0; - dma_addr_t sg_base_pa = 0; - dma_addr_t q_base_pa = 0; struct ionic_qcq *new; int err; @@ -561,7 +556,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, new->q.dev = dev; new->flags = flags; - new->q.info = vcalloc(num_descs, sizeof(*new->q.info)); + new->q.info = vcalloc(num_descs, desc_info_size); if (!new->q.info) { netdev_err(lif->netdev, "Cannot allocate queue info\n"); err = -ENOMEM; @@ -580,19 +575,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, err = ionic_alloc_qcq_interrupt(lif, new); if (err) - goto err_out; - - new->cq.info = vcalloc(num_descs, sizeof(*new->cq.info)); - if (!new->cq.info) { - netdev_err(lif->netdev, "Cannot allocate completion queue info\n"); - err = -ENOMEM; - goto err_out_free_irq; - } + goto err_out_free_q_info; err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size); if (err) { netdev_err(lif->netdev, "Cannot initialize completion queue\n"); - goto err_out_free_cq_info; + goto err_out_free_irq; } if (flags & IONIC_QCQ_F_NOTIFYQ) { @@ -610,16 +598,15 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, if (!new->q_base) { netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n"); err = -ENOMEM; - goto err_out_free_cq_info; + goto err_out_free_irq; } - q_base = PTR_ALIGN(new->q_base, PAGE_SIZE); - q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE); - ionic_q_map(&new->q, q_base, q_base_pa); - - cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE); - cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE); - ionic_cq_map(&new->cq, cq_base, cq_base_pa); - ionic_cq_bind(&new->cq, &new->q); + new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE); + new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE); + + /* Base the NotifyQ cq.base off of the ALIGNed q.base */ + new->cq.base = PTR_ALIGN(new->q.base + q_size, PAGE_SIZE); + new->cq.base_pa = ALIGN(new->q_base_pa + 
q_size, PAGE_SIZE); + new->cq.bound_q = &new->q; } else { /* regular DMA q descriptors */ new->q_size = PAGE_SIZE + (num_descs * desc_size); @@ -628,11 +615,10 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, if (!new->q_base) { netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n"); err = -ENOMEM; - goto err_out_free_cq_info; + goto err_out_free_irq; } - q_base = PTR_ALIGN(new->q_base, PAGE_SIZE); - q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE); - ionic_q_map(&new->q, q_base, q_base_pa); + new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE); + new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE); if (flags & IONIC_QCQ_F_CMB_RINGS) { /* on-chip CMB q descriptors */ @@ -657,7 +643,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, } new->cmb_q_base_pa -= idev->phy_cmb_pages; - ionic_q_cmb_map(&new->q, new->cmb_q_base, new->cmb_q_base_pa); + new->q.cmb_base = new->cmb_q_base; + new->q.cmb_base_pa = new->cmb_q_base_pa; } /* cq DMA descriptors */ @@ -669,10 +656,9 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, err = -ENOMEM; goto err_out_free_q; } - cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE); - cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE); - ionic_cq_map(&new->cq, cq_base, cq_base_pa); - ionic_cq_bind(&new->cq, &new->q); + new->cq.base = PTR_ALIGN(new->cq_base, PAGE_SIZE); + new->cq.base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE); + new->cq.bound_q = &new->q; } if (flags & IONIC_QCQ_F_SG) { @@ -684,9 +670,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, err = -ENOMEM; goto err_out_free_cq; } - sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE); - sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE); - ionic_q_sg_map(&new->q, sg_base, sg_base_pa); + new->q.sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE); + new->q.sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE); } INIT_WORK(&new->dim.work, ionic_dim_work); @@ -704,8 +689,6 @@ err_out_free_q: ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order); } dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa); -err_out_free_cq_info: - vfree(new->cq.info); err_out_free_irq: if (flags & IONIC_QCQ_F_INTR) { devm_free_irq(dev, new->intr.vector, &new->napi); @@ -731,7 +714,9 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif) IONIC_ADMINQ_LENGTH, sizeof(struct ionic_admin_cmd), sizeof(struct ionic_admin_comp), - 0, lif->kern_pid, &lif->adminqcq); + 0, + sizeof(struct ionic_admin_desc_info), + lif->kern_pid, &lif->adminqcq); if (err) return err; ionic_debugfs_add_qcq(lif, lif->adminqcq); @@ -742,7 +727,9 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif) flags, IONIC_NOTIFYQ_LENGTH, sizeof(struct ionic_notifyq_cmd), sizeof(union ionic_notifyq_comp), - 0, lif->kern_pid, &lif->notifyqcq); + 0, + sizeof(struct ionic_admin_desc_info), + lif->kern_pid, &lif->notifyqcq); if (err) goto err_out; ionic_debugfs_add_qcq(lif, lif->notifyqcq); @@ -960,6 +947,7 @@ int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif) err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags, num_desc, desc_sz, comp_sz, sg_desc_sz, + sizeof(struct ionic_tx_desc_info), lif->kern_pid, &txq); if (err) goto err_qcq_alloc; @@ -1019,6 +1007,7 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif) err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags, num_desc, desc_sz, comp_sz, sg_desc_sz, + sizeof(struct ionic_rx_desc_info), lif->kern_pid, &rxq); if (err) goto err_qcq_alloc; @@ -1172,71 +1161,6 @@ int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class) 
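ionic_qcq_alloc() above still derives the ring bases by over-allocating one DMA block and rounding both views up to a page boundary: PTR_ALIGN for the CPU pointer, ALIGN for the bus address. A sketch of that round-up, with malloc standing in for dma_alloc_coherent() (which returns both views of one buffer) and the macros reproduced from their kernel definitions; the extra PAGE_SIZE of slack is what makes the round-up safe:

/* Page-aligning a queue base inside an over-allocated block,
 * as in ionic_qcq_alloc() above. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096ul
#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))
#define PTR_ALIGN(p, a)	((void *)ALIGN((uintptr_t)(p), (a)))

int main(void)
{
	size_t q_size = PAGE_SIZE + 64 * 16;	/* slack + descriptors */
	void *q_base = malloc(q_size);
	uintptr_t q_base_pa = (uintptr_t)q_base; /* pretend bus address */

	if (!q_base)
		return 1;

	void *base = PTR_ALIGN(q_base, PAGE_SIZE);
	uintptr_t base_pa = ALIGN(q_base_pa, PAGE_SIZE);

	printf("raw %p -> aligned %p (pa offset %lu)\n",
	       q_base, base, (unsigned long)(base_pa - q_base_pa));
	free(q_base);
	return 0;
}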
return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class); } -static bool ionic_notifyq_service(struct ionic_cq *cq, - struct ionic_cq_info *cq_info) -{ - union ionic_notifyq_comp *comp = cq_info->cq_desc; - struct ionic_deferred_work *work; - struct net_device *netdev; - struct ionic_queue *q; - struct ionic_lif *lif; - u64 eid; - - q = cq->bound_q; - lif = q->info[0].cb_arg; - netdev = lif->netdev; - eid = le64_to_cpu(comp->event.eid); - - /* Have we run out of new completions to process? */ - if ((s64)(eid - lif->last_eid) <= 0) - return false; - - lif->last_eid = eid; - - dev_dbg(lif->ionic->dev, "notifyq event:\n"); - dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1, - comp, sizeof(*comp), true); - - switch (le16_to_cpu(comp->event.ecode)) { - case IONIC_EVENT_LINK_CHANGE: - ionic_link_status_check_request(lif, CAN_NOT_SLEEP); - break; - case IONIC_EVENT_RESET: - if (lif->ionic->idev.fw_status_ready && - !test_bit(IONIC_LIF_F_FW_RESET, lif->state) && - !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) { - work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) { - netdev_err(lif->netdev, "Reset event dropped\n"); - clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state); - } else { - work->type = IONIC_DW_TYPE_LIF_RESET; - ionic_lif_deferred_enqueue(&lif->deferred, work); - } - } - break; - default: - netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n", - comp->event.ecode, eid); - break; - } - - return true; -} - -static bool ionic_adminq_service(struct ionic_cq *cq, - struct ionic_cq_info *cq_info) -{ - struct ionic_admin_comp *comp = cq_info->cq_desc; - - if (!color_match(comp->color, cq->done_color)) - return false; - - ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index)); - - return true; -} - static int ionic_adminq_napi(struct napi_struct *napi, int budget) { struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr; @@ -2110,6 +2034,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif) for (i = 0; i < lif->nxqs; i++) { err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, num_desc, desc_sz, comp_sz, sg_desc_sz, + sizeof(struct ionic_tx_desc_info), lif->kern_pid, &lif->txqcqs[i]); if (err) goto err_out; @@ -2141,6 +2066,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif) for (i = 0; i < lif->nxqs; i++) { err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, num_desc, desc_sz, comp_sz, sg_desc_sz, + sizeof(struct ionic_rx_desc_info), lif->kern_pid, &lif->rxqcqs[i]); if (err) goto err_out; @@ -2958,7 +2884,6 @@ static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b) swap(a->cq.desc_size, b->cq.desc_size); swap(a->cq.base, b->cq.base); swap(a->cq.base_pa, b->cq.base_pa); - swap(a->cq.info, b->cq.info); swap(a->cq_base, b->cq_base); swap(a->cq_base_pa, b->cq_base_pa); swap(a->cq_size, b->cq_size); @@ -3022,6 +2947,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 4, desc_sz, comp_sz, sg_desc_sz, + sizeof(struct ionic_tx_desc_info), lif->kern_pid, &lif->txqcqs[i]); if (err) goto err_out; @@ -3030,6 +2956,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR; err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, num_desc, desc_sz, comp_sz, sg_desc_sz, + sizeof(struct ionic_tx_desc_info), lif->kern_pid, &tx_qcqs[i]); if (err) goto err_out; @@ -3051,6 +2978,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG; err = 
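The notifyq handler (removed above, re-added in ionic_main.c later in this diff) tests for new events with (s64)(eid - lif->last_eid) <= 0, the serial-number comparison that survives u64 wraparound as long as the two values stay within 2^63 of each other. In miniature:

/* Serial-number comparison from the notifyq handler: subtract in
 * unsigned, compare in signed, so wraparound does not confuse it. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool eid_is_new(uint64_t eid, uint64_t last_eid)
{
	return (int64_t)(eid - last_eid) > 0;
}

int main(void)
{
	uint64_t last = UINT64_MAX - 1;	/* about to wrap */

	printf("%d\n", eid_is_new(UINT64_MAX, last)); /* 1: one ahead */
	printf("%d\n", eid_is_new(0, UINT64_MAX));    /* 1: wrapped, still ahead */
	printf("%d\n", eid_is_new(last, last));       /* 0: replay */
	return 0;
}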
ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 4, desc_sz, comp_sz, sg_desc_sz, + sizeof(struct ionic_rx_desc_info), lif->kern_pid, &lif->rxqcqs[i]); if (err) goto err_out; @@ -3059,6 +2987,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR; err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, num_desc, desc_sz, comp_sz, sg_desc_sz, + sizeof(struct ionic_rx_desc_info), lif->kern_pid, &rx_qcqs[i]); if (err) goto err_out; @@ -3633,7 +3562,7 @@ static int ionic_lif_notifyq_init(struct ionic_lif *lif) dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index); /* preset the callback info */ - q->info[0].cb_arg = lif; + q->admin_info[0].ctx = lif; qcq->flags |= IONIC_QCQ_F_INITED; @@ -3885,6 +3814,7 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif) union ionic_q_identity __iomem *q_ident; struct ionic *ionic = lif->ionic; struct ionic_dev *idev; + u16 max_frags; int qtype; int err; @@ -3952,17 +3882,16 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif) dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n", qtype, qti->sg_desc_stride); - if (qti->max_sg_elems >= IONIC_MAX_FRAGS) { - qti->max_sg_elems = IONIC_MAX_FRAGS - 1; - dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to IONIC_MAX_FRAGS-1 %d\n", - qtype, qti->max_sg_elems); - } + if (qtype == IONIC_QTYPE_TXQ) + max_frags = IONIC_TX_MAX_FRAGS; + else if (qtype == IONIC_QTYPE_RXQ) + max_frags = IONIC_RX_MAX_FRAGS; + else + max_frags = 1; - if (qti->max_sg_elems > MAX_SKB_FRAGS) { - qti->max_sg_elems = MAX_SKB_FRAGS; - dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to MAX_SKB_FRAGS %d\n", - qtype, qti->max_sg_elems); - } + qti->max_sg_elems = min_t(u16, max_frags - 1, MAX_SKB_FRAGS); + dev_dbg(ionic->dev, "qtype %d max_sg_elems %d\n", + qtype, qti->max_sg_elems); } } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index b4f8692a3ead..08f4266fe2aa 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -71,25 +71,25 @@ struct ionic_qcq { void *q_base; dma_addr_t q_base_pa; u32 q_size; + u32 cq_size; void *cq_base; dma_addr_t cq_base_pa; - u32 cq_size; void *sg_base; dma_addr_t sg_base_pa; u32 sg_size; + unsigned int flags; void __iomem *cmb_q_base; phys_addr_t cmb_q_base_pa; u32 cmb_q_size; u32 cmb_pgid; u32 cmb_order; struct dim dim; + struct timer_list napi_deadline; struct ionic_queue q; struct ionic_cq cq; - struct ionic_intr_info intr; - struct timer_list napi_deadline; struct napi_struct napi; - unsigned int flags; struct ionic_qcq *napi_qcq; + struct ionic_intr_info intr; struct dentry *dentry; }; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index 29b4d039bbce..c1259324b0be 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -190,7 +190,8 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode) static void ionic_adminq_flush(struct ionic_lif *lif) { - struct ionic_desc_info *desc_info; + struct ionic_admin_desc_info *desc_info; + struct ionic_admin_cmd *desc; unsigned long irqflags; struct ionic_queue *q; @@ -203,10 +204,10 @@ static void ionic_adminq_flush(struct ionic_lif *lif) q = &lif->adminqcq->q; while (q->tail_idx != q->head_idx) { - desc_info = &q->info[q->tail_idx]; - memset(desc_info->desc, 0, sizeof(union ionic_adminq_cmd)); - desc_info->cb = NULL; - 
desc_info->cb_arg = NULL; + desc = &q->adminq[q->tail_idx]; + desc_info = &q->admin_info[q->tail_idx]; + memset(desc, 0, sizeof(union ionic_adminq_cmd)); + desc_info->ctx = NULL; q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); } spin_unlock_irqrestore(&lif->adminq_lock, irqflags); @@ -246,25 +247,93 @@ static int ionic_adminq_check_err(struct ionic_lif *lif, return err; } -static void ionic_adminq_cb(struct ionic_queue *q, - struct ionic_desc_info *desc_info, - struct ionic_cq_info *cq_info, void *cb_arg) +bool ionic_notifyq_service(struct ionic_cq *cq) { - struct ionic_admin_ctx *ctx = cb_arg; + struct ionic_deferred_work *work; + union ionic_notifyq_comp *comp; + struct net_device *netdev; + struct ionic_queue *q; + struct ionic_lif *lif; + u64 eid; + + comp = &((union ionic_notifyq_comp *)cq->base)[cq->tail_idx]; + + q = cq->bound_q; + lif = q->admin_info[0].ctx; + netdev = lif->netdev; + eid = le64_to_cpu(comp->event.eid); + + /* Have we run out of new completions to process? */ + if ((s64)(eid - lif->last_eid) <= 0) + return false; + + lif->last_eid = eid; + + dev_dbg(lif->ionic->dev, "notifyq event:\n"); + dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1, + comp, sizeof(*comp), true); + + switch (le16_to_cpu(comp->event.ecode)) { + case IONIC_EVENT_LINK_CHANGE: + ionic_link_status_check_request(lif, CAN_NOT_SLEEP); + break; + case IONIC_EVENT_RESET: + if (lif->ionic->idev.fw_status_ready && + !test_bit(IONIC_LIF_F_FW_RESET, lif->state) && + !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) { + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + netdev_err(lif->netdev, "Reset event dropped\n"); + clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state); + } else { + work->type = IONIC_DW_TYPE_LIF_RESET; + ionic_lif_deferred_enqueue(&lif->deferred, work); + } + } + break; + default: + netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n", + comp->event.ecode, eid); + break; + } + + return true; +} + +bool ionic_adminq_service(struct ionic_cq *cq) +{ + struct ionic_admin_desc_info *desc_info; + struct ionic_queue *q = cq->bound_q; struct ionic_admin_comp *comp; + u16 index; - if (!ctx) - return; + comp = &((struct ionic_admin_comp *)cq->base)[cq->tail_idx]; + + if (!color_match(comp->color, cq->done_color)) + return false; + + /* check for empty queue */ + if (q->tail_idx == q->head_idx) + return false; - comp = cq_info->cq_desc; + do { + desc_info = &q->admin_info[q->tail_idx]; + index = q->tail_idx; + q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); + if (likely(desc_info->ctx)) { + struct ionic_admin_ctx *ctx = desc_info->ctx; - memcpy(&ctx->comp, comp, sizeof(*comp)); + memcpy(&ctx->comp, comp, sizeof(*comp)); - dev_dbg(q->dev, "comp admin queue command:\n"); - dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1, - &ctx->comp, sizeof(ctx->comp), true); + dev_dbg(q->dev, "comp admin queue command:\n"); + dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1, + &ctx->comp, sizeof(ctx->comp), true); + complete_all(&ctx->work); + desc_info->ctx = NULL; + } + } while (index != le16_to_cpu(comp->comp_index)); - complete_all(&ctx->work); + return true; } bool ionic_adminq_poke_doorbell(struct ionic_queue *q) @@ -298,7 +367,8 @@ bool ionic_adminq_poke_doorbell(struct ionic_queue *q) int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) { - struct ionic_desc_info *desc_info; + struct ionic_admin_desc_info *desc_info; + struct ionic_admin_cmd *desc; unsigned long irqflags; struct ionic_queue *q; int err = 0; @@ -320,14 +390,17 @@ int 
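The new ionic_adminq_service() above retires every descriptor from the queue tail up to and including comp->comp_index, completing any context parked in admin_info along the way, so a single completion can batch several commands. A reduced walk over an index array:

/* Retiring admin descriptors up to a reported completion index,
 * following the do/while in ionic_adminq_service() above. */
#include <stdio.h>

#define NUM_DESCS 8u		/* power of two, as in the driver */

static void *admin_ctx[NUM_DESCS];	/* parked request contexts */

static void service_to(unsigned *tail, unsigned comp_index)
{
	unsigned index;

	do {
		index = *tail;
		*tail = (*tail + 1) & (NUM_DESCS - 1);

		if (admin_ctx[index]) {	/* complete the parked request */
			printf("complete ctx at %u\n", index);
			admin_ctx[index] = NULL;
		}
	} while (index != comp_index);
}

int main(void)
{
	static int a, b;
	unsigned tail = 6;

	admin_ctx[6] = &a;
	admin_ctx[7] = &b;
	service_to(&tail, 7);	/* one completion retires both */
	printf("tail now %u\n", tail);
	return 0;
}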
ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) if (err) goto err_out; - desc_info = &q->info[q->head_idx]; - memcpy(desc_info->desc, &ctx->cmd, sizeof(ctx->cmd)); + desc_info = &q->admin_info[q->head_idx]; + desc_info->ctx = ctx; + + desc = &q->adminq[q->head_idx]; + memcpy(desc, &ctx->cmd, sizeof(ctx->cmd)); dev_dbg(&lif->netdev->dev, "post admin queue command:\n"); dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1, &ctx->cmd, sizeof(ctx->cmd), true); - ionic_q_post(q, true, ionic_adminq_cb, ctx); + ionic_q_post(q, true); err_out: spin_unlock_irqrestore(&lif->adminq_lock, irqflags); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 6d168ad8c84f..5dba6d2d633c 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -19,23 +19,20 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, size_t offset, size_t len); static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q, - struct ionic_desc_info *desc_info); + struct ionic_tx_desc_info *desc_info); static void ionic_tx_clean(struct ionic_queue *q, - struct ionic_desc_info *desc_info, - struct ionic_cq_info *cq_info, - void *cb_arg); + struct ionic_tx_desc_info *desc_info, + struct ionic_txq_comp *comp); -static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell, - ionic_desc_cb cb_func, void *cb_arg) +static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell) { - ionic_q_post(q, ring_dbell, cb_func, cb_arg); + ionic_q_post(q, ring_dbell); } -static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell, - ionic_desc_cb cb_func, void *cb_arg) +static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell) { - ionic_q_post(q, ring_dbell, cb_func, cb_arg); + ionic_q_post(q, ring_dbell); } bool ionic_txq_poke_doorbell(struct ionic_queue *q) @@ -99,6 +96,14 @@ bool ionic_rxq_poke_doorbell(struct ionic_queue *q) return true; } +static inline struct ionic_txq_sg_elem *ionic_tx_sg_elems(struct ionic_queue *q) +{ + if (likely(q->sg_desc_size == sizeof(struct ionic_txq_sg_desc_v1))) + return q->txq_sgl_v1[q->head_idx].elems; + else + return q->txq_sgl[q->head_idx].elems; +} + static inline struct netdev_queue *q_to_ndq(struct net_device *netdev, struct ionic_queue *q) { @@ -123,37 +128,29 @@ static unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info) static int ionic_rx_page_alloc(struct ionic_queue *q, struct ionic_buf_info *buf_info) { - struct ionic_rx_stats *stats; - struct device *dev; + struct device *dev = q->dev; + dma_addr_t dma_addr; struct page *page; - dev = q->dev; - stats = q_to_rx_stats(q); - - if (unlikely(!buf_info)) { - net_err_ratelimited("%s: %s invalid buf_info in alloc\n", - dev_name(dev), q->name); - return -EINVAL; - } - page = alloc_pages(IONIC_PAGE_GFP_MASK, 0); if (unlikely(!page)) { net_err_ratelimited("%s: %s page alloc failed\n", dev_name(dev), q->name); - stats->alloc_err++; + q_to_rx_stats(q)->alloc_err++; return -ENOMEM; } - buf_info->dma_addr = dma_map_page(dev, page, 0, - IONIC_PAGE_SIZE, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) { + dma_addr = dma_map_page(dev, page, 0, + IONIC_PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, dma_addr))) { __free_pages(page, 0); net_err_ratelimited("%s: %s dma map failed\n", dev_name(dev), q->name); - stats->dma_map_err++; + q_to_rx_stats(q)->dma_map_err++; return -EIO; } + buf_info->dma_addr = dma_addr; 
buf_info->page = page; buf_info->page_offset = 0; @@ -180,7 +177,7 @@ static void ionic_rx_page_free(struct ionic_queue *q, } static bool ionic_rx_buf_recycle(struct ionic_queue *q, - struct ionic_buf_info *buf_info, u32 used) + struct ionic_buf_info *buf_info, u32 len) { u32 size; @@ -192,7 +189,7 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q, if (page_to_nid(buf_info->page) != numa_mem_id()) return false; - size = ALIGN(used, q->xdp_rxq_info ? IONIC_PAGE_SIZE : IONIC_PAGE_SPLIT_SZ); + size = ALIGN(len, q->xdp_rxq_info ? IONIC_PAGE_SIZE : IONIC_PAGE_SPLIT_SZ); buf_info->page_offset += size; if (buf_info->page_offset >= IONIC_PAGE_SIZE) return false; @@ -202,95 +199,96 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q, return true; } -static struct sk_buff *ionic_rx_frags(struct net_device *netdev, - struct ionic_queue *q, - struct ionic_desc_info *desc_info, - unsigned int headroom, - unsigned int len, - unsigned int num_sg_elems, - bool synced) +static void ionic_rx_add_skb_frag(struct ionic_queue *q, + struct sk_buff *skb, + struct ionic_buf_info *buf_info, + u32 off, u32 len, + bool synced) +{ + if (!synced) + dma_sync_single_range_for_cpu(q->dev, ionic_rx_buf_pa(buf_info), + off, len, DMA_FROM_DEVICE); + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + buf_info->page, buf_info->page_offset + off, + len, + IONIC_PAGE_SIZE); + + if (!ionic_rx_buf_recycle(q, buf_info, len)) { + dma_unmap_page(q->dev, buf_info->dma_addr, + IONIC_PAGE_SIZE, DMA_FROM_DEVICE); + buf_info->page = NULL; + } +} + +static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q, + struct ionic_rx_desc_info *desc_info, + unsigned int headroom, + unsigned int len, + unsigned int num_sg_elems, + bool synced) { struct ionic_buf_info *buf_info; - struct ionic_rx_stats *stats; - struct device *dev = q->dev; struct sk_buff *skb; unsigned int i; u16 frag_len; - stats = q_to_rx_stats(q); - buf_info = &desc_info->bufs[0]; - prefetchw(buf_info->page); skb = napi_get_frags(&q_to_qcq(q)->napi); if (unlikely(!skb)) { net_warn_ratelimited("%s: SKB alloc failed on %s!\n", - dev_name(dev), q->name); - stats->alloc_err++; + dev_name(q->dev), q->name); + q_to_rx_stats(q)->alloc_err++; return NULL; } - i = num_sg_elems + 1; - do { - if (unlikely(!buf_info->page)) { - dev_kfree_skb(skb); - return NULL; - } - - if (headroom) - frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN); - else - frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info)); - len -= frag_len; - - if (!synced) - dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info), - headroom, frag_len, DMA_FROM_DEVICE); - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - buf_info->page, buf_info->page_offset + headroom, - frag_len, IONIC_PAGE_SIZE); - - if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) { - dma_unmap_page(dev, buf_info->dma_addr, - IONIC_PAGE_SIZE, DMA_FROM_DEVICE); - buf_info->page = NULL; - } - - /* only needed on the first buffer */ - if (headroom) - headroom = 0; + if (headroom) + frag_len = min_t(u16, len, + IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN); + else + frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info)); - buf_info++; + if (unlikely(!buf_info->page)) + goto err_bad_buf_page; + ionic_rx_add_skb_frag(q, skb, buf_info, headroom, frag_len, synced); + len -= frag_len; + buf_info++; - i--; - } while (i > 0); + for (i = 0; i < num_sg_elems; i++, buf_info++) { + if (unlikely(!buf_info->page)) + goto err_bad_buf_page; + frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info)); + ionic_rx_add_skb_frag(q, skb, 
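ionic_rx_buf_recycle() above advances page_offset by the aligned length just consumed and keeps handing out the same page until the offset crosses the page; the real function also declines when the page changed NUMA node or holds extra references, checks this sketch omits:

/* Page-split recycling from ionic_rx_buf_recycle() above: carve a
 * page into aligned chunks, reuse it until the offset runs out. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SZ   4096u
#define SPLIT_SZ  2048u		/* IONIC_PAGE_SPLIT_SZ stand-in */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

struct buf { unsigned page_offset; };

static bool recycle(struct buf *b, unsigned len)
{
	b->page_offset += ALIGN_UP(len, SPLIT_SZ);
	return b->page_offset < PAGE_SZ; /* false: unmap and drop page */
}

int main(void)
{
	struct buf b = { 0 };
	unsigned pkts[] = { 1500, 600, 1500 };

	for (unsigned i = 0; i < 3; i++)
		printf("pkt %u bytes -> %s (offset now %u)\n", pkts[i],
		       recycle(&b, pkts[i]) ? "recycled" : "released",
		       b.page_offset);
	return 0;
}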
buf_info, 0, frag_len, synced); + len -= frag_len; + } return skb; + +err_bad_buf_page: + dev_kfree_skb(skb); + return NULL; } static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev, struct ionic_queue *q, - struct ionic_desc_info *desc_info, + struct ionic_rx_desc_info *desc_info, unsigned int headroom, unsigned int len, bool synced) { struct ionic_buf_info *buf_info; - struct ionic_rx_stats *stats; struct device *dev = q->dev; struct sk_buff *skb; - stats = q_to_rx_stats(q); - buf_info = &desc_info->bufs[0]; skb = napi_alloc_skb(&q_to_qcq(q)->napi, len); if (unlikely(!skb)) { net_warn_ratelimited("%s: SKB alloc failed on %s!\n", dev_name(dev), q->name); - stats->alloc_err++; + q_to_rx_stats(q)->alloc_err++; return NULL; } @@ -313,7 +311,7 @@ static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev, } static void ionic_xdp_tx_desc_clean(struct ionic_queue *q, - struct ionic_desc_info *desc_info) + struct ionic_tx_desc_info *desc_info) { unsigned int nbufs = desc_info->nbufs; struct ionic_buf_info *buf_info; @@ -351,7 +349,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame, enum xdp_action act, struct page *page, int off, bool ring_doorbell) { - struct ionic_desc_info *desc_info; + struct ionic_tx_desc_info *desc_info; struct ionic_buf_info *buf_info; struct ionic_tx_stats *stats; struct ionic_txq_desc *desc; @@ -359,16 +357,14 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame, dma_addr_t dma_addr; u64 cmd; - desc_info = &q->info[q->head_idx]; - desc = desc_info->txq_desc; + desc_info = &q->tx_info[q->head_idx]; + desc = &q->txq[q->head_idx]; buf_info = desc_info->bufs; stats = q_to_tx_stats(q); dma_addr = ionic_tx_map_single(q, frame->data, len); - if (dma_mapping_error(q->dev, dma_addr)) { - stats->dma_map_err++; + if (!dma_addr) return -EIO; - } buf_info->dma_addr = dma_addr; buf_info->len = len; buf_info->page = page; @@ -388,11 +384,10 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame, bi = &buf_info[1]; sinfo = xdp_get_shared_info_from_frame(frame); frag = sinfo->frags; - elem = desc_info->txq_sg_desc->elems; + elem = ionic_tx_sg_elems(q); for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) { dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag)); - if (dma_mapping_error(q->dev, dma_addr)) { - stats->dma_map_err++; + if (!dma_addr) { ionic_tx_desc_unmap_bufs(q, desc_info); return -EIO; } @@ -419,7 +414,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame, stats->pkts++; stats->bytes += len; - ionic_txq_post(q, ring_doorbell, ionic_tx_clean, NULL); + ionic_txq_post(q, ring_doorbell); return 0; } @@ -627,21 +622,19 @@ out_xdp_abort: } static void ionic_rx_clean(struct ionic_queue *q, - struct ionic_desc_info *desc_info, - struct ionic_cq_info *cq_info, - void *cb_arg) + struct ionic_rx_desc_info *desc_info, + struct ionic_rxq_comp *comp) { struct net_device *netdev = q->lif->netdev; struct ionic_qcq *qcq = q_to_qcq(q); struct ionic_rx_stats *stats; - struct ionic_rxq_comp *comp; struct bpf_prog *xdp_prog; unsigned int headroom; struct sk_buff *skb; + bool synced = false; + bool use_copybreak; u16 len; - comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp); - stats = q_to_rx_stats(q); if (comp->status) { @@ -654,17 +647,20 @@ static void ionic_rx_clean(struct ionic_queue *q, stats->bytes += len; xdp_prog = READ_ONCE(q->lif->xdp_prog); - if (xdp_prog && - ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len)) - 
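ionic_rx_clean() above now latches the copybreak decision in one boolean: frames at or below rx_copybreak are copied into a fresh linear skb so their pages recycle immediately, larger frames attach the pages as frags, and the same flag later selects napi_gro_receive() versus napi_gro_frags(). The trade-off in miniature, with stubbed receive paths:

/* Copybreak decision from ionic_rx_clean() above: copy small frames,
 * attach pages for large ones. Receive paths are stubbed out. */
#include <stdbool.h>
#include <stdio.h>

#define RX_COPYBREAK 256u	/* tunable in the driver */

static void deliver(unsigned len)
{
	bool use_copybreak = len <= RX_COPYBREAK;

	if (use_copybreak)
		printf("%4u bytes: memcpy into linear skb, recycle page now\n",
		       len);
	else
		printf("%4u bytes: attach page frag, page pinned until skb free\n",
		       len);
	/* the same flag picks napi_gro_receive() vs napi_gro_frags() */
}

int main(void)
{
	deliver(60);	/* ACK-sized: copying wins */
	deliver(1500);	/* MTU-sized: zero-copy frag wins */
	return 0;
}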
@@ -654,17 +647,20 @@ static void ionic_rx_clean(struct ionic_queue *q,
 	stats->bytes += len;
 
 	xdp_prog = READ_ONCE(q->lif->xdp_prog);
-	if (xdp_prog &&
-	    ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len))
-		return;
+	if (xdp_prog) {
+		if (ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len))
+			return;
+		synced = true;
+	}
 
 	headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
-	if (len <= q->lif->rx_copybreak)
+	use_copybreak = len <= q->lif->rx_copybreak;
+	if (use_copybreak)
 		skb = ionic_rx_copybreak(netdev, q, desc_info,
-					 headroom, len, !!xdp_prog);
+					 headroom, len, synced);
 	else
-		skb = ionic_rx_frags(netdev, q, desc_info, headroom, len,
-				     comp->num_sg_elems, !!xdp_prog);
+		skb = ionic_rx_build_skb(q, desc_info, headroom, len,
+					 comp->num_sg_elems, synced);
 
 	if (unlikely(!skb)) {
 		stats->dropped++;
@@ -716,7 +712,7 @@ static void ionic_rx_clean(struct ionic_queue *q,
 			u64 hwstamp;
 
 			cq_desc_hwstamp =
-				cq_info->cq_desc +
+				(void *)comp +
 				qcq->cq.desc_size -
 				sizeof(struct ionic_rxq_comp) -
 				IONIC_HWSTAMP_CQ_NEGOFFSET;
@@ -731,19 +727,19 @@ static void ionic_rx_clean(struct ionic_queue *q,
 		}
 	}
 
-	if (len <= q->lif->rx_copybreak)
+	if (use_copybreak)
 		napi_gro_receive(&qcq->napi, skb);
 	else
 		napi_gro_frags(&qcq->napi);
 }
 
-bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
+bool ionic_rx_service(struct ionic_cq *cq)
 {
+	struct ionic_rx_desc_info *desc_info;
 	struct ionic_queue *q = cq->bound_q;
-	struct ionic_desc_info *desc_info;
 	struct ionic_rxq_comp *comp;
 
-	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);
+	comp = &((struct ionic_rxq_comp *)cq->base)[cq->tail_idx];
 
 	if (!color_match(comp->pkt_type_color, cq->done_color))
 		return false;
@@ -755,31 +751,29 @@ bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
 	if (q->tail_idx != le16_to_cpu(comp->comp_index))
 		return false;
 
-	desc_info = &q->info[q->tail_idx];
+	desc_info = &q->rx_info[q->tail_idx];
 	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
 
 	/* clean the related q entry, only one per qc completion */
-	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);
-
-	desc_info->cb = NULL;
-	desc_info->cb_arg = NULL;
+	ionic_rx_clean(q, desc_info, comp);
 
 	return true;
 }
 
 static inline void ionic_write_cmb_desc(struct ionic_queue *q,
-					void __iomem *cmb_desc,
 					void *desc)
 {
-	if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS)
-		memcpy_toio(cmb_desc, desc, q->desc_size);
+	/* Since Rx and Tx descriptors are the same size, we can
+	 * save an instruction or two and skip the qtype check.
+	 */
+	if (unlikely(q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS))
+		memcpy_toio(&q->cmb_txq[q->head_idx], desc, sizeof(q->cmb_txq[0]));
 }
 
 void ionic_rx_fill(struct ionic_queue *q)
 {
 	struct net_device *netdev = q->lif->netdev;
-	struct ionic_desc_info *desc_info;
-	struct ionic_rxq_sg_desc *sg_desc;
+	struct ionic_rx_desc_info *desc_info;
 	struct ionic_rxq_sg_elem *sg_elem;
 	struct ionic_buf_info *buf_info;
 	unsigned int fill_threshold;
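
Note: the rx_info/tail_idx handling above shows the convention this rewrite standardizes on throughout the file: rings are flat arrays walked with head/tail counters wrapped by a power-of-two mask, so "the descriptor" is always &ring[idx] rather than a pointer cached in a per-slot side struct. A minimal standalone sketch of the masking trick, assuming (as the driver does) a power-of-two ring size:

#include <assert.h>
#include <stdio.h>

#define NUM_DESCS 8u	/* must be a power of two for the mask to work */

int main(void)
{
	unsigned int head = 0, tail = 0, i;

	assert((NUM_DESCS & (NUM_DESCS - 1)) == 0);

	for (i = 0; i < 20; i++) {
		head = (head + 1) & (NUM_DESCS - 1);	/* post a descriptor */
		tail = (tail + 1) & (NUM_DESCS - 1);	/* complete it */
	}
	printf("head=%u tail=%u after 20 steps\n", head, tail);	/* 4 4 */
	return 0;
}
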
@@ -807,8 +801,8 @@ void ionic_rx_fill(struct ionic_queue *q)
 		nfrags = 0;
 		remain_len = len;
-		desc_info = &q->info[q->head_idx];
-		desc = desc_info->desc;
+		desc = &q->rxq[q->head_idx];
+		desc_info = &q->rx_info[q->head_idx];
 		buf_info = &desc_info->bufs[0];
 
 		if (!buf_info->page) { /* alloc a new buffer? */
@@ -837,9 +831,8 @@ void ionic_rx_fill(struct ionic_queue *q)
 		nfrags++;
 
 		/* fill sg descriptors - buf[1..n] */
-		sg_desc = desc_info->sg_desc;
-		for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
-			sg_elem = &sg_desc->elems[j];
+		sg_elem = q->rxq_sgl[q->head_idx].elems;
+		for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++, sg_elem++) {
 			if (!buf_info->page) { /* alloc a new sg buffer? */
 				if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
 					sg_elem->addr = 0;
@@ -857,18 +850,16 @@
 		}
 
 		/* clear end sg element as a sentinel */
-		if (j < q->max_sg_elems) {
-			sg_elem = &sg_desc->elems[j];
+		if (j < q->max_sg_elems)
 			memset(sg_elem, 0, sizeof(*sg_elem));
-		}
 
 		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
 					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
 		desc_info->nbufs = nfrags;
 
-		ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+		ionic_write_cmb_desc(q, desc);
 
-		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
+		ionic_rxq_post(q, false);
 	}
 
 	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
@@ -883,21 +874,19 @@ void ionic_rx_fill(struct ionic_queue *q)
 
 void ionic_rx_empty(struct ionic_queue *q)
 {
-	struct ionic_desc_info *desc_info;
+	struct ionic_rx_desc_info *desc_info;
 	struct ionic_buf_info *buf_info;
 	unsigned int i, j;
 
 	for (i = 0; i < q->num_descs; i++) {
-		desc_info = &q->info[i];
-		for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
+		desc_info = &q->rx_info[i];
+		for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++) {
 			buf_info = &desc_info->bufs[j];
 			if (buf_info->page)
 				ionic_rx_page_free(q, buf_info);
 		}
 
 		desc_info->nbufs = 0;
-		desc_info->cb = NULL;
-		desc_info->cb_arg = NULL;
 	}
 
 	q->head_idx = 0;
@@ -942,14 +931,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
 {
 	struct ionic_qcq *qcq = napi_to_qcq(napi);
 	struct ionic_cq *cq = napi_to_cq(napi);
-	struct ionic_dev *idev;
-	struct ionic_lif *lif;
 	u32 work_done = 0;
 	u32 flags = 0;
 
-	lif = cq->bound_q->lif;
-	idev = &lif->ionic->idev;
-
 	work_done = ionic_tx_cq_service(cq, budget);
 
 	if (unlikely(!budget))
@@ -963,7 +947,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
 	if (work_done || flags) {
 		flags |= IONIC_INTR_CRED_RESET_COALESCE;
-		ionic_intr_credits(idev->intr_ctrl,
+		ionic_intr_credits(cq->idev->intr_ctrl,
 				   cq->bound_intr->index,
 				   work_done, flags);
 	}
@@ -986,17 +970,12 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
 {
 	struct ionic_qcq *qcq = napi_to_qcq(napi);
 	struct ionic_cq *cq = napi_to_cq(napi);
-	struct ionic_dev *idev;
-	struct ionic_lif *lif;
 	u32 work_done = 0;
 	u32 flags = 0;
 
 	if (unlikely(!budget))
 		return budget;
 
-	lif = cq->bound_q->lif;
-	idev = &lif->ionic->idev;
-
 	work_done = ionic_cq_service(cq, budget,
 				     ionic_rx_service, NULL, NULL);
@@ -1011,7 +990,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
 	if (work_done || flags) {
 		flags |= IONIC_INTR_CRED_RESET_COALESCE;
-		ionic_intr_credits(idev->intr_ctrl,
+		ionic_intr_credits(cq->idev->intr_ctrl,
 				   cq->bound_intr->index,
 				   work_done, flags);
 	}
@@ -1028,7 +1007,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
 	struct ionic_cq *rxcq = napi_to_cq(napi);
 	unsigned int qi = rxcq->bound_q->index;
 	struct ionic_qcq *txqcq;
-	struct ionic_dev *idev;
 	struct ionic_lif *lif;
 	struct ionic_cq *txcq;
 	bool resched = false;
@@ -1037,7 +1015,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
 	u32 flags = 0;
 
 	lif = rxcq->bound_q->lif;
-	idev = &lif->ionic->idev;
 	txqcq = lif->txqcqs[qi];
 	txcq = &lif->txqcqs[qi]->cq;
@@ -1060,7 +1037,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
 	if (rx_work_done || flags) {
 		flags |= IONIC_INTR_CRED_RESET_COALESCE;
-		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
+		ionic_intr_credits(rxcq->idev->intr_ctrl, rxcq->bound_intr->index,
 				   tx_work_done + rx_work_done, flags);
 	}
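
Note: the three napi handlers above stop re-deriving the ionic_dev on every poll (lif = cq->bound_q->lif; idev = &lif->ionic->idev;) and instead read a pointer cached in the cq. A standalone sketch of the pointer-chasing this removes; the struct layout is illustrative, not the driver's:

#include <stdio.h>

struct ionic_dev { int intr_ctrl; };
struct ionic { struct ionic_dev idev; };
struct lif { struct ionic *ionic; };
struct queue { struct lif *lif; };
struct cq { struct queue *bound_q; struct ionic_dev *idev; };

int main(void)
{
	struct ionic ion = { .idev = { .intr_ctrl = 7 } };
	struct lif lif = { .ionic = &ion };
	struct queue q = { .lif = &lif };
	struct cq cq = { .bound_q = &q, .idev = &ion.idev };

	struct ionic_dev *old = &cq.bound_q->lif->ionic->idev;	/* three hops */
	struct ionic_dev *cached = cq.idev;			/* one hop */

	printf("%d %d\n", old->intr_ctrl, cached->intr_ctrl);	/* 7 7 */
	return 0;
}
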
@@ -1077,7 +1054,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
 static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
 				      void *data, size_t len)
 {
-	struct ionic_tx_stats *stats = q_to_tx_stats(q);
 	struct device *dev = q->dev;
 	dma_addr_t dma_addr;
@@ -1085,7 +1061,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
 	if (dma_mapping_error(dev, dma_addr)) {
 		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
 				     dev_name(dev), q->name);
-		stats->dma_map_err++;
+		q_to_tx_stats(q)->dma_map_err++;
 		return 0;
 	}
 	return dma_addr;
@@ -1095,7 +1071,6 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
 				    const skb_frag_t *frag,
 				    size_t offset, size_t len)
 {
-	struct ionic_tx_stats *stats = q_to_tx_stats(q);
 	struct device *dev = q->dev;
 	dma_addr_t dma_addr;
@@ -1103,16 +1078,16 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
 	if (dma_mapping_error(dev, dma_addr)) {
 		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
 				     dev_name(dev), q->name);
-		stats->dma_map_err++;
+		q_to_tx_stats(q)->dma_map_err++;
+		return 0;
 	}
 	return dma_addr;
 }
 
 static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
-			    struct ionic_desc_info *desc_info)
+			    struct ionic_tx_desc_info *desc_info)
 {
 	struct ionic_buf_info *buf_info = desc_info->bufs;
-	struct ionic_tx_stats *stats = q_to_tx_stats(q);
 	struct device *dev = q->dev;
 	dma_addr_t dma_addr;
 	unsigned int nfrags;
@@ -1120,10 +1095,8 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
 	int frag_idx;
 
 	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
-	if (dma_mapping_error(dev, dma_addr)) {
-		stats->dma_map_err++;
+	if (!dma_addr)
 		return -EIO;
-	}
 	buf_info->dma_addr = dma_addr;
 	buf_info->len = skb_headlen(skb);
 	buf_info++;
@@ -1132,10 +1105,8 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
 	nfrags = skb_shinfo(skb)->nr_frags;
 	for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
 		dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
-		if (dma_mapping_error(dev, dma_addr)) {
-			stats->dma_map_err++;
+		if (!dma_addr)
 			goto dma_fail;
-		}
 		buf_info->dma_addr = dma_addr;
 		buf_info->len = skb_frag_size(frag);
 		buf_info++;
@@ -1153,12 +1124,13 @@ dma_fail:
 		dma_unmap_page(dev, buf_info->dma_addr,
 			       buf_info->len, DMA_TO_DEVICE);
 	}
-	dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE);
+	dma_unmap_single(dev, desc_info->bufs[0].dma_addr,
+			 desc_info->bufs[0].len, DMA_TO_DEVICE);
 	return -EIO;
 }
 
 static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
-				     struct ionic_desc_info *desc_info)
+				     struct ionic_tx_desc_info *desc_info)
 {
 	struct ionic_buf_info *buf_info = desc_info->bufs;
 	struct device *dev = q->dev;
@@ -1167,24 +1139,23 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
 	if (!desc_info->nbufs)
 		return;
 
-	dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
+	dma_unmap_single(dev, buf_info->dma_addr,
 			 buf_info->len, DMA_TO_DEVICE);
 	buf_info++;
 	for (i = 1; i < desc_info->nbufs; i++, buf_info++)
-		dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
+		dma_unmap_page(dev, buf_info->dma_addr,
 			       buf_info->len, DMA_TO_DEVICE);
 
 	desc_info->nbufs = 0;
 }
 
 static void ionic_tx_clean(struct ionic_queue *q,
-			   struct ionic_desc_info *desc_info,
-			   struct ionic_cq_info *cq_info,
-			   void *cb_arg)
+			   struct ionic_tx_desc_info *desc_info,
+			   struct ionic_txq_comp *comp)
 {
 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
 	struct ionic_qcq *qcq = q_to_qcq(q);
-	struct sk_buff *skb = cb_arg;
+	struct sk_buff *skb;
 
 	if (desc_info->xdpf) {
 		ionic_xdp_tx_desc_clean(q->partner, desc_info);
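
Note: the mapping helpers above now bump dma_map_err themselves and return 0 on failure, so every caller shrinks from a dma_mapping_error() test plus stats update to a single "!dma_addr" check; this works because 0 is not treated as a valid mapping in these paths. A standalone sketch of the convention, with stand-in names (map_single and the error accounting below are illustrative, not driver API):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static unsigned int dma_map_errs;

/* returns a pretend bus address, or 0 on failure (never valid here) */
static uint64_t map_single(const void *data, size_t len)
{
	if (!data || !len) {	/* stand-in for dma_mapping_error() */
		dma_map_errs++;
		return 0;
	}
	return (uint64_t)(uintptr_t)data;
}

int main(void)
{
	char buf[64];

	if (!map_single(buf, sizeof(buf)))
		return 1;	/* caller: one cheap test, no stats code */
	if (!map_single(NULL, 0))
		printf("map failed, errors so far: %u\n", dma_map_errs);
	return 0;
}
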
@@ -1198,17 +1169,18 @@ static void ionic_tx_clean(struct ionic_queue *q,
 
 	ionic_tx_desc_unmap_bufs(q, desc_info);
 
+	skb = desc_info->skb;
 	if (!skb)
 		return;
 
 	if (unlikely(ionic_txq_hwstamp_enabled(q))) {
-		if (cq_info) {
+		if (comp) {
 			struct skb_shared_hwtstamps hwts = {};
 			__le64 *cq_desc_hwstamp;
 			u64 hwstamp;
 
 			cq_desc_hwstamp =
-				cq_info->cq_desc +
+				(void *)comp +
 				qcq->cq.desc_size -
 				sizeof(struct ionic_txq_comp) -
 				IONIC_HWSTAMP_CQ_NEGOFFSET;
@@ -1234,17 +1206,17 @@ static void ionic_tx_clean(struct ionic_queue *q,
 	napi_consume_skb(skb, 1);
 }
 
-static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info,
+static bool ionic_tx_service(struct ionic_cq *cq,
 			     unsigned int *total_pkts, unsigned int *total_bytes)
 {
+	struct ionic_tx_desc_info *desc_info;
 	struct ionic_queue *q = cq->bound_q;
-	struct ionic_desc_info *desc_info;
 	struct ionic_txq_comp *comp;
 	unsigned int bytes = 0;
 	unsigned int pkts = 0;
 	u16 index;
 
-	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);
+	comp = &((struct ionic_txq_comp *)cq->base)[cq->tail_idx];
 
 	if (!color_match(comp->color, cq->done_color))
 		return false;
@@ -1253,17 +1225,16 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info,
 	 * several q entries completed for each cq completion
 	 */
 	do {
-		desc_info = &q->info[q->tail_idx];
+		desc_info = &q->tx_info[q->tail_idx];
 		desc_info->bytes = 0;
 		index = q->tail_idx;
 		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
-		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
-		if (desc_info->cb_arg) {
+		ionic_tx_clean(q, desc_info, comp);
+		if (desc_info->skb) {
 			pkts++;
 			bytes += desc_info->bytes;
+			desc_info->skb = NULL;
 		}
-		desc_info->cb = NULL;
-		desc_info->cb_arg = NULL;
 	} while (index != le16_to_cpu(comp->comp_index));
 
 	(*total_pkts) += pkts;
@@ -1274,7 +1245,6 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info,
 
 unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
 {
-	struct ionic_cq_info *cq_info;
 	unsigned int work_done = 0;
 	unsigned int bytes = 0;
 	unsigned int pkts = 0;
@@ -1282,12 +1252,10 @@ unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
 	if (work_to_do == 0)
 		return 0;
 
-	cq_info = &cq->info[cq->tail_idx];
-	while (ionic_tx_service(cq, cq_info, &pkts, &bytes)) {
+	while (ionic_tx_service(cq, &pkts, &bytes)) {
 		if (cq->tail_idx == cq->num_descs - 1)
 			cq->done_color = !cq->done_color;
 		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
-		cq_info = &cq->info[cq->tail_idx];
 
 		if (++work_done >= work_to_do)
 			break;
@@ -1308,33 +1276,31 @@ unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
 
 void ionic_tx_flush(struct ionic_cq *cq)
 {
-	struct ionic_dev *idev = &cq->lif->ionic->idev;
 	u32 work_done;
 
 	work_done = ionic_tx_cq_service(cq, cq->num_descs);
 	if (work_done)
-		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
+		ionic_intr_credits(cq->idev->intr_ctrl, cq->bound_intr->index,
 				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
 }
 
 void ionic_tx_empty(struct ionic_queue *q)
 {
-	struct ionic_desc_info *desc_info;
+	struct ionic_tx_desc_info *desc_info;
 	int bytes = 0;
 	int pkts = 0;
 
 	/* walk the not completed tx entries, if any */
 	while (q->head_idx != q->tail_idx) {
-		desc_info = &q->info[q->tail_idx];
+		desc_info = &q->tx_info[q->tail_idx];
 		desc_info->bytes = 0;
 		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
-		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
-		if (desc_info->cb_arg) {
+		ionic_tx_clean(q, desc_info, NULL);
+		if (desc_info->skb) {
 			pkts++;
 			bytes += desc_info->bytes;
+			desc_info->skb = NULL;
 		}
-		desc_info->cb = NULL;
-		desc_info->cb_arg = NULL;
 	}
 
 	if (likely(!ionic_txq_hwstamp_enabled(q))) {
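
Note: ionic_tx_service()/ionic_tx_cq_service() above preserve the completion-ring "color" protocol even after dropping the cq_info cursor: an entry is trusted only if its color matches done_color, and the expected color flips each time the tail wraps, so stale entries from the previous lap never match. A self-contained simulation of that handshake; the ring size and the way the "device" side generates colors are illustrative, not taken from the hardware spec:

#include <stdbool.h>
#include <stdio.h>

#define CQ_DESCS 4u	/* power of two, as the driver assumes */

int main(void)
{
	bool ring[CQ_DESCS] = { false };	/* colors the device wrote */
	unsigned int head = 0, tail = 0, seen = 0, i;
	bool dev_color = true;			/* device's current color */
	bool done_color = true;			/* consumer's expectation */

	for (i = 0; i < 10; i++) {
		/* device posts one completion, flipping color on wrap */
		ring[head] = dev_color;
		if (head == CQ_DESCS - 1)
			dev_color = !dev_color;
		head = (head + 1) & (CQ_DESCS - 1);

		/* consumer drains everything whose color matches */
		while (ring[tail] == done_color) {
			seen++;
			if (tail == CQ_DESCS - 1)
				done_color = !done_color;
			tail = (tail + 1) & (CQ_DESCS - 1);
		}
	}
	printf("consumed %u completions\n", seen);	/* 10 */
	return 0;
}
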
@@ -1391,7 +1357,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
 }
 
 static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
-			      struct ionic_desc_info *desc_info,
+			      struct ionic_tx_desc_info *desc_info,
 			      struct sk_buff *skb,
 			      dma_addr_t addr, u8 nsge, u16 len,
 			      unsigned int hdrlen, unsigned int mss,
@@ -1399,7 +1365,7 @@ static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
 			      u16 vlan_tci, bool has_vlan,
 			      bool start, bool done)
 {
-	struct ionic_txq_desc *desc = desc_info->desc;
+	struct ionic_txq_desc *desc = &q->txq[q->head_idx];
 	u8 flags = 0;
 	u64 cmd;
@@ -1415,15 +1381,15 @@ static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
 	desc->hdr_len = cpu_to_le16(hdrlen);
 	desc->mss = cpu_to_le16(mss);
 
-	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+	ionic_write_cmb_desc(q, desc);
 
 	if (start) {
 		skb_tx_timestamp(skb);
 		if (likely(!ionic_txq_hwstamp_enabled(q)))
 			netdev_tx_sent_queue(q_to_ndq(netdev, q), skb->len);
-		ionic_txq_post(q, false, ionic_tx_clean, skb);
+		ionic_txq_post(q, false);
 	} else {
-		ionic_txq_post(q, done, NULL, NULL);
+		ionic_txq_post(q, done);
 	}
 }
 
@@ -1431,7 +1397,7 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
 			struct sk_buff *skb)
 {
 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
-	struct ionic_desc_info *desc_info;
+	struct ionic_tx_desc_info *desc_info;
 	struct ionic_buf_info *buf_info;
 	struct ionic_txq_sg_elem *elem;
 	struct ionic_txq_desc *desc;
@@ -1453,8 +1419,7 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
 	bool encap;
 	int err;
 
-	desc_info = &q->info[q->head_idx];
-	buf_info = desc_info->bufs;
+	desc_info = &q->tx_info[q->head_idx];
 
 	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
 		return -EIO;
@@ -1491,6 +1456,8 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
 	else
 		hdrlen = skb_tcp_all_headers(skb);
 
+	desc_info->skb = skb;
+	buf_info = desc_info->bufs;
 	tso_rem = len;
 	seg_rem = min(tso_rem, hdrlen + mss);
@@ -1517,8 +1484,8 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
 		chunk_len = min(frag_rem, seg_rem);
 		if (!desc) {
 			/* fill main descriptor */
-			desc = desc_info->txq_desc;
-			elem = desc_info->txq_sg_desc->elems;
+			desc = &q->txq[q->head_idx];
+			elem = ionic_tx_sg_elems(q);
 			desc_addr = frag_addr;
 			desc_len = chunk_len;
 		} else {
@@ -1542,7 +1509,7 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
 				  start, done);
 		start = false;
 		/* Buffer information is stored with the first tso descriptor */
-		desc_info = &q->info[q->head_idx];
+		desc_info = &q->tx_info[q->head_idx];
 		desc_info->nbufs = 0;
 	}
 
@@ -1555,9 +1522,9 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
 }
 
 static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
-			       struct ionic_desc_info *desc_info)
+			       struct ionic_tx_desc_info *desc_info)
 {
-	struct ionic_txq_desc *desc = desc_info->txq_desc;
+	struct ionic_txq_desc *desc = &q->txq[q->head_idx];
 	struct ionic_buf_info *buf_info = desc_info->bufs;
 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
 	bool has_vlan;
@@ -1585,7 +1552,7 @@ static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
 	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
 	desc->csum_offset = cpu_to_le16(skb->csum_offset);
 
-	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+	ionic_write_cmb_desc(q, desc);
 
 	if (skb_csum_is_sctp(skb))
 		stats->crc32_csum++;
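
Note: the tx path above also changes skb bookkeeping: desc_info->skb is assigned only after ionic_tx_map_skb() succeeds, and the completion side both keys on a non-NULL skb and clears it once the packet has been counted. A standalone sketch of that ownership convention; every name here is a stand-in for illustration:

#include <stdbool.h>
#include <stdio.h>

struct tx_desc_info { void *skb; unsigned int bytes; };

static bool map_ok(bool simulate_ok) { return simulate_ok; }

static int post_tx(struct tx_desc_info *di, void *skb, bool ok)
{
	if (!map_ok(ok))
		return -1;	/* skb never recorded: nothing to clean */
	di->skb = skb;		/* slot now owns the packet */
	di->bytes = 100;
	return 0;
}

int main(void)
{
	struct tx_desc_info di = { 0 };
	unsigned int pkts = 0, bytes = 0;
	char skb;

	if (post_tx(&di, &skb, true) == 0 && di.skb) {
		pkts++;
		bytes += di.bytes;
		di.skb = NULL;	/* harvested exactly once */
	}
	printf("pkts=%u bytes=%u\n", pkts, bytes);	/* 1 100 */
	return 0;
}
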
@@ -1594,9 +1561,9 @@ static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
 }
 
 static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
-				  struct ionic_desc_info *desc_info)
+				  struct ionic_tx_desc_info *desc_info)
 {
-	struct ionic_txq_desc *desc = desc_info->txq_desc;
+	struct ionic_txq_desc *desc = &q->txq[q->head_idx];
 	struct ionic_buf_info *buf_info = desc_info->bufs;
 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
 	bool has_vlan;
@@ -1624,20 +1591,20 @@ static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
 	desc->csum_start = 0;
 	desc->csum_offset = 0;
 
-	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+	ionic_write_cmb_desc(q, desc);
 
 	stats->csum_none++;
 }
 
 static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
-			       struct ionic_desc_info *desc_info)
+			       struct ionic_tx_desc_info *desc_info)
 {
-	struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
 	struct ionic_buf_info *buf_info = &desc_info->bufs[1];
-	struct ionic_txq_sg_elem *elem = sg_desc->elems;
 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
+	struct ionic_txq_sg_elem *elem;
 	unsigned int i;
 
+	elem = ionic_tx_sg_elems(q);
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
 		elem->addr = cpu_to_le64(buf_info->dma_addr);
 		elem->len = cpu_to_le16(buf_info->len);
@@ -1649,13 +1616,15 @@ static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
 static int ionic_tx(struct net_device *netdev, struct ionic_queue *q,
 		    struct sk_buff *skb)
 {
-	struct ionic_desc_info *desc_info = &q->info[q->head_idx];
+	struct ionic_tx_desc_info *desc_info = &q->tx_info[q->head_idx];
 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
 	bool ring_dbell = true;
 
 	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
 		return -EIO;
 
+	desc_info->skb = skb;
+
 	/* set up the initial descriptor */
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		ionic_tx_calc_csum(q, skb, desc_info);
@@ -1677,7 +1646,7 @@ static int ionic_tx(struct net_device *netdev, struct ionic_queue *q,
 		ring_dbell = __netdev_tx_sent_queue(ndq, skb->len,
 						    netdev_xmit_more());
 	}
-	ionic_txq_post(q, ring_dbell, ionic_tx_clean, skb);
+	ionic_txq_post(q, ring_dbell);
 
 	return 0;
 }
@@ -1761,12 +1730,10 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
 
 linearize:
 	if (too_many_frags) {
-		struct ionic_tx_stats *stats = q_to_tx_stats(q);
-
 		err = skb_linearize(skb);
 		if (err)
 			return err;
-		stats->linearize++;
+		q_to_tx_stats(q)->linearize++;
 	}
 
 	return ndescs;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
index 68228bb8c119..9e73e324e7a1 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
@@ -14,7 +14,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget);
 int ionic_txrx_napi(struct napi_struct *napi, int budget);
 netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev);
 
-bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
+bool ionic_rx_service(struct ionic_cq *cq);
 
 int ionic_xdp_xmit(struct net_device *netdev, int n,
 		   struct xdp_frame **xdp, u32 flags);
 
 #endif /* _IONIC_TXRX_H_ */
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index fac227d372db..dcd901eccfc8 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -11,6 +11,7 @@
 #include "net_driver.h"
 #include <linux/module.h>
 #include <linux/iommu.h>
+#include <net/rps.h>
"efx.h" #include "nic.h" #include "rx_common.h" diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c index 4579f43484c3..219fb358a646 100644 --- a/drivers/net/ethernet/sfc/siena/rx_common.c +++ b/drivers/net/ethernet/sfc/siena/rx_common.c @@ -11,6 +11,7 @@ #include "net_driver.h" #include <linux/module.h> #include <linux/iommu.h> +#include <net/rps.h> #include "efx.h" #include "nic.h" #include "rx_common.h" diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 9d2f4ac783e4..2939a21ca74f 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -294,7 +294,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev, txqueue, netif_tx_queue_stopped(netif_txq), jiffies_to_msecs(jiffies - trans_start), - dql_avail(&netif_txq->dql), + netdev_queue_dql_avail(netif_txq), k3_cppi_desc_pool_avail(tx_chn->desc_pool)); if (netif_tx_queue_stopped(netif_txq)) { |