Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3')
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h                 |    2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h                     |    5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c              |   17
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c                 |  228
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h                 |   37
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c              |  265
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h              |   31
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile             |    2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c          |   75
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h          |   34
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c          |   51
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c      |   70
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c      |  148
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h      |   15
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c          | 1665
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c         |  186
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h         |   30
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c          |   11
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h          |    2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile             |    2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c        |   29
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h        |   16
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c    |  150
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h    |   15
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c       |   31
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h       |   25
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c        |  122
27 files changed, 2276 insertions(+), 988 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index aa86a81c8f4a..c2bd2584201f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -9,7 +9,7 @@
enum HCLGE_MBX_OPCODE {
HCLGE_MBX_RESET = 0x01, /* (VF -> PF) assert reset */
- HCLGE_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset*/
+ HCLGE_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset */
HCLGE_MBX_SET_UNICAST, /* (VF -> PF) set UC addr */
HCLGE_MBX_SET_MULTICAST, /* (VF -> PF) set MC addr */
HCLGE_MBX_SET_VLAN, /* (VF -> PF) set VLAN */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index e0b7c3c44e7b..546a60530384 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -65,7 +65,7 @@
#define HNAE3_UNIC_CLIENT_INITED_B 0x4
#define HNAE3_ROCE_CLIENT_INITED_B 0x5
-#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
+#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) | \
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
#define hnae3_dev_roce_supported(hdev) \
@@ -718,6 +718,8 @@ struct hnae3_ae_ops {
u32 nsec, u32 sec);
int (*get_ts_info)(struct hnae3_handle *handle,
struct ethtool_ts_info *info);
+ int (*get_link_diagnosis_info)(struct hnae3_handle *handle,
+ u32 *status_code);
};
struct hnae3_dcb_ops {
@@ -772,6 +774,7 @@ struct hnae3_knic_private_info {
u16 int_rl_setting;
enum pkt_hash_types rss_type;
+ void __iomem *io_base;
};
struct hnae3_roce_private_info {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 80461ab0ce9e..2b66c59f5eaf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -38,9 +38,8 @@ static struct hns3_dbg_dentry_info hns3_dbg_dentry[] = {
},
};
-static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, unsigned int cmd);
-static int hns3_dbg_common_file_init(struct hnae3_handle *handle,
- unsigned int cmd);
+static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd);
+static int hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd);
static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
{
@@ -696,7 +695,7 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h, char *buf, int len)
sprintf(result[j++], "%u", i);
sprintf(result[j++], "%u",
h->ae_algo->ops->get_global_queue_id(h, i));
- sprintf(result[j++], "%u",
+ sprintf(result[j++], "%d",
priv->ring[i].tqp_vector->vector_irq);
hns3_dbg_fill_content(content, sizeof(content), queue_map_items,
(const char **)result,
@@ -798,10 +797,10 @@ static const struct hns3_dbg_item tx_bd_info_items[] = {
{ "T_CS_VLAN_TSO", 2 },
{ "OT_VLAN_TAG", 3 },
{ "TV", 2 },
- { "OLT_VLAN_LEN", 2},
- { "PAYLEN_OL4CS", 2},
- { "BD_FE_SC_VLD", 2},
- { "MSS_HW_CSUM", 0},
+ { "OLT_VLAN_LEN", 2 },
+ { "PAYLEN_OL4CS", 2 },
+ { "BD_FE_SC_VLD", 2 },
+ { "MSS_HW_CSUM", 0 },
};
static void hns3_dump_tx_bd_info(struct hns3_nic_priv *priv,
@@ -868,7 +867,7 @@ static void
hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- static const char * const str[] = {"no", "yes"};
+ const char * const str[] = {"no", "yes"};
unsigned long *caps = ae_dev->caps;
u32 i, state;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index cdb5f14fb6bc..22af3d6ce178 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -63,7 +63,7 @@ MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to opt
#define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \
sizeof(struct sg_table))
-#define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM),\
+#define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \
dma_get_cache_alignment())
#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
@@ -100,7 +100,7 @@ static const struct pci_device_id hns3_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
/* required last entry */
- {0, }
+ {0,}
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
@@ -971,8 +971,7 @@ static u32 hns3_tx_spare_space(struct hns3_enet_ring *ring)
/* The free tx buffer is divided into two part, so pick the
* larger one.
*/
- return (ntc > (tx_spare->len - ntu) ? ntc :
- (tx_spare->len - ntu)) - 1;
+ return max(ntc, tx_spare->len - ntu) - 1;
}
static void hns3_tx_spare_update(struct hns3_enet_ring *ring)
@@ -2852,7 +2851,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_start_xmit = hns3_nic_net_xmit,
.ndo_tx_timeout = hns3_nic_net_timeout,
.ndo_set_mac_address = hns3_nic_net_set_mac_address,
- .ndo_do_ioctl = hns3_nic_do_ioctl,
+ .ndo_eth_ioctl = hns3_nic_do_ioctl,
.ndo_change_mtu = hns3_nic_change_mtu,
.ndo_set_features = hns3_nic_set_features,
.ndo_features_check = hns3_features_check,
@@ -3127,11 +3126,6 @@ static void hns3_set_default_feature(struct net_device *netdev)
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->hw_enc_features |= NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
- NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_SCTP_CRC | NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST;
-
netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
@@ -3141,62 +3135,37 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
- netdev->vlan_features |= NETIF_F_RXCSUM |
- NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
- NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
-
- netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
- NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
-
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- netdev->hw_features |= NETIF_F_GRO_HW;
netdev->features |= NETIF_F_GRO_HW;
- if (!(h->flags & HNAE3_SUPPORT_VF)) {
- netdev->hw_features |= NETIF_F_NTUPLE;
+ if (!(h->flags & HNAE3_SUPPORT_VF))
netdev->features |= NETIF_F_NTUPLE;
- }
}
- if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) {
- netdev->hw_features |= NETIF_F_GSO_UDP_L4;
+ if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps))
netdev->features |= NETIF_F_GSO_UDP_L4;
- netdev->vlan_features |= NETIF_F_GSO_UDP_L4;
- netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
- }
- if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) {
- netdev->hw_features |= NETIF_F_HW_CSUM;
+ if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
netdev->features |= NETIF_F_HW_CSUM;
- netdev->vlan_features |= NETIF_F_HW_CSUM;
- netdev->hw_enc_features |= NETIF_F_HW_CSUM;
- } else {
- netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ else
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- }
- if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) {
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps))
netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
- netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
- netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
- }
- if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
- netdev->hw_features |= NETIF_F_HW_TC;
+ if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps))
netdev->features |= NETIF_F_HW_TC;
- }
- if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
- netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->hw_features |= netdev->features;
+ if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
+ netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ netdev->vlan_features |= netdev->features &
+ ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_GRO_HW | NETIF_F_NTUPLE |
+ NETIF_F_HW_TC);
+
+ netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID;
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -3205,6 +3174,21 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
unsigned int order = hns3_page_order(ring);
struct page *p;
+ if (ring->page_pool) {
+ p = page_pool_dev_alloc_frag(ring->page_pool,
+ &cb->page_offset,
+ hns3_buf_size(ring));
+ if (unlikely(!p))
+ return -ENOMEM;
+
+ cb->priv = p;
+ cb->buf = page_address(p);
+ cb->dma = page_pool_get_dma_addr(p);
+ cb->type = DESC_TYPE_PP_FRAG;
+ cb->reuse_flag = 0;
+ return 0;
+ }
+
p = dev_alloc_pages(order);
if (!p)
return -ENOMEM;
@@ -3227,8 +3211,13 @@ static void hns3_free_buffer(struct hns3_enet_ring *ring,
if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD |
DESC_TYPE_BOUNCE_ALL | DESC_TYPE_SGL_SKB))
napi_consume_skb(cb->priv, budget);
- else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias)
- __page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
+ else if (!HNAE3_IS_TX_RING(ring)) {
+ if (cb->type & DESC_TYPE_PAGE && cb->pagecnt_bias)
+ __page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
+ else if (cb->type & DESC_TYPE_PP_FRAG)
+ page_pool_put_full_page(ring->page_pool, cb->priv,
+ false);
+ }
memset(cb, 0, sizeof(*cb));
}
@@ -3315,7 +3304,7 @@ static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring,
int ret;
ret = hns3_alloc_buffer(ring, cb);
- if (ret)
+ if (ret || ring->page_pool)
goto out;
ret = hns3_map_buffer(ring, cb);
@@ -3337,7 +3326,8 @@ static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
if (ret)
return ret;
- ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
+ ring->desc_cb[i].page_offset);
return 0;
}
@@ -3367,7 +3357,8 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
{
hns3_unmap_buffer(ring, &ring->desc_cb[i]);
ring->desc_cb[i] = *res_cb;
- ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
+ ring->desc_cb[i].page_offset);
ring->desc[i].rx.bd_base_info = 0;
}
@@ -3539,6 +3530,12 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
u32 frag_size = size - pull_len;
bool reused;
+ if (ring->page_pool) {
+ skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset,
+ frag_size, truesize);
+ return;
+ }
+
/* Avoid re-using remote or pfmem page */
if (unlikely(!dev_page_is_reusable(desc_cb->priv)))
goto out;
@@ -3856,6 +3853,9 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
/* We can reuse buffer as-is, just make sure it is reusable */
if (dev_page_is_reusable(desc_cb->priv))
desc_cb->reuse_flag = 1;
+ else if (desc_cb->type & DESC_TYPE_PP_FRAG)
+ page_pool_put_full_page(ring->page_pool, desc_cb->priv,
+ false);
else /* This page cannot be reused so discard it */
__page_frag_cache_drain(desc_cb->priv,
desc_cb->pagecnt_bias);
@@ -3863,6 +3863,10 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
hns3_rx_ring_move_fw(ring);
return 0;
}
+
+ if (ring->page_pool)
+ skb_mark_for_recycle(skb);
+
u64_stats_update_begin(&ring->syncp);
ring->stats.seg_pkt_cnt++;
u64_stats_update_end(&ring->syncp);
@@ -3901,6 +3905,10 @@ static int hns3_add_frag(struct hns3_enet_ring *ring)
"alloc rx fraglist skb fail\n");
return -ENXIO;
}
+
+ if (ring->page_pool)
+ skb_mark_for_recycle(new_skb);
+
ring->frag_num = 0;
if (ring->tail_skb) {
@@ -4434,9 +4442,7 @@ static void hns3_tx_dim_work(struct work_struct *work)
static void hns3_nic_init_dim(struct hns3_enet_tqp_vector *tqp_vector)
{
INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work);
- tqp_vector->rx_group.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work);
- tqp_vector->tx_group.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
@@ -4705,6 +4711,29 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv)
priv->ring = NULL;
}
+static void hns3_alloc_page_pool(struct hns3_enet_ring *ring)
+{
+ struct page_pool_params pp_params = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG |
+ PP_FLAG_DMA_SYNC_DEV,
+ .order = hns3_page_order(ring),
+ .pool_size = ring->desc_num * hns3_buf_size(ring) /
+ (PAGE_SIZE << hns3_page_order(ring)),
+ .nid = dev_to_node(ring_to_dev(ring)),
+ .dev = ring_to_dev(ring),
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = 0,
+ .max_len = PAGE_SIZE << hns3_page_order(ring),
+ };
+
+ ring->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(ring->page_pool)) {
+ dev_warn(ring_to_dev(ring), "page pool creation failed: %ld\n",
+ PTR_ERR(ring->page_pool));
+ ring->page_pool = NULL;
+ }
+}
+
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
int ret;
@@ -4724,6 +4753,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
goto out_with_desc_cb;
if (!HNAE3_IS_TX_RING(ring)) {
+ hns3_alloc_page_pool(ring);
+
ret = hns3_alloc_ring_buffers(ring);
if (ret)
goto out_with_desc;
@@ -4764,6 +4795,11 @@ void hns3_fini_ring(struct hns3_enet_ring *ring)
devm_kfree(ring_to_dev(ring), tx_spare);
ring->tx_spare = NULL;
}
+
+ if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) {
+ page_pool_destroy(ring->page_pool);
+ ring->page_pool = NULL;
+ }
}
static int hns3_buf_size2type(u32 buf_size)
@@ -4954,6 +4990,66 @@ static void hns3_info_show(struct hns3_nic_priv *priv)
dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu);
}
+static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
+ enum dim_cq_period_mode mode, bool is_tx)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_handle *handle = priv->ae_handle;
+ int i;
+
+ if (is_tx) {
+ priv->tx_cqe_mode = mode;
+
+ for (i = 0; i < priv->vector_num; i++)
+ priv->tqp_vector[i].tx_group.dim.mode = mode;
+ } else {
+ priv->rx_cqe_mode = mode;
+
+ for (i = 0; i < priv->vector_num; i++)
+ priv->tqp_vector[i].rx_group.dim.mode = mode;
+ }
+
+ /* only device version above V3(include V3), GL can switch CQ/EQ
+ * period mode.
+ */
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
+ u32 new_mode;
+ u64 reg;
+
+ new_mode = (mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) ?
+ HNS3_CQ_MODE_CQE : HNS3_CQ_MODE_EQE;
+ reg = is_tx ? HNS3_GL1_CQ_MODE_REG : HNS3_GL0_CQ_MODE_REG;
+
+ writel(new_mode, handle->kinfo.io_base + reg);
+ }
+}
+
+void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
+ enum dim_cq_period_mode tx_mode,
+ enum dim_cq_period_mode rx_mode)
+{
+ hns3_set_cq_period_mode(priv, tx_mode, true);
+ hns3_set_cq_period_mode(priv, rx_mode, false);
+}
+
+static void hns3_state_init(struct hnae3_handle *handle)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct net_device *netdev = handle->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+ set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
+
+ if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
+ set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);
+
+ if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev))
+ set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
+}
+
static int hns3_client_init(struct hnae3_handle *handle)
{
struct pci_dev *pdev = handle->pdev;
@@ -5021,6 +5117,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
goto out_init_ring;
}
+ hns3_cq_period_mode_init(priv, DIM_CQ_PERIOD_MODE_START_FROM_EQE,
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+
ret = hns3_init_phy(netdev);
if (ret)
goto out_init_phy;
@@ -5054,16 +5153,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size);
- if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
- set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);
-
- if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev))
- set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
-
- set_bit(HNS3_NIC_STATE_INITED, &priv->state);
-
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
- set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
+ hns3_state_init(handle);
ret = register_netdev(netdev);
if (ret) {
@@ -5353,6 +5443,8 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
if (ret)
goto err_uninit_vector;
+ hns3_cq_period_mode_init(priv, priv->tx_cqe_mode, priv->rx_cqe_mode);
+
/* the device can work without cpu rmap, only aRFS needs it */
ret = hns3_set_rx_cpu_rmap(netdev);
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 15af3d93857b..6162d9f88e37 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -6,6 +6,7 @@
#include <linux/dim.h>
#include <linux/if_vlan.h>
+#include <net/page_pool.h>
#include "hnae3.h"
@@ -201,6 +202,12 @@ enum hns3_nic_state {
#define HNS3_RING_EN_B 0
+#define HNS3_GL0_CQ_MODE_REG 0x20d00
+#define HNS3_GL1_CQ_MODE_REG 0x20d04
+#define HNS3_GL2_CQ_MODE_REG 0x20d08
+#define HNS3_CQ_MODE_EQE 1U
+#define HNS3_CQ_MODE_CQE 0U
+
enum hns3_pkt_l2t_type {
HNS3_L2_TYPE_UNICAST,
HNS3_L2_TYPE_MULTICAST,
@@ -307,6 +314,7 @@ enum hns3_desc_type {
DESC_TYPE_BOUNCE_ALL = 1 << 3,
DESC_TYPE_BOUNCE_HEAD = 1 << 4,
DESC_TYPE_SGL_SKB = 1 << 5,
+ DESC_TYPE_PP_FRAG = 1 << 6,
};
struct hns3_desc_cb {
@@ -340,7 +348,7 @@ enum hns3_pkt_l3type {
HNS3_L3_TYPE_LLDP,
HNS3_L3_TYPE_BPDU,
HNS3_L3_TYPE_MAC_PAUSE,
- HNS3_L3_TYPE_PFC_PAUSE,/* 0x9*/
+ HNS3_L3_TYPE_PFC_PAUSE, /* 0x9 */
/* reserved for 0xA~0xB */
@@ -384,11 +392,11 @@ enum hns3_pkt_ol4type {
};
struct hns3_rx_ptype {
- u32 ptype:8;
- u32 csum_level:2;
- u32 ip_summed:2;
- u32 l3_type:4;
- u32 valid:1;
+ u32 ptype : 8;
+ u32 csum_level : 2;
+ u32 ip_summed : 2;
+ u32 l3_type : 4;
+ u32 valid : 1;
};
struct ring_stats {
@@ -451,6 +459,7 @@ struct hns3_enet_ring {
struct hnae3_queue *tqp;
int queue_index;
struct device *dev; /* will be used for DMA mapping of descriptors */
+ struct page_pool *page_pool;
/* statistic */
struct ring_stats stats;
@@ -513,9 +522,9 @@ struct hns3_enet_coalesce {
u16 int_gl;
u16 int_ql;
u16 int_ql_max;
- u8 adapt_enable:1;
- u8 ql_enable:1;
- u8 unit_1us:1;
+ u8 adapt_enable : 1;
+ u8 ql_enable : 1;
+ u8 unit_1us : 1;
enum hns3_flow_level_range flow_level;
};
@@ -569,6 +578,8 @@ struct hns3_nic_priv {
unsigned long state;
+ enum dim_cq_period_mode tx_cqe_mode;
+ enum dim_cq_period_mode rx_cqe_mode;
struct hns3_enet_coalesce tx_coal;
struct hns3_enet_coalesce rx_coal;
u32 tx_copybreak;
@@ -593,6 +604,11 @@ struct hns3_hw_error_info {
const char *msg;
};
+struct hns3_reset_type_map {
+ enum ethtool_reset_flags rst_flags;
+ enum hnae3_reset_type rst_type;
+};
+
static inline int ring_space(struct hns3_enet_ring *ring)
{
/* This smp_load_acquire() pairs with smp_store_release() in
@@ -702,4 +718,7 @@ void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
void hns3_dbg_unregister_debugfs(void);
void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);
u16 hns3_get_max_available_channels(struct hnae3_handle *h);
+void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
+ enum dim_cq_period_mode tx_mode,
+ enum dim_cq_period_mode rx_mode);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 82061ab6930f..7ea511d59e91 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -7,21 +7,7 @@
#include <linux/sfp.h>
#include "hns3_enet.h"
-
-struct hns3_stats {
- char stats_string[ETH_GSTRING_LEN];
- int stats_offset;
-};
-
-struct hns3_sfp_type {
- u8 type;
- u8 ext_type;
-};
-
-struct hns3_pflag_desc {
- char name[ETH_GSTRING_LEN];
- void (*handler)(struct net_device *netdev, bool enable);
-};
+#include "hns3_ethtool.h"
/* tqp related stats */
#define HNS3_TQP_STAT(_string, _member) { \
@@ -312,33 +298,8 @@ out:
return ret_val;
}
-/**
- * hns3_self_test - self test
- * @ndev: net device
- * @eth_test: test cmd
- * @data: test result
- */
-static void hns3_self_test(struct net_device *ndev,
- struct ethtool_test *eth_test, u64 *data)
+static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
{
- struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = priv->ae_handle;
- int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
- bool if_running = netif_running(ndev);
- int test_index = 0;
- u32 i;
-
- if (hns3_nic_resetting(ndev)) {
- netdev_err(ndev, "dev resetting!");
- return;
- }
-
- /* Only do offline selftest, or pass by default */
- if (eth_test->flags != ETH_TEST_FL_OFFLINE)
- return;
-
- netif_dbg(h, drv, ndev, "self test start");
-
st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
st_param[HNAE3_LOOP_APP][1] =
h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
@@ -355,6 +316,18 @@ static void hns3_self_test(struct net_device *ndev,
st_param[HNAE3_LOOP_PHY][0] = HNAE3_LOOP_PHY;
st_param[HNAE3_LOOP_PHY][1] =
h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
+}
+
+static void hns3_selftest_prepare(struct net_device *ndev,
+ bool if_running, int (*st_param)[2])
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test start\n");
+
+ hns3_set_selftest_param(h, st_param);
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
@@ -373,6 +346,35 @@ static void hns3_self_test(struct net_device *ndev,
h->ae_algo->ops->halt_autoneg(h, true);
set_bit(HNS3_NIC_STATE_TESTING, &priv->state);
+}
+
+static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+
+ clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
+
+ if (h->ae_algo->ops->halt_autoneg)
+ h->ae_algo->ops->halt_autoneg(h, false);
+
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ if (h->ae_algo->ops->enable_vlan_filter)
+ h->ae_algo->ops->enable_vlan_filter(h, true);
+#endif
+
+ if (if_running)
+ ndev->netdev_ops->ndo_open(ndev);
+
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test end\n");
+}
+
+static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
+ struct ethtool_test *eth_test, u64 *data)
+{
+ int test_index = 0;
+ u32 i;
for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
@@ -391,21 +393,32 @@ static void hns3_self_test(struct net_device *ndev,
test_index++;
}
+}
- clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
-
- if (h->ae_algo->ops->halt_autoneg)
- h->ae_algo->ops->halt_autoneg(h, false);
+/**
+ * hns3_nic_self_test - self test
+ * @ndev: net device
+ * @eth_test: test cmd
+ * @data: test result
+ */
+static void hns3_self_test(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
+ bool if_running = netif_running(ndev);
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
- if (h->ae_algo->ops->enable_vlan_filter)
- h->ae_algo->ops->enable_vlan_filter(h, true);
-#endif
+ if (hns3_nic_resetting(ndev)) {
+ netdev_err(ndev, "dev resetting!");
+ return;
+ }
- if (if_running)
- ndev->netdev_ops->ndo_open(ndev);
+ /* Only do offline selftest, or pass by default */
+ if (eth_test->flags != ETH_TEST_FL_OFFLINE)
+ return;
- netif_dbg(h, drv, ndev, "self test end\n");
+ hns3_selftest_prepare(ndev, if_running, st_param);
+ hns3_do_selftest(ndev, st_param, eth_test, data);
+ hns3_selftest_restore(ndev, if_running);
}
static void hns3_update_limit_promisc_mode(struct net_device *netdev,
@@ -953,6 +966,60 @@ static int hns3_get_rxnfc(struct net_device *netdev,
}
}
+static const struct hns3_reset_type_map hns3_reset_type[] = {
+ {ETH_RESET_MGMT, HNAE3_IMP_RESET},
+ {ETH_RESET_ALL, HNAE3_GLOBAL_RESET},
+ {ETH_RESET_DEDICATED, HNAE3_FUNC_RESET},
+};
+
+static const struct hns3_reset_type_map hns3vf_reset_type[] = {
+ {ETH_RESET_DEDICATED, HNAE3_VF_FUNC_RESET},
+};
+
+static int hns3_set_reset(struct net_device *netdev, u32 *flags)
+{
+ enum hnae3_reset_type rst_type = HNAE3_NONE_RESET;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ const struct hns3_reset_type_map *rst_type_map;
+ u32 i, size;
+
+ if (ops->ae_dev_resetting && ops->ae_dev_resetting(h))
+ return -EBUSY;
+
+ if (!ops->set_default_reset_request || !ops->reset_event)
+ return -EOPNOTSUPP;
+
+ if (h->flags & HNAE3_SUPPORT_VF) {
+ rst_type_map = hns3vf_reset_type;
+ size = ARRAY_SIZE(hns3vf_reset_type);
+ } else {
+ rst_type_map = hns3_reset_type;
+ size = ARRAY_SIZE(hns3_reset_type);
+ }
+
+ for (i = 0; i < size; i++) {
+ if (rst_type_map[i].rst_flags == *flags) {
+ rst_type = rst_type_map[i].rst_type;
+ break;
+ }
+ }
+
+ if (rst_type == HNAE3_NONE_RESET ||
+ (rst_type == HNAE3_IMP_RESET &&
+ ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2))
+ return -EOPNOTSUPP;
+
+ netdev_info(netdev, "Setting reset type %d\n", rst_type);
+
+ ops->set_default_reset_request(ae_dev, rst_type);
+
+ ops->reset_event(h->pdev, h);
+
+ return 0;
+}
+
static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
u32 tx_desc_num, u32 rx_desc_num)
{
@@ -1139,7 +1206,9 @@ static void hns3_get_channels(struct net_device *netdev,
}
static int hns3_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *cmd)
+ struct ethtool_coalesce *cmd,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
@@ -1161,6 +1230,11 @@ static int hns3_get_coalesce(struct net_device *netdev,
cmd->tx_max_coalesced_frames = tx_coal->int_ql;
cmd->rx_max_coalesced_frames = rx_coal->int_ql;
+ kernel_coal->use_cqe_mode_tx = (priv->tx_cqe_mode ==
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE);
+ kernel_coal->use_cqe_mode_rx = (priv->rx_cqe_mode ==
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE);
+
return 0;
}
@@ -1321,13 +1395,17 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev,
}
static int hns3_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *cmd)
+ struct ethtool_coalesce *cmd,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
u16 queue_num = h->kinfo.num_tqps;
+ enum dim_cq_period_mode tx_mode;
+ enum dim_cq_period_mode rx_mode;
int ret;
int i;
@@ -1353,6 +1431,14 @@ static int hns3_set_coalesce(struct net_device *netdev,
for (i = 0; i < queue_num; i++)
hns3_set_coalesce_per_queue(netdev, cmd, i);
+ tx_mode = kernel_coal->use_cqe_mode_tx ?
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ rx_mode = kernel_coal->use_cqe_mode_rx ?
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ hns3_cq_period_mode_init(priv, tx_mode, rx_mode);
+
return 0;
}
@@ -1658,7 +1744,8 @@ static int hns3_set_tunable(struct net_device *netdev,
ETHTOOL_COALESCE_USE_ADAPTIVE | \
ETHTOOL_COALESCE_RX_USECS_HIGH | \
ETHTOOL_COALESCE_TX_USECS_HIGH | \
- ETHTOOL_COALESCE_MAX_FRAMES)
+ ETHTOOL_COALESCE_MAX_FRAMES | \
+ ETHTOOL_COALESCE_USE_CQE)
static int hns3_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *info)
@@ -1671,6 +1758,71 @@ static int hns3_get_ts_info(struct net_device *netdev,
return ethtool_op_get_ts_info(netdev, info);
}
+static const struct hns3_ethtool_link_ext_state_mapping
+hns3_link_ext_state_map[] = {
+ {1, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD},
+ {2, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED},
+
+ {256, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT},
+ {257, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY},
+ {512, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT},
+
+ {513, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK},
+ {514, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED},
+ {515, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED},
+
+ {768, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS},
+ {769, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST},
+ {770, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS},
+
+ {1024, ETHTOOL_LINK_EXT_STATE_NO_CABLE, 0},
+ {1025, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+
+ {1026, ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE, 0},
+};
+
+static int hns3_get_link_ext_state(struct net_device *netdev,
+ struct ethtool_link_ext_state_info *info)
+{
+ const struct hns3_ethtool_link_ext_state_mapping *map;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u32 status_code, i;
+ int ret;
+
+ if (netif_carrier_ok(netdev))
+ return -ENODATA;
+
+ if (!h->ae_algo->ops->get_link_diagnosis_info)
+ return -EOPNOTSUPP;
+
+ ret = h->ae_algo->ops->get_link_diagnosis_info(h, &status_code);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(hns3_link_ext_state_map); i++) {
+ map = &hns3_link_ext_state_map[i];
+ if (map->status_code == status_code) {
+ info->link_ext_state = map->link_ext_state;
+ info->__link_ext_substate = map->link_ext_substate;
+ return 0;
+ }
+ }
+
+ return -ENODATA;
+}
+
static const struct ethtool_ops hns3vf_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
.get_drvinfo = hns3_get_drvinfo,
@@ -1699,6 +1851,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.set_priv_flags = hns3_set_priv_flags,
.get_tunable = hns3_get_tunable,
.set_tunable = hns3_set_tunable,
+ .reset = hns3_set_reset,
};
static const struct ethtool_ops hns3_ethtool_ops = {
@@ -1740,6 +1893,8 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_ts_info = hns3_get_ts_info,
.get_tunable = hns3_get_tunable,
.set_tunable = hns3_set_tunable,
+ .reset = hns3_set_reset,
+ .get_link_ext_state = hns3_get_link_ext_state,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
new file mode 100644
index 000000000000..822d6fcbc73b
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021 Hisilicon Limited.
+
+#ifndef __HNS3_ETHTOOL_H
+#define __HNS3_ETHTOOL_H
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+struct hns3_stats {
+ char stats_string[ETH_GSTRING_LEN];
+ int stats_offset;
+};
+
+struct hns3_sfp_type {
+ u8 type;
+ u8 ext_type;
+};
+
+struct hns3_pflag_desc {
+ char name[ETH_GSTRING_LEN];
+ void (*handler)(struct net_device *netdev, bool enable);
+};
+
+struct hns3_ethtool_link_ext_state_mapping {
+ u32 status_code;
+ enum ethtool_link_ext_state link_ext_state;
+ u8 link_ext_substate;
+};
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index a685392dbfe9..d1bf5c4c0abb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -7,6 +7,6 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
ccflags-y += -I $(srctree)/$(src)
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge_ptp.o
+hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge_ptp.o hclge_devlink.o
hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index eb748aa35952..474c6d1664e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -169,17 +169,19 @@ static bool hclge_is_special_opcode(u16 opcode)
/* these commands have several descriptors,
* and use the first one to save opcode and return value
*/
- u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT,
- HCLGE_OPC_STATS_32_BIT,
- HCLGE_OPC_STATS_MAC,
- HCLGE_OPC_STATS_MAC_ALL,
- HCLGE_OPC_QUERY_32_BIT_REG,
- HCLGE_OPC_QUERY_64_BIT_REG,
- HCLGE_QUERY_CLEAR_MPF_RAS_INT,
- HCLGE_QUERY_CLEAR_PF_RAS_INT,
- HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
- HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
- HCLGE_QUERY_ALL_ERR_INFO};
+ static const u16 spec_opcode[] = {
+ HCLGE_OPC_STATS_64_BIT,
+ HCLGE_OPC_STATS_32_BIT,
+ HCLGE_OPC_STATS_MAC,
+ HCLGE_OPC_STATS_MAC_ALL,
+ HCLGE_OPC_QUERY_32_BIT_REG,
+ HCLGE_OPC_QUERY_64_BIT_REG,
+ HCLGE_QUERY_CLEAR_MPF_RAS_INT,
+ HCLGE_QUERY_CLEAR_PF_RAS_INT,
+ HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
+ HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
+ HCLGE_QUERY_ALL_ERR_INFO
+ };
int i;
for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
@@ -360,41 +362,34 @@ static void hclge_set_default_capability(struct hclge_dev *hdev)
}
}
+const struct hclge_caps_bit_map hclge_cmd_caps_bit_map0[] = {
+ {HCLGE_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
+ {HCLGE_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B},
+ {HCLGE_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
+ {HCLGE_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
+ {HCLGE_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
+ {HCLGE_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
+ {HCLGE_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B},
+ {HCLGE_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B},
+ {HCLGE_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B},
+ {HCLGE_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B},
+ {HCLGE_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B},
+ {HCLGE_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
+ {HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
+ {HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
+};
+
static void hclge_parse_capability(struct hclge_dev *hdev,
struct hclge_query_version_cmd *cmd)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- u32 caps;
+ u32 caps, i;
caps = __le32_to_cpu(cmd->caps[0]);
- if (hnae3_get_bit(caps, HCLGE_CAP_UDP_GSO_B))
- set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
- if (hnae3_get_bit(caps, HCLGE_CAP_PTP_B))
- set_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps);
- if (hnae3_get_bit(caps, HCLGE_CAP_INT_QL_B))
- set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
- if (hnae3_get_bit(caps, HCLGE_CAP_TQP_TXRX_INDEP_B))
- set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
- if (hnae3_get_bit(caps, HCLGE_CAP_HW_TX_CSUM_B))
- set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps);
- if (hnae3_get_bit(caps, HCLGE_CAP_UDP_TUNNEL_CSUM_B))
- set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
- if (hnae3_get_bit(caps, HCLGE_CAP_FD_FORWARD_TC_B))
- set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps);
- if (hnae3_get_bit(caps, HCLGE_CAP_FEC_B))
- set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
- if (hnae3_get_bit(caps, HCLGE_CAP_PAUSE_B))
- set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
- if (hnae3_get_bit(caps, HCLGE_CAP_PHY_IMP_B))
- set_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps);
- if (hnae3_get_bit(caps, HCLGE_CAP_RAS_IMP_B))
- set_bit(HNAE3_DEV_SUPPORT_RAS_IMP_B, ae_dev->caps);
- if (hnae3_get_bit(caps, HCLGE_CAP_RXD_ADV_LAYOUT_B))
- set_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, ae_dev->caps);
- if (hnae3_get_bit(caps, HCLGE_CAP_PORT_VLAN_BYPASS_B)) {
- set_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps);
- set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
- }
+ for (i = 0; i < ARRAY_SIZE(hclge_cmd_caps_bit_map0); i++)
+ if (hnae3_get_bit(caps, hclge_cmd_caps_bit_map0[i].imp_bit))
+ set_bit(hclge_cmd_caps_bit_map0[i].local_bit,
+ ae_dev->caps);
}
static __le32 hclge_build_api_caps(void)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index ac70d49e205d..33244472e0d0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -320,6 +320,9 @@ enum hclge_opcode_type {
/* PHY command */
HCLGE_OPC_PHY_LINK_KSETTING = 0x7025,
HCLGE_OPC_PHY_REG = 0x7026,
+
+ /* Query link diagnosis info command */
+ HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A,
};
#define HCLGE_TQP_REG_OFFSET 0x80000
@@ -450,7 +453,7 @@ struct hclge_tc_thrd {
};
struct hclge_priv_buf {
- struct hclge_waterline wl; /* Waterline for low and high*/
+ struct hclge_waterline wl; /* Waterline for low and high */
u32 buf_size; /* TC private buffer size */
u32 tx_buf_size;
u32 enable; /* Enable TC private buffer or not */
@@ -1014,16 +1017,6 @@ struct hclge_common_lb_cmd {
#define HCLGE_TYPE_CRQ 0
#define HCLGE_TYPE_CSQ 1
-#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000
-#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004
-#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008
-#define HCLGE_NIC_CSQ_TAIL_REG 0x27010
-#define HCLGE_NIC_CSQ_HEAD_REG 0x27014
-#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018
-#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701c
-#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020
-#define HCLGE_NIC_CRQ_TAIL_REG 0x27024
-#define HCLGE_NIC_CRQ_HEAD_REG 0x27028
/* this bit indicates that the driver is ready for hardware reset */
#define HCLGE_NIC_SW_RST_RDY_B 16
@@ -1198,6 +1191,19 @@ struct hclge_dev_specs_1_cmd {
u8 rsv1[18];
};
+/* mac speed type defined in firmware command */
+enum HCLGE_FIRMWARE_MAC_SPEED {
+ HCLGE_FW_MAC_SPEED_1G,
+ HCLGE_FW_MAC_SPEED_10G,
+ HCLGE_FW_MAC_SPEED_25G,
+ HCLGE_FW_MAC_SPEED_40G,
+ HCLGE_FW_MAC_SPEED_50G,
+ HCLGE_FW_MAC_SPEED_100G,
+ HCLGE_FW_MAC_SPEED_10M,
+ HCLGE_FW_MAC_SPEED_100M,
+ HCLGE_FW_MAC_SPEED_200G,
+};
+
#define HCLGE_PHY_LINK_SETTING_BD_NUM 2
struct hclge_phy_link_ksetting_0_cmd {
@@ -1228,6 +1234,12 @@ struct hclge_phy_reg_cmd {
u8 rsv1[18];
};
+/* capabilities bits map between imp firmware and local driver */
+struct hclge_caps_bit_map {
+ u16 imp_bit;
+ u16 local_bit;
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 39f56f245d84..4a619e5d3f35 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -104,26 +104,30 @@ static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
return 0;
}
-static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
- u8 *tc, bool *changed)
+static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets,
+ bool *changed)
{
- bool has_ets_tc = false;
- u32 total_ets_bw = 0;
- u8 max_tc = 0;
- int ret;
+ u8 max_tc_id = 0;
u8 i;
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
*changed = true;
- if (ets->prio_tc[i] > max_tc)
- max_tc = ets->prio_tc[i];
+ if (ets->prio_tc[i] > max_tc_id)
+ max_tc_id = ets->prio_tc[i];
}
- ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc);
- if (ret)
- return ret;
+ /* return max tc number, max tc id need to plus 1 */
+ return max_tc_id + 1;
+}
+
+static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
+ struct ieee_ets *ets, bool *changed)
+{
+ bool has_ets_tc = false;
+ u32 total_ets_bw = 0;
+ u8 i;
for (i = 0; i < hdev->tc_max; i++) {
switch (ets->tc_tsa[i]) {
@@ -148,7 +152,26 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
if (has_ets_tc && total_ets_bw != BW_PERCENT)
return -EINVAL;
- *tc = max_tc + 1;
+ return 0;
+}
+
+static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
+ u8 *tc, bool *changed)
+{
+ u8 tc_num;
+ int ret;
+
+ tc_num = hclge_ets_tc_changed(hdev, ets, changed);
+
+ ret = hclge_dcb_common_validate(hdev, tc_num, ets->prio_tc);
+ if (ret)
+ return ret;
+
+ ret = hclge_ets_sch_mode_validate(hdev, ets, changed);
+ if (ret)
+ return ret;
+
+ *tc = tc_num;
if (*tc != hdev->tm_info.num_tc)
*changed = true;
@@ -234,9 +257,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
if (ret)
goto err_out;
- ret = hclge_notify_init_up(hdev);
- if (ret)
- return ret;
+ return hclge_notify_init_up(hdev);
}
return hclge_tm_dwrr_cfg(hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 288788186ecc..68ed1715ac52 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -926,26 +926,45 @@ static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
return 0;
}
+static const struct hclge_dbg_item tm_pri_items[] = {
+ { "ID", 4 },
+ { "MODE", 2 },
+ { "DWRR", 2 },
+ { "C_IR_B", 2 },
+ { "C_IR_U", 2 },
+ { "C_IR_S", 2 },
+ { "C_BS_B", 2 },
+ { "C_BS_S", 2 },
+ { "C_FLAG", 2 },
+ { "C_RATE(Mbps)", 2 },
+ { "P_IR_B", 2 },
+ { "P_IR_U", 2 },
+ { "P_IR_S", 2 },
+ { "P_BS_B", 2 },
+ { "P_BS_S", 2 },
+ { "P_FLAG", 2 },
+ { "P_RATE(Mbps)", 0 }
+};
+
static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
{
- struct hclge_tm_shaper_para c_shaper_para;
- struct hclge_tm_shaper_para p_shaper_para;
- u8 pri_num, sch_mode, weight;
- char *sch_mode_str;
- int pos = 0;
- int ret;
- u8 i;
+ char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN];
+ struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
+ char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
+ char content[HCLGE_DBG_TM_INFO_LEN];
+ u8 pri_num, sch_mode, weight, i, j;
+ int pos, ret;
ret = hclge_tm_get_pri_num(hdev, &pri_num);
if (ret)
return ret;
- pos += scnprintf(buf + pos, len - pos,
- "ID MODE DWRR C_IR_B C_IR_U C_IR_S C_BS_B ");
- pos += scnprintf(buf + pos, len - pos,
- "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U ");
- pos += scnprintf(buf + pos, len - pos,
- "P_IR_S P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n");
+ for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
+ result[i] = &data_str[i][0];
+
+ hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
+ NULL, ARRAY_SIZE(tm_pri_items));
+ pos = scnprintf(buf, len, "%s", content);
for (i = 0; i < pri_num; i++) {
ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
@@ -971,21 +990,16 @@ static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
"sp";
- pos += scnprintf(buf + pos, len - pos,
- "%04u %4s %3u %3u %3u %3u ",
- i, sch_mode_str, weight, c_shaper_para.ir_b,
- c_shaper_para.ir_u, c_shaper_para.ir_s);
- pos += scnprintf(buf + pos, len - pos,
- "%3u %3u %1u %6u ",
- c_shaper_para.bs_b, c_shaper_para.bs_s,
- c_shaper_para.flag, c_shaper_para.rate);
- pos += scnprintf(buf + pos, len - pos,
- "%3u %3u %3u %3u %3u ",
- p_shaper_para.ir_b, p_shaper_para.ir_u,
- p_shaper_para.ir_s, p_shaper_para.bs_b,
- p_shaper_para.bs_s);
- pos += scnprintf(buf + pos, len - pos, "%1u %6u\n",
- p_shaper_para.flag, p_shaper_para.rate);
+ j = 0;
+ sprintf(result[j++], "%04u", i);
+ sprintf(result[j++], "%4s", sch_mode_str);
+ sprintf(result[j++], "%3u", weight);
+ hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
+ hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
+ hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
+ (const char **)result,
+ ARRAY_SIZE(tm_pri_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
}
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
new file mode 100644
index 000000000000..e4aad695abcc
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#include <net/devlink.h>
+
+#include "hclge_devlink.h"
+
+static int hclge_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+#define HCLGE_DEVLINK_FW_STRING_LEN 32
+ struct hclge_devlink_priv *priv = devlink_priv(devlink);
+ char version_str[HCLGE_DEVLINK_FW_STRING_LEN];
+ struct hclge_dev *hdev = priv->hdev;
+ int ret;
+
+ ret = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (ret)
+ return ret;
+
+ snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu",
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ version_str);
+}
+
+static int hclge_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct hclge_devlink_priv *priv = devlink_priv(devlink);
+ struct hclge_dev *hdev = priv->hdev;
+ struct hnae3_handle *h = &hdev->vport->nic;
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+ dev_err(&pdev->dev, "reset is handling\n");
+ return -EBUSY;
+ }
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ rtnl_lock();
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_DOWN_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
+ }
+
+ ret = hdev->nic_client->ops->reset_notify(h,
+ HNAE3_UNINIT_CLIENT);
+ rtnl_unlock();
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int hclge_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct hclge_devlink_priv *priv = devlink_priv(devlink);
+ struct hclge_dev *hdev = priv->hdev;
+ struct hnae3_handle *h = &hdev->vport->nic;
+ int ret;
+
+ *actions_performed = BIT(action);
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ rtnl_lock();
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_INIT_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
+ }
+
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_UP_CLIENT);
+ rtnl_unlock();
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct devlink_ops hclge_devlink_ops = {
+ .info_get = hclge_devlink_info_get,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ .reload_down = hclge_devlink_reload_down,
+ .reload_up = hclge_devlink_reload_up,
+};
+
+int hclge_devlink_init(struct hclge_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclge_devlink_priv *priv;
+ struct devlink *devlink;
+ int ret;
+
+ devlink = devlink_alloc(&hclge_devlink_ops,
+ sizeof(struct hclge_devlink_priv), &pdev->dev);
+ if (!devlink)
+ return -ENOMEM;
+
+ priv = devlink_priv(devlink);
+ priv->hdev = hdev;
+ hdev->devlink = devlink;
+
+ ret = devlink_register(devlink);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register devlink, ret = %d\n",
+ ret);
+ goto out_reg_fail;
+ }
+
+ devlink_reload_enable(devlink);
+
+ return 0;
+
+out_reg_fail:
+ devlink_free(devlink);
+ return ret;
+}
+
+void hclge_devlink_uninit(struct hclge_dev *hdev)
+{
+ struct devlink *devlink = hdev->devlink;
+
+ devlink_reload_disable(devlink);
+
+ devlink_unregister(devlink);
+
+ devlink_free(devlink);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h
new file mode 100644
index 000000000000..918be04507a5
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#ifndef __HCLGE_DEVLINK_H
+#define __HCLGE_DEVLINK_H
+
+#include "hclge_main.h"
+
+struct hclge_devlink_priv {
+ struct hclge_dev *hdev;
+};
+
+int hclge_devlink_init(struct hclge_dev *hdev);
+void hclge_devlink_uninit(struct hclge_dev *hdev);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index ec9a7f8bc3fe..718c16d686fa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -4,468 +4,895 @@
#include "hclge_err.h"
static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
- { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(1),
+ .msg = "imp_itcm0_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "imp_itcm1_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "imp_itcm2_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "imp_itcm3_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "imp_dtcm0_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "imp_dtcm0_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "imp_dtcm1_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "imp_dtcm1_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "imp_itcm4_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
- { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(1),
+ .msg = "cmdq_nic_rx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "cmdq_nic_tx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "cmdq_nic_rx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "cmdq_nic_tx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "cmdq_nic_rx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "cmdq_nic_tx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "cmdq_nic_rx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "cmdq_nic_tx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "cmdq_rocee_rx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(19),
+ .msg = "cmdq_rocee_tx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(21),
+ .msg = "cmdq_rocee_rx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(23),
+ .msg = "cmdq_rocee_tx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(25),
+ .msg = "cmdq_rocee_rx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(27),
+ .msg = "cmdq_rocee_tx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(29),
+ .msg = "cmdq_rocee_rx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(31),
+ .msg = "cmdq_rocee_tx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
- { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(6),
+ .msg = "tqp_int_cfg_even_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "tqp_int_cfg_odd_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "tqp_int_ctrl_even_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "tqp_int_ctrl_odd_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "tx_que_scan_int_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "rx_que_scan_int_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
- { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(1),
+ .msg = "msix_nic_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "msix_rocee_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_igu_int[] = {
- { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "igu_rx_buf0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "igu_rx_buf1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
- { .int_msk = BIT(0), .msg = "rx_buf_overflow",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "tx_buf_overflow",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(4), .msg = "tx_buf_underrun",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "rx_buf_overflow",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "rx_stp_fifo_overflow",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "rx_stp_fifo_underflow",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "tx_buf_overflow",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "tx_buf_underrun",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "rx_stp_buf_overflow",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ncsi_err_int[] = {
- { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err",
- .reset_level = HNAE3_NONE_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(1),
+ .msg = "ncsi_tx_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
- { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "vf_vlan_ad_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "umv_mcast_group_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "umv_key_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "umv_key_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "umv_key_mem2_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "umv_key_mem3_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "umv_ad_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "rss_tc_mode_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "rss_idt_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "rss_idt_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "rss_idt_mem2_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "rss_idt_mem3_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(12),
+ .msg = "rss_idt_mem4_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "rss_idt_mem5_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(14),
+ .msg = "rss_idt_mem6_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "rss_idt_mem7_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(16),
+ .msg = "rss_idt_mem8_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "rss_idt_mem9_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(18),
+ .msg = "rss_idt_mem10_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(19),
+ .msg = "rss_idt_mem11_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(20),
+ .msg = "rss_idt_mem12_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(21),
+ .msg = "rss_idt_mem13_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(22),
+ .msg = "rss_idt_mem14_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(23),
+ .msg = "rss_idt_mem15_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(24),
+ .msg = "port_vlan_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(25),
+ .msg = "mcast_linear_table_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(26),
+ .msg = "mcast_result_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(27),
+ .msg = "flow_director_ad_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(28),
+ .msg = "flow_director_ad_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(29),
+ .msg = "rx_vlan_tag_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(30),
+ .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = {
- { .int_msk = BIT(0), .msg = "tx_vlan_tag_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err",
- .reset_level = HNAE3_NONE_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "tx_vlan_tag_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "rss_list_tc_unassigned_queue_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = {
- { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "hfs_fifo_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "rslt_descr_fifo_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "tx_vlan_tag_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "FD_CN0_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "FD_CN1_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "GRO_AD_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_tm_sch_rint[] = {
- { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(1),
+ .msg = "tm_sch_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "tm_sch_port_shap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "tm_sch_port_shap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "tm_sch_pg_pshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "tm_sch_pg_pshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "tm_sch_pg_cshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "tm_sch_pg_cshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "tm_sch_pri_pshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "tm_sch_pri_pshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "tm_sch_pri_cshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "tm_sch_pri_cshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(12),
+ .msg = "tm_sch_port_shap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "tm_sch_port_shap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(14),
+ .msg = "tm_sch_pg_pshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "tm_sch_pg_pshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(16),
+ .msg = "tm_sch_pg_cshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "tm_sch_pg_cshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(18),
+ .msg = "tm_sch_pri_pshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(19),
+ .msg = "tm_sch_pri_pshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(20),
+ .msg = "tm_sch_pri_cshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(21),
+ .msg = "tm_sch_pri_cshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(22),
+ .msg = "tm_sch_rq_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(23),
+ .msg = "tm_sch_rq_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(24),
+ .msg = "tm_sch_nq_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(25),
+ .msg = "tm_sch_nq_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(26),
+ .msg = "tm_sch_roce_up_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(27),
+ .msg = "tm_sch_roce_up_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(28),
+ .msg = "tm_sch_rcb_byte_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(29),
+ .msg = "tm_sch_rcb_byte_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(30),
+ .msg = "tm_sch_ssu_byte_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(31),
+ .msg = "tm_sch_ssu_byte_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_qcn_fifo_rint[] = {
- { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "qcn_shap_gp0_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "qcn_shap_gp0_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "qcn_shap_gp1_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "qcn_shap_gp1_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "qcn_shap_gp2_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "qcn_shap_gp2_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "qcn_shap_gp3_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "qcn_shap_gp3_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "qcn_shap_gp0_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "qcn_shap_gp0_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "qcn_shap_gp1_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "qcn_shap_gp1_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(12),
+ .msg = "qcn_shap_gp2_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "qcn_shap_gp2_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(14),
+ .msg = "qcn_shap_gp3_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "qcn_shap_gp3_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(16),
+ .msg = "qcn_byte_info_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "qcn_byte_info_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_qcn_ecc_rint[] = {
- { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(1),
+ .msg = "qcn_byte_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "qcn_time_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "qcn_fb_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "qcn_link_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "qcn_rate_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "qcn_tmplt_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "qcn_shap_cfg_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "qcn_gp0_barrel_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "qcn_gp1_barrel_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(19),
+ .msg = "qcn_gp2_barrel_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(21),
+ .msg = "qcn_gp3_barral_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
- { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "egu_cge_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "egu_cge_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "egu_lge_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "egu_lge_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "cge_igu_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "cge_igu_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "lge_igu_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "lge_igu_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "cge_igu_afifo_overflow_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "lge_igu_afifo_overflow_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "egu_cge_afifo_underrun_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "egu_lge_afifo_underrun_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(12),
+ .msg = "egu_ge_afifo_underrun_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "ge_igu_afifo_overflow_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
- { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(26), .msg = "rd_bus_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(27), .msg = "wr_bus_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(28), .msg = "reg_search_miss",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(29), .msg = "rx_q_search_miss",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(13),
+ .msg = "rpu_rx_pkt_bit32_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(14),
+ .msg = "rpu_rx_pkt_bit33_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "rpu_rx_pkt_bit34_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(16),
+ .msg = "rpu_rx_pkt_bit35_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "rcb_tx_ring_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(18),
+ .msg = "rcb_rx_ring_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(19),
+ .msg = "rcb_tx_fbd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(20),
+ .msg = "rcb_rx_ebd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(21),
+ .msg = "rcb_tso_info_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(22),
+ .msg = "rcb_tx_int_info_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(23),
+ .msg = "rcb_rx_int_info_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(24),
+ .msg = "tpu_tx_pkt_0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(25),
+ .msg = "tpu_tx_pkt_1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(26),
+ .msg = "rd_bus_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(27),
+ .msg = "wr_bus_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(28),
+ .msg = "reg_search_miss",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(29),
+ .msg = "rx_q_search_miss",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(30),
+ .msg = "ooo_ecc_err_detect",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(31),
+ .msg = "ooo_ecc_err_multpl",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
- { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(4),
+ .msg = "gro_bd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "gro_context_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "rx_stash_cfg_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "axi_rd_fbd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = {
- { .int_msk = BIT(0), .msg = "over_8bd_no_fe",
- .reset_level = HNAE3_FUNC_RESET },
- { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison",
- .reset_level = HNAE3_FUNC_RESET },
- { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison",
- .reset_level = HNAE3_FUNC_RESET },
- { .int_msk = BIT(5), .msg = "buf_wait_timeout",
- .reset_level = HNAE3_NONE_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "over_8bd_no_fe",
+ .reset_level = HNAE3_FUNC_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "tso_mss_cmp_min_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "tso_mss_cmp_max_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "tx_rd_fbd_poison",
+ .reset_level = HNAE3_FUNC_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "rx_rd_ebd_poison",
+ .reset_level = HNAE3_FUNC_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "buf_wait_timeout",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
- { .int_msk = BIT(0), .msg = "buf_sum_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(1), .msg = "ppp_mb_num_err",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(2), .msg = "ppp_mbid_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(4), .msg = "ppp_rlt_host_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "cks_edit_position_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(6), .msg = "cks_edit_condition_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(7), .msg = "vlan_edit_condition_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(8), .msg = "vlan_num_ot_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(9), .msg = "vlan_num_in_err",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "buf_sum_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "ppp_mb_num_err",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "ppp_mbid_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "ppp_rlt_mac_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "ppp_rlt_host_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "cks_edit_position_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "cks_edit_condition_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "vlan_edit_condition_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "vlan_num_ot_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "vlan_num_in_err",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
#define HCLGE_SSU_MEM_ECC_ERR(x) \
- { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err", \
- .reset_level = HNAE3_GLOBAL_RESET }
+{ \
+ .int_msk = BIT(x), \
+ .msg = "ssu_mem" #x "_ecc_mbit_err", \
+ .reset_level = HNAE3_GLOBAL_RESET \
+}
static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
HCLGE_SSU_MEM_ECC_ERR(0),
@@ -504,131 +931,269 @@ static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
};
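The reworked HCLGE_SSU_MEM_ECC_ERR(x) macro generates one table entry per SSU memory index, building the message string by stringifying the index. For example, expanding it for index 0 gives:

/*
 * HCLGE_SSU_MEM_ECC_ERR(0) expands to the entry below
 * (the "0" is stringified into the message text):
 */
{
	.int_msk = BIT(0),
	.msg = "ssu_mem0_ecc_mbit_err",
	.reset_level = HNAE3_GLOBAL_RESET
},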
static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
- { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
- .reset_level = HNAE3_FUNC_RESET },
- { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "roc_pkt_without_key_port",
+ .reset_level = HNAE3_FUNC_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "tpu_pkt_without_key_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "igu_pkt_without_key_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "roc_eof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "tpu_eof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "igu_eof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "roc_sof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "tpu_sof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "igu_sof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "ets_rd_int_rx_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(12),
+ .msg = "ets_wr_int_rx_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "ets_rd_int_tx_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(14),
+ .msg = "ets_wr_int_tx_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = {
- { .int_msk = BIT(0), .msg = "ig_mac_inf_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(1), .msg = "ig_host_inf_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "ig_roc_buf_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(9), .msg = "qm_eof_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(15), .msg = "host_cmd_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "ig_mac_inf_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "ig_host_inf_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "ig_roc_buf_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "ig_host_data_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(4),
+ .msg = "ig_host_key_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(5),
+ .msg = "tx_qcn_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(6),
+ .msg = "rx_qcn_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(7),
+ .msg = "tx_pf_rd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(8),
+ .msg = "rx_pf_rd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "qm_eof_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "mb_rlt_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(11),
+ .msg = "dup_uncopy_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(12),
+ .msg = "dup_cnt_rd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(13),
+ .msg = "dup_cnt_drop_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(14),
+ .msg = "dup_cnt_wrb_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(15),
+ .msg = "host_cmd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(16),
+ .msg = "mac_cmd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(17),
+ .msg = "host_cmd_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(18),
+ .msg = "mac_cmd_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(19),
+ .msg = "dup_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(20),
+ .msg = "out_queue_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(21),
+ .msg = "bank2_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(22),
+ .msg = "bank1_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(23),
+ .msg = "bank0_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
- { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg",
- .reset_level = HNAE3_GLOBAL_RESET },
- { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "ets_rd_int_rx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(1),
+ .msg = "ets_wr_int_rx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(2),
+ .msg = "ets_rd_int_tx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ .int_msk = BIT(3),
+ .msg = "ets_wr_int_tx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
- { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
- .reset_level = HNAE3_FUNC_RESET },
- { .int_msk = BIT(9), .msg = "low_water_line_err_port",
- .reset_level = HNAE3_NONE_RESET },
- { .int_msk = BIT(10), .msg = "hi_water_line_err_port",
- .reset_level = HNAE3_GLOBAL_RESET },
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "roc_pkt_without_key_port",
+ .reset_level = HNAE3_FUNC_RESET
+ }, {
+ .int_msk = BIT(9),
+ .msg = "low_water_line_err_port",
+ .reset_level = HNAE3_NONE_RESET
+ }, {
+ .int_msk = BIT(10),
+ .msg = "hi_water_line_err_port",
+ .reset_level = HNAE3_GLOBAL_RESET
+ }, {
+ /* sentinel */
+ }
};
static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
- { .int_msk = 0, .msg = "rocee qmm ovf: sgid invalid err" },
- { .int_msk = 0x4, .msg = "rocee qmm ovf: sgid ovf err" },
- { .int_msk = 0x8, .msg = "rocee qmm ovf: smac invalid err" },
- { .int_msk = 0xC, .msg = "rocee qmm ovf: smac ovf err" },
- { .int_msk = 0x10, .msg = "rocee qmm ovf: cqc invalid err" },
- { .int_msk = 0x11, .msg = "rocee qmm ovf: cqc ovf err" },
- { .int_msk = 0x12, .msg = "rocee qmm ovf: cqc hopnum err" },
- { .int_msk = 0x13, .msg = "rocee qmm ovf: cqc ba0 err" },
- { .int_msk = 0x14, .msg = "rocee qmm ovf: srqc invalid err" },
- { .int_msk = 0x15, .msg = "rocee qmm ovf: srqc ovf err" },
- { .int_msk = 0x16, .msg = "rocee qmm ovf: srqc hopnum err" },
- { .int_msk = 0x17, .msg = "rocee qmm ovf: srqc ba0 err" },
- { .int_msk = 0x18, .msg = "rocee qmm ovf: mpt invalid err" },
- { .int_msk = 0x19, .msg = "rocee qmm ovf: mpt ovf err" },
- { .int_msk = 0x1A, .msg = "rocee qmm ovf: mpt hopnum err" },
- { .int_msk = 0x1B, .msg = "rocee qmm ovf: mpt ba0 err" },
- { .int_msk = 0x1C, .msg = "rocee qmm ovf: qpc invalid err" },
- { .int_msk = 0x1D, .msg = "rocee qmm ovf: qpc ovf err" },
- { .int_msk = 0x1E, .msg = "rocee qmm ovf: qpc hopnum err" },
- { .int_msk = 0x1F, .msg = "rocee qmm ovf: qpc ba0 err" },
- { /* sentinel */ }
+ {
+ .int_msk = 0,
+ .msg = "rocee qmm ovf: sgid invalid err"
+ }, {
+ .int_msk = 0x4,
+ .msg = "rocee qmm ovf: sgid ovf err"
+ }, {
+ .int_msk = 0x8,
+ .msg = "rocee qmm ovf: smac invalid err"
+ }, {
+ .int_msk = 0xC,
+ .msg = "rocee qmm ovf: smac ovf err"
+ }, {
+ .int_msk = 0x10,
+ .msg = "rocee qmm ovf: cqc invalid err"
+ }, {
+ .int_msk = 0x11,
+ .msg = "rocee qmm ovf: cqc ovf err"
+ }, {
+ .int_msk = 0x12,
+ .msg = "rocee qmm ovf: cqc hopnum err"
+ }, {
+ .int_msk = 0x13,
+ .msg = "rocee qmm ovf: cqc ba0 err"
+ }, {
+ .int_msk = 0x14,
+ .msg = "rocee qmm ovf: srqc invalid err"
+ }, {
+ .int_msk = 0x15,
+ .msg = "rocee qmm ovf: srqc ovf err"
+ }, {
+ .int_msk = 0x16,
+ .msg = "rocee qmm ovf: srqc hopnum err"
+ }, {
+ .int_msk = 0x17,
+ .msg = "rocee qmm ovf: srqc ba0 err"
+ }, {
+ .int_msk = 0x18,
+ .msg = "rocee qmm ovf: mpt invalid err"
+ }, {
+ .int_msk = 0x19,
+ .msg = "rocee qmm ovf: mpt ovf err"
+ }, {
+ .int_msk = 0x1A,
+ .msg = "rocee qmm ovf: mpt hopnum err"
+ }, {
+ .int_msk = 0x1B,
+ .msg = "rocee qmm ovf: mpt ba0 err"
+ }, {
+ .int_msk = 0x1C,
+ .msg = "rocee qmm ovf: qpc invalid err"
+ }, {
+ .int_msk = 0x1D,
+ .msg = "rocee qmm ovf: qpc ovf err"
+ }, {
+ .int_msk = 0x1E,
+ .msg = "rocee qmm ovf: qpc hopnum err"
+ }, {
+ .int_msk = 0x1F,
+ .msg = "rocee qmm ovf: qpc ba0 err"
+ }, {
+ /* sentinel */
+ }
};
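Unlike the BIT()-mask tables above, these ROCEE QMM entries carry raw event codes (0x0 through 0x1F) and no reset level, so a consumer would presumably match them by value rather than by mask. A hedged sketch of such a lookup (illustrative only; this helper is not part of the patch):

static const char *rocee_qmm_ovf_msg(u32 err_code)
{
	const struct hclge_hw_error *err = hclge_rocee_qmm_ovf_err_int;

	for (; err->msg; err++)
		if (err->int_msk == err_code)
			return err->msg;

	return "unknown rocee qmm ovf err";
}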
static const struct hclge_hw_module_id hclge_hw_module_id_st[] = {
@@ -1709,34 +2274,36 @@ static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
static const struct hclge_hw_blk hw_blk[] = {
{
- .msk = BIT(0), .name = "IGU_EGU",
- .config_err_int = hclge_config_igu_egu_hw_err_int,
- },
- {
- .msk = BIT(1), .name = "PPP",
- .config_err_int = hclge_config_ppp_hw_err_int,
- },
- {
- .msk = BIT(2), .name = "SSU",
- .config_err_int = hclge_config_ssu_hw_err_int,
- },
- {
- .msk = BIT(3), .name = "PPU",
- .config_err_int = hclge_config_ppu_hw_err_int,
- },
- {
- .msk = BIT(4), .name = "TM",
- .config_err_int = hclge_config_tm_hw_err_int,
- },
- {
- .msk = BIT(5), .name = "COMMON",
- .config_err_int = hclge_config_common_hw_err_int,
- },
- {
- .msk = BIT(8), .name = "MAC",
- .config_err_int = hclge_config_mac_err_int,
- },
- { /* sentinel */ }
+ .msk = BIT(0),
+ .name = "IGU_EGU",
+ .config_err_int = hclge_config_igu_egu_hw_err_int,
+ }, {
+ .msk = BIT(1),
+ .name = "PPP",
+ .config_err_int = hclge_config_ppp_hw_err_int,
+ }, {
+ .msk = BIT(2),
+ .name = "SSU",
+ .config_err_int = hclge_config_ssu_hw_err_int,
+ }, {
+ .msk = BIT(3),
+ .name = "PPU",
+ .config_err_int = hclge_config_ppu_hw_err_int,
+ }, {
+ .msk = BIT(4),
+ .name = "TM",
+ .config_err_int = hclge_config_tm_hw_err_int,
+ }, {
+ .msk = BIT(5),
+ .name = "COMMON",
+ .config_err_int = hclge_config_common_hw_err_int,
+ }, {
+ .msk = BIT(8),
+ .name = "MAC",
+ .config_err_int = hclge_config_mac_err_int,
+ }, {
+ /* sentinel */
+ }
};
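The hw_blk[] table pairs each hardware block's mask and name with its error-interrupt configuration callback, again ending in a sentinel entry. A minimal sketch of how the per-block callbacks might be applied, assuming the callbacks take (hdev, enable) as the initializers above suggest (the wrapper name is illustrative):

/* Illustrative only: apply each block's callback until one fails. */
static int config_all_blk_err_int(struct hclge_dev *hdev, bool en)
{
	const struct hclge_hw_blk *module = hw_blk;
	int ret = 0;

	while (module->name) {
		if (module->config_err_int) {
			ret = module->config_err_int(hdev, en);
			if (ret)
				break;
		}
		module++;
	}

	return ret;
}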
static void hclge_config_all_msix_error(struct hclge_dev *hdev, bool enable)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 03ae122f1c9a..e55ba2e511b1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -23,6 +23,7 @@
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
+#include "hclge_devlink.h"
#define HCLGE_NAME "hclge"
#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
@@ -91,23 +92,23 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
-static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
- HCLGE_CMDQ_TX_ADDR_H_REG,
- HCLGE_CMDQ_TX_DEPTH_REG,
- HCLGE_CMDQ_TX_TAIL_REG,
- HCLGE_CMDQ_TX_HEAD_REG,
- HCLGE_CMDQ_RX_ADDR_L_REG,
- HCLGE_CMDQ_RX_ADDR_H_REG,
- HCLGE_CMDQ_RX_DEPTH_REG,
- HCLGE_CMDQ_RX_TAIL_REG,
- HCLGE_CMDQ_RX_HEAD_REG,
+static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
+ HCLGE_NIC_CSQ_BASEADDR_H_REG,
+ HCLGE_NIC_CSQ_DEPTH_REG,
+ HCLGE_NIC_CSQ_TAIL_REG,
+ HCLGE_NIC_CSQ_HEAD_REG,
+ HCLGE_NIC_CRQ_BASEADDR_L_REG,
+ HCLGE_NIC_CRQ_BASEADDR_H_REG,
+ HCLGE_NIC_CRQ_DEPTH_REG,
+ HCLGE_NIC_CRQ_TAIL_REG,
+ HCLGE_NIC_CRQ_HEAD_REG,
HCLGE_VECTOR0_CMDQ_SRC_REG,
HCLGE_CMDQ_INTR_STS_REG,
HCLGE_CMDQ_INTR_EN_REG,
HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
- HCLGE_VECTOR0_OTER_EN_REG,
+ HCLGE_PF_OTHER_INT_REG,
HCLGE_MISC_RESET_STS_REG,
HCLGE_MISC_VECTOR_INT_STS,
HCLGE_GLOBAL_RESET_REG,
@@ -374,14 +375,14 @@ static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
};
static const struct key_info meta_data_key_info[] = {
- { PACKET_TYPE_ID, 6},
- { IP_FRAGEMENT, 1},
- { ROCE_TYPE, 1},
- { NEXT_KEY, 5},
- { VLAN_NUMBER, 2},
- { SRC_VPORT, 12},
- { DST_VPORT, 12},
- { TUNNEL_PACKET, 1},
+ { PACKET_TYPE_ID, 6 },
+ { IP_FRAGEMENT, 1 },
+ { ROCE_TYPE, 1 },
+ { NEXT_KEY, 5 },
+ { VLAN_NUMBER, 2 },
+ { SRC_VPORT, 12 },
+ { DST_VPORT, 12 },
+ { TUNNEL_PACKET, 1 },
};
static const struct key_info tuple_key_info[] = {
@@ -748,9 +749,9 @@ static void hclge_update_stats(struct hnae3_handle *handle,
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
-#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
- HNAE3_SUPPORT_PHY_LOOPBACK |\
- HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
+#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
+ HNAE3_SUPPORT_PHY_LOOPBACK | \
+ HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -958,31 +959,31 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
switch (speed_cmd) {
- case 6:
+ case HCLGE_FW_MAC_SPEED_10M:
*speed = HCLGE_MAC_SPEED_10M;
break;
- case 7:
+ case HCLGE_FW_MAC_SPEED_100M:
*speed = HCLGE_MAC_SPEED_100M;
break;
- case 0:
+ case HCLGE_FW_MAC_SPEED_1G:
*speed = HCLGE_MAC_SPEED_1G;
break;
- case 1:
+ case HCLGE_FW_MAC_SPEED_10G:
*speed = HCLGE_MAC_SPEED_10G;
break;
- case 2:
+ case HCLGE_FW_MAC_SPEED_25G:
*speed = HCLGE_MAC_SPEED_25G;
break;
- case 3:
+ case HCLGE_FW_MAC_SPEED_40G:
*speed = HCLGE_MAC_SPEED_40G;
break;
- case 4:
+ case HCLGE_FW_MAC_SPEED_50G:
*speed = HCLGE_MAC_SPEED_50G;
break;
- case 5:
+ case HCLGE_FW_MAC_SPEED_100G:
*speed = HCLGE_MAC_SPEED_100G;
break;
- case 8:
+ case HCLGE_FW_MAC_SPEED_200G:
*speed = HCLGE_MAC_SPEED_200G;
break;
default:
@@ -992,44 +993,43 @@ static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
return 0;
}
+static const struct hclge_speed_bit_map speed_bit_map[] = {
+ {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
+ {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
+ {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
+ {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
+ {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
+ {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
+ {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
+ {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
+ {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
+};
+
+static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
+{
+ u16 i;
+
+ for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
+ if (speed == speed_bit_map[i].speed) {
+ *speed_bit = speed_bit_map[i].speed_bit;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
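With the old switch replaced by the speed_bit_map[] lookup, a caller translates a link speed into its capability bit in one step, as hclge_check_port_speed below now does. A minimal usage sketch, assuming the caller already holds a valid hdev:

u32 speed_bit;

/* Illustrative only: test whether the MAC advertises 25G. */
if (!hclge_get_speed_bit(HCLGE_MAC_SPEED_25G, &speed_bit) &&
    (speed_bit & hdev->hw.mac.speed_ability))
	dev_info(&hdev->pdev->dev, "25G is supported\n");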
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u32 speed_ability = hdev->hw.mac.speed_ability;
u32 speed_bit = 0;
+ int ret;
- switch (speed) {
- case HCLGE_MAC_SPEED_10M:
- speed_bit = HCLGE_SUPPORT_10M_BIT;
- break;
- case HCLGE_MAC_SPEED_100M:
- speed_bit = HCLGE_SUPPORT_100M_BIT;
- break;
- case HCLGE_MAC_SPEED_1G:
- speed_bit = HCLGE_SUPPORT_1G_BIT;
- break;
- case HCLGE_MAC_SPEED_10G:
- speed_bit = HCLGE_SUPPORT_10G_BIT;
- break;
- case HCLGE_MAC_SPEED_25G:
- speed_bit = HCLGE_SUPPORT_25G_BIT;
- break;
- case HCLGE_MAC_SPEED_40G:
- speed_bit = HCLGE_SUPPORT_40G_BIT;
- break;
- case HCLGE_MAC_SPEED_50G:
- speed_bit = HCLGE_SUPPORT_50G_BIT;
- break;
- case HCLGE_MAC_SPEED_100G:
- speed_bit = HCLGE_SUPPORT_100G_BIT;
- break;
- case HCLGE_MAC_SPEED_200G:
- speed_bit = HCLGE_SUPPORT_200G_BIT;
- break;
- default:
- return -EINVAL;
- }
+ ret = hclge_get_speed_bit(speed, &speed_bit);
+ if (ret)
+ return ret;
if (speed_bit & speed_ability)
return 0;
@@ -1814,6 +1814,7 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
nic->pdev = hdev->pdev;
nic->ae_algo = &ae_algo;
nic->numa_node_mask = hdev->numa_node_mask;
+ nic->kinfo.io_base = hdev->hw.io_base;
ret = hclge_knic_setup(vport, num_tqps,
hdev->num_tx_desc, hdev->num_rx_desc);
@@ -2580,39 +2581,39 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
switch (speed) {
case HCLGE_MAC_SPEED_10M:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 6);
+ HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
break;
case HCLGE_MAC_SPEED_100M:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 7);
+ HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
break;
case HCLGE_MAC_SPEED_1G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 0);
+ HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
break;
case HCLGE_MAC_SPEED_10G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 1);
+ HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
break;
case HCLGE_MAC_SPEED_25G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 2);
+ HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
break;
case HCLGE_MAC_SPEED_40G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 3);
+ HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
break;
case HCLGE_MAC_SPEED_50G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 4);
+ HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
break;
case HCLGE_MAC_SPEED_100G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 5);
+ HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
break;
case HCLGE_MAC_SPEED_200G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 8);
+ HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
break;
default:
dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
@@ -3420,7 +3421,7 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
hclge_enable_vector(&hdev->misc_vector, false);
event_cause = hclge_check_event_cause(hdev, &clearval);
- /* vector 0 interrupt is shared with reset and mailbox source events.*/
+ /* vector 0 interrupt is shared with reset and mailbox source events. */
switch (event_cause) {
case HCLGE_VECTOR0_EVENT_ERR:
hclge_errhand_task_schedule(hdev);
@@ -3789,6 +3790,12 @@ static void hclge_do_reset(struct hclge_dev *hdev)
}
switch (hdev->reset_type) {
+ case HNAE3_IMP_RESET:
+ dev_info(&pdev->dev, "IMP reset requested\n");
+ val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+ hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
+ break;
case HNAE3_GLOBAL_RESET:
dev_info(&pdev->dev, "global reset requested\n");
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
@@ -5937,7 +5944,7 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
cur_key_x = key_x;
cur_key_y = key_y;
- for (i = 0 ; i < MAX_TUPLE; i++) {
+ for (i = 0; i < MAX_TUPLE; i++) {
bool tuple_valid;
tuple_size = tuple_key_info[i].key_length / 8;
@@ -11509,10 +11516,14 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto out;
+ ret = hclge_devlink_init(hdev);
+ if (ret)
+ goto err_pci_uninit;
+
/* Firmware command queue initialize */
ret = hclge_cmd_queue_init(hdev);
if (ret)
- goto err_pci_uninit;
+ goto err_devlink_uninit;
/* Firmware command initialize */
ret = hclge_cmd_init(hdev);
@@ -11689,6 +11700,8 @@ err_msi_uninit:
pci_free_irq_vectors(pdev);
err_cmd_uninit:
hclge_cmd_uninit(hdev);
+err_devlink_uninit:
+ hclge_devlink_uninit(hdev);
err_pci_uninit:
pcim_iounmap(pdev, hdev->hw.io_base);
pci_clear_master(pdev);
@@ -12079,6 +12092,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_cmd_uninit(hdev);
hclge_misc_irq_uninit(hdev);
+ hclge_devlink_uninit(hdev);
hclge_pci_uninit(hdev);
mutex_destroy(&hdev->vport_lock);
hclge_uninit_vport_vlan_table(hdev);
@@ -12867,6 +12881,29 @@ static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
return 0;
}
+static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
+ u32 *status_code)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to query link diagnosis info, ret = %d\n", ret);
+ return ret;
+ }
+
+ *status_code = le32_to_cpu(desc.data[0]);
+ return 0;
+}
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
@@ -12967,6 +13004,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_tx_hwts_info = hclge_ptp_set_tx_info,
.get_rx_hwts = hclge_ptp_get_rx_hwts,
.get_ts_info = hclge_ptp_get_ts_info,
+ .get_link_diagnosis_info = hclge_get_link_diagnosis_info,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index e446b839a371..de6afbcbfbac 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -8,6 +8,7 @@
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/kfifo.h>
+#include <net/devlink.h>
#include "hclge_cmd.h"
#include "hclge_ptp.h"
@@ -37,22 +38,22 @@
#define HCLGE_VECTOR_REG_OFFSET_H 0x1000
#define HCLGE_VECTOR_VF_OFFSET 0x100000
-#define HCLGE_CMDQ_TX_ADDR_L_REG 0x27000
-#define HCLGE_CMDQ_TX_ADDR_H_REG 0x27004
-#define HCLGE_CMDQ_TX_DEPTH_REG 0x27008
-#define HCLGE_CMDQ_TX_TAIL_REG 0x27010
-#define HCLGE_CMDQ_TX_HEAD_REG 0x27014
-#define HCLGE_CMDQ_RX_ADDR_L_REG 0x27018
-#define HCLGE_CMDQ_RX_ADDR_H_REG 0x2701C
-#define HCLGE_CMDQ_RX_DEPTH_REG 0x27020
-#define HCLGE_CMDQ_RX_TAIL_REG 0x27024
-#define HCLGE_CMDQ_RX_HEAD_REG 0x27028
+#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000
+#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004
+#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008
+#define HCLGE_NIC_CSQ_TAIL_REG 0x27010
+#define HCLGE_NIC_CSQ_HEAD_REG 0x27014
+#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018
+#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701C
+#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020
+#define HCLGE_NIC_CRQ_TAIL_REG 0x27024
+#define HCLGE_NIC_CRQ_HEAD_REG 0x27028
+
#define HCLGE_CMDQ_INTR_STS_REG 0x27104
#define HCLGE_CMDQ_INTR_EN_REG 0x27108
#define HCLGE_CMDQ_INTR_GEN_REG 0x2710C
/* bar registers for common func */
-#define HCLGE_VECTOR0_OTER_EN_REG 0x20600
#define HCLGE_GRO_EN_REG 0x28000
#define HCLGE_RXD_ADV_LAYOUT_EN_REG 0x28008
@@ -193,6 +194,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U
#define HCLGE_VECTOR0_IMP_RD_POISON_B 5U
#define HCLGE_VECTOR0_ALL_MSIX_ERR_B 6U
+#define HCLGE_TRIGGER_IMP_RESET_B 7U
#define HCLGE_MAC_DEFAULT_FRAME \
(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
@@ -944,6 +946,7 @@ struct hclge_dev {
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
struct hclge_ptp *ptp;
+ struct devlink *devlink;
};
/* VPort level vlan tag configuration for TX direction */
@@ -1055,6 +1058,11 @@ struct hclge_vport {
struct list_head vlan_list; /* Store VF vlan table */
};
+struct hclge_speed_bit_map {
+ u32 speed;
+ u32 speed_bit;
+};
+
int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
bool en_mc_pmc, bool en_bc_pmc);
int hclge_add_uc_addr_common(struct hclge_vport *vport,
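Aside: the struct hclge_speed_bit_map added above is the table type consumed by the new hclge_get_speed_bit() helper earlier in this patch. The table itself (speed_bit_map) is defined elsewhere in the patch and not reproduced here, so the following is only a minimal standalone sketch of the same table-lookup idiom, using made-up constants rather than the driver's HCLGE_MAC_SPEED_* / HCLGE_SUPPORT_*_BIT values.

#include <stdint.h>
#include <stdio.h>

/* Stand-in values; the driver's real constants live in hclge_main.h. */
#define SPEED_1G	1000
#define SPEED_10G	10000
#define SUPPORT_1G_BIT	(1u << 2)
#define SUPPORT_10G_BIT	(1u << 3)

struct speed_bit_map {
	uint32_t speed;
	uint32_t speed_bit;
};

static const struct speed_bit_map speed_map[] = {
	{ SPEED_1G, SUPPORT_1G_BIT },
	{ SPEED_10G, SUPPORT_10G_BIT },
};

/* Same shape as hclge_get_speed_bit(): linear scan, error on unknown speed. */
static int get_speed_bit(uint32_t speed, uint32_t *speed_bit)
{
	for (size_t i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++) {
		if (speed_map[i].speed == speed) {
			*speed_bit = speed_map[i].speed_bit;
			return 0;
		}
	}
	return -1; /* the driver returns -EINVAL here */
}

int main(void)
{
	uint32_t bit = 0;

	if (!get_speed_bit(SPEED_10G, &bit))
		printf("10G maps to support bit 0x%x\n", (unsigned int)bit); /* 0x8 */
	return 0;
}

Compared with the old switch statement, supporting a new speed only means adding one table entry.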
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index c0a478ae9583..2ce5302c5956 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -10,7 +10,14 @@
static u16 hclge_errno_to_resp(int errno)
{
- return abs(errno);
+ int resp = abs(errno);
+
+ /* The status for pf to vf msg cmd is u16, constrained by HW.
+ * We need to keep the same type with it.
+ * The input errno is a standard error code, so it is safe to
+ * use a u16 to store abs(errno).
+ */
+ return (u16)resp;
}
/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
@@ -66,6 +73,8 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data,
resp_msg->len);
+ trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);
+
status = hclge_cmd_send(&hdev->hw, &desc, 1);
if (status)
dev_err(&hdev->pdev->dev,
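A quick illustration of the conversion performed by hclge_errno_to_resp() above: standard errno values are small positive integers, so abs(errno) always fits the 16-bit mailbox status field. A minimal standalone sketch (not the driver's code):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint16_t errno_to_resp(int err)
{
	/* e.g. -EINVAL (-22) becomes 22, well within u16 range */
	return (uint16_t)abs(err);
}

int main(void)
{
	printf("%u\n", errno_to_resp(-EINVAL)); /* prints 22 on Linux */
	return 0;
}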
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
index dbf5f4c08019..7a9b77de632a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -127,7 +127,7 @@ static inline struct hclge_dev *hclge_ptp_get_hdev(struct ptp_clock_info *info)
}
bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb);
-void hclge_ptp_clean_tx_hwts(struct hclge_dev *dev);
+void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev);
void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
u32 nsec, u32 sec);
int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
index 2c26ea607a53..51ff7d86ee90 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -7,4 +7,4 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
ccflags-y += -I $(srctree)/$(src)
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
-hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
+hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o hclgevf_devlink.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index d9ddb0a243d4..59772b0e9531 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -71,7 +71,7 @@ static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
static bool hclgevf_is_special_opcode(u16 opcode)
{
- static const u16 spec_opcode[] = {0x30, 0x31, 0x32};
+ const u16 spec_opcode[] = {0x30, 0x31, 0x32};
int i;
for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
@@ -342,25 +342,26 @@ static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
}
+static const struct hclgevf_caps_bit_map hclgevf_cmd_caps_bit_map0[] = {
+ {HCLGEVF_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
+ {HCLGEVF_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
+ {HCLGEVF_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
+ {HCLGEVF_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
+ {HCLGEVF_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
+ {HCLGEVF_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
+};
+
static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
struct hclgevf_query_version_cmd *cmd)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- u32 caps;
+ u32 caps, i;
caps = __le32_to_cpu(cmd->caps[0]);
- if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_GSO_B))
- set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
- if (hnae3_get_bit(caps, HCLGEVF_CAP_INT_QL_B))
- set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
- if (hnae3_get_bit(caps, HCLGEVF_CAP_TQP_TXRX_INDEP_B))
- set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
- if (hnae3_get_bit(caps, HCLGEVF_CAP_HW_TX_CSUM_B))
- set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps);
- if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_TUNNEL_CSUM_B))
- set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
- if (hnae3_get_bit(caps, HCLGEVF_CAP_RXD_ADV_LAYOUT_B))
- set_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, ae_dev->caps);
+ for (i = 0; i < ARRAY_SIZE(hclgevf_cmd_caps_bit_map0); i++)
+ if (hnae3_get_bit(caps, hclgevf_cmd_caps_bit_map0[i].imp_bit))
+ set_bit(hclgevf_cmd_caps_bit_map0[i].local_bit,
+ ae_dev->caps);
}
static __le32 hclgevf_build_api_caps(void)
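The hclgevf_parse_capability() rewrite above replaces an if-ladder with a table that translates firmware capability bit positions into driver capability bit positions. A minimal standalone sketch of that idiom follows, with made-up bit positions; the real HCLGEVF_CAP_*_B and HNAE3_DEV_SUPPORT_*_B values are defined in the driver headers.

#include <stdint.h>
#include <stdio.h>

struct caps_bit_map {
	uint16_t imp_bit;   /* bit position reported by firmware */
	uint16_t local_bit; /* bit position in the driver's caps word */
};

/* Illustrative entries only, not the driver's table. */
static const struct caps_bit_map caps_map[] = {
	{ 0, 3 }, /* e.g. UDP GSO -> driver cap bit 3 */
	{ 2, 7 }, /* e.g. TX/RX independent queues -> driver cap bit 7 */
};

static void parse_caps(uint32_t fw_caps, uint32_t *drv_caps)
{
	for (size_t i = 0; i < sizeof(caps_map) / sizeof(caps_map[0]); i++)
		if (fw_caps & (1u << caps_map[i].imp_bit))
			*drv_caps |= 1u << caps_map[i].local_bit;
}

int main(void)
{
	uint32_t drv = 0;

	parse_caps(0x5, &drv);              /* firmware sets bits 0 and 2 */
	printf("driver caps: 0x%x\n", drv); /* prints 0x88 (bits 3 and 7) */
	return 0;
}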
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 5b82177f98b4..39d0b589c720 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -266,16 +266,6 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
#define HCLGEVF_TYPE_CRQ 0
#define HCLGEVF_TYPE_CSQ 1
-#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000
-#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004
-#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008
-#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010
-#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014
-#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018
-#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701c
-#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020
-#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024
-#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028
/* this bit indicates that the driver is ready for hardware reset */
#define HCLGEVF_NIC_SW_RST_RDY_B 16
@@ -306,6 +296,12 @@ struct hclgevf_dev_specs_1_cmd {
u8 rsv1[18];
};
+/* capabilities bits map between imp firmware and local driver */
+struct hclgevf_caps_bit_map {
+ u16 imp_bit;
+ u16 local_bit;
+};
+
static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
{
writel(value, base + reg);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
new file mode 100644
index 000000000000..f478770299c6
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#include <net/devlink.h>
+
+#include "hclgevf_devlink.h"
+
+static int hclgevf_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+#define HCLGEVF_DEVLINK_FW_STRING_LEN 32
+ struct hclgevf_devlink_priv *priv = devlink_priv(devlink);
+ char version_str[HCLGEVF_DEVLINK_FW_STRING_LEN];
+ struct hclgevf_dev *hdev = priv->hdev;
+ int ret;
+
+ ret = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (ret)
+ return ret;
+
+ snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu",
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ version_str);
+}
+
+static int hclgevf_devlink_reload_down(struct devlink *devlink,
+ bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct hclgevf_devlink_priv *priv = devlink_priv(devlink);
+ struct hclgevf_dev *hdev = priv->hdev;
+ struct hnae3_handle *h = &hdev->nic;
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
+ dev_err(&pdev->dev, "reset is handling\n");
+ return -EBUSY;
+ }
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ rtnl_lock();
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_DOWN_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
+ }
+
+ ret = hdev->nic_client->ops->reset_notify(h,
+ HNAE3_UNINIT_CLIENT);
+ rtnl_unlock();
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int hclgevf_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct hclgevf_devlink_priv *priv = devlink_priv(devlink);
+ struct hclgevf_dev *hdev = priv->hdev;
+ struct hnae3_handle *h = &hdev->nic;
+ int ret;
+
+ *actions_performed = BIT(action);
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ rtnl_lock();
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_INIT_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return ret;
+ }
+
+ ret = hdev->nic_client->ops->reset_notify(h, HNAE3_UP_CLIENT);
+ rtnl_unlock();
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct devlink_ops hclgevf_devlink_ops = {
+ .info_get = hclgevf_devlink_info_get,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ .reload_down = hclgevf_devlink_reload_down,
+ .reload_up = hclgevf_devlink_reload_up,
+};
+
+int hclgevf_devlink_init(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclgevf_devlink_priv *priv;
+ struct devlink *devlink;
+ int ret;
+
+ devlink = devlink_alloc(&hclgevf_devlink_ops,
+ sizeof(struct hclgevf_devlink_priv),
+ &pdev->dev);
+ if (!devlink)
+ return -ENOMEM;
+
+ priv = devlink_priv(devlink);
+ priv->hdev = hdev;
+ hdev->devlink = devlink;
+
+ ret = devlink_register(devlink);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register devlink, ret = %d\n",
+ ret);
+ goto out_reg_fail;
+ }
+
+ devlink_reload_enable(devlink);
+
+ return 0;
+
+out_reg_fail:
+ devlink_free(devlink);
+ return ret;
+}
+
+void hclgevf_devlink_uninit(struct hclgevf_dev *hdev)
+{
+ struct devlink *devlink = hdev->devlink;
+
+ devlink_reload_disable(devlink);
+
+ devlink_unregister(devlink);
+
+ devlink_free(devlink);
+}
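The version string built in hclgevf_devlink_info_get() packs four byte-fields of fw_version into dotted form. Assuming the HNAE3_FW_VERSION_BYTE* masks and shifts simply select the four bytes of the 32-bit word, most significant byte first (the actual definitions live in hnae3.h and are not shown in this excerpt), a standalone sketch of the formatting:

#include <stdint.h>
#include <stdio.h>

/* Assumed byte-wise field extraction; masks/shifts here are stand-ins. */
static unsigned long get_field(uint32_t val, uint32_t mask, unsigned int shift)
{
	return (val & mask) >> shift;
}

int main(void)
{
	uint32_t fw_version = 0x01020304;
	char buf[32];

	snprintf(buf, sizeof(buf), "%lu.%lu.%lu.%lu",
		 get_field(fw_version, 0xff000000, 24),
		 get_field(fw_version, 0x00ff0000, 16),
		 get_field(fw_version, 0x0000ff00, 8),
		 get_field(fw_version, 0x000000ff, 0));
	printf("running fw: %s\n", buf); /* prints "running fw: 1.2.3.4" */
	return 0;
}

These callbacks are what back "devlink dev info" and "devlink dev reload ... action driver_reinit" from the devlink userspace tool.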
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h
new file mode 100644
index 000000000000..e09ea3d8a963
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2021 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_DEVLINK_H
+#define __HCLGEVF_DEVLINK_H
+
+#include "hclgevf_main.h"
+
+struct hclgevf_devlink_priv {
+ struct hclgevf_dev *hdev;
+};
+
+int hclgevf_devlink_init(struct hclgevf_dev *hdev);
+void hclgevf_devlink_uninit(struct hclgevf_dev *hdev);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 938654778979..82e727020120 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -8,6 +8,7 @@
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"
+#include "hclgevf_devlink.h"
#define HCLGEVF_NAME "hclgevf"
@@ -39,16 +40,16 @@ static const u8 hclgevf_hash_key[] = {
MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
-static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
- HCLGEVF_CMDQ_TX_ADDR_H_REG,
- HCLGEVF_CMDQ_TX_DEPTH_REG,
- HCLGEVF_CMDQ_TX_TAIL_REG,
- HCLGEVF_CMDQ_TX_HEAD_REG,
- HCLGEVF_CMDQ_RX_ADDR_L_REG,
- HCLGEVF_CMDQ_RX_ADDR_H_REG,
- HCLGEVF_CMDQ_RX_DEPTH_REG,
- HCLGEVF_CMDQ_RX_TAIL_REG,
- HCLGEVF_CMDQ_RX_HEAD_REG,
+static const u32 cmdq_reg_addr_list[] = {HCLGEVF_NIC_CSQ_BASEADDR_L_REG,
+ HCLGEVF_NIC_CSQ_BASEADDR_H_REG,
+ HCLGEVF_NIC_CSQ_DEPTH_REG,
+ HCLGEVF_NIC_CSQ_TAIL_REG,
+ HCLGEVF_NIC_CSQ_HEAD_REG,
+ HCLGEVF_NIC_CRQ_BASEADDR_L_REG,
+ HCLGEVF_NIC_CRQ_BASEADDR_H_REG,
+ HCLGEVF_NIC_CRQ_DEPTH_REG,
+ HCLGEVF_NIC_CRQ_TAIL_REG,
+ HCLGEVF_NIC_CRQ_HEAD_REG,
HCLGEVF_VECTOR0_CMDQ_SRC_REG,
HCLGEVF_VECTOR0_CMDQ_STATE_REG,
HCLGEVF_CMDQ_INTR_EN_REG,
@@ -538,6 +539,7 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
nic->pdev = hdev->pdev;
nic->numa_node_mask = hdev->numa_node_mask;
nic->flags |= HNAE3_SUPPORT_VF;
+ nic->kinfo.io_base = hdev->hw.io_base;
ret = hclgevf_knic_setup(hdev);
if (ret)
@@ -1961,7 +1963,7 @@ static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG));
dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
- hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG));
dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
@@ -3339,6 +3341,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret)
return ret;
+ ret = hclgevf_devlink_init(hdev);
+ if (ret)
+ goto err_devlink_init;
+
ret = hclgevf_cmd_queue_init(hdev);
if (ret)
goto err_cmd_queue_init;
@@ -3443,6 +3449,8 @@ err_misc_irq_init:
err_cmd_init:
hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
+ hclgevf_devlink_uninit(hdev);
+err_devlink_init:
hclgevf_pci_uninit(hdev);
clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
return ret;
@@ -3464,6 +3472,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
}
hclgevf_cmd_uninit(hdev);
+ hclgevf_devlink_uninit(hdev);
hclgevf_pci_uninit(hdev);
hclgevf_uninit_mac_list(hdev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index e8013be055f8..883130a9b48f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -6,6 +6,7 @@
#include <linux/fs.h>
#include <linux/if_vlan.h>
#include <linux/types.h>
+#include <net/devlink.h>
#include "hclge_mbx.h"
#include "hclgevf_cmd.h"
#include "hnae3.h"
@@ -32,16 +33,17 @@
#define HCLGEVF_VECTOR_VF_OFFSET 0x100000
/* bar registers for cmdq */
-#define HCLGEVF_CMDQ_TX_ADDR_L_REG 0x27000
-#define HCLGEVF_CMDQ_TX_ADDR_H_REG 0x27004
-#define HCLGEVF_CMDQ_TX_DEPTH_REG 0x27008
-#define HCLGEVF_CMDQ_TX_TAIL_REG 0x27010
-#define HCLGEVF_CMDQ_TX_HEAD_REG 0x27014
-#define HCLGEVF_CMDQ_RX_ADDR_L_REG 0x27018
-#define HCLGEVF_CMDQ_RX_ADDR_H_REG 0x2701C
-#define HCLGEVF_CMDQ_RX_DEPTH_REG 0x27020
-#define HCLGEVF_CMDQ_RX_TAIL_REG 0x27024
-#define HCLGEVF_CMDQ_RX_HEAD_REG 0x27028
+#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000
+#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004
+#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008
+#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010
+#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014
+#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018
+#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701C
+#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020
+#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024
+#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028
+
#define HCLGEVF_CMDQ_INTR_EN_REG 0x27108
#define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C
@@ -316,7 +318,6 @@ struct hclgevf_dev {
struct hclgevf_mac_table_cfg mac_table;
- bool mbx_event_pending;
struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
@@ -332,6 +333,8 @@ struct hclgevf_dev {
u32 flag;
unsigned long serv_processed_cnt;
unsigned long last_serv_processed;
+
+ struct devlink *devlink;
};
static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index b339b9bc0625..fdc66fae0960 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -155,18 +155,66 @@ static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
return tail == hw->cmq.crq.next_to_use;
}
+static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
+ struct hclge_mbx_pf_to_vf_cmd *req)
+{
+ struct hclgevf_mbx_resp_status *resp = &hdev->mbx_resp;
+
+ if (resp->received_resp)
+ dev_warn(&hdev->pdev->dev,
+ "VF mbx resp flag not clear(%u)\n",
+ req->msg.vf_mbx_msg_code);
+
+ resp->origin_mbx_msg =
+ (req->msg.vf_mbx_msg_code << 16);
+ resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode;
+ resp->resp_status =
+ hclgevf_resp_to_errno(req->msg.resp_status);
+ memcpy(resp->additional_info, req->msg.resp_data,
+ HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
+ if (req->match_id) {
+ /* If match_id is not zero, it means the PF supports match_id.
+ * If the match_id is right, the VF takes the response;
+ * otherwise the response is ignored. The driver clears
+ * hdev->mbx_resp when sending the next message that needs
+ * a response.
+ */
+ if (req->match_id == resp->match_id)
+ resp->received_resp = true;
+ } else {
+ resp->received_resp = true;
+ }
+}
+
+static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev,
+ struct hclge_mbx_pf_to_vf_cmd *req)
+{
+ /* drop the async msg if the ARQ is full and continue
+ * with the next message
+ */
+ if (atomic_read(&hdev->arq.count) >=
+ HCLGE_MBX_MAX_ARQ_MSG_NUM) {
+ dev_warn(&hdev->pdev->dev,
+ "Async Q full, dropping msg(%u)\n",
+ req->msg.code);
+ return;
+ }
+
+ /* queue the async message at the tail of the ARQ */
+ memcpy(hdev->arq.msg_q[hdev->arq.tail], &req->msg,
+ HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
+ hclge_mbx_tail_ptr_move_arq(hdev->arq);
+ atomic_inc(&hdev->arq.count);
+
+ hclgevf_mbx_task_schedule(hdev);
+}
+
void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
{
- struct hclgevf_mbx_resp_status *resp;
struct hclge_mbx_pf_to_vf_cmd *req;
struct hclgevf_cmq_ring *crq;
struct hclgevf_desc *desc;
- u16 *msg_q;
u16 flag;
- u8 *temp;
- int i;
- resp = &hdev->mbx_resp;
crq = &hdev->hw.cmq.crq;
while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
@@ -200,69 +248,14 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
*/
switch (req->msg.code) {
case HCLGE_MBX_PF_VF_RESP:
- if (resp->received_resp)
- dev_warn(&hdev->pdev->dev,
- "VF mbx resp flag not clear(%u)\n",
- req->msg.vf_mbx_msg_code);
- resp->received_resp = true;
-
- resp->origin_mbx_msg =
- (req->msg.vf_mbx_msg_code << 16);
- resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode;
- resp->resp_status =
- hclgevf_resp_to_errno(req->msg.resp_status);
-
- temp = (u8 *)req->msg.resp_data;
- for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) {
- resp->additional_info[i] = *temp;
- temp++;
- }
-
- /* If match_id is not zero, it means PF support
- * match_id. If the match_id is right, VF get the
- * right response, otherwise ignore the response.
- * Driver will clear hdev->mbx_resp when send
- * next message which need response.
- */
- if (req->match_id) {
- if (req->match_id == resp->match_id)
- resp->received_resp = true;
- } else {
- resp->received_resp = true;
- }
+ hclgevf_handle_mbx_response(hdev, req);
break;
case HCLGE_MBX_LINK_STAT_CHANGE:
case HCLGE_MBX_ASSERTING_RESET:
case HCLGE_MBX_LINK_STAT_MODE:
case HCLGE_MBX_PUSH_VLAN_INFO:
case HCLGE_MBX_PUSH_PROMISC_INFO:
- /* set this mbx event as pending. This is required as we
- * might loose interrupt event when mbx task is busy
- * handling. This shall be cleared when mbx task just
- * enters handling state.
- */
- hdev->mbx_event_pending = true;
-
- /* we will drop the async msg if we find ARQ as full
- * and continue with next message
- */
- if (atomic_read(&hdev->arq.count) >=
- HCLGE_MBX_MAX_ARQ_MSG_NUM) {
- dev_warn(&hdev->pdev->dev,
- "Async Q full, dropping msg(%u)\n",
- req->msg.code);
- break;
- }
-
- /* tail the async message in arq */
- msg_q = hdev->arq.msg_q[hdev->arq.tail];
- memcpy(&msg_q[0], &req->msg,
- HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
- hclge_mbx_tail_ptr_move_arq(hdev->arq);
- atomic_inc(&hdev->arq.count);
-
- hclgevf_mbx_task_schedule(hdev);
-
+ hclgevf_handle_mbx_msg(hdev, req);
break;
default:
dev_err(&hdev->pdev->dev,
@@ -298,11 +291,6 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
u8 flag;
u8 idx;
- /* we can safely clear it now as we are at start of the async message
- * processing
- */
- hdev->mbx_event_pending = false;
-
tail = hdev->arq.tail;
/* process all the async queue messages */
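The response handler factored out above only checks the echoed match_id; the send side (not part of this excerpt) is presumed to record the match_id it put in the request and poll received_resp afterwards. A minimal standalone sketch of that request/response pairing, with all names hypothetical and not taken from the driver:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the VF mailbox response bookkeeping. */
struct mbx_resp_status {
	bool received_resp;
	uint16_t match_id;
};

/* Send side: stamp the request with a fresh match_id and clear the flag. */
static uint16_t send_request(struct mbx_resp_status *resp, uint16_t next_id)
{
	resp->received_resp = false;
	resp->match_id = next_id;
	return next_id;
}

/* IRQ/task side: accept the response only if the PF echoed our match_id,
 * or if the PF predates match_id support and echoed zero.
 */
static void handle_response(struct mbx_resp_status *resp, uint16_t echoed_id)
{
	if (echoed_id && echoed_id != resp->match_id)
		return; /* stale or mismatched response: ignore it */
	resp->received_resp = true;
}

int main(void)
{
	struct mbx_resp_status resp = { 0 };
	uint16_t id = send_request(&resp, 42);

	handle_response(&resp, 7);  /* stale response: ignored */
	handle_response(&resp, id); /* matching response: accepted */
	printf("received_resp = %d\n", resp.received_resp); /* prints 1 */
	return 0;
}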