Diffstat (limited to 'drivers/net')
130 files changed, 3167 insertions, 1753 deletions
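The opening bnx2x, tg3, and bnxt hunks below unify ad-hoc switch fall-through comments ("Don't break", "falling through...", "fall thru") into the single spelling "fall through". A minimal standalone sketch of why the exact wording matters, mirroring the bnx2x_phy_def_cfg() hunk with invented constants (this is not driver code): GCC's -Wimplicit-fallthrough=3 recognizes only a small set of comment patterns, so "fall through" silences the warning while wordings like "Don't break" or "falling through..." do not.

/* Build with: gcc -Wextra -Wimplicit-fallthrough=3 -o sketch sketch.c
 *
 * The half-duplex case sets the duplex and then deliberately falls
 * into the case that sets the speed, as in bnx2x_phy_def_cfg().
 */
enum link_cfg {                 /* invented constants for the sketch */
        LINK_SPEED_10M_HALF,
        LINK_SPEED_10M_FULL,
};

static void phy_def_cfg(enum link_cfg cfg, int *speed, int *duplex)
{
        switch (cfg) {
        case LINK_SPEED_10M_HALF:
                *duplex = 0;            /* DUPLEX_HALF */
                /* fall through */
        case LINK_SPEED_10M_FULL:
                *speed = 10;            /* SPEED_10 */
                break;
        }
}

int main(void)
{
        int speed = 0, duplex = 1;

        phy_def_cfg(LINK_SPEED_10M_HALF, &speed, &duplex);
        return !(speed == 10 && duplex == 0);
}

Replacing the comment above with one of the old spellings makes the build warn again, which is the point of normalizing them across the tree.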
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 22243c480a05..98d4c5a3ff21 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -6339,6 +6339,7 @@ int bnx2x_set_led(struct link_params *params, */ if (!vars->link_up) break; + /* else: fall through */ case LED_MODE_ON: if (((params->phy[EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) || @@ -12521,11 +12522,13 @@ static void bnx2x_phy_def_cfg(struct link_params *params, switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { case PORT_FEATURE_LINK_SPEED_10M_HALF: phy->req_duplex = DUPLEX_HALF; + /* fall through */ case PORT_FEATURE_LINK_SPEED_10M_FULL: phy->req_line_speed = SPEED_10; break; case PORT_FEATURE_LINK_SPEED_100M_HALF: phy->req_duplex = DUPLEX_HALF; + /* fall through */ case PORT_FEATURE_LINK_SPEED_100M_FULL: phy->req_line_speed = SPEED_100; break; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 57348f2b49a3..71362b7f6040 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -8561,11 +8561,11 @@ int bnx2x_set_int_mode(struct bnx2x *bp) bp->num_queues, 1 + bp->num_cnic_queues); - /* falling through... */ + /* fall through */ case BNX2X_INT_MODE_MSI: bnx2x_enable_msi(bp); - /* falling through... */ + /* fall through */ case BNX2X_INT_MODE_INTX: bp->num_ethernet_queues = 1; bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 8baf9d3eb4b1..3f4d2c8da21a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -3258,7 +3258,7 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp, /* DEL command deletes all currently configured MACs */ case BNX2X_MCAST_CMD_DEL: o->set_registry_size(o, 0); - /* Don't break */ + /* fall through */ /* RESTORE command will restore the entire multicast configuration */ case BNX2X_MCAST_CMD_RESTORE: @@ -3592,7 +3592,7 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp, /* DEL command deletes all currently configured MACs */ case BNX2X_MCAST_CMD_DEL: o->set_registry_size(o, 0); - /* Don't break */ + /* fall through */ /* RESTORE command will restore the entire multicast configuration */ case BNX2X_MCAST_CMD_RESTORE: diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index dc77bfded865..62da46537734 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1827,6 +1827,7 @@ get_vf: DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n", vf->abs_vfid, qidx); bnx2x_vf_handle_rss_update_eqe(bp, vf); + /* fall through */ case EVENT_RING_OPCODE_VF_FLR: /* Do nothing for now */ return 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 176fc9f4d7de..d2dadade1d0e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1727,7 +1727,7 @@ static int bnxt_async_event_process(struct bnxt *bp, speed); } set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); - /* fall thru */ + /* fall through */ } case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); @@ -7984,7 +7984,7 @@ static int 
bnxt_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb, - bp, bp); + bp, bp, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp); return 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 795f45024c20..d0699f39ba34 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1544,22 +1544,16 @@ void bnxt_tc_flow_stats_work(struct bnxt *bp) int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, struct tc_cls_flower_offload *cls_flower) { - int rc = 0; - switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: - rc = bnxt_tc_add_flow(bp, src_fid, cls_flower); - break; - + return bnxt_tc_add_flow(bp, src_fid, cls_flower); case TC_CLSFLOWER_DESTROY: - rc = bnxt_tc_del_flow(bp, cls_flower); - break; - + return bnxt_tc_del_flow(bp, cls_flower); case TC_CLSFLOWER_STATS: - rc = bnxt_tc_get_flow_stats(bp, cls_flower); - break; + return bnxt_tc_get_flow_stats(bp, cls_flower); + default: + return -EOPNOTSUPP; } - return rc; } static const struct rhashtable_params bnxt_tc_flow_ht_params = { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index 05d405905906..0745f2dfc80c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -173,7 +173,7 @@ static int bnxt_vf_rep_setup_tc_block(struct net_device *dev, case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, bnxt_vf_rep_setup_tc_block_cb, - vf_rep, vf_rep); + vf_rep, vf_rep, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, bnxt_vf_rep_setup_tc_block_cb, vf_rep); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 3be87efdc93d..0a796d5ec893 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -721,6 +721,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) case TG3_APE_LOCK_GPIO: if (tg3_asic_rev(tp) == ASIC_REV_5761) return 0; + /* else: fall through */ case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: if (!tp->pci_fn) @@ -781,6 +782,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) case TG3_APE_LOCK_GPIO: if (tg3_asic_rev(tp) == ASIC_REV_5761) return; + /* else: fall through */ case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: if (!tp->pci_fn) @@ -10706,28 +10708,40 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) switch (limit) { case 16: tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); + /* fall through */ case 15: tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); + /* fall through */ case 14: tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); + /* fall through */ case 13: tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); + /* fall through */ case 12: tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); + /* fall through */ case 11: tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); + /* fall through */ case 10: tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); + /* fall through */ case 9: tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); + /* fall through */ case 8: tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); + /* fall through */ case 7: tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); + /* fall through */ case 6: tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); + /* fall through */ case 5: tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 
0); + /* fall through */ case 4: /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ case 3: diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 8a815bb57177..7cb4e753829b 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -3569,9 +3569,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) { u8 vfmac[ETH_ALEN]; - random_ether_addr(&vfmac[0]); - if (__liquidio_set_vf_mac(netdev, j, - &vfmac[0], false)) { + eth_random_addr(vfmac); + if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) { dev_err(&octeon_dev->pci_dev->dev, "Error setting VF%d MAC address\n", j); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 0dbe2d9e22d6..4a8cbd864ef7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -46,6 +46,7 @@ #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/vmalloc.h> +#include <linux/rhashtable.h> #include <linux/etherdevice.h> #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> @@ -521,6 +522,15 @@ enum { MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS, }; +enum { + PRIV_FLAG_PORT_TX_VM_BIT, +}; + +#define PRIV_FLAG_PORT_TX_VM BIT(PRIV_FLAG_PORT_TX_VM_BIT) + +#define PRIV_FLAGS_ADAP 0 +#define PRIV_FLAGS_PORT PRIV_FLAG_PORT_TX_VM + struct adapter; struct sge_rspq; @@ -557,6 +567,7 @@ struct port_info { struct hwtstamp_config tstamp_config; bool ptp_enable; struct sched_table *sched_tbl; + u32 eth_flags; }; struct dentry; @@ -867,6 +878,7 @@ struct adapter { unsigned int flags; unsigned int adap_idx; enum chip_type chip; + u32 eth_flags; int msg_enable; __be16 vxlan_port; @@ -956,6 +968,7 @@ struct adapter { struct chcr_stats_debug chcr_stats; /* TC flower offload */ + bool tc_flower_initialized; struct rhashtable flower_tbl; struct rhashtable_params flower_ht_params; struct timer_list flower_stats_timer; @@ -1333,7 +1346,7 @@ void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); void t4_free_sge_resources(struct adapter *adap); void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q); irq_handler_t t4_intr_handler(struct adapter *adap); -netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev); int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, const struct pkt_gl *gl); int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index f7eef93ffc87..ddb8b9eba6bf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -177,6 +177,10 @@ static char loopback_stats_strings[][ETH_GSTRING_LEN] = { "bg3_frames_trunc ", }; +static const char cxgb4_priv_flags_strings[][ETH_GSTRING_LEN] = { + [PRIV_FLAG_PORT_TX_VM_BIT] = "port_tx_vm_wr", +}; + static int get_sset_count(struct net_device *dev, int sset) { switch (sset) { @@ -185,6 +189,8 @@ static int get_sset_count(struct net_device *dev, int sset) ARRAY_SIZE(adapter_stats_strings) + ARRAY_SIZE(channel_stats_strings) + ARRAY_SIZE(loopback_stats_strings); + case ETH_SS_PRIV_FLAGS: + return ARRAY_SIZE(cxgb4_priv_flags_strings); default: return -EOPNOTSUPP; } @@ -235,6 +241,7 @@ static void get_drvinfo(struct 
net_device *dev, struct ethtool_drvinfo *info) FW_HDR_FW_VER_MINOR_G(exprom_vers), FW_HDR_FW_VER_MICRO_G(exprom_vers), FW_HDR_FW_VER_BUILD_G(exprom_vers)); + info->n_priv_flags = ARRAY_SIZE(cxgb4_priv_flags_strings); } static void get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -250,6 +257,9 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data) data += sizeof(channel_stats_strings); memcpy(data, loopback_stats_strings, sizeof(loopback_stats_strings)); + } else if (stringset == ETH_SS_PRIV_FLAGS) { + memcpy(data, cxgb4_priv_flags_strings, + sizeof(cxgb4_priv_flags_strings)); } } @@ -1499,6 +1509,36 @@ static int cxgb4_get_module_eeprom(struct net_device *dev, offset, len, &data[eprom->len - len]); } +static u32 cxgb4_get_priv_flags(struct net_device *netdev) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adapter = pi->adapter; + + return (adapter->eth_flags | pi->eth_flags); +} + +/** + * set_flags - set/unset specified flags if passed in new_flags + * @cur_flags: pointer to current flags + * @new_flags: new incoming flags + * @flags: set of flags to set/unset + */ +static inline void set_flags(u32 *cur_flags, u32 new_flags, u32 flags) +{ + *cur_flags = (*cur_flags & ~flags) | (new_flags & flags); +} + +static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adapter = pi->adapter; + + set_flags(&adapter->eth_flags, flags, PRIV_FLAGS_ADAP); + set_flags(&pi->eth_flags, flags, PRIV_FLAGS_PORT); + + return 0; +} + static const struct ethtool_ops cxgb_ethtool_ops = { .get_link_ksettings = get_link_ksettings, .set_link_ksettings = set_link_ksettings, @@ -1535,6 +1575,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .get_dump_data = get_dump_data, .get_module_info = cxgb4_get_module_info, .get_module_eeprom = cxgb4_get_module_eeprom, + .get_priv_flags = cxgb4_get_priv_flags, + .set_priv_flags = cxgb4_set_priv_flags, }; void cxgb4_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index bc03c175a3cd..1c0374cbb890 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3016,7 +3016,7 @@ static int cxgb_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb, - pi, dev); + pi, dev, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi); return 0; @@ -3217,7 +3217,7 @@ static netdev_features_t cxgb_fix_features(struct net_device *dev, static const struct net_device_ops cxgb4_netdev_ops = { .ndo_open = cxgb_open, .ndo_stop = cxgb_close, - .ndo_start_xmit = t4_eth_xmit, + .ndo_start_xmit = t4_start_xmit, .ndo_select_queue = cxgb_select_queue, .ndo_get_stats64 = cxgb_get_stats, .ndo_set_rx_mode = cxgb_set_rxmode, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 3ddd2c4acf68..623f73dd7738 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -874,6 +874,9 @@ int cxgb4_init_tc_flower(struct adapter *adap) { int ret; + if (adap->tc_flower_initialized) + return -EEXIST; + adap->flower_ht_params = cxgb4_tc_flower_ht_params; ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params); if (ret) @@ -882,13 +885,18 @@ int 
cxgb4_init_tc_flower(struct adapter *adap) INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler); timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0); mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD); + adap->tc_flower_initialized = true; return 0; } void cxgb4_cleanup_tc_flower(struct adapter *adap) { + if (!adap->tc_flower_initialized) + return; + if (adap->flower_stats_timer.function) del_timer_sync(&adap->flower_stats_timer); cancel_work_sync(&adap->flower_stats_work); rhashtable_destroy(&adap->flower_tbl); + adap->tc_flower_initialized = false; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index 9148abb7994c..7fc656680299 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -539,6 +539,9 @@ void t4_cleanup_sched(struct adapter *adap) struct port_info *pi = netdev2pinfo(adap->port[j]); s = pi->sched_tbl; + if (!s) + continue; + for (i = 0; i < s->sched_size; i++) { struct sched_class *e; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 395e2a0e8d7f..ebb46c472d52 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1288,13 +1288,13 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb, } /** - * t4_eth_xmit - add a packet to an Ethernet Tx queue + * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue * @skb: the packet * @dev: the egress net device * * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. */ -netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) { u32 wr_mid, ctrl0, op; u64 cntrl, *end, *sgl; @@ -1547,6 +1547,374 @@ out_free: dev_kfree_skb_any(skb); return NETDEV_TX_OK; } +/* Constants ... */ +enum { + /* Egress Queue sizes, producer and consumer indices are all in units + * of Egress Context Units bytes. Note that as far as the hardware is + * concerned, the free list is an Egress Queue (the host produces free + * buffers which the hardware consumes) and free list entries are + * 64-bit PCI DMA addresses. + */ + EQ_UNIT = SGE_EQ_IDXSIZE, + FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), + TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), + + T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_lso_core) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64), +}; + +/** + * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data? + * @skb: the packet + * + * Returns whether an Ethernet packet is small enough to fit completely as + * immediate data. + */ +static inline int t4vf_is_eth_imm(const struct sk_buff *skb) +{ + /* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request + * which does not accommodate immediate data. We could dike out all + * of the support code for immediate data but that would tie our hands + * too much if we ever want to enhace the firmware. It would also + * create more differences between the PF and VF Drivers. + */ + return false; +} + +/** + * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR + * @skb: the packet + * + * Returns the number of flits needed for a TX Work Request for the + * given Ethernet packet, including the needed WR and CPL headers. 
+ */ +static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb) +{ + unsigned int flits; + + /* If the skb is small enough, we can pump it out as a work request + * with only immediate data. In that case we just have to have the + * TX Packet header plus the skb data in the Work Request. + */ + if (t4vf_is_eth_imm(skb)) + return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), + sizeof(__be64)); + + /* Otherwise, we're going to have to construct a Scatter gather list + * of the skb body and fragments. We also include the flits necessary + * for the TX Packet Work Request and CPL. We always have a firmware + * Write Header (incorporated as part of the cpl_tx_pkt_lso and + * cpl_tx_pkt structures), followed by either a TX Packet Write CPL + * message or, if we're doing a Large Send Offload, an LSO CPL message + * with an embedded TX Packet Write CPL message. + */ + flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); + if (skb_shinfo(skb)->gso_size) + flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_lso_core) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + else + flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + return flits; +} + +/** + * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue + * @skb: the packet + * @dev: the egress net device + * + * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled. + */ +static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + dma_addr_t addr[MAX_SKB_FRAGS + 1]; + const struct skb_shared_info *ssi; + struct fw_eth_tx_pkt_vm_wr *wr; + int qidx, credits, max_pkt_len; + struct cpl_tx_pkt_core *cpl; + const struct port_info *pi; + unsigned int flits, ndesc; + struct sge_eth_txq *txq; + struct adapter *adapter; + u64 cntrl, *end; + u32 wr_mid; + const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) + + sizeof(wr->ethmacsrc) + + sizeof(wr->ethtype) + + sizeof(wr->vlantci); + + /* The chip minimum packet length is 10 octets but the firmware + * command that we are using requires that we copy the Ethernet header + * (including the VLAN tag) into the header so we reject anything + * smaller than that ... + */ + if (unlikely(skb->len < fw_hdr_copy_len)) + goto out_free; + + /* Discard the packet if the length is greater than mtu */ + max_pkt_len = ETH_HLEN + dev->mtu; + if (skb_vlan_tag_present(skb)) + max_pkt_len += VLAN_HLEN; + if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) + goto out_free; + + /* Figure out which TX Queue we're going to use. */ + pi = netdev_priv(dev); + adapter = pi->adapter; + qidx = skb_get_queue_mapping(skb); + WARN_ON(qidx >= pi->nqsets); + txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; + + /* Take this opportunity to reclaim any TX Descriptors whose DMA + * transfers have completed. + */ + cxgb4_reclaim_completed_tx(adapter, &txq->q, true); + + /* Calculate the number of flits and TX Descriptors we're going to + * need along with how many TX Descriptors will be left over after + * we inject our Work Request. + */ + flits = t4vf_calc_tx_flits(skb); + ndesc = flits_to_desc(flits); + credits = txq_avail(&txq->q) - ndesc; + + if (unlikely(credits < 0)) { + /* Not enough room for this packet's Work Request. Stop the + * TX Queue and return a "busy" condition. The queue will get + * started later on when the firmware informs us that space + * has opened up. 
+ */ + eth_txq_stop(txq); + dev_err(adapter->pdev_dev, + "%s: TX ring %u full while queue awake!\n", + dev->name, qidx); + return NETDEV_TX_BUSY; + } + + if (!t4vf_is_eth_imm(skb) && + unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) { + /* We need to map the skb into PCI DMA space (because it can't + * be in-lined directly into the Work Request) and the mapping + * operation failed. Record the error and drop the packet. + */ + txq->mapping_err++; + goto out_free; + } + + wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); + if (unlikely(credits < ETHTXQ_STOP_THRES)) { + /* After we're done injecting the Work Request for this + * packet, we'll be below our "stop threshold" so stop the TX + * Queue now and schedule a request for an SGE Egress Queue + * Update message. The queue will get started later on when + * the firmware processes this Work Request and sends us an + * Egress Queue Status Update message indicating that space + * has opened up. + */ + eth_txq_stop(txq); + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; + } + + /* Start filling in our Work Request. Note that we do _not_ handle + * the WR Header wrapping around the TX Descriptor Ring. If our + * maximum header size ever exceeds one TX Descriptor, we'll need to + * do something else here. + */ + WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); + wr = (void *)&txq->q.desc[txq->q.pidx]; + wr->equiq_to_len16 = cpu_to_be32(wr_mid); + wr->r3[0] = cpu_to_be32(0); + wr->r3[1] = cpu_to_be32(0); + skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); + end = (u64 *)wr + flits; + + /* If this is a Large Send Offload packet we'll put in an LSO CPL + * message with an encapsulated TX Packet CPL message. Otherwise we + * just use a TX Packet CPL message. + */ + ssi = skb_shinfo(skb); + if (ssi->gso_size) { + struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); + bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; + int l3hdr_len = skb_network_header_len(skb); + int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; + + wr->op_immdlen = + cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN_V(sizeof(*lso) + + sizeof(*cpl))); + /* Fill in the LSO CPL message. */ + lso->lso_ctrl = + cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) | + LSO_FIRST_SLICE_F | + LSO_LAST_SLICE_F | + LSO_IPV6_V(v6) | + LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | + LSO_IPHDR_LEN_V(l3hdr_len / 4) | + LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); + lso->ipid_ofst = cpu_to_be16(0); + lso->mss = cpu_to_be16(ssi->gso_size); + lso->seqno_offset = cpu_to_be32(0); + if (is_t4(adapter->params.chip)) + lso->len = cpu_to_be32(skb->len); + else + lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len)); + + /* Set up TX Packet CPL pointer, control word and perform + * accounting. + */ + cpl = (void *)(lso + 1); + + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); + else + cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); + + cntrl |= TXPKT_CSUM_TYPE_V(v6 ? + TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | + TXPKT_IPHDR_LEN_V(l3hdr_len); + txq->tso++; + txq->tx_cso += ssi->gso_segs; + } else { + int len; + + len = (t4vf_is_eth_imm(skb) + ? skb->len + sizeof(*cpl) + : sizeof(*cpl)); + wr->op_immdlen = + cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN_V(len)); + + /* Set up TX Packet CPL pointer, control word and perform + * accounting. 
+ */ + cpl = (void *)(wr + 1); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + cntrl = hwcsum(adapter->params.chip, skb) | + TXPKT_IPCSUM_DIS_F; + txq->tx_cso++; + } else { + cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; + } + } + + /* If there's a VLAN tag present, add that to the list of things to + * do in this Work Request. + */ + if (skb_vlan_tag_present(skb)) { + txq->vlan_ins++; + cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); + } + + /* Fill in the TX Packet CPL message header. */ + cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | + TXPKT_INTF_V(pi->port_id) | + TXPKT_PF_V(0)); + cpl->pack = cpu_to_be16(0); + cpl->len = cpu_to_be16(skb->len); + cpl->ctrl1 = cpu_to_be64(cntrl); + + /* Fill in the body of the TX Packet CPL message with either in-lined + * data or a Scatter/Gather List. + */ + if (t4vf_is_eth_imm(skb)) { + /* In-line the packet's data and free the skb since we don't + * need it any longer. + */ + cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1); + dev_consume_skb_any(skb); + } else { + /* Write the skb's Scatter/Gather list into the TX Packet CPL + * message and retain a pointer to the skb so we can free it + * later when its DMA completes. (We store the skb pointer + * in the Software Descriptor corresponding to the last TX + * Descriptor used by the Work Request.) + * + * The retained skb will be freed when the corresponding TX + * Descriptors are reclaimed after their DMAs complete. + * However, this could take quite a while since, in general, + * the hardware is set up to be lazy about sending DMA + * completion notifications to us and we mostly perform TX + * reclaims in the transmit routine. + * + * This is good for performamce but means that we rely on new + * TX packets arriving to run the destructors of completed + * packets, which open up space in their sockets' send queues. + * Sometimes we do not get such new packets causing TX to + * stall. A single UDP transmitter is a good example of this + * situation. We have a clean up timer that periodically + * reclaims completed packets but it doesn't run often enough + * (nor do we want it to) to prevent lengthy stalls. A + * solution to this problem is to run the destructor early, + * after the packet is queued but before it's DMAd. A con is + * that we lie to socket memory accounting, but the amount of + * extra memory is reasonable (limited by the number of TX + * descriptors), the packets do actually get freed quickly by + * new packets almost always, and for protocols like TCP that + * wait for acks to really free up the data the extra memory + * is even less. On the positive side we run the destructors + * on the sending CPU rather than on a potentially different + * completing CPU, usually a good thing. + * + * Run the destructor before telling the DMA engine about the + * packet to make sure it doesn't complete and get freed + * prematurely. + */ + struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1); + struct sge_txq *tq = &txq->q; + int last_desc; + + /* If the Work Request header was an exact multiple of our TX + * Descriptor length, then it's possible that the starting SGL + * pointer lines up exactly with the end of our TX Descriptor + * ring. If that's the case, wrap around to the beginning + * here ... 
+ */ + if (unlikely((void *)sgl == (void *)tq->stat)) { + sgl = (void *)tq->desc; + end = (void *)((void *)tq->desc + + ((void *)end - (void *)tq->stat)); + } + + cxgb4_write_sgl(skb, tq, sgl, end, 0, addr); + skb_orphan(skb); + + last_desc = tq->pidx + ndesc - 1; + if (last_desc >= tq->size) + last_desc -= tq->size; + tq->sdesc[last_desc].skb = skb; + tq->sdesc[last_desc].sgl = sgl; + } + + /* Advance our internal TX Queue state, tell the hardware about + * the new TX descriptors and return success. + */ + txq_advance(&txq->q, ndesc); + + cxgb4_ring_tx_db(adapter, &txq->q, ndesc); + return NETDEV_TX_OK; + +out_free: + /* An error of some sort happened. Free the TX skb and tell the + * OS that we've "dealt" with the packet ... + */ + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + + if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM)) + return cxgb4_vf_eth_xmit(skb, dev); + + return cxgb4_eth_xmit(skb, dev); +} + /** * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs * @q: the SGE control Tx queue diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index c7f8d0441278..e3adf435913e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -188,6 +188,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */ CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */ CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50ae), /* Custom T540-XL-SO */ /* T6 adapters: */ diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 6d7404f66f84..ce1f04fdbf70 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -2435,7 +2435,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) port->mac_addr[0], port->mac_addr[1], port->mac_addr[2]); dev_info(dev, "using a random ethernet address\n"); - random_ether_addr(netdev->dev_addr); + eth_random_addr(netdev->dev_addr); } gmac_write_mac_address(netdev); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index ab02057ac730..65a22cd9aef2 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -1171,7 +1171,7 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq, buf_prefix_content.priv_data_size = buf_layout->priv_data_size; buf_prefix_content.pass_prs_result = true; buf_prefix_content.pass_hash_result = true; - buf_prefix_content.pass_time_stamp = false; + buf_prefix_content.pass_time_stamp = true; buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT; params.specific_params.non_rx_params.err_fqid = errq->fqid; @@ -1213,7 +1213,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps, buf_prefix_content.priv_data_size = buf_layout->priv_data_size; buf_prefix_content.pass_prs_result = true; buf_prefix_content.pass_hash_result = true; - buf_prefix_content.pass_time_stamp = false; + buf_prefix_content.pass_time_stamp = true; buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT; rx_p = ¶ms.specific_params.rx_params; @@ -1610,14 +1610,28 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, { const enum dma_data_direction dma_dir = DMA_TO_DEVICE; 
struct device *dev = priv->net_dev->dev.parent; + struct skb_shared_hwtstamps shhwtstamps; dma_addr_t addr = qm_fd_addr(fd); const struct qm_sg_entry *sgt; struct sk_buff **skbh, *skb; int nr_frags, i; + u64 ns; skbh = (struct sk_buff **)phys_to_virt(addr); skb = *skbh; + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + + if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh, + &ns)) { + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &shhwtstamps); + } else { + dev_warn(dev, "fman_port_get_tstamp failed!\n"); + } + } + if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { nr_frags = skb_shinfo(skb)->nr_frags; dma_unmap_single(dev, addr, @@ -2087,6 +2101,11 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) if (unlikely(err < 0)) goto skb_to_fd_failed; + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD); + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + } + if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0)) return NETDEV_TX_OK; @@ -2228,6 +2247,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, struct qman_fq *fq, const struct qm_dqrr_entry *dq) { + struct skb_shared_hwtstamps *shhwtstamps; struct rtnl_link_stats64 *percpu_stats; struct dpaa_percpu_priv *percpu_priv; const struct qm_fd *fd = &dq->fd; @@ -2241,6 +2261,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, struct sk_buff *skb; int *count_ptr; void *vaddr; + u64 ns; fd_status = be32_to_cpu(fd->status); fd_format = qm_fd_get_format(fd); @@ -2305,6 +2326,16 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, if (!skb) return qman_cb_dqrr_consume; + if (priv->rx_tstamp) { + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + + if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns)) + shhwtstamps->hwtstamp = ns_to_ktime(ns); + else + dev_warn(net_dev->dev.parent, "fman_port_get_tstamp failed!\n"); + } + skb->protocol = eth_type_trans(skb, net_dev); if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use && @@ -2524,11 +2555,58 @@ static int dpaa_eth_stop(struct net_device *net_dev) return err; } +static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct dpaa_priv *priv = netdev_priv(dev); + struct hwtstamp_config config; + + if (copy_from_user(&config, rq->ifr_data, sizeof(config))) + return -EFAULT; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + /* Couldn't disable rx/tx timestamping separately. + * Do nothing here. + */ + priv->tx_tstamp = false; + break; + case HWTSTAMP_TX_ON: + priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true); + priv->tx_tstamp = true; + break; + default: + return -ERANGE; + } + + if (config.rx_filter == HWTSTAMP_FILTER_NONE) { + /* Couldn't disable rx/tx timestamping separately. + * Do nothing here. + */ + priv->rx_tstamp = false; + } else { + priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true); + priv->rx_tstamp = true; + /* TS is set for all frame types, not only those requested */ + config.rx_filter = HWTSTAMP_FILTER_ALL; + } + + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 
+ -EFAULT : 0; +} + static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) { - if (!net_dev->phydev) - return -EINVAL; - return phy_mii_ioctl(net_dev->phydev, rq, cmd); + int ret = -EINVAL; + + if (cmd == SIOCGMIIREG) { + if (net_dev->phydev) + return phy_mii_ioctl(net_dev->phydev, rq, cmd); + } + + if (cmd == SIOCSHWTSTAMP) + return dpaa_ts_ioctl(net_dev, rq, cmd); + + return ret; } static const struct net_device_ops dpaa_ops = { diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h index bd9422082f83..af320f83c742 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h @@ -182,6 +182,9 @@ struct dpaa_priv { struct dpaa_buffer_layout buf_layout[2]; u16 rx_headroom; + + bool tx_tstamp; /* Tx timestamping enabled */ + bool rx_tstamp; /* Rx timestamping enabled */ }; /* from dpaa_ethtool.c */ diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 2f933b6b2f4e..3184c8f7cdd0 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -32,6 +32,9 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/string.h> +#include <linux/of_platform.h> +#include <linux/net_tstamp.h> +#include <linux/fsl/ptp_qoriq.h> #include "dpaa_eth.h" #include "mac.h" @@ -515,6 +518,41 @@ static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return ret; } +static int dpaa_get_ts_info(struct net_device *net_dev, + struct ethtool_ts_info *info) +{ + struct device *dev = net_dev->dev.parent; + struct device_node *mac_node = dev->of_node; + struct device_node *fman_node = NULL, *ptp_node = NULL; + struct platform_device *ptp_dev = NULL; + struct qoriq_ptp *ptp = NULL; + + info->phc_index = -1; + + fman_node = of_get_parent(mac_node); + if (fman_node) + ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0); + + if (ptp_node) + ptp_dev = of_find_device_by_node(ptp_node); + + if (ptp_dev) + ptp = platform_get_drvdata(ptp_dev); + + if (ptp) + info->phc_index = ptp->phc_index; + + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + + return 0; +} + const struct ethtool_ops dpaa_ethtool_ops = { .get_drvinfo = dpaa_get_drvinfo, .get_msglevel = dpaa_get_msglevel, @@ -530,4 +568,5 @@ const struct ethtool_ops dpaa_ethtool_ops = { .set_link_ksettings = dpaa_set_link_ksettings, .get_rxnfc = dpaa_get_rxnfc, .set_rxnfc = dpaa_set_rxnfc, + .get_ts_info = dpaa_get_ts_info, }; diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index 9530405030a7..c415ac67cb7b 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -2801,7 +2801,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev) of_node_put(muram_node); of_node_put(fm_node); - err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman); + err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED, + "fman", fman); if (err < 0) { dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n", __func__, irq, err); diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h index 
bfa02e0014ae..935c317fa696 100644 --- a/drivers/net/ethernet/freescale/fman/fman.h +++ b/drivers/net/ethernet/freescale/fman/fman.h @@ -41,6 +41,7 @@ /* Frame queue Context Override */ #define FM_FD_CMD_FCO 0x80000000 #define FM_FD_CMD_RPD 0x40000000 /* Read Prepended Data */ +#define FM_FD_CMD_UPD 0x20000000 /* Update Prepended Data */ #define FM_FD_CMD_DTC 0x10000000 /* Do L4 Checksum */ /* TX-Port: Unsupported Format */ diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 57b1e2b47c0a..1ca543ac8f2c 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -123,11 +123,13 @@ #define DTSEC_ECNTRL_R100M 0x00000008 #define DTSEC_ECNTRL_QSGMIIM 0x00000001 +#define TCTRL_TTSE 0x00000040 #define TCTRL_GTS 0x00000020 #define RCTRL_PAL_MASK 0x001f0000 #define RCTRL_PAL_SHIFT 16 #define RCTRL_GHTX 0x00000400 +#define RCTRL_RTSE 0x00000040 #define RCTRL_GRS 0x00000020 #define RCTRL_MPROM 0x00000008 #define RCTRL_RSF 0x00000004 @@ -1136,6 +1138,31 @@ int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable) return 0; } +int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 rctrl, tctrl; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + rctrl = ioread32be(®s->rctrl); + tctrl = ioread32be(®s->tctrl); + + if (enable) { + rctrl |= RCTRL_RTSE; + tctrl |= TCTRL_TTSE; + } else { + rctrl &= ~RCTRL_RTSE; + tctrl &= ~TCTRL_TTSE; + } + + iowrite32be(rctrl, ®s->rctrl); + iowrite32be(tctrl, ®s->tctrl); + + return 0; +} + int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) { struct dtsec_regs __iomem *regs = dtsec->regs; diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h index 1a689adf5a22..5149d96ec2c1 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h @@ -56,5 +56,6 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr); int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr); int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version); int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable); +int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable); #endif /* __DTSEC_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 446a97b792e3..bc6eb30aa20f 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -964,6 +964,11 @@ int memac_set_allmulti(struct fman_mac *memac, bool enable) return 0; } +int memac_set_tstamp(struct fman_mac *memac, bool enable) +{ + return 0; /* Always enabled. 
*/ +} + int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) { struct memac_regs __iomem *regs = memac->regs; diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h index b5a50338ed9a..b2c671ec0ce7 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.h +++ b/drivers/net/ethernet/freescale/fman/fman_memac.h @@ -58,5 +58,6 @@ int memac_set_exception(struct fman_mac *memac, int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr); int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr); int memac_set_allmulti(struct fman_mac *memac, bool enable); +int memac_set_tstamp(struct fman_mac *memac, bool enable); #endif /* __MEMAC_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index ecbf6187e13a..ee82ee1384eb 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1739,6 +1739,18 @@ int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset) } EXPORT_SYMBOL(fman_port_get_hash_result_offset); +int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp) +{ + if (port->buffer_offsets.time_stamp_offset == ILLEGAL_BASE) + return -EINVAL; + + *tstamp = be64_to_cpu(*(__be64 *)(data + + port->buffer_offsets.time_stamp_offset)); + + return 0; +} +EXPORT_SYMBOL(fman_port_get_tstamp); + static int fman_port_probe(struct platform_device *of_dev) { struct fman_port *port; diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h index e86ca6a34e4e..9dbb69f40121 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.h +++ b/drivers/net/ethernet/freescale/fman/fman_port.h @@ -153,6 +153,8 @@ u32 fman_port_get_qman_channel_id(struct fman_port *port); int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset); +int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp); + struct fman_port *fman_port_bind(struct device *dev); #endif /* __FMAN_PORT_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index 284735d4ebe9..40705938eecc 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -44,6 +44,7 @@ #define TGEC_TX_IPG_LENGTH_MASK 0x000003ff /* Command and Configuration Register (COMMAND_CONFIG) */ +#define CMD_CFG_EN_TIMESTAMP 0x00100000 #define CMD_CFG_NO_LEN_CHK 0x00020000 #define CMD_CFG_PAUSE_IGNORE 0x00000100 #define CMF_CFG_CRC_FWD 0x00000040 @@ -588,6 +589,26 @@ int tgec_set_allmulti(struct fman_mac *tgec, bool enable) return 0; } +int tgec_set_tstamp(struct fman_mac *tgec, bool enable) +{ + struct tgec_regs __iomem *regs = tgec->regs; + u32 tmp; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + tmp = ioread32be(®s->command_config); + + if (enable) + tmp |= CMD_CFG_EN_TIMESTAMP; + else + tmp &= ~CMD_CFG_EN_TIMESTAMP; + + iowrite32be(tmp, ®s->command_config); + + return 0; +} + int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) { struct tgec_regs __iomem *regs = tgec->regs; diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h index cbbd3b422a98..3bfd1062b386 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.h +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h @@ -52,5 +52,6 @@ int 
tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr); int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr); int tgec_get_version(struct fman_mac *tgec, u32 *mac_version); int tgec_set_allmulti(struct fman_mac *tgec, bool enable); +int tgec_set_tstamp(struct fman_mac *tgec, bool enable); #endif /* __TGEC_H */ diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 7b5b95f52c09..a847b9c3b31a 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -471,6 +471,7 @@ static void setup_dtsec(struct mac_device *mac_dev) mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames; mac_dev->set_exception = dtsec_set_exception; mac_dev->set_allmulti = dtsec_set_allmulti; + mac_dev->set_tstamp = dtsec_set_tstamp; mac_dev->set_multi = set_multi; mac_dev->start = start; mac_dev->stop = stop; @@ -490,6 +491,7 @@ static void setup_tgec(struct mac_device *mac_dev) mac_dev->set_rx_pause = tgec_accept_rx_pause_frames; mac_dev->set_exception = tgec_set_exception; mac_dev->set_allmulti = tgec_set_allmulti; + mac_dev->set_tstamp = tgec_set_tstamp; mac_dev->set_multi = set_multi; mac_dev->start = start; mac_dev->stop = stop; @@ -509,6 +511,7 @@ static void setup_memac(struct mac_device *mac_dev) mac_dev->set_rx_pause = memac_accept_rx_pause_frames; mac_dev->set_exception = memac_set_exception; mac_dev->set_allmulti = memac_set_allmulti; + mac_dev->set_tstamp = memac_set_tstamp; mac_dev->set_multi = set_multi; mac_dev->start = start; mac_dev->stop = stop; diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h index b520cec120ee..824a81a9f350 100644 --- a/drivers/net/ethernet/freescale/fman/mac.h +++ b/drivers/net/ethernet/freescale/fman/mac.h @@ -68,6 +68,7 @@ struct mac_device { int (*set_promisc)(struct fman_mac *mac_dev, bool enable); int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr); int (*set_allmulti)(struct fman_mac *mac_dev, bool enable); + int (*set_tstamp)(struct fman_mac *mac_dev, bool enable); int (*set_multi)(struct net_device *net_dev, struct mac_device *mac_dev); int (*set_rx_pause)(struct fman_mac *mac_dev, bool en); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 42fca3208c0b..22a817da861e 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3096,6 +3096,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) ugeth_vdbg("%s: IN", __func__); + netdev_sent_queue(dev, skb->len); spin_lock_irqsave(&ugeth->lock, flags); dev->stats.tx_bytes += skb->len; @@ -3240,6 +3241,8 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) { /* Start from the next BD that should be filled */ struct ucc_geth_private *ugeth = netdev_priv(dev); + unsigned int bytes_sent = 0; + int howmany = 0; u8 __iomem *bd; /* BD pointer */ u32 bd_status; @@ -3257,7 +3260,8 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]; if (!skb) break; - + howmany++; + bytes_sent += skb->len; dev->stats.tx_packets++; dev_consume_skb_any(skb); @@ -3279,6 +3283,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) bd_status = in_be32((u32 __iomem *)bd); } ugeth->confBd[txQ] = bd; + netdev_completed_queue(dev, howmany, bytes_sent); return 0; } @@ -3479,6 +3484,7 @@ static int ucc_geth_open(struct net_device *dev) phy_start(ugeth->phydev); 
napi_enable(&ugeth->napi); + netdev_reset_queue(dev); netif_start_queue(dev); device_set_wakeup_capable(&dev->dev, @@ -3509,6 +3515,7 @@ static int ucc_geth_close(struct net_device *dev) free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); netif_stop_queue(dev); + netdev_reset_queue(dev); return 0; } diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index fb1a7251f45d..25152715396b 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -85,10 +85,12 @@ config HNS3 drivers(like ODP)to register with HNAE devices and their associated operations. +if HNS3 + config HNS3_HCLGE tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support" + default m depends on PCI_MSI - depends on HNS3 ---help--- This selects the HNS3_HCLGE network acceleration engine & its hardware compatibility layer. The engine would be used in Hisilicon hip08 family of @@ -97,16 +99,15 @@ config HNS3_HCLGE config HNS3_DCB bool "Hisilicon HNS3 Data Center Bridge Support" default n - depends on HNS3 && HNS3_HCLGE && DCB + depends on HNS3_HCLGE && DCB ---help--- Say Y here if you want to use Data Center Bridging (DCB) in the HNS3 driver. If unsure, say N. config HNS3_HCLGEVF - tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support" - depends on PCI_MSI - depends on HNS3 + tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support" + depends on PCI_MSI depends on HNS3_HCLGE ---help--- This selects the HNS3 VF drivers network acceleration engine & its hardware @@ -115,11 +116,13 @@ config HNS3_HCLGEVF config HNS3_ENET tristate "Hisilicon HNS3 Ethernet Device Support" + default m depends on 64BIT && PCI - depends on HNS3 ---help--- This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08 family of SoCs. This module depends upon HNAE3 driver to access the HNAE3 devices and their associated operations. 
+endif #HNS3 + endif # NET_VENDOR_HISILICON diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 340e28211135..14374a856d30 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -904,7 +904,7 @@ static int hip04_mac_probe(struct platform_device *pdev) hip04_config_port(ndev, SPEED_100, DUPLEX_FULL); hip04_config_fifo(priv); - random_ether_addr(ndev->dev_addr); + eth_random_addr(ndev->dev_addr); hip04_update_mac_address(ndev); ret = hip04_alloc_ring(ndev, d); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 9d79dad2c6aa..0762ad18fdcc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -8,7 +8,6 @@ */ #include <linux/list.h> -#include <linux/slab.h> #include <linux/spinlock.h> #include "hnae3.h" @@ -41,13 +40,13 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client, { switch (client->type) { case HNAE3_CLIENT_KNIC: - hnae_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited); + hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited); break; case HNAE3_CLIENT_UNIC: - hnae_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited); + hnae3_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited); break; case HNAE3_CLIENT_ROCE: - hnae_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited); + hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited); break; default: break; @@ -61,16 +60,16 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client, switch (client->type) { case HNAE3_CLIENT_KNIC: - inited = hnae_get_bit(ae_dev->flag, + inited = hnae3_get_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B); break; case HNAE3_CLIENT_UNIC: - inited = hnae_get_bit(ae_dev->flag, + inited = hnae3_get_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B); break; case HNAE3_CLIENT_ROCE: - inited = hnae_get_bit(ae_dev->flag, - HNAE3_ROCE_CLIENT_INITED_B); + inited = hnae3_get_bit(ae_dev->flag, + HNAE3_ROCE_CLIENT_INITED_B); break; default: break; @@ -86,7 +85,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client, /* check if this client matches the type of ae_dev */ if (!(hnae3_client_match(client->type, ae_dev->dev_type) && - hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) { + hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) { return 0; } @@ -95,7 +94,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client, ret = ae_dev->ops->init_client_instance(client, ae_dev); if (ret) { dev_err(&ae_dev->pdev->dev, - "fail to instantiate client\n"); + "fail to instantiate client, ret = %d\n", ret); return ret; } @@ -135,7 +134,8 @@ int hnae3_register_client(struct hnae3_client *client) ret = hnae3_match_n_instantiate(client, ae_dev, true); if (ret) dev_err(&ae_dev->pdev->dev, - "match and instantiation failed for port\n"); + "match and instantiation failed for port, ret = %d\n", + ret); } exit: @@ -185,11 +185,12 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) ae_dev->ops = ae_algo->ops; ret = ae_algo->ops->init_ae_dev(ae_dev); if (ret) { - dev_err(&ae_dev->pdev->dev, "init ae_dev error.\n"); + dev_err(&ae_dev->pdev->dev, + "init ae_dev error, ret = %d\n", ret); continue; } - hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); /* check the client list for the match with this ae_dev type and * initialize the figure out client instance @@ -198,7 +199,8 @@ void 
hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) ret = hnae3_match_n_instantiate(client, ae_dev, true); if (ret) dev_err(&ae_dev->pdev->dev, - "match and instantiation failed\n"); + "match and instantiation failed, ret = %d\n", + ret); } } @@ -218,7 +220,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_dev */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { - if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) + if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) continue; id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); @@ -232,7 +234,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) hnae3_match_n_instantiate(client, ae_dev, false); ae_algo->ops->uninit_ae_dev(ae_dev); - hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); } list_del(&ae_algo->node); @@ -271,11 +273,12 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) /* ae_dev init should set flag */ ret = ae_dev->ops->init_ae_dev(ae_dev); if (ret) { - dev_err(&ae_dev->pdev->dev, "init ae_dev error\n"); + dev_err(&ae_dev->pdev->dev, + "init ae_dev error, ret = %d\n", ret); goto out_err; } - hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); break; } @@ -286,7 +289,8 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) ret = hnae3_match_n_instantiate(client, ae_dev, true); if (ret) dev_err(&ae_dev->pdev->dev, - "match and instantiation failed\n"); + "match and instantiation failed, ret = %d\n", + ret); } out_err: @@ -306,7 +310,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_algo */ list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) { - if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) + if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) continue; id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); @@ -317,7 +321,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) hnae3_match_n_instantiate(client, ae_dev, false); ae_algo->ops->uninit_ae_dev(ae_dev); - hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); } list_del(&ae_dev->node); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 8acb1d116a02..da806fdfbbe6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -62,10 +62,10 @@ BIT(HNAE3_DEV_SUPPORT_ROCE_B)) #define hnae3_dev_roce_supported(hdev) \ - hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B) + hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B) #define hnae3_dev_dcb_supported(hdev) \ - hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) + hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) #define ring_ptr_move_fw(ring, p) \ ((ring)->p = ((ring)->p + 1) % (ring)->desc_num) @@ -167,7 +167,6 @@ struct hnae3_client_ops { #define HNAE3_CLIENT_NAME_LENGTH 16 struct hnae3_client { char name[HNAE3_CLIENT_NAME_LENGTH]; - u16 version; unsigned long state; enum hnae3_client_type type; const struct hnae3_client_ops *ops; @@ -436,7 +435,6 @@ struct hnae3_dcb_ops { struct hnae3_ae_algo { const struct hnae3_ae_ops *ops; struct list_head node; - char name[HNAE3_CLASS_NAME_SIZE]; const struct pci_device_id *pdev_id_table; }; @@ -509,17 +507,17 @@ struct hnae3_handle { u32 numa_node_mask; /* for multi-chip support 
*/ }; -#define hnae_set_field(origin, mask, shift, val) \ +#define hnae3_set_field(origin, mask, shift, val) \ do { \ (origin) &= (~(mask)); \ (origin) |= ((val) << (shift)) & (mask); \ } while (0) -#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift)) +#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift)) -#define hnae_set_bit(origin, shift, val) \ - hnae_set_field((origin), (0x1 << (shift)), (shift), (val)) -#define hnae_get_bit(origin, shift) \ - hnae_get_field((origin), (0x1 << (shift)), (shift)) +#define hnae3_set_bit(origin, shift, val) \ + hnae3_set_field((origin), (0x1 << (shift)), (shift), (val)) +#define hnae3_get_bit(origin, shift) \ + hnae3_get_field((origin), (0x1 << (shift)), (shift)) void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 25a73bb2e642..7e34d5fff3ae 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -493,8 +493,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, /* find the txbd field values */ *paylen = skb->len - hdr_len; - hnae_set_bit(*type_cs_vlan_tso, - HNS3_TXD_TSO_B, 1); + hnae3_set_bit(*type_cs_vlan_tso, + HNS3_TXD_TSO_B, 1); /* get MSS for TSO */ *mss = skb_shinfo(skb)->gso_size; @@ -586,21 +586,21 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute L2 header size for normal packet, defined in 2 Bytes */ l2_len = l3.hdr - skb->data; - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, l2_len >> 1); /* tunnel packet*/ if (skb->encapsulation) { /* compute OL2 header size, defined in 2 Bytes */ ol2_len = l2_len; - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, ol2_len >> 1); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, ol2_len >> 1); /* compute OL3 header size, defined in 4 Bytes */ ol3_len = l4.hdr - l3.hdr; - hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, - HNS3_TXD_L3LEN_S, ol3_len >> 2); + hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, + HNS3_TXD_L3LEN_S, ol3_len >> 2); /* MAC in UDP, MAC in GRE (0x6558)*/ if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { @@ -609,16 +609,17 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute OL4 header size, defined in 4 Bytes. */ ol4_len = l2_hdr - l4.hdr; - hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, ol4_len >> 2); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, + ol4_len >> 2); /* switch IP header ptr from outer to inner header */ l3.hdr = skb_inner_network_header(skb); /* compute inner l2 header size, defined in 2 Bytes. */ l2_len = l3.hdr - l2_hdr; - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, l2_len >> 1); } else { /* skb packet types not supported by hardware, * txbd len fild doesn't be filled. 
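/* Reviewer aside -- illustrative, not part of the patch. The renamed
 * helpers above are plain mask/shift accessors; copying their bodies into
 * a standalone program shows the semantics. The FIELD_M/FIELD_S values
 * are made up for the example.
 */
#include <stdio.h>

#define hnae3_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= (~(mask)); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)
#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
#define hnae3_set_bit(origin, shift, val) \
	hnae3_set_field((origin), (0x1 << (shift)), (shift), (val))
#define hnae3_get_bit(origin, shift) \
	hnae3_get_field((origin), (0x1 << (shift)), (shift))

int main(void)
{
	unsigned int reg = 0xffffffff;
	const unsigned int FIELD_M = 0xf00;	/* bits 8..11, invented */
	const unsigned int FIELD_S = 8;

	hnae3_set_field(reg, FIELD_M, FIELD_S, 0x5);
	hnae3_set_bit(reg, 0, 0);
	printf("reg=0x%x field=0x%x bit0=%u\n", reg,
	       hnae3_get_field(reg, FIELD_M, FIELD_S),
	       hnae3_get_bit(reg, 0));
	/* prints: reg=0xfffff5fe field=0x5 bit0=0 */
	return 0;
}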
@@ -634,22 +635,24 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute inner(/normal) L3 header size, defined in 4 Bytes */ l3_len = l4.hdr - l3.hdr; - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, - HNS3_TXD_L3LEN_S, l3_len >> 2); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, + HNS3_TXD_L3LEN_S, l3_len >> 2); /* compute inner(/normal) L4 header size, defined in 4 Bytes */ switch (l4_proto) { case IPPROTO_TCP: - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, l4.tcp->doff); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, l4.tcp->doff); break; case IPPROTO_SCTP: - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2)); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, + (sizeof(struct sctphdr) >> 2)); break; case IPPROTO_UDP: - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2)); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, + (sizeof(struct udphdr) >> 2)); break; default: /* skb packet types not supported by hardware, @@ -703,32 +706,34 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, /* define outer network header type.*/ if (skb->protocol == htons(ETH_P_IP)) { if (skb_is_gso(skb)) - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_CSUM); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_CSUM); else - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_NO_CSUM); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_NO_CSUM); } else if (skb->protocol == htons(ETH_P_IPV6)) { - hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); + hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); } /* define tunnel type(OL4).*/ switch (l4_proto) { case IPPROTO_UDP: - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_MAC_IN_UDP); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_MAC_IN_UDP); break; case IPPROTO_GRE: - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_NVGRE); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_NVGRE); break; default: /* drop the skb tunnel packet if hardware don't support, @@ -749,43 +754,43 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, } if (l3.v4->version == 4) { - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV4); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, + HNS3_TXD_L3T_S, HNS3_L3T_IPV4); /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. 
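/* Reviewer aside, illustrative only: the TXBD length fields in the hunks
 * above are written in hardware units, not bytes -- L2LEN in 2-byte units
 * (hence ">> 1"), L3LEN/L4LEN in 4-byte units (hence ">> 2"; tcp->doff is
 * already a count of 4-byte words, so it is used as-is). A standalone
 * check of the encodings:
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int l2_bytes = 14;		/* Ethernet header   */
	unsigned int l3_bytes = 20;		/* IPv4, no options  */
	unsigned int l4_bytes = 20;		/* TCP, no options   */

	printf("L2LEN=%u L3LEN=%u L4LEN=%u\n",
	       l2_bytes >> 1, l3_bytes >> 2, l4_bytes >> 2);
	assert((l2_bytes >> 1) == 7);
	assert((l3_bytes >> 2) == 5 && (l4_bytes >> 2) == 5);
	return 0;
}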
*/ if (skb_is_gso(skb)) - hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); + hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); - hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); } else if (l3.v6->version == 6) { - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV6); - hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, + HNS3_TXD_L3T_S, HNS3_L3T_IPV6); + hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); } switch (l4_proto) { case IPPROTO_TCP: - hnae_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_TCP); + hnae3_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_TCP); break; case IPPROTO_UDP: if (hns3_tunnel_csum_bug(skb)) break; - hnae_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_UDP); + hnae3_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_UDP); break; case IPPROTO_SCTP: - hnae_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_SCTP); + hnae3_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_SCTP); break; default: /* drop the skb tunnel packet if hardware don't support, @@ -807,11 +812,11 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) { /* Config bd buffer end */ - hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, - HNS3_TXD_BDTYPE_S, 0); - hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); - hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); - hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); + hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, + HNS3_TXD_BDTYPE_S, 0); + hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); + hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); + hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); } static int hns3_fill_desc_vtags(struct sk_buff *skb, @@ -844,10 +849,10 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, * and use inner_vtag in one tag case. 
*/ if (skb->protocol == htons(ETH_P_8021Q)) { - hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); + hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); *out_vtag = vlan_tag; } else { - hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); + hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); *inner_vtag = vlan_tag; } } else if (skb->protocol == htons(ETH_P_8021Q)) { @@ -1135,7 +1140,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) wmb(); /* Commit all data before submit */ - hnae_queue_xmit(ring->tqp, buf_num); + hnae3_queue_xmit(ring->tqp, buf_num); return NETDEV_TX_OK; @@ -1703,7 +1708,7 @@ static void hns3_set_default_feature(struct net_device *netdev) static int hns3_alloc_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) { - unsigned int order = hnae_page_order(ring); + unsigned int order = hnae3_page_order(ring); struct page *p; p = dev_alloc_pages(order); @@ -1714,7 +1719,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring, cb->page_offset = 0; cb->reuse_flag = 0; cb->buf = page_address(p); - cb->length = hnae_page_size(ring); + cb->length = hnae3_page_size(ring); cb->type = DESC_TYPE_PAGE; return 0; @@ -1780,33 +1785,27 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring) /* free desc along with its attached buffer */ static void hns3_free_desc(struct hns3_enet_ring *ring) { + int size = ring->desc_num * sizeof(ring->desc[0]); + hns3_free_buffers(ring); - dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, - ring->desc_num * sizeof(ring->desc[0]), - DMA_BIDIRECTIONAL); - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; + if (ring->desc) { + dma_free_coherent(ring_to_dev(ring), size, + ring->desc, ring->desc_dma_addr); + ring->desc = NULL; + } } static int hns3_alloc_desc(struct hns3_enet_ring *ring) { int size = ring->desc_num * sizeof(ring->desc[0]); - ring->desc = kzalloc(size, GFP_KERNEL); + ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size, + &ring->desc_dma_addr, + GFP_KERNEL); if (!ring->desc) return -ENOMEM; - ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc, - size, DMA_BIDIRECTIONAL); - if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) { - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; - return -ENOMEM; - } - return 0; } @@ -1887,7 +1886,7 @@ static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, (*pkts) += (desc_cb->type == DESC_TYPE_SKB); (*bytes) += desc_cb->length; - /* desc_cb will be cleaned, after hnae_free_buffer_detach*/ + /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/ hns3_free_buffer_detach(ring, ring->next_to_clean); ring_ptr_move_fw(ring, next_to_clean); @@ -1917,7 +1916,7 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) if (is_ring_empty(ring) || head == ring->next_to_clean) return true; /* no data to poll */ - if (!is_valid_clean_head(ring, head)) { + if (unlikely(!is_valid_clean_head(ring, head))) { netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, ring->next_to_use, ring->next_to_clean); @@ -2016,15 +2015,15 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, bool twobufs; twobufs = ((PAGE_SIZE < 8192) && - hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048); + hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048); desc = &ring->desc[ring->next_to_clean]; size = le16_to_cpu(desc->rx.size); - truesize = hnae_buf_size(ring); + truesize = hnae3_buf_size(ring); if (!twobufs) - last_offset = hnae_page_size(ring) - hnae_buf_size(ring); + last_offset = 
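/* Reviewer aside, sketch only: the descriptor-ring hunks above
 * (hns3_free_desc/hns3_alloc_desc) converge on the coherent-DMA pattern,
 * dropping the separate map/unmap step and the dma_mapping_error() check
 * so the allocation is the single failure point. "struct my_ring" below
 * is a stand-in for hns3_enet_ring, not a real driver type.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct my_ring {
	void *desc;
	dma_addr_t desc_dma_addr;
	int desc_num;
	int desc_size;
};

static int my_ring_alloc(struct device *dev, struct my_ring *ring)
{
	int size = ring->desc_num * ring->desc_size;

	ring->desc = dma_zalloc_coherent(dev, size, &ring->desc_dma_addr,
					 GFP_KERNEL);
	return ring->desc ? 0 : -ENOMEM;
}

static void my_ring_free(struct device *dev, struct my_ring *ring)
{
	int size = ring->desc_num * ring->desc_size;

	if (ring->desc) {
		dma_free_coherent(dev, size, ring->desc,
				  ring->desc_dma_addr);
		ring->desc = NULL;
	}
}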
hnae3_page_size(ring) - hnae3_buf_size(ring); skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, size - pull_len, truesize); @@ -2076,13 +2075,13 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, return; /* check if hardware has done checksum */ - if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) + if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) return; - if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) || - hnae_get_bit(l234info, HNS3_RXD_L4E_B) || - hnae_get_bit(l234info, HNS3_RXD_OL3E_B) || - hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) { + if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) || + hnae3_get_bit(l234info, HNS3_RXD_L4E_B) || + hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) || + hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) { netdev_err(netdev, "L3/L4 error pkt\n"); u64_stats_update_begin(&ring->syncp); ring->stats.l3l4_csum_err++; @@ -2091,23 +2090,24 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, return; } - l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M, - HNS3_RXD_L3ID_S); - l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M, - HNS3_RXD_L4ID_S); + l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, + HNS3_RXD_L3ID_S); + l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, + HNS3_RXD_L4ID_S); - ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); + ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M, + HNS3_RXD_OL4ID_S); switch (ol4_type) { case HNS3_OL4_TYPE_MAC_IN_UDP: case HNS3_OL4_TYPE_NVGRE: skb->csum_level = 1; case HNS3_OL4_TYPE_NO_TUN: /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ - if (l3_type == HNS3_L3_TYPE_IPV4 || - (l3_type == HNS3_L3_TYPE_IPV6 && - (l4_type == HNS3_L4_TYPE_UDP || - l4_type == HNS3_L4_TYPE_TCP || - l4_type == HNS3_L4_TYPE_SCTP))) + if ((l3_type == HNS3_L3_TYPE_IPV4 || + l3_type == HNS3_L3_TYPE_IPV6) && + (l4_type == HNS3_L4_TYPE_UDP || + l4_type == HNS3_L4_TYPE_TCP || + l4_type == HNS3_L4_TYPE_SCTP)) skb->ip_summed = CHECKSUM_UNNECESSARY; break; } @@ -2135,8 +2135,8 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring, #define HNS3_STRP_OUTER_VLAN 0x1 #define HNS3_STRP_INNER_VLAN 0x2 - switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M, - HNS3_RXD_STRP_TAGP_S)) { + switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, + HNS3_RXD_STRP_TAGP_S)) { case HNS3_STRP_OUTER_VLAN: vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); break; @@ -2174,7 +2174,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, bd_base_info = le32_to_cpu(desc->rx.bd_base_info); /* Check valid BD */ - if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B)) + if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) return -EFAULT; va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; @@ -2229,7 +2229,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); ring_ptr_move_fw(ring, next_to_clean); - while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) { + while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) { desc = &ring->desc[ring->next_to_clean]; desc_cb = &ring->desc_cb[ring->next_to_clean]; bd_base_info = le32_to_cpu(desc->rx.bd_base_info); @@ -2257,7 +2257,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, vlan_tag); } - if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { + if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { netdev_err(netdev, "no valid bd,%016llx,%016llx\n", ((u64 *)desc)[0], ((u64 *)desc)[1]); 
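/* Reviewer aside: the checksum-condition rewrite in hns3_rx_checksum
 * above is a behavioural fix, not a rename -- the old expression marked
 * every IPv4 packet CHECKSUM_UNNECESSARY regardless of its L4 type,
 * while only IPv6 got the L4 check. (Note also that the
 * "skb->csum_level = 1" case still falls through to the next label
 * without an annotation.) A standalone truth-table comparison, assuming
 * the packet is either v4 or v6:
 */
#include <stdio.h>

int main(void)
{
	int v4, l4ok;

	for (v4 = 0; v4 <= 1; v4++)
		for (l4ok = 0; l4ok <= 1; l4ok++) {
			int v6 = !v4;
			int before = v4 || (v6 && l4ok);
			int after = (v4 || v6) && l4ok;

			printf("v4=%d l4ok=%d before=%d after=%d\n",
			       v4, l4ok, before, after);
		}
	return 0;	/* the two differ exactly at v4=1, l4ok=0 */
}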
u64_stats_update_begin(&ring->syncp); @@ -2269,7 +2269,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, } if (unlikely((!desc->rx.pkt_len) || - hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { + hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { netdev_err(netdev, "truncated pkt\n"); u64_stats_update_begin(&ring->syncp); ring->stats.err_pkt_len++; @@ -2279,7 +2279,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, return -EFAULT; } - if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) { + if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) { netdev_err(netdev, "L2 error pkt\n"); u64_stats_update_begin(&ring->syncp); ring->stats.l2_err++; @@ -2532,10 +2532,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, tx_ring = tqp_vector->tx_group.ring; if (tx_ring) { cur_chain->tqp_index = tx_ring->tqp->tqp_index; - hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); + hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_TX); + hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); cur_chain->next = NULL; @@ -2549,12 +2549,12 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, cur_chain->next = chain; chain->tqp_index = tx_ring->tqp->tqp_index; - hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - hnae_set_field(chain->int_gl_idx, - HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, - HNAE3_RING_GL_TX); + hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_TX); + hnae3_set_field(chain->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, + HNAE3_RING_GL_TX); cur_chain = chain; } @@ -2564,10 +2564,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, if (!tx_ring && rx_ring) { cur_chain->next = NULL; cur_chain->tqp_index = rx_ring->tqp->tqp_index; - hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); + hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_RX); + hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); rx_ring = rx_ring->next; } @@ -2579,10 +2579,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, cur_chain->next = chain; chain->tqp_index = rx_ring->tqp->tqp_index; - hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); + hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_RX); + hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); cur_chain = chain; @@ -2745,10 +2745,6 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) if (ret) return ret; - ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); - if (ret) - return ret; - hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { @@ -2809,7 +2805,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, ring->io_base = q->io_base; } - hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); + hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); ring->tqp = q; ring->desc = NULL; diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 3b083d5ae9ce..bf9aa02be994 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -499,7 +499,6 @@ struct hns3_enet_tqp_vector { u16 num_tqps; /* total number of tqps in TQP vector */ - cpumask_t affinity_mask; char name[HNAE3_INT_NAME_LEN]; /* when 0 should adjust interrupt coalesce parameter */ @@ -591,7 +590,7 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) #define hns3_write_dev(a, reg, value) \ hns3_write_reg((a)->io_base, (reg), (value)) -#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \ +#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \ (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG) #define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev) @@ -601,9 +600,9 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) #define tx_ring_data(priv, idx) ((priv)->ring_data[idx]) -#define hnae_buf_size(_ring) ((_ring)->buf_size) -#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring))) -#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring)) +#define hnae3_buf_size(_ring) ((_ring)->buf_size) +#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring))) +#define hnae3_page_size(_ring) (PAGE_SIZE << hnae3_page_order(_ring)) /* iterator for handling rings in ring group */ #define hns3_for_each_ring(pos, head) \ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index c36d64710fa6..82cf12a07dc0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -18,8 +18,7 @@ #include "hclge_main.h" #define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ) -#define hclge_ring_to_dma_dir(ring) (hclge_is_csq(ring) ? 
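/* Reviewer aside on the hclge_cmd_config_regs hunk below: the old
 * "(u32)((dma >> 31) >> 1)" is the same value as upper_32_bits(dma) --
 * the double shift only existed to avoid an undefined ">> 32" when
 * dma_addr_t is 32 bits wide. The new spelling says what it means.
 * Standalone check:
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma = 0x0000001234abcd00ULL;

	assert((uint32_t)((dma >> 31) >> 1) == (uint32_t)(dma >> 32));
	printf("high=0x%x low=0x%x\n",
	       (uint32_t)(dma >> 32), (uint32_t)dma);
	/* prints: high=0x12 low=0x34abcd00 */
	return 0;
}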
\ - DMA_TO_DEVICE : DMA_FROM_DEVICE) + #define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev) static int hclge_ring_space(struct hclge_cmq_ring *ring) @@ -46,31 +45,24 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring) { int size = ring->desc_num * sizeof(struct hclge_desc); - ring->desc = kzalloc(size, GFP_KERNEL); + ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), + size, &ring->desc_dma_addr, + GFP_KERNEL); if (!ring->desc) return -ENOMEM; - ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc, - size, DMA_BIDIRECTIONAL); - if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) { - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; - return -ENOMEM; - } - return 0; } static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring) { - dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr, - ring->desc_num * sizeof(ring->desc[0]), - DMA_BIDIRECTIONAL); + int size = ring->desc_num * sizeof(struct hclge_desc); - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; + if (ring->desc) { + dma_free_coherent(cmq_ring_to_dev(ring), size, + ring->desc, ring->desc_dma_addr); + ring->desc = NULL; + } } static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type) @@ -111,8 +103,6 @@ void hclge_cmd_setup_basic_desc(struct hclge_desc *desc, if (is_read) desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR); - else - desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); } static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) @@ -123,9 +113,9 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) if (ring->flag == HCLGE_TYPE_CSQ) { hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, - (u32)dma); + lower_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, - (u32)((dma >> 31) >> 1)); + upper_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) | HCLGE_NIC_CMQ_ENABLE); @@ -133,9 +123,9 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0); } else { hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, - (u32)dma); + lower_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, - (u32)((dma >> 31) >> 1)); + upper_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) | HCLGE_NIC_CMQ_ENABLE); @@ -152,33 +142,22 @@ static void hclge_cmd_init_regs(struct hclge_hw *hw) static int hclge_cmd_csq_clean(struct hclge_hw *hw) { - struct hclge_dev *hdev = (struct hclge_dev *)hw->back; + struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw); struct hclge_cmq_ring *csq = &hw->cmq.csq; - u16 ntc = csq->next_to_clean; - struct hclge_desc *desc; - int clean = 0; u32 head; + int clean; - desc = &csq->desc[ntc]; head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); rmb(); /* Make sure head is ready before touch any data */ if (!is_valid_csq_clean_head(csq, head)) { - dev_warn(&hdev->pdev->dev, "wrong head (%d, %d-%d)\n", head, - csq->next_to_use, csq->next_to_clean); + dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head, + csq->next_to_use, csq->next_to_clean); return 0; } - while (head != ntc) { - memset(desc, 0, sizeof(*desc)); - ntc++; - if (ntc == csq->desc_num) - ntc = 0; - desc = &csq->desc[ntc]; - clean++; - } - csq->next_to_clean = ntc; - + clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num; + csq->next_to_clean = head; return clean; } @@ -216,7 +195,7 @@ static bool hclge_is_special_opcode(u16 opcode) **/ int 
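/* Reviewer aside, illustrative only: the hclge_cmd_csq_clean rewrite
 * above replaces the element-by-element memset walk with one modular
 * subtraction. Both count the same number of cleaned descriptors for
 * any head/next_to_clean pair, wraparound included:
 */
#include <assert.h>
#include <stdio.h>

static int clean_loop(int ntc, int head, int num)
{
	int clean = 0;

	while (head != ntc) {
		ntc = (ntc + 1) % num;
		clean++;
	}
	return clean;
}

int main(void)
{
	const int num = 1024;
	int ntc, head;

	for (ntc = 0; ntc < num; ntc += 97)
		for (head = 0; head < num; head += 101)
			assert(((head - ntc + num) % num) ==
			       clean_loop(ntc, head, num));
	printf("modular clean count matches the old loop\n");
	return 0;
}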
hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) { - struct hclge_dev *hdev = (struct hclge_dev *)hw->back; + struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw); struct hclge_desc *desc_to_use; bool complete = false; u32 timeout = 0; @@ -256,33 +235,34 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) */ if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) { do { - if (hclge_cmd_csq_done(hw)) + if (hclge_cmd_csq_done(hw)) { + complete = true; break; + } udelay(1); timeout++; } while (timeout < hw->cmq.tx_timeout); } - if (hclge_cmd_csq_done(hw)) { - complete = true; + if (!complete) { + retval = -EAGAIN; + } else { handle = 0; while (handle < num) { /* Get the result of hardware write back */ desc_to_use = &hw->cmq.csq.desc[ntc]; desc[handle] = *desc_to_use; - pr_debug("Get cmd desc:\n"); if (likely(!hclge_is_special_opcode(opcode))) desc_ret = le16_to_cpu(desc[handle].retval); else desc_ret = le16_to_cpu(desc[0].retval); - if ((enum hclge_cmd_return_status)desc_ret == - HCLGE_CMD_EXEC_SUCCESS) + if (desc_ret == HCLGE_CMD_EXEC_SUCCESS) retval = 0; else retval = -EIO; - hw->cmq.last_status = (enum hclge_cmd_status)desc_ret; + hw->cmq.last_status = desc_ret; ntc++; handle++; if (ntc == hw->cmq.csq.desc_num) @@ -290,9 +270,6 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) } } - if (!complete) - retval = -EAGAIN; - /* Clean the command send queue */ handle = hclge_cmd_csq_clean(hw); if (handle != num) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index d318d35e598f..6fffc69a7138 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -939,8 +939,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) if (hnae3_dev_roce_supported(hdev)) { hdev->num_roce_msi = - hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), - HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); + hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), + HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); /* PF should have NIC vectors and Roce vectors, * NIC vectors are queued before Roce vectors. 
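/* Reviewer aside, sketch only: the hclge_cmd_send hunk above now latches
 * "complete" inside the poll loop, so the CSQ-done test is made once, in
 * the loop, instead of being repeated afterwards. The shape in
 * isolation, with csq_done() and udelay() as stand-ins for the real
 * driver calls:
 */
#include <stdbool.h>

static bool csq_done(void) { return true; }	/* stand-in */
static void udelay(int us) { (void)us; }	/* stand-in */

static int wait_for_csq(unsigned int tx_timeout)
{
	unsigned int timeout = 0;
	bool complete = false;

	do {
		if (csq_done()) {
			complete = true;
			break;
		}
		udelay(1);
		timeout++;
	} while (timeout < tx_timeout);

	return complete ? 0 : -1;	/* -EAGAIN in the driver */
}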
@@ -948,8 +948,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET; } else { hdev->num_msi = - hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), - HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); + hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), + HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); } return 0; @@ -1038,38 +1038,38 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) req = (struct hclge_cfg_param_cmd *)desc[0].data; /* get the configuration */ - cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]), - HCLGE_CFG_VMDQ_M, - HCLGE_CFG_VMDQ_S); - cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]), - HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); - cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]), - HCLGE_CFG_TQP_DESC_N_M, - HCLGE_CFG_TQP_DESC_N_S); - - cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]), - HCLGE_CFG_PHY_ADDR_M, - HCLGE_CFG_PHY_ADDR_S); - cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]), - HCLGE_CFG_MEDIA_TP_M, - HCLGE_CFG_MEDIA_TP_S); - cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]), - HCLGE_CFG_RX_BUF_LEN_M, - HCLGE_CFG_RX_BUF_LEN_S); + cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_VMDQ_M, + HCLGE_CFG_VMDQ_S); + cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); + cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_TQP_DESC_N_M, + HCLGE_CFG_TQP_DESC_N_S); + + cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_PHY_ADDR_M, + HCLGE_CFG_PHY_ADDR_S); + cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_MEDIA_TP_M, + HCLGE_CFG_MEDIA_TP_S); + cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_RX_BUF_LEN_M, + HCLGE_CFG_RX_BUF_LEN_S); /* get mac_address */ mac_addr_tmp = __le32_to_cpu(req->param[2]); - mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]), - HCLGE_CFG_MAC_ADDR_H_M, - HCLGE_CFG_MAC_ADDR_H_S); + mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_MAC_ADDR_H_M, + HCLGE_CFG_MAC_ADDR_H_S); mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; - cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]), - HCLGE_CFG_DEFAULT_SPEED_M, - HCLGE_CFG_DEFAULT_SPEED_S); - cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]), - HCLGE_CFG_RSS_SIZE_M, - HCLGE_CFG_RSS_SIZE_S); + cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_DEFAULT_SPEED_M, + HCLGE_CFG_DEFAULT_SPEED_S); + cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_RSS_SIZE_M, + HCLGE_CFG_RSS_SIZE_S); for (i = 0; i < ETH_ALEN; i++) cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; @@ -1077,9 +1077,9 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) req = (struct hclge_cfg_param_cmd *)desc[1].data; cfg->numa_node_map = __le32_to_cpu(req->param[0]); - cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]), - HCLGE_CFG_SPEED_ABILITY_M, - HCLGE_CFG_SPEED_ABILITY_S); + cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_SPEED_ABILITY_M, + HCLGE_CFG_SPEED_ABILITY_S); } /* hclge_get_cfg: query the static parameter from flash @@ -1098,11 +1098,11 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) req = (struct hclge_cfg_param_cmd *)desc[i].data; hclge_cmd_setup_basic_desc(&desc[i], 
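/* Reviewer aside on the hclge_parse_cfg hunk above: the 48-bit MAC is
 * assembled from two 32-bit firmware words, and the odd-looking
 * "(mac_addr_tmp_high << 31) << 1" is a "<< 32" spelled so it stays
 * defined even if the operand were only 32 bits wide. Standalone check,
 * assuming a 64-bit accumulator as in the driver:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t lo = 0x89abcdefULL;	/* param[2]              */
	uint64_t hi = 0x4567ULL;	/* high 16 bits, param[3] */
	uint64_t mac = lo | ((hi << 31) << 1);

	assert(mac == 0x456789abcdefULL);
	return 0;
}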
HCLGE_OPC_GET_CFG_PARAM, true); - hnae_set_field(offset, HCLGE_CFG_OFFSET_M, - HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); + hnae3_set_field(offset, HCLGE_CFG_OFFSET_M, + HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); /* Len should be united by 4 bytes when send to hardware */ - hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S, - HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); + hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S, + HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); req->offset = cpu_to_le32(offset); } @@ -1189,7 +1189,7 @@ static int hclge_configure(struct hclge_dev *hdev) /* Currently not support uncontiuous tc */ for (i = 0; i < hdev->tm_info.num_tc; i++) - hnae_set_bit(hdev->hw_tc_map, i, 1); + hnae3_set_bit(hdev->hw_tc_map, i, 1); hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; @@ -1208,13 +1208,13 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, req = (struct hclge_cfg_tso_status_cmd *)desc.data; tso_mss = 0; - hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, - HCLGE_TSO_MSS_MIN_S, tso_mss_min); + hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, + HCLGE_TSO_MSS_MIN_S, tso_mss_min); req->tso_mss_min = cpu_to_le16(tso_mss); tso_mss = 0; - hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, - HCLGE_TSO_MSS_MIN_S, tso_mss_max); + hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, + HCLGE_TSO_MSS_MIN_S, tso_mss_max); req->tso_mss_max = cpu_to_le16(tso_mss); return hclge_cmd_send(&hdev->hw, &desc, 1); @@ -2118,48 +2118,48 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); - hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); + hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); switch (speed) { case HCLGE_MAC_SPEED_10M: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 6); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 6); break; case HCLGE_MAC_SPEED_100M: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 7); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 7); break; case HCLGE_MAC_SPEED_1G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 0); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 0); break; case HCLGE_MAC_SPEED_10G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 1); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 1); break; case HCLGE_MAC_SPEED_25G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 2); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 2); break; case HCLGE_MAC_SPEED_40G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 3); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 3); break; case HCLGE_MAC_SPEED_50G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 4); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 4); break; case HCLGE_MAC_SPEED_100G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 5); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 5); break; default: dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); return -EINVAL; } - hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, - 1); + hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, + 1); ret = 
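/* Reviewer aside, sketch only: the speed switch above maps link speeds
 * to the firmware encoding (10M->6, 100M->7, 1G->0, 10G->1, 25G->2,
 * 40G->3, 50G->4, 100G->5). The same mapping as a lookup table, should
 * the list keep growing -- "speed_fw_code" and "speed_to_fw" are
 * hypothetical names, not driver API:
 */
#include <stdio.h>

struct speed_map { int mbps; unsigned int fw_code; };

static const struct speed_map speed_fw_code[] = {
	{ 10, 6 }, { 100, 7 }, { 1000, 0 }, { 10000, 1 },
	{ 25000, 2 }, { 40000, 3 }, { 50000, 4 }, { 100000, 5 },
};

static int speed_to_fw(int mbps)
{
	unsigned int i;

	for (i = 0; i < sizeof(speed_fw_code) / sizeof(speed_fw_code[0]); i++)
		if (speed_fw_code[i].mbps == mbps)
			return speed_fw_code[i].fw_code;
	return -1;			/* -EINVAL in the driver */
}

int main(void)
{
	printf("25G -> %d\n", speed_to_fw(25000));	/* prints 2 */
	return 0;
}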
hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -2201,9 +2201,9 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, return ret; } - *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); - speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, - HCLGE_QUERY_SPEED_S); + *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); + speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, + HCLGE_QUERY_SPEED_S); ret = hclge_parse_speed(speed_tmp, speed); if (ret) { @@ -2225,7 +2225,7 @@ static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); req = (struct hclge_config_auto_neg_cmd *)desc.data; - hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); + hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); req->cfg_an_cmd_flag = cpu_to_le32(flag); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -2269,8 +2269,8 @@ static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev, req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false); - hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, - mask_vlan ? 1 : 0); + hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, + mask_vlan ? 1 : 0); ether_addr_copy(req->mac_mask, mac_mask); status = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -2614,6 +2614,12 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) { + if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { + dev_warn(&hdev->pdev->dev, + "vector(vector_id %d) has been freed.\n", vector_id); + return; + } + hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; hdev->num_msi_left += 1; hdev->num_msi_used -= 1; @@ -2705,7 +2711,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev) } val = hclge_read_dev(&hdev->hw, reg); - while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { + while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { msleep(HCLGE_RESET_WATI_MS); val = hclge_read_dev(&hdev->hw, reg); cnt++; @@ -2727,8 +2733,7 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); - hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0); - hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); + hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); req->fun_reset_vfid = func_id; ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -2747,13 +2752,13 @@ static void hclge_do_reset(struct hclge_dev *hdev) switch (hdev->reset_type) { case HNAE3_GLOBAL_RESET: val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); - hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); + hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); dev_info(&pdev->dev, "Global Reset requested\n"); break; case HNAE3_CORE_RESET: val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); - hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1); + hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1); hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); dev_info(&pdev->dev, "Core Reset requested\n"); break; @@ -3110,11 +3115,11 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { u16 mode = 0; - hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); - hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M, - 
HCLGE_RSS_TC_SIZE_S, tc_size[i]); - hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M, - HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); + hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); + hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M, + HCLGE_RSS_TC_SIZE_S, tc_size[i]); + hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M, + HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); req->rss_tc_mode[i] = cpu_to_le16(mode); } @@ -3491,16 +3496,16 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport, i = 0; for (node = ring_chain; node; node = node->next) { tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); - hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, - HCLGE_INT_TYPE_S, - hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); - hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, - HCLGE_TQP_ID_S, node->tqp_index); - hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, - HCLGE_INT_GL_IDX_S, - hnae_get_field(node->int_gl_idx, - HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S)); + hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, + HCLGE_INT_TYPE_S, + hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); + hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, + HCLGE_TQP_ID_S, node->tqp_index); + hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, + HCLGE_INT_GL_IDX_S, + hnae3_get_field(node->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S)); req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; @@ -3648,20 +3653,20 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); - hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); - hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); - hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); - hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); - hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); + hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); + hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); + hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); + hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -3689,7 +3694,7 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) /* 2 Then setup the loopback flag */ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); - hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 
1 : 0); + hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); @@ -3953,10 +3958,10 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, req = (struct hclge_mta_filter_mode_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); - hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, - enable); - hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, - HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); + hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, + enable); + hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, + HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -3980,8 +3985,8 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); - hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, - enable); + hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, + enable); req->function_id = func_id; ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -4007,10 +4012,10 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport, req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); - hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); + hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); - hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, - HCLGE_CFG_MTA_ITEM_IDX_S, idx); + hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, + HCLGE_CFG_MTA_ITEM_IDX_S, idx); req->item_idx = cpu_to_le16(item_idx); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -4257,17 +4262,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0); - hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - - hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0); - hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0); - hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, - HCLGE_MAC_EPORT_VFID_S, vport->vport_id); - hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M, - HCLGE_MAC_EPORT_PFID_S, 0); + hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + + hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, + HCLGE_MAC_EPORT_VFID_S, vport->vport_id); req.egress_port = cpu_to_le16(egress_port); @@ -4318,8 +4316,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr); ret = hclge_remove_mac_vlan_tbl(vport, &req); @@ -4351,10 +4349,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, return -EINVAL; } memset(&req, 0, sizeof(req)); - hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 
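/* Reviewer aside on the hclge_add_uc_addr_common hunk above: the dropped
 * lines all wrote zero into fields that "memset(&req, 0, sizeof(req))"
 * had already cleared, so only the bits that must be 1 remain. Minimal
 * shape of the pattern -- "struct entry" and "fill" are invented for
 * illustration:
 */
#include <string.h>

struct entry { unsigned char flags, entry_type, mc_mac_en; };

static void fill(struct entry *e)
{
	memset(e, 0, sizeof(*e));
	e->flags |= 1u << 0;	/* the lone non-zero bit, e.g. BIT0_EN */
}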
+ hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { @@ -4418,10 +4416,10 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); + hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { @@ -4802,19 +4800,19 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, - vcfg->accept_tag1 ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, - vcfg->accept_untag1 ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, - vcfg->accept_tag2 ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, - vcfg->accept_untag2 ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, - vcfg->insert_tag1_en ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, - vcfg->insert_tag2_en ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, + vcfg->accept_tag1 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, + vcfg->accept_untag1 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, + vcfg->accept_tag2 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, + vcfg->accept_untag2 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, + vcfg->insert_tag1_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, + vcfg->insert_tag2_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; req->vf_bitmap[req->vf_offset] = @@ -4840,14 +4838,14 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; - hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, - vcfg->strip_tag1_en ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, - vcfg->strip_tag2_en ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, - vcfg->vlan1_vlan_prionly ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, - vcfg->vlan2_vlan_prionly ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, + vcfg->strip_tag1_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, + vcfg->strip_tag2_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, + vcfg->vlan1_vlan_prionly ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, + vcfg->vlan2_vlan_prionly ? 
1 : 0); req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; req->vf_bitmap[req->vf_offset] = @@ -5043,7 +5041,7 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, req = (struct hclge_reset_tqp_queue_cmd *)desc.data; req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); - hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); + hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -5073,7 +5071,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) return ret; } - return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); + return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); } static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, @@ -5380,12 +5378,12 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle, phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); retval = phy_read(phydev, HCLGE_PHY_CSC_REG); - mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, - HCLGE_PHY_MDIX_CTRL_S); + mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, + HCLGE_PHY_MDIX_CTRL_S); retval = phy_read(phydev, HCLGE_PHY_CSS_REG); - mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); - is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); + mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); + is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); @@ -5531,7 +5529,6 @@ static int hclge_pci_init(struct hclge_dev *hdev) pci_set_master(pdev); hw = &hdev->hw; - hw->back = hdev; hw->io_base = pcim_iomap(pdev, 2, 0); if (!hw->io_base) { dev_err(&pdev->dev, "Can't map configuration register space\n"); @@ -5562,6 +5559,30 @@ static void hclge_pci_uninit(struct hclge_dev *hdev) pci_disable_device(pdev); } +static void hclge_state_init(struct hclge_dev *hdev) +{ + set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); + set_bit(HCLGE_STATE_DOWN, &hdev->state); + clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); + clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); + clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); +} + +static void hclge_state_uninit(struct hclge_dev *hdev) +{ + set_bit(HCLGE_STATE_DOWN, &hdev->state); + + if (hdev->service_timer.function) + del_timer_sync(&hdev->service_timer); + if (hdev->service_task.func) + cancel_work_sync(&hdev->service_task); + if (hdev->rst_service_task.func) + cancel_work_sync(&hdev->rst_service_task); + if (hdev->mbx_service_task.func) + cancel_work_sync(&hdev->mbx_service_task); +} + static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) { struct pci_dev *pdev = ae_dev->pdev; @@ -5702,12 +5723,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) /* Enable MISC vector(vector0) */ hclge_enable_vector(&hdev->misc_vector, true); - set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); - set_bit(HCLGE_STATE_DOWN, &hdev->state); - clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); - clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); - clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); - clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); + hclge_state_init(hdev); pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); return 0; @@ -5812,16 +5828,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) struct hclge_dev *hdev = ae_dev->priv; struct hclge_mac *mac = &hdev->hw.mac; - set_bit(HCLGE_STATE_DOWN, &hdev->state); - - 
if (hdev->service_timer.function) - del_timer_sync(&hdev->service_timer); - if (hdev->service_task.func) - cancel_work_sync(&hdev->service_task); - if (hdev->rst_service_task.func) - cancel_work_sync(&hdev->rst_service_task); - if (hdev->mbx_service_task.func) - cancel_work_sync(&hdev->mbx_service_task); + hclge_state_uninit(hdev); if (mac->phydev) mdiobus_unregister(mac->mdio_bus); @@ -6149,8 +6156,8 @@ static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); req = (struct hclge_set_led_state_cmd *)desc.data; - hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, - HCLGE_LED_LOCATE_STATE_S, locate_led_status); + hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, + HCLGE_LED_LOCATE_STATE_S, locate_led_status); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -6280,7 +6287,6 @@ static const struct hnae3_ae_ops hclge_ops = { static struct hnae3_ae_algo ae_algo = { .ops = &hclge_ops, - .name = HCLGE_NAME, .pdev_id_table = ae_algo_pci_tbl, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 7488534528cd..71d38b852c56 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -190,7 +190,6 @@ struct hclge_hw { int num_vec; struct hclge_cmq cmq; struct hclge_caps caps; - void *back; }; /* TQP stats */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 7541cb9b71ce..50ae2f8f188d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -128,12 +128,12 @@ static int hclge_get_ring_chain_from_mbx( HCLGE_MBX_RING_NODE_VARIABLE_NUM)) return -ENOMEM; - hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]); + hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]); ring_chain->tqp_index = hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]); - hnae_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, - HCLGE_INT_GL_IDX_S, - req->msg[5]); + hnae3_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, + HCLGE_INT_GL_IDX_S, + req->msg[5]); cur_chain = ring_chain; @@ -142,19 +142,19 @@ static int hclge_get_ring_chain_from_mbx( if (!new_chain) goto err; - hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B, - req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + - HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]); + hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B, + req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]); new_chain->tqp_index = hclge_get_queue_id(vport->nic.kinfo.tqp [req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]); - hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, - HCLGE_INT_GL_IDX_S, - req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + - HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]); + hnae3_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, + HCLGE_INT_GL_IDX_S, + req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]); cur_chain->next = new_chain; cur_chain = new_chain; @@ -460,7 +460,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev) req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); - if (unlikely(!hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) { + if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) { 
dev_warn(&hdev->pdev->dev, "dropped invalid mailbox message, code = %d\n", req->msg[0]); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 9f7932e423b5..b6cfe6ff988d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -67,16 +67,16 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; - hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, - HCLGE_MDIO_PHYID_S, phyid); - hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, - HCLGE_MDIO_PHYREG_S, regnum); + hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, + HCLGE_MDIO_PHYID_S, phyid); + hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, + HCLGE_MDIO_PHYREG_S, regnum); - hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); - hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, - HCLGE_MDIO_CTRL_ST_S, 1); - hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, - HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE); + hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, + HCLGE_MDIO_CTRL_ST_S, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, + HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE); mdio_cmd->data_wr = cpu_to_le16(data); @@ -105,16 +105,16 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; - hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, - HCLGE_MDIO_PHYID_S, phyid); - hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, - HCLGE_MDIO_PHYREG_S, regnum); + hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, + HCLGE_MDIO_PHYID_S, phyid); + hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, + HCLGE_MDIO_PHYREG_S, regnum); - hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); - hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, - HCLGE_MDIO_CTRL_ST_S, 1); - hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, - HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ); + hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, + HCLGE_MDIO_CTRL_ST_S, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, + HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ); /* Read out phy data */ ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -125,7 +125,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) return ret; } - if (hnae_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) { + if (hnae3_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) { dev_err(&hdev->pdev->dev, "mdio read data error\n"); return -EIO; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 262c125f8137..82bc30fdff98 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -1184,10 +1184,10 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) u16 qs_id = vport->qs_offset + tc; u8 grp, sub_grp; - grp = hnae_get_field(qs_id, HCLGE_BP_GRP_ID_M, - HCLGE_BP_GRP_ID_S); - sub_grp = hnae_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M, - HCLGE_BP_SUB_GRP_ID_S); + grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M, + HCLGE_BP_GRP_ID_S); + sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M, + HCLGE_BP_SUB_GRP_ID_S); if (i == grp) 
qs_bitmap |= (1 << sub_grp); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index c2b6e8a6700f..c82d49ebd5bf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -123,10 +123,11 @@ struct hclge_port_shapping_cmd { }; #define hclge_tm_set_field(dest, string, val) \ - hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \ - (HCLGE_TM_SHAP_##string##_LSH), val) + hnae3_set_field((dest), \ + (HCLGE_TM_SHAP_##string##_MSK), \ + (HCLGE_TM_SHAP_##string##_LSH), val) #define hclge_tm_get_field(src, string) \ - hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \ + hnae3_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \ (HCLGE_TM_SHAP_##string##_LSH)) int hclge_tm_schd_init(struct hclge_dev *hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index 1bbfe131b596..fb471fe2c494 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -76,32 +76,24 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring) { int size = ring->desc_num * sizeof(struct hclgevf_desc); - ring->desc = kzalloc(size, GFP_KERNEL); + ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), + size, &ring->desc_dma_addr, + GFP_KERNEL); if (!ring->desc) return -ENOMEM; - ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc, - size, DMA_BIDIRECTIONAL); - - if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) { - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; - return -ENOMEM; - } - return 0; } static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring) { - dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr, - ring->desc_num * sizeof(ring->desc[0]), - hclgevf_ring_to_dma_dir(ring)); + int size = ring->desc_num * sizeof(struct hclgevf_desc); - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; + if (ring->desc) { + dma_free_coherent(cmq_ring_to_dev(ring), size, + ring->desc, ring->desc_dma_addr); + ring->desc = NULL; + } } static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index a17872aab168..5a8653238e3f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -330,6 +330,12 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) { + if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { + dev_warn(&hdev->pdev->dev, + "vector(vector_id %d) has been freed.\n", vector_id); + return; + } + hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; hdev->num_msi_left += 1; hdev->num_msi_used -= 1; @@ -444,12 +450,12 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { - hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, - (tc_valid[i] & 0x1)); - hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, - HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); - hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, - HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); + hnae3_set_bit(req->rss_tc_mode[i], 
HCLGEVF_RSS_TC_VALID_B, + (tc_valid[i] & 0x1)); + hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, + HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); + hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, + HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); } status = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (status) @@ -547,24 +553,18 @@ static int hclgevf_get_tc_size(struct hnae3_handle *handle) } static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, - int vector, + int vector_id, struct hnae3_ring_chain_node *ring_chain) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hnae3_ring_chain_node *node; struct hclge_mbx_vf_to_pf_cmd *req; struct hclgevf_desc desc; - int i = 0, vector_id; + int i = 0; int status; u8 type; req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; - vector_id = hclgevf_get_vector_index(hdev, vector); - if (vector_id < 0) { - dev_err(&handle->pdev->dev, - "Get vector index fail. ret =%d\n", vector_id); - return vector_id; - } for (node = ring_chain; node; node = node->next) { int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + @@ -582,11 +582,11 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, } req->msg[idx_offset] = - hnae_get_bit(node->flag, HNAE3_RING_TYPE_B); + hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); req->msg[idx_offset + 1] = node->tqp_index; - req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx, - HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S); + req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S); i++; if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - @@ -617,7 +617,17 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, struct hnae3_ring_chain_node *ring_chain) { - return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain); + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int vector_id; + + vector_id = hclgevf_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "Get vector index fail. ret =%d\n", vector_id); + return vector_id; + } + + return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); } static int hclgevf_unmap_ring_from_vector( @@ -635,7 +645,7 @@ static int hclgevf_unmap_ring_from_vector( return vector_id; } - ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain); + ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); if (ret) dev_err(&handle->pdev->dev, "Unmap ring from vector fail. 
vector=%d, ret =%d\n", @@ -990,8 +1000,8 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev) /* wait to check the hardware reset completion status */ val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); - while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) && - (cnt < HCLGEVF_RESET_WAIT_CNT)) { + while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) && + (cnt < HCLGEVF_RESET_WAIT_CNT)) { msleep(HCLGEVF_RESET_WAIT_MS); val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); cnt++; @@ -1582,9 +1592,10 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) hclgevf_free_vector(hdev, 0); } -static int hclgevf_init_instance(struct hclgevf_dev *hdev, - struct hnae3_client *client) +static int hclgevf_init_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) { + struct hclgevf_dev *hdev = ae_dev->priv; int ret; switch (client->type) { @@ -1635,9 +1646,11 @@ static int hclgevf_init_instance(struct hclgevf_dev *hdev, return 0; } -static void hclgevf_uninit_instance(struct hclgevf_dev *hdev, - struct hnae3_client *client) +static void hclgevf_uninit_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) { + struct hclgevf_dev *hdev = ae_dev->priv; + /* un-init roce, if it exists */ if (hdev->roce_client) hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); @@ -1648,22 +1661,6 @@ static void hclgevf_uninit_instance(struct hclgevf_dev *hdev, client->ops->uninit_instance(&hdev->nic, 0); } -static int hclgevf_register_client(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev) -{ - struct hclgevf_dev *hdev = ae_dev->priv; - - return hclgevf_init_instance(hdev, client); -} - -static void hclgevf_unregister_client(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev) -{ - struct hclgevf_dev *hdev = ae_dev->priv; - - hclgevf_uninit_instance(hdev, client); -} - static int hclgevf_pci_init(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; @@ -1924,8 +1921,8 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, static const struct hnae3_ae_ops hclgevf_ops = { .init_ae_dev = hclgevf_init_ae_dev, .uninit_ae_dev = hclgevf_uninit_ae_dev, - .init_client_instance = hclgevf_register_client, - .uninit_client_instance = hclgevf_unregister_client, + .init_client_instance = hclgevf_init_client_instance, + .uninit_client_instance = hclgevf_uninit_client_instance, .start = hclgevf_ae_start, .stop = hclgevf_ae_stop, .map_ring_to_vector = hclgevf_map_ring_to_vector, @@ -1962,7 +1959,6 @@ static const struct hnae3_ae_ops hclgevf_ops = { static struct hnae3_ae_algo ae_algovf = { .ops = &hclgevf_ops, - .name = HCLGEVF_NAME, .pdev_id_table = ae_algovf_pci_tbl, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index b598c06af8e0..173ca27957ef 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -152,7 +152,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data; flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); - if (unlikely(!hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) { + if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) { dev_warn(&hdev->pdev->dev, "dropped invalid mailbox message, code = %d\n", req->msg[0]); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index c944bd10b03d..426b0ccb1fc6 100644 --- 
a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -7522,7 +7522,7 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np, case TC_CLSFLOWER_STATS: return -EOPNOTSUPP; default: - return -EINVAL; + return -EOPNOTSUPP; } } @@ -7554,7 +7554,7 @@ static int i40e_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb, - np, np); + np, np, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np); return 0; @@ -11978,7 +11978,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", IFNAMSIZ - 4, pf->vsi[pf->lan_vsi]->netdev->name); - random_ether_addr(mac_addr); + eth_random_addr(mac_addr); spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_add_mac_filter(vsi, mac_addr); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index a7b87f935411..5906c1c1d19d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2884,7 +2884,7 @@ static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter, case TC_CLSFLOWER_STATS: return -EOPNOTSUPP; default: - return -EINVAL; + return -EOPNOTSUPP; } } @@ -2926,7 +2926,7 @@ static int i40evf_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb, - adapter, adapter); + adapter, adapter, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb, adapter); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index f707709969ac..f1e3397bd405 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2698,7 +2698,7 @@ static int igb_setup_tc_cls_flower(struct igb_adapter *adapter, case TC_CLSFLOWER_STATS: return -EOPNOTSUPP; default: - return -EINVAL; + return -EOPNOTSUPP; } } @@ -2728,7 +2728,7 @@ static int igb_setup_tc_block(struct igb_adapter *adapter, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, igb_setup_tc_block_cb, - adapter, adapter); + adapter, adapter, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb, adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 62e57b05a0ae..a8e21becb619 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9329,7 +9329,7 @@ static int ixgbe_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb, - adapter, adapter); + adapter, adapter, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb, adapter); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index def00dc3eb4e..81a66cce7fa8 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -553,6 +553,8 @@ ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE) #define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) +#define MVPP2_BIT_TO_WORD(bit) ((bit) / 32) +#define MVPP2_BIT_IN_WORD(bit) ((bit) % 32) /* IPv6 max L3 address size */ #define MVPP2_MAX_L3_ADDR_SIZE 16 @@ -831,52 +833,52 @@ struct 
mvpp2_port { /* HW TX descriptor for PPv2.1 */ struct mvpp21_tx_desc { - u32 command; /* Options used by HW for packet transmitting.*/ + __le32 command; /* Options used by HW for packet transmitting.*/ u8 packet_offset; /* the offset from the buffer beginning */ u8 phys_txq; /* destination queue ID */ - u16 data_size; /* data size of transmitted packet in bytes */ - u32 buf_dma_addr; /* physical addr of transmitted buffer */ - u32 buf_cookie; /* cookie for access to TX buffer in tx path */ - u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ - u32 reserved2; /* reserved (for future use) */ + __le16 data_size; /* data size of transmitted packet in bytes */ + __le32 buf_dma_addr; /* physical addr of transmitted buffer */ + __le32 buf_cookie; /* cookie for access to TX buffer in tx path */ + __le32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ + __le32 reserved2; /* reserved (for future use) */ }; /* HW RX descriptor for PPv2.1 */ struct mvpp21_rx_desc { - u32 status; /* info about received packet */ - u16 reserved1; /* parser_info (for future use, PnC) */ - u16 data_size; /* size of received packet in bytes */ - u32 buf_dma_addr; /* physical address of the buffer */ - u32 buf_cookie; /* cookie for access to RX buffer in rx path */ - u16 reserved2; /* gem_port_id (for future use, PON) */ - u16 reserved3; /* csum_l4 (for future use, PnC) */ + __le32 status; /* info about received packet */ + __le16 reserved1; /* parser_info (for future use, PnC) */ + __le16 data_size; /* size of received packet in bytes */ + __le32 buf_dma_addr; /* physical address of the buffer */ + __le32 buf_cookie; /* cookie for access to RX buffer in rx path */ + __le16 reserved2; /* gem_port_id (for future use, PON) */ + __le16 reserved3; /* csum_l4 (for future use, PnC) */ u8 reserved4; /* bm_qset (for future use, BM) */ u8 reserved5; - u16 reserved6; /* classify_info (for future use, PnC) */ - u32 reserved7; /* flow_id (for future use, PnC) */ - u32 reserved8; + __le16 reserved6; /* classify_info (for future use, PnC) */ + __le32 reserved7; /* flow_id (for future use, PnC) */ + __le32 reserved8; }; /* HW TX descriptor for PPv2.2 */ struct mvpp22_tx_desc { - u32 command; + __le32 command; u8 packet_offset; u8 phys_txq; - u16 data_size; - u64 reserved1; - u64 buf_dma_addr_ptp; - u64 buf_cookie_misc; + __le16 data_size; + __le64 reserved1; + __le64 buf_dma_addr_ptp; + __le64 buf_cookie_misc; }; /* HW RX descriptor for PPv2.2 */ struct mvpp22_rx_desc { - u32 status; - u16 reserved1; - u16 data_size; - u32 reserved2; - u32 reserved3; - u64 buf_dma_addr_key_hash; - u64 buf_cookie_misc; + __le32 status; + __le16 reserved1; + __le16 data_size; + __le32 reserved2; + __le32 reserved3; + __le64 buf_dma_addr_key_hash; + __le64 buf_cookie_misc; }; /* Opaque type used by the driver to manipulate the HW TX and RX diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 0319ed9ef8b8..88f3da184d76 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -151,9 +151,10 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc) { if (port->priv->hw_version == MVPP21) - return tx_desc->pp21.buf_dma_addr; + return le32_to_cpu(tx_desc->pp21.buf_dma_addr); else - return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK; + return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) & + MVPP2_DESC_DMA_MASK; } static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port 
*port, @@ -166,12 +167,12 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, offset = dma_addr & MVPP2_TX_DESC_ALIGN; if (port->priv->hw_version == MVPP21) { - tx_desc->pp21.buf_dma_addr = addr; + tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr); tx_desc->pp21.packet_offset = offset; } else { - u64 val = (u64)addr; + __le64 val = cpu_to_le64(addr); - tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK; + tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK); tx_desc->pp22.buf_dma_addr_ptp |= val; tx_desc->pp22.packet_offset = offset; } @@ -181,9 +182,9 @@ static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc) { if (port->priv->hw_version == MVPP21) - return tx_desc->pp21.data_size; + return le16_to_cpu(tx_desc->pp21.data_size); else - return tx_desc->pp22.data_size; + return le16_to_cpu(tx_desc->pp22.data_size); } static void mvpp2_txdesc_size_set(struct mvpp2_port *port, @@ -191,9 +192,9 @@ static void mvpp2_txdesc_size_set(struct mvpp2_port *port, size_t size) { if (port->priv->hw_version == MVPP21) - tx_desc->pp21.data_size = size; + tx_desc->pp21.data_size = cpu_to_le16(size); else - tx_desc->pp22.data_size = size; + tx_desc->pp22.data_size = cpu_to_le16(size); } static void mvpp2_txdesc_txq_set(struct mvpp2_port *port, @@ -211,9 +212,9 @@ static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port, unsigned int command) { if (port->priv->hw_version == MVPP21) - tx_desc->pp21.command = command; + tx_desc->pp21.command = cpu_to_le32(command); else - tx_desc->pp22.command = command; + tx_desc->pp22.command = cpu_to_le32(command); } static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port, @@ -229,36 +230,38 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.buf_dma_addr; + return le32_to_cpu(rx_desc->pp21.buf_dma_addr); else - return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK; + return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) & + MVPP2_DESC_DMA_MASK; } static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.buf_cookie; + return le32_to_cpu(rx_desc->pp21.buf_cookie); else - return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK; + return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) & + MVPP2_DESC_DMA_MASK; } static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.data_size; + return le16_to_cpu(rx_desc->pp21.data_size); else - return rx_desc->pp22.data_size; + return le16_to_cpu(rx_desc->pp22.data_size); } static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.status; + return le32_to_cpu(rx_desc->pp21.status); else - return rx_desc->pp22.status; + return le32_to_cpu(rx_desc->pp22.status); } static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) @@ -1735,7 +1738,7 @@ static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto, command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); command |= MVPP2_TXD_IP_CSUM_DISABLE; - if (l3_proto == swab16(ETH_P_IP)) { + if (l3_proto == htons(ETH_P_IP)) { command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ } else { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c 
b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 6bb69f086794..a882c14d7d77 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -30,17 +30,17 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) return -EINVAL; /* Clear entry invalidation bit */ - pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; + pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; /* Write tcam index - indirect access */ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]); + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); /* Write sram index - indirect access */ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]); + mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]); return 0; } @@ -60,18 +60,18 @@ static int mvpp2_prs_init_from_hw(struct mvpp2 *priv, /* Write tcam index - indirect access */ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); - pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, + pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD)); - if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) + if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) return MVPP2_PRS_TCAM_ENTRY_INVALID; for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) - pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); + pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); /* Write sram index - indirect access */ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) - pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); + pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); return 0; } @@ -103,42 +103,35 @@ static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index, /* Update lookup field in tcam sw entry */ static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu) { - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE); - - pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu; - pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK; + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK); + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK); + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK); + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK); } /* Update mask for single port in tcam sw entry */ static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, unsigned int port, bool add) { - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - if (add) - pe->tcam.byte[enable_off] &= ~(1 << port); + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port)); else - pe->tcam.byte[enable_off] |= 1 << port; + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port)); } /* Update port map in tcam sw entry */ static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, unsigned int ports) { - unsigned char port_mask = MVPP2_PRS_PORT_MASK; - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - - pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0; - pe->tcam.byte[enable_off] &= ~port_mask; - pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK; 
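/* Editor's sketch, not driver code: the mvpp2_prs hunks around this point
 * drop the old union-of-bytes view of a TCAM entry in favour of plain u32
 * words, where each word carries 16 data bits in its low half and the
 * matching 16 enable bits in its high half. A minimal standalone
 * illustration of that layout, using a hypothetical helper name
 * (tcam_word_set_byte) rather than the driver's own macros:
 */
#include <linux/types.h>
#include <linux/bitops.h>

static inline void tcam_word_set_byte(u32 *word, unsigned int offs,
				      u8 data, u8 enable)
{
	unsigned int pos = (offs % 2) * BITS_PER_BYTE; /* byte 0 or 1 of the data half */

	*word &= ~(0xffU << pos);	    /* clear old data bits */
	*word &= ~(0xffU << (pos + 16));    /* clear old enable bits */
	*word |= (u32)data << pos;	    /* low 16 bits: data */
	*word |= (u32)enable << (pos + 16); /* high 16 bits: enable mask */
}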
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK); + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK); + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK); } /* Obtain port map from tcam sw entry */ static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) { - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - - return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK; + return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK; } /* Set byte of data and its enable bits in tcam sw entry */ @@ -146,8 +139,12 @@ static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, unsigned int offs, unsigned char byte, unsigned char enable) { - pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte; - pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable; + int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE; + + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos); + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos); + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos; + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos); } /* Get byte of data and its enable bits from tcam sw entry */ @@ -155,46 +152,45 @@ static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, unsigned int offs, unsigned char *byte, unsigned char *enable) { - *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)]; - *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)]; + int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE; + + *byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff; + *enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff; } /* Compare tcam data bytes with a pattern */ static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs, u16 data) { - int off = MVPP2_PRS_TCAM_DATA_BYTE(offs); u16 tcam_data; - tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off]; - if (tcam_data != data) - return false; - return true; + tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff; + return tcam_data == data; } /* Update ai bits in tcam sw entry */ static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, unsigned int bits, unsigned int enable) { - int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE; + int i; for (i = 0; i < MVPP2_PRS_AI_BITS; i++) { if (!(enable & BIT(i))) continue; if (bits & BIT(i)) - pe->tcam.byte[ai_idx] |= 1 << i; + pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i); else - pe->tcam.byte[ai_idx] &= ~(1 << i); + pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i); } - pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable; + pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable); } /* Get ai bits from tcam sw entry */ static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe) { - return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE]; + return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK; } /* Set ethertype in tcam sw entry */ @@ -215,16 +211,16 @@ static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset, /* Set bits in sram sw entry */ static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num, - int val) + u32 val) { - pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8)); + pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num))); } /* Clear bits in sram sw entry */ static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num, - int val) + 
u32 val) { - pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8)); + pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num))); } /* Update ri bits in sram sw entry */ @@ -234,15 +230,16 @@ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, unsigned int i; for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) { - int ri_off = MVPP2_PRS_SRAM_RI_OFFS; - if (!(mask & BIT(i))) continue; if (bits & BIT(i)) - mvpp2_prs_sram_bits_set(pe, ri_off + i, 1); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i, + 1); else - mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1); + mvpp2_prs_sram_bits_clear(pe, + MVPP2_PRS_SRAM_RI_OFFS + i, + 1); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1); } @@ -251,7 +248,7 @@ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, /* Obtain ri bits from sram sw entry */ static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe) { - return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD]; + return pe->sram[MVPP2_PRS_SRAM_RI_WORD]; } /* Update ai bits in sram sw entry */ @@ -259,16 +256,18 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, unsigned int bits, unsigned int mask) { unsigned int i; - int ai_off = MVPP2_PRS_SRAM_AI_OFFS; for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) { if (!(mask & BIT(i))) continue; if (bits & BIT(i)) - mvpp2_prs_sram_bits_set(pe, ai_off + i, 1); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i, + 1); else - mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1); + mvpp2_prs_sram_bits_clear(pe, + MVPP2_PRS_SRAM_AI_OFFS + i, + 1); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1); } @@ -278,12 +277,12 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe) { u8 bits; - int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS); - int ai_en_off = ai_off + 1; - int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8; + /* ai is stored on bits 90->97; so it spreads across two u32 */ + int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS); + int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS); - bits = (pe->sram.byte[ai_off] >> ai_shift) | - (pe->sram.byte[ai_en_off] << (8 - ai_shift)); + bits = (pe->sram[ai_off] >> ai_shift) | + (pe->sram[ai_off + 1] << (32 - ai_shift)); return bits; } @@ -316,8 +315,7 @@ static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, } /* Set value */ - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] = - (unsigned char)shift; + pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] = shift & MVPP2_PRS_SRAM_SHIFT_MASK; /* Reset and set operation */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, @@ -346,13 +344,8 @@ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, /* Set value */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS, MVPP2_PRS_SRAM_UDF_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset); - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + - MVPP2_PRS_SRAM_UDF_BITS)] &= - ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + - MVPP2_PRS_SRAM_UDF_BITS)] |= - (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, + offset & MVPP2_PRS_SRAM_UDF_MASK); /* Set offset type */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, @@ -362,16 +355,8 @@ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, /* Set offset operation */ 
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op); - - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + - MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &= - ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >> - (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); - - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + - MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |= - (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, + op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); /* Set base offset as current */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); @@ -662,7 +647,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) continue; mvpp2_prs_init_from_hw(priv, &pe, tid); - match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid)); + match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid); if (!match) continue; @@ -790,8 +775,8 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, mvpp2_prs_init_from_hw(priv, &pe, tid); - match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) && - mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2)); + match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) && + mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2); if (!match) continue; @@ -932,8 +917,8 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, pe.index = tid; /* Clear ri before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE, @@ -1433,17 +1418,13 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv) pe.index = tid; - /* Clear tcam data before updating */ - pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0; - pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0; - mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_IPV4_HEAD, MVPP2_PRS_IPV4_HEAD_MASK); /* Clear ri before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, MVPP2_PRS_RI_L3_PROTO_MASK); @@ -1644,8 +1625,8 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) MVPP2_PRS_IPV4_IHL_MASK); /* Clear ri before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h index 22fbbc4c8b28..a7c8d0818432 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h @@ -50,17 +50,25 @@ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0). 
*/ #define MVPP2_PRS_AI_BITS 8 +#define MVPP2_PRS_AI_MASK 0xff #define MVPP2_PRS_PORT_MASK 0xff #define MVPP2_PRS_LU_MASK 0xf -#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \ - (((offs) - ((offs) % 2)) * 2 + ((offs) % 2)) -#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \ - (((offs) * 2) - ((offs) % 2) + 2) -#define MVPP2_PRS_TCAM_AI_BYTE 16 -#define MVPP2_PRS_TCAM_PORT_BYTE 17 -#define MVPP2_PRS_TCAM_LU_BYTE 20 -#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2) -#define MVPP2_PRS_TCAM_INV_WORD 5 + +/* TCAM entries in registers are accessed using 16 data bits + 16 enable bits */ +#define MVPP2_PRS_BYTE_TO_WORD(byte) ((byte) / 2) +#define MVPP2_PRS_BYTE_IN_WORD(byte) ((byte) % 2) + +#define MVPP2_PRS_TCAM_EN(data) ((data) << 16) +#define MVPP2_PRS_TCAM_AI_WORD 4 +#define MVPP2_PRS_TCAM_AI(ai) (ai) +#define MVPP2_PRS_TCAM_AI_EN(ai) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_AI(ai)) +#define MVPP2_PRS_TCAM_PORT_WORD 4 +#define MVPP2_PRS_TCAM_PORT(p) ((p) << 8) +#define MVPP2_PRS_TCAM_PORT_EN(p) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_PORT(p)) +#define MVPP2_PRS_TCAM_LU_WORD 5 +#define MVPP2_PRS_TCAM_LU(lu) (lu) +#define MVPP2_PRS_TCAM_LU_EN(lu) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_LU(lu)) +#define MVPP2_PRS_TCAM_INV_WORD 5 #define MVPP2_PRS_VID_TCAM_BYTE 2 @@ -146,6 +154,7 @@ #define MVPP2_PRS_SRAM_RI_CTRL_BITS 32 #define MVPP2_PRS_SRAM_SHIFT_OFFS 64 #define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72 +#define MVPP2_PRS_SRAM_SHIFT_MASK 0xff #define MVPP2_PRS_SRAM_UDF_OFFS 73 #define MVPP2_PRS_SRAM_UDF_BITS 8 #define MVPP2_PRS_SRAM_UDF_MASK 0xff @@ -255,20 +264,10 @@ enum mvpp2_prs_lookup { MVPP2_PRS_LU_LAST, }; -union mvpp2_prs_tcam_entry { - u32 word[MVPP2_PRS_TCAM_WORDS]; - u8 byte[MVPP2_PRS_TCAM_WORDS * 4]; -}; - -union mvpp2_prs_sram_entry { - u32 word[MVPP2_PRS_SRAM_WORDS]; - u8 byte[MVPP2_PRS_SRAM_WORDS * 4]; -}; - struct mvpp2_prs_entry { u32 index; - union mvpp2_prs_tcam_entry tcam; - union mvpp2_prs_sram_entry sram; + u32 tcam[MVPP2_PRS_TCAM_WORDS]; + u32 sram[MVPP2_PRS_SRAM_WORDS]; }; struct mvpp2_prs_shadow { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 9efbf193ad5a..d923f2f58608 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -14,8 +14,8 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ fpga/ipsec.o fpga/tls.o mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ - en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \ - en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o + en_tx.o en_rx.o en_dim.o en_txrx.o en_accel/rxtx.o en_stats.o \ + vxlan.o en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index eb9eb7aa953a..e2b7586ed7a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -137,7 +137,6 @@ struct page_pool; #define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1) #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) #define MLX5E_TX_CQ_POLL_BUDGET 128 -#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ #define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */ #define MLX5E_UMR_WQE_INLINE_SZ \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index f20074dbef32..39a5d13ba459 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -34,12 +34,11 @@ #ifndef __MLX5E_EN_ACCEL_H__ #define __MLX5E_EN_ACCEL_H__ -#ifdef CONFIG_MLX5_ACCEL - #include <linux/skbuff.h> #include <linux/netdevice.h> #include "en_accel/ipsec_rxtx.h" #include "en_accel/tls_rxtx.h" +#include "en_accel/rxtx.h" #include "en.h" static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb, @@ -64,9 +63,13 @@ static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb, } #endif + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + skb = mlx5e_udp_gso_handle_tx_skb(dev, sq, skb, wqe, pi); + if (unlikely(!skb)) + return NULL; + } + return skb; } -#endif /* CONFIG_MLX5_ACCEL */ - #endif /* __MLX5E_EN_ACCEL_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c new file mode 100644 index 000000000000..7b7ec3998e84 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c @@ -0,0 +1,109 @@ +#include "en_accel/rxtx.h" + +static void mlx5e_udp_gso_prepare_last_skb(struct sk_buff *skb, + struct sk_buff *nskb, + int remaining) +{ + int bytes_needed = remaining, remaining_headlen, remaining_page_offset; + int headlen = skb_transport_offset(skb) + sizeof(struct udphdr); + int payload_len = remaining + sizeof(struct udphdr); + int k = 0, i, j; + + skb_copy_bits(skb, 0, nskb->data, headlen); + nskb->dev = skb->dev; + skb_reset_mac_header(nskb); + skb_set_network_header(nskb, skb_network_offset(skb)); + skb_set_transport_header(nskb, skb_transport_offset(skb)); + skb_set_tail_pointer(nskb, headlen); + + /* How many frags do we need? */ + for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) { + bytes_needed -= skb_frag_size(&skb_shinfo(skb)->frags[i]); + k++; + if (bytes_needed <= 0) + break; + } + + /* Fill the first frag and split it if necessary */ + j = skb_shinfo(skb)->nr_frags - k; + remaining_page_offset = -bytes_needed; + skb_fill_page_desc(nskb, 0, + skb_shinfo(skb)->frags[j].page.p, + skb_shinfo(skb)->frags[j].page_offset + remaining_page_offset, + skb_shinfo(skb)->frags[j].size - remaining_page_offset); + + skb_frag_ref(skb, j); + + /* Fill the rest of the frags */ + for (i = 1; i < k; i++) { + j = skb_shinfo(skb)->nr_frags - k + i; + + skb_fill_page_desc(nskb, i, + skb_shinfo(skb)->frags[j].page.p, + skb_shinfo(skb)->frags[j].page_offset, + skb_shinfo(skb)->frags[j].size); + skb_frag_ref(skb, j); + } + skb_shinfo(nskb)->nr_frags = k; + + remaining_headlen = remaining - skb->data_len; + + /* headlen contains remaining data? 
*/ + if (remaining_headlen > 0) + skb_copy_bits(skb, skb->len - remaining, nskb->data + headlen, + remaining_headlen); + nskb->len = remaining + headlen; + nskb->data_len = payload_len - sizeof(struct udphdr) + + max_t(int, 0, remaining_headlen); + nskb->protocol = skb->protocol; + if (nskb->protocol == htons(ETH_P_IP)) { + ip_hdr(nskb)->id = htons(ntohs(ip_hdr(nskb)->id) + + skb_shinfo(skb)->gso_segs); + ip_hdr(nskb)->tot_len = + htons(payload_len + sizeof(struct iphdr)); + } else { + ipv6_hdr(nskb)->payload_len = htons(payload_len); + } + udp_hdr(nskb)->len = htons(payload_len); + skb_shinfo(nskb)->gso_size = 0; + nskb->ip_summed = skb->ip_summed; + nskb->csum_start = skb->csum_start; + nskb->csum_offset = skb->csum_offset; + nskb->queue_mapping = skb->queue_mapping; +} + +/* might send skbs and update wqe and pi */ +struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev, + struct mlx5e_txqsq *sq, + struct sk_buff *skb, + struct mlx5e_tx_wqe **wqe, + u16 *pi) +{ + int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr); + int headlen = skb_transport_offset(skb) + sizeof(struct udphdr); + int remaining = (skb->len - headlen) % skb_shinfo(skb)->gso_size; + struct sk_buff *nskb; + + if (skb->protocol == htons(ETH_P_IP)) + ip_hdr(skb)->tot_len = htons(payload_len + sizeof(struct iphdr)); + else + ipv6_hdr(skb)->payload_len = htons(payload_len); + udp_hdr(skb)->len = htons(payload_len); + if (!remaining) + return skb; + + sq->stats->udp_seg_rem++; + nskb = alloc_skb(max_t(int, headlen, headlen + remaining - skb->data_len), GFP_ATOMIC); + if (unlikely(!nskb)) { + sq->stats->dropped++; + return NULL; + } + + mlx5e_udp_gso_prepare_last_skb(skb, nskb, remaining); + + skb_shinfo(skb)->gso_segs--; + pskb_trim(skb, skb->len - remaining); + mlx5e_sq_xmit(sq, skb, *wqe, *pi); + mlx5e_sq_fetch_wqe(sq, wqe, pi); + return nskb; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h new file mode 100644 index 000000000000..ed42699a78b3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h @@ -0,0 +1,14 @@ + +#ifndef __MLX5E_EN_ACCEL_RX_TX_H__ +#define __MLX5E_EN_ACCEL_RX_TX_H__ + +#include <linux/skbuff.h> +#include "en.h" + +struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev, + struct mlx5e_txqsq *sq, + struct sk_buff *skb, + struct mlx5e_tx_wqe **wqe, + u16 *pi); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index dae4156a710d..bbd2fd0b2e06 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -270,12 +270,9 @@ void mlx5e_update_stats_work(struct work_struct *work) struct delayed_work *dwork = to_delayed_work(work); struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv, update_stats_work); + mutex_lock(&priv->state_lock); - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->profile->update_stats(priv); - queue_delayed_work(priv->wq, dwork, - msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL)); - } + priv->profile->update_stats(priv); mutex_unlock(&priv->state_lock); } @@ -352,8 +349,8 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, { int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); - rq->mpwqe.info = kcalloc_node(wq_sz, sizeof(*rq->mpwqe.info), - GFP_KERNEL, cpu_to_node(c->cpu)); + rq->mpwqe.info = kvzalloc_node(wq_sz * sizeof(*rq->mpwqe.info), + GFP_KERNEL, cpu_to_node(c->cpu)); if 
(!rq->mpwqe.info) return -ENOMEM; @@ -670,7 +667,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err_free: switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - kfree(rq->mpwqe.info); + kvfree(rq->mpwqe.info); mlx5_core_destroy_mkey(mdev, &rq->umr_mkey); break; default: /* MLX5_WQ_TYPE_CYCLIC */ @@ -702,7 +699,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - kfree(rq->mpwqe.info); + kvfree(rq->mpwqe.info); mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey); break; default: /* MLX5_WQ_TYPE_CYCLIC */ @@ -965,15 +962,15 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq) static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq) { - kfree(sq->db.di); + kvfree(sq->db.di); } static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa) { int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); - sq->db.di = kcalloc_node(wq_sz, sizeof(*sq->db.di), - GFP_KERNEL, numa); + sq->db.di = kvzalloc_node(sizeof(*sq->db.di) * wq_sz, + GFP_KERNEL, numa); if (!sq->db.di) { mlx5e_free_xdpsq_db(sq); return -ENOMEM; @@ -1024,15 +1021,15 @@ static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq) static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq) { - kfree(sq->db.ico_wqe); + kvfree(sq->db.ico_wqe); } static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa) { u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq); - sq->db.ico_wqe = kcalloc_node(wq_sz, sizeof(*sq->db.ico_wqe), - GFP_KERNEL, numa); + sq->db.ico_wqe = kvzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz, + GFP_KERNEL, numa); if (!sq->db.ico_wqe) return -ENOMEM; @@ -1077,8 +1074,8 @@ static void mlx5e_free_icosq(struct mlx5e_icosq *sq) static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq) { - kfree(sq->db.wqe_info); - kfree(sq->db.dma_fifo); + kvfree(sq->db.wqe_info); + kvfree(sq->db.dma_fifo); } static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) @@ -1086,10 +1083,10 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS; - sq->db.dma_fifo = kcalloc_node(df_sz, sizeof(*sq->db.dma_fifo), - GFP_KERNEL, numa); - sq->db.wqe_info = kcalloc_node(wq_sz, sizeof(*sq->db.wqe_info), - GFP_KERNEL, numa); + sq->db.dma_fifo = kvzalloc_node(df_sz * sizeof(*sq->db.dma_fifo), + GFP_KERNEL, numa); + sq->db.wqe_info = kvzalloc_node(wq_sz * sizeof(*sq->db.wqe_info), + GFP_KERNEL, numa); if (!sq->db.dma_fifo || !sq->db.wqe_info) { mlx5e_free_txqsq_db(sq); return -ENOMEM; @@ -1893,7 +1890,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, int err; int eqn; - c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); + c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); if (!c) return -ENOMEM; @@ -1979,7 +1976,7 @@ err_close_icosq_cq: err_napi_del: netif_napi_del(&c->napi); - kfree(c); + kvfree(c); return err; } @@ -2018,7 +2015,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) mlx5e_close_cq(&c->icosq.cq); netif_napi_del(&c->napi); - kfree(c); + kvfree(c); } #define DEFAULT_FRAG_SIZE (2048) @@ -2276,7 +2273,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, chs->num = chs->params.num_channels; chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL); - cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL); + cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL); if (!chs->c || !cparam) goto err_free; @@ -2287,7 +2284,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, goto err_close_channels; } - kfree(cparam); 
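/* Editor's sketch, not driver code: the surrounding mlx5e hunks replace
 * kcalloc_node()/kzalloc() with kvzalloc_node()/kvzalloc(), which try a
 * kmalloc-style contiguous allocation first and fall back to vmalloc for
 * large requests; kvfree() releases either kind. A minimal usage pattern
 * under those assumptions, with a hypothetical per-WQE array type
 * (struct example_wqe_info):
 */
#include <linux/mm.h>

struct example_wqe_info {
	u64 dma_addr;	/* hypothetical per-WQE bookkeeping */
};

static int example_alloc_db(struct example_wqe_info **info, int wq_sz, int node)
{
	/* kvzalloc_node() zeroes the buffer, matching the kcalloc_node() it replaces */
	*info = kvzalloc_node(wq_sz * sizeof(**info), GFP_KERNEL, node);
	if (!*info)
		return -ENOMEM;	/* caller unwinds with kvfree() */

	return 0;
}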
+ kvfree(cparam); return 0; err_close_channels: @@ -2296,7 +2293,7 @@ err_close_channels: err_free: kfree(chs->c); - kfree(cparam); + kvfree(cparam); chs->num = 0; return err; } @@ -3371,7 +3368,7 @@ static int mlx5e_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb, - priv, priv); + priv, priv, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb, priv); @@ -3405,6 +3402,9 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) struct mlx5e_vport_stats *vstats = &priv->stats.vport; struct mlx5e_pport_stats *pstats = &priv->stats.pport; + /* update HW stats in background for next time */ + queue_delayed_work(priv->wq, &priv->update_stats_work, 0); + if (mlx5e_is_uplink_rep(priv)) { stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok); stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok); @@ -4592,6 +4592,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->features |= NETIF_F_HIGHDMA; netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; + netdev->features |= NETIF_F_GSO_UDP_L4; + netdev->hw_features |= NETIF_F_GSO_UDP_L4; + netdev->priv_flags |= IFF_UNICAST_FLT; mlx5e_set_netdev_dev_addr(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 2b8040a3cdbd..8e3c5b4b90ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -797,7 +797,7 @@ static int mlx5e_rep_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb, - priv, priv); + priv, priv, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv); return 0; @@ -897,6 +897,9 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); + /* update HW stats in background for next time */ + queue_delayed_work(priv->wq, &priv->update_stats_work, 0); + memcpy(stats, &priv->stats.vf_vport, sizeof(*stats)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index d3a1dd20e41d..f763a6aebc2d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -487,7 +487,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR; sq->pc += MLX5E_UMR_WQEBBS; - mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &umr_wqe->ctrl); + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &umr_wqe->ctrl); return 0; @@ -601,6 +601,8 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) if (!rq->mpwqe.umr_in_progress) mlx5e_alloc_rx_mpwqe(rq, wq->head); + else + rq->stats->congst_umr += mlx5_wq_ll_missing(wq) > 2; return false; } @@ -1261,7 +1263,10 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) } if (unlikely(mpwrq_is_filler_cqe(cqe))) { - rq->stats->mpwqe_filler++; + struct mlx5e_rq_stats *stats = rq->stats; + + stats->mpwqe_filler_cqes++; + stats->mpwqe_filler_strides += cstrides; goto mpwrq_cqe_out; } @@ -1383,6 +1388,8 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) } while (!last_wqe); } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); + rq->stats->xdp_tx_cqe += i; + mlx5_cqwq_update_db_record(&cq->wq); /* ensure cq space is freed before 
enabling more cqes */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 1646859974ce..c0507fada0be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -44,6 +44,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) }, #ifdef CONFIG_MLX5_EN_TLS { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) }, @@ -59,6 +60,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) }, @@ -67,10 +69,13 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) }, @@ -80,6 +85,11 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) }, }; @@ -133,9 +143,11 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; s->rx_xdp_drop += rq_stats->xdp_drop; s->rx_xdp_tx += rq_stats->xdp_tx; + s->rx_xdp_tx_cqe += rq_stats->xdp_tx_cqe; s->rx_xdp_tx_full += rq_stats->xdp_tx_full; s->rx_wqe_err += rq_stats->wqe_err; - s->rx_mpwqe_filler += rq_stats->mpwqe_filler; + s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes; + s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides; s->rx_buff_alloc_err += rq_stats->buff_alloc_err; s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; @@ -145,6 +157,11 @@ void 
mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_cache_empty += rq_stats->cache_empty; s->rx_cache_busy += rq_stats->cache_busy; s->rx_cache_waive += rq_stats->cache_waive; + s->rx_congst_umr += rq_stats->congst_umr; + s->ch_events += ch_stats->events; + s->ch_poll += ch_stats->poll; + s->ch_arm += ch_stats->arm; + s->ch_aff_change += ch_stats->aff_change; s->ch_eq_rearm += ch_stats->eq_rearm; for (j = 0; j < priv->max_opened_tc; j++) { @@ -157,8 +174,10 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_tso_inner_packets += sq_stats->tso_inner_packets; s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes; s->tx_added_vlan_packets += sq_stats->added_vlan_packets; + s->tx_nop += sq_stats->nop; s->tx_queue_stopped += sq_stats->stopped; s->tx_queue_wake += sq_stats->wake; + s->tx_udp_seg_rem += sq_stats->udp_seg_rem; s->tx_queue_dropped += sq_stats->dropped; s->tx_cqe_err += sq_stats->cqe_err; s->tx_recover += sq_stats->recover; @@ -170,6 +189,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_tls_ooo += sq_stats->tls_ooo; s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes; #endif + s->tx_cqes += sq_stats->cqes; } } @@ -1107,12 +1127,14 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_cqe) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, @@ -1122,6 +1144,7 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) }, }; static const struct counter_desc sq_stats_desc[] = { @@ -1140,11 +1163,16 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) }, }; static const struct counter_desc ch_stats_desc[] = { + { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) }, + { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) }, + { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) }, + { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) }, { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) }, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 643153bb3607..fc3f66003edd 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -61,6 +61,7 @@ struct mlx5e_sw_stats { u64 tx_tso_inner_packets; u64 tx_tso_inner_bytes; u64 tx_added_vlan_packets; + u64 tx_nop; u64 rx_lro_packets; u64 rx_lro_bytes; u64 rx_removed_vlan_packets; @@ -70,6 +71,7 @@ struct mlx5e_sw_stats { u64 rx_csum_unnecessary_inner; u64 rx_xdp_drop; u64 rx_xdp_tx; + u64 rx_xdp_tx_cqe; u64 rx_xdp_tx_full; u64 tx_csum_none; u64 tx_csum_partial; @@ -78,10 +80,13 @@ struct mlx5e_sw_stats { u64 tx_queue_dropped; u64 tx_xmit_more; u64 tx_recover; + u64 tx_cqes; u64 tx_queue_wake; + u64 tx_udp_seg_rem; u64 tx_cqe_err; u64 rx_wqe_err; - u64 rx_mpwqe_filler; + u64 rx_mpwqe_filler_cqes; + u64 rx_mpwqe_filler_strides; u64 rx_buff_alloc_err; u64 rx_cqe_compress_blks; u64 rx_cqe_compress_pkts; @@ -91,6 +96,11 @@ struct mlx5e_sw_stats { u64 rx_cache_empty; u64 rx_cache_busy; u64 rx_cache_waive; + u64 rx_congst_umr; + u64 ch_events; + u64 ch_poll; + u64 ch_arm; + u64 ch_aff_change; u64 ch_eq_rearm; #ifdef CONFIG_MLX5_EN_TLS @@ -169,9 +179,11 @@ struct mlx5e_rq_stats { u64 removed_vlan_packets; u64 xdp_drop; u64 xdp_tx; + u64 xdp_tx_cqe; u64 xdp_tx_full; u64 wqe_err; - u64 mpwqe_filler; + u64 mpwqe_filler_cqes; + u64 mpwqe_filler_strides; u64 buff_alloc_err; u64 cqe_compress_blks; u64 cqe_compress_pkts; @@ -181,6 +193,7 @@ struct mlx5e_rq_stats { u64 cache_empty; u64 cache_busy; u64 cache_waive; + u64 congst_umr; }; struct mlx5e_sq_stats { @@ -196,6 +209,7 @@ struct mlx5e_sq_stats { u64 csum_partial_inner; u64 added_vlan_packets; u64 nop; + u64 udp_seg_rem; #ifdef CONFIG_MLX5_EN_TLS u64 tls_ooo; u64 tls_resync_bytes; @@ -206,11 +220,16 @@ struct mlx5e_sq_stats { u64 dropped; u64 recover; /* dirtied @completion */ - u64 wake ____cacheline_aligned_in_smp; + u64 cqes ____cacheline_aligned_in_smp; + u64 wake; u64 cqe_err; }; struct mlx5e_ch_stats { + u64 events; + u64 poll; + u64 arm; + u64 aff_change; u64 eq_rearm; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index f29deb44bf3b..f0739dae7b56 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -228,7 +228,10 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb) stats->tso_inner_packets++; stats->tso_inner_bytes += skb->len - ihs; } else { - ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) + ihs = skb_transport_offset(skb) + sizeof(struct udphdr); + else + ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); stats->tso_packets++; stats->tso_bytes += skb->len - ihs; } @@ -443,12 +446,11 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) sq = priv->txq2sq[skb_get_queue_mapping(skb)]; mlx5e_sq_fetch_wqe(sq, &wqe, &pi); -#ifdef CONFIG_MLX5_ACCEL /* might send skbs and update wqe and pi */ skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi); if (unlikely(!skb)) return NETDEV_TX_OK; -#endif + return mlx5e_sq_xmit(sq, skb, wqe, pi); } @@ -466,6 +468,7 @@ static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq, bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) { + struct mlx5e_sq_stats *stats; struct mlx5e_txqsq *sq; struct mlx5_cqe64 *cqe; u32 dma_fifo_cc; @@ -483,6 +486,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) if (!cqe) return false; + stats = sq->stats; + npkts = 0; nbytes = 0; @@ -511,7 +516,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) 
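A side note on the en_stats.h hunk above: the sq-stats struct is split so that fields dirtied on the xmit path and fields dirtied on the completion path live on separate cachelines, with the new cqes counter opening the completion group. A minimal userspace sketch of the idea, where alignas(64) stands in for the kernel's ____cacheline_aligned_in_smp and the field list is trimmed:

#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sq_stats {
    /* dirtied on the xmit path */
    uint64_t packets;
    uint64_t bytes;
    uint64_t nop;
    /* dirtied on the completion path; 'cqes' opens a new cacheline */
    alignas(64) uint64_t cqes;
    uint64_t wake;
    uint64_t cqe_err;
};

int main(void)
{
    /* prints 64 on the usual 64-byte-cacheline targets */
    printf("cqes starts at offset %zu\n", offsetof(struct sq_stats, cqes));
    return 0;
}

This keeps the producer (xmit) and consumer (CQ poll) from false-sharing one line when both sides bump their counters concurrently.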
queue_work(cq->channel->priv->wq, &sq->recover.recover_work); } - sq->stats->cqe_err++; + stats->cqe_err++; } do { @@ -556,6 +561,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); + stats->cqes += i; + mlx5_cqwq_update_db_record(&cq->wq); /* ensure cq space is freed before enabling more cqes */ @@ -571,7 +578,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) MLX5E_SQ_STOP_ROOM) && !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) { netif_tx_wake_queue(sq->txq); - sq->stats->wake++; + stats->wake++; } return (i == MLX5E_TX_CQ_POLL_BUDGET); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 1b17f682693b..4e1f99a98d5d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -74,10 +74,13 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) { struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, napi); + struct mlx5e_ch_stats *ch_stats = c->stats; bool busy = false; int work_done = 0; int i; + ch_stats->poll++; + for (i = 0; i < c->num_tc; i++) busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget); @@ -94,6 +97,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) if (busy) { if (likely(mlx5e_channel_no_affinity_change(c))) return budget; + ch_stats->aff_change++; if (budget && work_done == budget) work_done--; } @@ -101,6 +105,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) if (unlikely(!napi_complete_done(napi, work_done))) return work_done; + ch_stats->arm++; + for (i = 0; i < c->num_tc; i++) { mlx5e_handle_tx_dim(&c->sq[i]); mlx5e_cq_arm(&c->sq[i].cq); @@ -118,8 +124,9 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq) { struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); - cq->event_ctr++; napi_schedule(cq->napi); + cq->event_ctr++; + cq->channel->stats->events++; } void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index 0b47126815b6..2bd4c3184eba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -229,6 +229,11 @@ static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq) return !wq->cur_sz; } +static inline int mlx5_wq_ll_missing(struct mlx5_wq_ll *wq) +{ + return wq->fbc.sz_m1 - wq->cur_sz; +} + static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix) { return mlx5_frag_buf_get_wqe(&wq->fbc, ix); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 968b88af2ef5..52437363766a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1503,7 +1503,8 @@ static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type, static int mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, - struct tcf_block *block, bool ingress) + struct tcf_block *block, bool ingress, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_acl_block *acl_block; @@ -1518,7 +1519,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, return -ENOMEM; block_cb = __tcf_block_cb_register(block, mlxsw_sp_setup_tc_block_cb_flower, - mlxsw_sp, acl_block); + mlxsw_sp, acl_block, extack); if (IS_ERR(block_cb)) { 
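The mlx5_wq_ll_missing() helper introduced in wq.h above is plain fill accounting; a toy model with simplified types, assuming sz_m1 is the capacity the driver tracks and cur_sz the number of posted WQEs:

#include <stdint.h>
#include <stdio.h>

struct wq_ll {
    uint32_t sz_m1;  /* capacity as the driver accounts it */
    uint32_t cur_sz; /* WQEs currently posted */
};

static int wq_ll_is_empty(const struct wq_ll *wq)
{
    return !wq->cur_sz;
}

static int wq_ll_missing(const struct wq_ll *wq)
{
    return wq->sz_m1 - wq->cur_sz;
}

int main(void)
{
    struct wq_ll wq = { .sz_m1 = 1023, .cur_sz = 1000 };

    /* prints: empty=0 missing=23 -- 23 slots left for a refill loop */
    printf("empty=%d missing=%d\n", wq_ll_is_empty(&wq), wq_ll_missing(&wq));
    return 0;
}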
err = PTR_ERR(block_cb); goto err_cb_register; @@ -1541,7 +1542,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, err_block_bind: if (!tcf_block_cb_decref(block_cb)) { - __tcf_block_cb_unregister(block_cb); + __tcf_block_cb_unregister(block, block_cb); err_cb_register: mlxsw_sp_acl_block_destroy(acl_block); } @@ -1571,7 +1572,7 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port, err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block, mlxsw_sp_port, ingress); if (!err && !tcf_block_cb_decref(block_cb)) { - __tcf_block_cb_unregister(block_cb); + __tcf_block_cb_unregister(block, block_cb); mlxsw_sp_acl_block_destroy(acl_block); } } @@ -1596,11 +1597,12 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, switch (f->command) { case TC_BLOCK_BIND: err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port, - mlxsw_sp_port); + mlxsw_sp_port, f->extack); if (err) return err; err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, - f->block, ingress); + f->block, ingress, + f->extack); if (err) { tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port); return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 6aaaf3d9ba31..88bd27ace8d9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -163,7 +163,8 @@ struct mlxsw_sp_rif_ops { const struct mlxsw_sp_rif_params *params); int (*configure)(struct mlxsw_sp_rif *rif); void (*deconfigure)(struct mlxsw_sp_rif *rif); - struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif); + struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack); }; static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree); @@ -342,10 +343,6 @@ static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif) mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS); } -static struct mlxsw_sp_rif * -mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *dev); - #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1) struct mlxsw_sp_prefix_usage { @@ -5967,7 +5964,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, return NOTIFY_DONE; } -static struct mlxsw_sp_rif * +struct mlxsw_sp_rif * mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, const struct net_device *dev) { @@ -6125,6 +6122,11 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif) return rif->dev; } +struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif) +{ + return rif->fid; +} + static struct mlxsw_sp_rif * mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_rif_params *params, @@ -6162,7 +6164,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, rif->ops = ops; if (ops->fid_get) { - fid = ops->fid_get(rif); + fid = ops->fid_get(rif, extack); if (IS_ERR(fid)) { err = PTR_ERR(fid); goto err_fid_get; @@ -6267,7 +6269,7 @@ mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, } /* FID was already created, just take a reference */ - fid = rif->ops->fid_get(rif); + fid = rif->ops->fid_get(rif, extack); err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid); if (err) goto err_fid_port_vid_map; @@ -6775,7 +6777,8 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif) } static struct mlxsw_sp_fid * -mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif) 
+mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index); } @@ -6865,9 +6868,23 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif) } static struct mlxsw_sp_fid * -mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif) +mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { - u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1; + u16 vid; + int err; + + if (is_vlan_dev(rif->dev)) { + vid = vlan_dev_vlan_id(rif->dev); + } else { + err = br_vlan_get_pvid(rif->dev, &vid); + if (!vid) + err = -EINVAL; + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID"); + return ERR_PTR(err); + } + } return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid); } @@ -6937,7 +6954,8 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif) } static struct mlxsw_sp_fid * -mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif) +mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index a01edcf56797..52e25695625c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -66,6 +66,8 @@ struct mlxsw_sp_neigh_entry; struct mlxsw_sp_nexthop; struct mlxsw_sp_ipip_entry; +struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev); struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp, u16 rif_index); u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif); @@ -75,6 +77,7 @@ u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev); int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif); u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp); const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif); +struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif); int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif, enum mlxsw_sp_rif_counter_dir dir, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index eea5666a86b2..da94e1eb9e16 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1135,6 +1135,39 @@ err_port_vlan_set: return err; } +static int +mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev, + const struct switchdev_obj_port_vlan *vlan) +{ + struct mlxsw_sp_rif *rif; + struct mlxsw_sp_fid *fid; + u16 pvid; + u16 vid; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev); + if (!rif) + return 0; + fid = mlxsw_sp_rif_fid(rif); + pvid = mlxsw_sp_fid_8021q_vid(fid); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + if (vlan->flags & BRIDGE_VLAN_INFO_PVID) { + if (vid != pvid) { + netdev_err(br_dev, "Can't change PVID, it's used by router interface\n"); + return -EBUSY; + } + } else { + if (vid == pvid) { + netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n"); + return -EBUSY; + } + } + } + + return 0; +} + static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans) @@ -1146,8 +1179,18 @@ 
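The mlxsw_sp_br_ban_rif_pvid_change() guard above enforces one rule: while a router interface is bound to the FID of the bridge PVID, a VLAN update may neither move the PVID to another VID nor strip the PVID flag from the VID in use. The decision reduced to a standalone sketch, with FLAG_PVID standing in for BRIDGE_VLAN_INFO_PVID and -1 for -EBUSY:

#include <stdio.h>

#define FLAG_PVID 0x1

/* returns 0 if the VLAN change is allowed, -1 otherwise */
static int ban_rif_pvid_change(unsigned short rif_pvid, unsigned short vid,
                               unsigned int flags)
{
    if (flags & FLAG_PVID)
        return vid != rif_pvid ? -1 : 0; /* would move the PVID */
    return vid == rif_pvid ? -1 : 0;     /* would strip the PVID */
}

int main(void)
{
    /* RIF was set up over PVID 10 */
    printf("set pvid 20:            %d\n", ban_rif_pvid_change(10, 20, FLAG_PVID));
    printf("re-add vid 10 as pvid:  %d\n", ban_rif_pvid_change(10, 10, FLAG_PVID));
    printf("drop pvid flag on 10:   %d\n", ban_rif_pvid_change(10, 10, 0));
    return 0;
}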
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_port *bridge_port; u16 vid; - if (netif_is_bridge_master(orig_dev)) - return -EOPNOTSUPP; + if (netif_is_bridge_master(orig_dev)) { + int err = 0; + + if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) && + br_vlan_enabled(orig_dev) && + switchdev_trans_ph_prepare(trans)) + err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp, + orig_dev, vlan); + if (!err) + err = -EOPNOTSUPP; + return err; + } if (switchdev_trans_ph_prepare(trans)) return 0; diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index dd947e4dd3ce..e1747a490066 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -828,7 +828,7 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter) } if (!mac_address_valid) - random_ether_addr(adapter->mac_address); + eth_random_addr(adapter->mac_address); lan743x_mac_set_address(adapter, adapter->mac_address); ether_addr_copy(netdev->dev_addr, adapter->mac_address); return 0; diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 776a8a9be8e3..1a4f2bb48ead 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -148,12 +148,191 @@ static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot) return 0; } +static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask) +{ + /* Select the VID to configure */ + ocelot_write(ocelot, ANA_TABLES_VLANTIDX_V_INDEX(vid), + ANA_TABLES_VLANTIDX); + /* Set the vlan port members mask and issue a write command */ + ocelot_write(ocelot, ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(mask) | + ANA_TABLES_VLANACCESS_CMD_WRITE, + ANA_TABLES_VLANACCESS); + + return ocelot_vlant_wait_for_completion(ocelot); +} + +static void ocelot_vlan_mode(struct ocelot_port *port, + netdev_features_t features) +{ + struct ocelot *ocelot = port->ocelot; + u8 p = port->chip_port; + u32 val; + + /* Filtering */ + val = ocelot_read(ocelot, ANA_VLANMASK); + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + val |= BIT(p); + else + val &= ~BIT(p); + ocelot_write(ocelot, val, ANA_VLANMASK); +} + +static void ocelot_vlan_port_apply(struct ocelot *ocelot, + struct ocelot_port *port) +{ + u32 val; + + /* Ingress clasification (ANA_PORT_VLAN_CFG) */ + /* Default vlan to clasify for untagged frames (may be zero) */ + val = ANA_PORT_VLAN_CFG_VLAN_VID(port->pvid); + if (port->vlan_aware) + val |= ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | + ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1); + + ocelot_rmw_gix(ocelot, val, + ANA_PORT_VLAN_CFG_VLAN_VID_M | + ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | + ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M, + ANA_PORT_VLAN_CFG, port->chip_port); + + /* Drop frames with multicast source address */ + val = ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA; + if (port->vlan_aware && !port->vid) + /* If port is vlan-aware and tagged, drop untagged and priority + * tagged frames. + */ + val |= ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA | + ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | + ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA; + ocelot_write_gix(ocelot, val, ANA_PORT_DROP_CFG, port->chip_port); + + /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q. 
*/ + val = REW_TAG_CFG_TAG_TPID_CFG(0); + + if (port->vlan_aware) { + if (port->vid) + /* Tag all frames except when VID == DEFAULT_VLAN */ + val |= REW_TAG_CFG_TAG_CFG(1); + else + /* Tag all frames */ + val |= REW_TAG_CFG_TAG_CFG(3); + } + ocelot_rmw_gix(ocelot, val, + REW_TAG_CFG_TAG_TPID_CFG_M | + REW_TAG_CFG_TAG_CFG_M, + REW_TAG_CFG, port->chip_port); + + /* Set default VLAN and tag type to 8021Q. */ + val = REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q) | + REW_PORT_VLAN_CFG_PORT_VID(port->vid); + ocelot_rmw_gix(ocelot, val, + REW_PORT_VLAN_CFG_PORT_TPID_M | + REW_PORT_VLAN_CFG_PORT_VID_M, + REW_PORT_VLAN_CFG, port->chip_port); +} + +static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid, + bool untagged) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + int ret; + + /* Add the port MAC address to with the right VLAN information */ + ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid, + ENTRYTYPE_LOCKED); + + /* Make the port a member of the VLAN */ + ocelot->vlan_mask[vid] |= BIT(port->chip_port); + ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); + if (ret) + return ret; + + /* Default ingress vlan classification */ + if (pvid) + port->pvid = vid; + + /* Untagged egress vlan clasification */ + if (untagged) + port->vid = vid; + + ocelot_vlan_port_apply(ocelot, port); + + return 0; +} + +static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + int ret; + + /* 8021q removes VID 0 on module unload for all interfaces + * with VLAN filtering feature. We need to keep it to receive + * untagged traffic. + */ + if (vid == 0) + return 0; + + /* Del the port MAC address to with the right VLAN information */ + ocelot_mact_forget(ocelot, dev->dev_addr, vid); + + /* Stop the port from being a member of the vlan */ + ocelot->vlan_mask[vid] &= ~BIT(port->chip_port); + ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); + if (ret) + return ret; + + /* Ingress */ + if (port->pvid == vid) + port->pvid = 0; + + /* Egress */ + if (port->vid == vid) + port->vid = 0; + + ocelot_vlan_port_apply(ocelot, port); + + return 0; +} + static void ocelot_vlan_init(struct ocelot *ocelot) { + u16 port, vid; + /* Clear VLAN table, by default all ports are members of all VLANs */ ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT, ANA_TABLES_VLANACCESS); ocelot_vlant_wait_for_completion(ocelot); + + /* Configure the port VLAN memberships */ + for (vid = 1; vid < VLAN_N_VID; vid++) { + ocelot->vlan_mask[vid] = 0; + ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); + } + + /* Because VLAN filtering is enabled, we need VID 0 to get untagged + * traffic. It is added automatically if 8021q module is loaded, but + * we can't rely on it since module may be not loaded. + */ + ocelot->vlan_mask[0] = GENMASK(ocelot->num_phys_ports - 1, 0); + ocelot_vlant_set_mask(ocelot, 0, ocelot->vlan_mask[0]); + + /* Configure the CPU port to be VLAN aware */ + ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) | + ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | + ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1), + ANA_PORT_VLAN_CFG, ocelot->num_phys_ports); + + /* Set vlan ingress filter mask to all ports but the CPU port by + * default. 
+ */ + ocelot_write(ocelot, GENMASK(9, 0), ANA_VLANMASK); + + for (port = 0; port < ocelot->num_phys_ports; port++) { + ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port); + ocelot_write_gix(ocelot, 0, REW_TAG_CFG, port); + } } /* Watermark encode @@ -539,6 +718,20 @@ static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct ocelot_port *port = netdev_priv(dev); struct ocelot *ocelot = port->ocelot; + if (!vid) { + if (!port->vlan_aware) + /* If the bridge is not VLAN aware and no VID was + * provided, set it to pvid to ensure the MAC entry + * matches incoming untagged packets + */ + vid = port->pvid; + else + /* If the bridge is VLAN aware a VID must be provided as + * otherwise the learnt entry wouldn't match any frame. + */ + return -EINVAL; + } + return ocelot_mact_learn(ocelot, port->chip_port, addr, vid, ENTRYTYPE_NORMAL); } @@ -690,6 +883,30 @@ end: return ret; } +static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + return ocelot_vlan_vid_add(dev, vid, false, true); +} + +static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + return ocelot_vlan_vid_del(dev, vid); +} + +static int ocelot_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct ocelot_port *port = netdev_priv(dev); + netdev_features_t changed = dev->features ^ features; + + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) + ocelot_vlan_mode(port, features); + + return 0; +} + static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_open = ocelot_port_open, .ndo_stop = ocelot_port_stop, @@ -701,6 +918,9 @@ static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_fdb_add = ocelot_fdb_add, .ndo_fdb_del = ocelot_fdb_del, .ndo_fdb_dump = ocelot_fdb_dump, + .ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid, + .ndo_set_features = ocelot_set_features, }; static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data) @@ -780,6 +1000,8 @@ static const struct ethtool_ops ocelot_ethtool_ops = { .get_strings = ocelot_get_strings, .get_ethtool_stats = ocelot_get_ethtool_stats, .get_sset_count = ocelot_get_sset_count, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int ocelot_port_attr_get(struct net_device *dev, @@ -914,6 +1136,10 @@ static int ocelot_port_attr_set(struct net_device *dev, case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: ocelot_port_attr_ageing_set(ocelot_port, attr->u.ageing_time); break; + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + ocelot_port->vlan_aware = attr->u.vlan_filtering; + ocelot_vlan_port_apply(ocelot_port->ocelot, ocelot_port); + break; case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: ocelot_port_attr_mc_set(ocelot_port, !attr->u.mc_disabled); break; @@ -925,6 +1151,40 @@ static int ocelot_port_attr_set(struct net_device *dev, return err; } +static int ocelot_port_obj_add_vlan(struct net_device *dev, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + int ret; + u16 vid; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + ret = ocelot_vlan_vid_add(dev, vid, + vlan->flags & BRIDGE_VLAN_INFO_PVID, + vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); + if (ret) + return ret; + } + + return 0; +} + +static int ocelot_port_vlan_del_vlan(struct net_device *dev, + const struct switchdev_obj_port_vlan *vlan) +{ + int ret; + u16 vid; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + ret = ocelot_vlan_vid_del(dev, 
vid); + + if (ret) + return ret; + } + + return 0; +} + static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot, const unsigned char *addr, u16 vid) @@ -951,7 +1211,7 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev, bool new = false; if (!vid) - vid = 1; + vid = port->pvid; mc = ocelot_multicast_get(ocelot, mdb->addr, vid); if (!mc) { @@ -992,7 +1252,7 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev, u16 vid = mdb->vid; if (!vid) - vid = 1; + vid = port->pvid; mc = ocelot_multicast_get(ocelot, mdb->addr, vid); if (!mc) @@ -1024,6 +1284,11 @@ static int ocelot_port_obj_add(struct net_device *dev, int ret = 0; switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + ret = ocelot_port_obj_add_vlan(dev, + SWITCHDEV_OBJ_PORT_VLAN(obj), + trans); + break; case SWITCHDEV_OBJ_ID_PORT_MDB: ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj), trans); @@ -1041,6 +1306,10 @@ static int ocelot_port_obj_del(struct net_device *dev, int ret = 0; switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + ret = ocelot_port_vlan_del_vlan(dev, + SWITCHDEV_OBJ_PORT_VLAN(obj)); + break; case SWITCHDEV_OBJ_ID_PORT_MDB: ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj)); break; @@ -1086,6 +1355,142 @@ static void ocelot_port_bridge_leave(struct ocelot_port *ocelot_port, if (!ocelot->bridge_mask) ocelot->hw_bridge_dev = NULL; + + /* Clear bridge vlan settings before calling ocelot_vlan_port_apply */ + ocelot_port->vlan_aware = 0; + ocelot_port->pvid = 0; + ocelot_port->vid = 0; +} + +static void ocelot_set_aggr_pgids(struct ocelot *ocelot) +{ + int i, port, lag; + + /* Reset destination and aggregation PGIDS */ + for (port = 0; port < ocelot->num_phys_ports; port++) + ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port); + + for (i = PGID_AGGR; i < PGID_SRC; i++) + ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0), + ANA_PGID_PGID, i); + + /* Now, set PGIDs for each LAG */ + for (lag = 0; lag < ocelot->num_phys_ports; lag++) { + unsigned long bond_mask; + int aggr_count = 0; + u8 aggr_idx[16]; + + bond_mask = ocelot->lags[lag]; + if (!bond_mask) + continue; + + for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) { + // Destination mask + ocelot_write_rix(ocelot, bond_mask, + ANA_PGID_PGID, port); + aggr_idx[aggr_count] = port; + aggr_count++; + } + + for (i = PGID_AGGR; i < PGID_SRC; i++) { + u32 ac; + + ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i); + ac &= ~bond_mask; + ac |= BIT(aggr_idx[i % aggr_count]); + ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i); + } + } +} + +static void ocelot_setup_lag(struct ocelot *ocelot, int lag) +{ + unsigned long bond_mask = ocelot->lags[lag]; + unsigned int p; + + for_each_set_bit(p, &bond_mask, ocelot->num_phys_ports) { + u32 port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p); + + port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M; + + /* Use lag port as logical port for port i */ + ocelot_write_gix(ocelot, port_cfg | + ANA_PORT_PORT_CFG_PORTID_VAL(lag), + ANA_PORT_PORT_CFG, p); + } +} + +static int ocelot_port_lag_join(struct ocelot_port *ocelot_port, + struct net_device *bond) +{ + struct ocelot *ocelot = ocelot_port->ocelot; + int p = ocelot_port->chip_port; + int lag, lp; + struct net_device *ndev; + u32 bond_mask = 0; + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(bond, ndev) { + struct ocelot_port *port = netdev_priv(ndev); + + bond_mask |= BIT(port->chip_port); + } + rcu_read_unlock(); + + lp = __ffs(bond_mask); + + /* If the new port is the lowest one, use it as the logical 
port from + * now on + */ + if (p == lp) { + lag = p; + ocelot->lags[p] = bond_mask; + bond_mask &= ~BIT(p); + if (bond_mask) { + lp = __ffs(bond_mask); + ocelot->lags[lp] = 0; + } + } else { + lag = lp; + ocelot->lags[lp] |= BIT(p); + } + + ocelot_setup_lag(ocelot, lag); + ocelot_set_aggr_pgids(ocelot); + + return 0; +} + +static void ocelot_port_lag_leave(struct ocelot_port *ocelot_port, + struct net_device *bond) +{ + struct ocelot *ocelot = ocelot_port->ocelot; + int p = ocelot_port->chip_port; + u32 port_cfg; + int i; + + /* Remove port from any lag */ + for (i = 0; i < ocelot->num_phys_ports; i++) + ocelot->lags[i] &= ~BIT(ocelot_port->chip_port); + + /* if it was the logical port of the lag, move the lag config to the + * next port + */ + if (ocelot->lags[p]) { + int n = __ffs(ocelot->lags[p]); + + ocelot->lags[n] = ocelot->lags[p]; + ocelot->lags[p] = 0; + + ocelot_setup_lag(ocelot, n); + } + + port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p); + port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M; + ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(p), + ANA_PORT_PORT_CFG, p); + + ocelot_set_aggr_pgids(ocelot); } /* Checks if the net_device instance given to us originate from our driver. */ @@ -1113,6 +1518,17 @@ static int ocelot_netdevice_port_event(struct net_device *dev, else ocelot_port_bridge_leave(ocelot_port, info->upper_dev); + + ocelot_vlan_port_apply(ocelot_port->ocelot, + ocelot_port); + } + if (netif_is_lag_master(info->upper_dev)) { + if (info->linking) + err = ocelot_port_lag_join(ocelot_port, + info->upper_dev); + else + ocelot_port_lag_leave(ocelot_port, + info->upper_dev); } break; default: @@ -1129,6 +1545,20 @@ static int ocelot_netdevice_event(struct notifier_block *unused, struct net_device *dev = netdev_notifier_info_to_dev(ptr); int ret = 0; + if (event == NETDEV_PRECHANGEUPPER && + netif_is_lag_master(info->upper_dev)) { + struct netdev_lag_upper_info *lag_upper_info = info->upper_info; + struct netlink_ext_ack *extack; + + if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { + extack = netdev_notifier_info_to_extack(&info->info); + NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); + + ret = -EINVAL; + goto notify; + } + } + if (netif_is_lag_master(dev)) { struct net_device *slave; struct list_head *iter; @@ -1176,6 +1606,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port, dev->ethtool_ops = &ocelot_ethtool_ops; dev->switchdev_ops = &ocelot_port_switchdev_ops; + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN); dev->dev_addr[ETH_ALEN - 1] += port; ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid, @@ -1187,6 +1620,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port, goto err_register_netdev; } + /* Basic L2 initialization */ + ocelot_vlan_port_apply(ocelot, ocelot_port); + return 0; err_register_netdev: @@ -1201,6 +1637,11 @@ int ocelot_init(struct ocelot *ocelot) int i, cpu = ocelot->num_phys_ports; char queue_name[32]; + ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports, + sizeof(u32), GFP_KERNEL); + if (!ocelot->lags) + return -ENOMEM; + ocelot->stats = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports * ocelot->num_stats, sizeof(u64), GFP_KERNEL); diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 097bd12a10d4..616bec30dfa3 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -493,7 +493,7 @@ 
struct ocelot { u8 num_cpu_ports; struct ocelot_port **ports; - u16 lags[16]; + u32 *lags; /* Keep track of the vlan port masks */ u32 vlan_mask[VLAN_N_VID]; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 40216d56dddc..4dbf7cba6377 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -209,7 +209,7 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev, case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, nfp_bpf_setup_tc_block_cb, - nn, nn); + nn, nn, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, nfp_bpf_setup_tc_block_cb, diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 4a6d2db75071..e56b815a8dc6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -34,6 +34,7 @@ #include <linux/bitfield.h> #include <net/pkt_cls.h> #include <net/switchdev.h> +#include <net/tc_act/tc_csum.h> #include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_mirred.h> #include <net/tc_act/tc_pedit.h> @@ -44,6 +45,8 @@ #include "main.h" #include "../nfp_net_repr.h" +#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (TUNNEL_CSUM | TUNNEL_KEY) + static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan) { size_t act_size = sizeof(struct nfp_fl_pop_vlan); @@ -235,9 +238,12 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun); struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); u32 tmp_set_ip_tun_type_index = 0; + struct flowi4 flow = {}; /* Currently support one pre-tunnel so index is always 0. */ int pretun_idx = 0; + struct rtable *rt; struct net *net; + int err; if (ip_tun->options_len) return -EOPNOTSUPP; @@ -254,7 +260,28 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index); set_tun->tun_id = ip_tun->key.tun_id; - set_tun->ttl = net->ipv4.sysctl_ip_default_ttl; + + /* Do a route lookup to determine ttl - if fails then use default. + * Note that CONFIG_INET is a requirement of CONFIG_NET_SWITCHDEV so + * must be defined here. + */ + flow.daddr = ip_tun->key.u.ipv4.dst; + flow.flowi4_proto = IPPROTO_UDP; + rt = ip_route_output_key(net, &flow); + err = PTR_ERR_OR_ZERO(rt); + if (!err) { + set_tun->ttl = ip4_dst_hoplimit(&rt->dst); + ip_rt_put(rt); + } else { + set_tun->ttl = net->ipv4.sysctl_ip_default_ttl; + } + + set_tun->tos = ip_tun->key.tos; + + if (!(ip_tun->key.tun_flags & TUNNEL_KEY) || + ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS) + return -EOPNOTSUPP; + set_tun->tun_flags = ip_tun->key.tun_flags; /* Complete pre_tunnel action. 
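The TTL derivation in the nfp tunnel-action hunk above (route lookup first, sysctl_ip_default_ttl only as fallback) is worth seeing in isolation. A kernel-context sketch with an invented function name, not the driver code itself:

#include <linux/err.h>
#include <net/ip.h>
#include <net/route.h>

static u8 tun_ttl_for_daddr(struct net *net, __be32 daddr)
{
    struct flowi4 flow = {};
    struct rtable *rt;
    u8 ttl;

    flow.daddr = daddr;
    flow.flowi4_proto = IPPROTO_UDP;

    rt = ip_route_output_key(net, &flow);
    if (IS_ERR(rt))
        return net->ipv4.sysctl_ip_default_ttl;

    /* hop limit of the resolved dst, then drop the route reference */
    ttl = ip4_dst_hoplimit(&rt->dst);
    ip_rt_put(rt);
    return ttl;
}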
*/ pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst; @@ -398,8 +425,27 @@ nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off, return 0; } +static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto) +{ + switch (ip_proto) { + case 0: + /* Filter doesn't force proto match, + * both TCP and UDP will be updated if encountered + */ + return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP; + case IPPROTO_TCP: + return TCA_CSUM_UPDATE_FLAG_TCP; + case IPPROTO_UDP: + return TCA_CSUM_UPDATE_FLAG_UDP; + default: + /* All other protocols will be ignored by FW */ + return 0; + } +} + static int -nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) +nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, + char *nfp_action, int *a_len, u32 *csum_updated) { struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src; struct nfp_fl_set_ip4_addrs set_ip_addr; @@ -409,6 +455,7 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) int idx, nkeys, err; size_t act_size; u32 offset, cmd; + u8 ip_proto = 0; memset(&set_ip6_dst, 0, sizeof(set_ip6_dst)); memset(&set_ip6_src, 0, sizeof(set_ip6_src)); @@ -451,6 +498,15 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) return err; } + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *basic; + + basic = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_BASIC, + flow->key); + ip_proto = basic->ip_proto; + } + if (set_eth.head.len_lw) { act_size = sizeof(set_eth); memcpy(nfp_action, &set_eth, act_size); @@ -459,6 +515,10 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) act_size = sizeof(set_ip_addr); memcpy(nfp_action, &set_ip_addr, act_size); *a_len += act_size; + + /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */ + *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR | + nfp_fl_csum_l4_to_flag(ip_proto); } else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) { /* TC compiles set src and dst IPv6 address as a single action, * the hardware requires this to be 2 separate actions. @@ -471,18 +531,30 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst, act_size); *a_len += act_size; + + /* Hardware will automatically fix TCP/UDP checksum. */ + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } else if (set_ip6_dst.head.len_lw) { act_size = sizeof(set_ip6_dst); memcpy(nfp_action, &set_ip6_dst, act_size); *a_len += act_size; + + /* Hardware will automatically fix TCP/UDP checksum. */ + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } else if (set_ip6_src.head.len_lw) { act_size = sizeof(set_ip6_src); memcpy(nfp_action, &set_ip6_src, act_size); *a_len += act_size; + + /* Hardware will automatically fix TCP/UDP checksum. */ + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } else if (set_tport.head.len_lw) { act_size = sizeof(set_tport); memcpy(nfp_action, &set_tport, act_size); *a_len += act_size; + + /* Hardware will automatically fix TCP/UDP checksum. 
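nfp_fl_csum_l4_to_flag() above is a straight proto-to-flags map; a runnable userspace sketch, with local CSUM_FLAG_* constants standing in for the real TCA_CSUM_UPDATE_FLAG_* values:

#include <netinet/in.h>
#include <stdio.h>

#define CSUM_FLAG_TCP 0x1
#define CSUM_FLAG_UDP 0x2

static unsigned int csum_l4_to_flag(unsigned char ip_proto)
{
    switch (ip_proto) {
    case 0: /* no L4 match in the filter: either protocol may be hit */
        return CSUM_FLAG_TCP | CSUM_FLAG_UDP;
    case IPPROTO_TCP:
        return CSUM_FLAG_TCP;
    case IPPROTO_UDP:
        return CSUM_FLAG_UDP;
    default: /* anything else: firmware leaves the checksum alone */
        return 0;
    }
}

int main(void)
{
    printf("%#x %#x %#x\n", csum_l4_to_flag(0),
           csum_l4_to_flag(IPPROTO_TCP), csum_l4_to_flag(IPPROTO_UDP));
    return 0;
}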
*/ + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } return 0; @@ -493,12 +565,18 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, bool last, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, - int *out_cnt) + int *out_cnt, u32 *csum_updated) { struct nfp_flower_priv *priv = app->priv; struct nfp_fl_output *output; int err, prelag_size; + /* If csum_updated has not been reset by now, it means HW will + * incorrectly update csums when they are not requested. + */ + if (*csum_updated) + return -EOPNOTSUPP; + if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) return -EOPNOTSUPP; @@ -529,10 +607,11 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a, static int nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, + struct tc_cls_flower_offload *flow, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, - int *out_cnt) + int *out_cnt, u32 *csum_updated) { struct nfp_fl_set_ipv4_udp_tun *set_tun; struct nfp_fl_pre_tunnel *pre_tun; @@ -545,14 +624,14 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, } else if (is_tcf_mirred_egress_redirect(a)) { err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev, true, tun_type, tun_out_cnt, - out_cnt); + out_cnt, csum_updated); if (err) return err; } else if (is_tcf_mirred_egress_mirror(a)) { err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev, false, tun_type, tun_out_cnt, - out_cnt); + out_cnt, csum_updated); if (err) return err; @@ -602,8 +681,17 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, /* Tunnel decap is handled by default so accept action. */ return 0; } else if (is_tcf_pedit(a)) { - if (nfp_fl_pedit(a, &nfp_fl->action_data[*a_len], a_len)) + if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len], + a_len, csum_updated)) + return -EOPNOTSUPP; + } else if (is_tcf_csum(a)) { + /* csum action requests recalc of something we have not fixed */ + if (tcf_csum_update_flags(a) & ~*csum_updated) return -EOPNOTSUPP; + /* If we will correctly fix the csum we can remove it from the + * csum update list. Which will later be used to check support. + */ + *csum_updated &= ~tcf_csum_update_flags(a); } else { /* Currently we do not handle any other actions. 
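Putting the pieces above together: csum_updated is the set of checksums the hardware will rewrite as a side effect of pedit, an explicit tc csum action may only request bits already in that set (and consumes them), and any bit still set when an output action is emitted means an unrequested rewrite, so the offload is refused. A worked toy run of that bookkeeping, values illustrative:

#include <stdio.h>

#define CSUM_IPV4 0x1
#define CSUM_TCP  0x2

int main(void)
{
    unsigned int csum_updated = 0;
    unsigned int requested;

    /* pedit on IPv4 addresses of a TCP flow: hardware rewrites both */
    csum_updated |= CSUM_IPV4 | CSUM_TCP;

    /* tc csum action asks for exactly what the hardware already fixes */
    requested = CSUM_IPV4 | CSUM_TCP;
    if (requested & ~csum_updated) {
        printf("reject: recalc requested that we cannot provide\n");
        return 1;
    }
    csum_updated &= ~requested;

    /* at output time, leftover bits would mean unrequested rewrites */
    printf(csum_updated ? "reject: -EOPNOTSUPP\n" : "offload ok\n");
    return 0;
}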
*/ return -EOPNOTSUPP; @@ -620,6 +708,7 @@ int nfp_flower_compile_action(struct nfp_app *app, int act_len, act_cnt, err, tun_out_cnt, out_cnt; enum nfp_flower_tun_type tun_type; const struct tc_action *a; + u32 csum_updated = 0; LIST_HEAD(actions); memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); @@ -632,8 +721,9 @@ int nfp_flower_compile_action(struct nfp_app *app, tcf_exts_to_list(flow->exts, &actions); list_for_each_entry(a, &actions, list) { - err = nfp_flower_loop_action(app, a, nfp_flow, &act_len, netdev, - &tun_type, &tun_out_cnt, &out_cnt); + err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len, + netdev, &tun_type, &tun_out_cnt, + &out_cnt, &csum_updated); if (err) return err; act_cnt++; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 4a7f3510a296..15f1eacd76b6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -203,9 +203,9 @@ struct nfp_fl_set_ipv4_udp_tun { __be16 reserved; __be64 tun_id __packed; __be32 tun_type_index; - __be16 reserved2; + __be16 tun_flags; u8 ttl; - u8 reserved3; + u8 tos; __be32 extra[2]; }; diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c index 0c4c957717ea..bf10598f66ae 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c @@ -564,8 +564,9 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag, if (lag_upper_info && lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP && (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH || - (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 && - lag_upper_info->hash_type != NETDEV_LAG_HASH_E34))) { + (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 && + lag_upper_info->hash_type != NETDEV_LAG_HASH_E34 && + lag_upper_info->hash_type != NETDEV_LAG_HASH_UNKNOWN))) { can_offload = false; nfp_flower_cmsg_warn(priv->app, "Unable to offload tx_type %u hash %u\n", diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 525057bee0ed..43b9bf12b174 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -584,9 +584,9 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, return nfp_flower_del_offload(app, netdev, flower, egress); case TC_CLSFLOWER_STATS: return nfp_flower_get_stats(app, netdev, flower, egress); + default: + return -EOPNOTSUPP; } - - return -EOPNOTSUPP; } int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data, @@ -638,7 +638,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, nfp_flower_setup_tc_block_cb, - repr, repr); + repr, repr, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, nfp_flower_setup_tc_block_cb, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index d4c27f849f9b..7df5ca37bfb8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -945,11 +945,12 @@ err_free: /** * nfp_net_tx_complete() - Handled completed TX packets - * @tx_ring: TX ring structure + * @tx_ring: TX ring structure + * @budget: NAPI budget (only used as bool to determine if in NAPI context) * * Return: Number of completed TX 
descriptors */ -static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) +static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; struct nfp_net_dp *dp = &r_vec->nfp_net->dp; @@ -999,7 +1000,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) /* check for last gather fragment */ if (fidx == nr_frags - 1) - dev_consume_skb_any(skb); + napi_consume_skb(skb, budget); tx_ring->txbufs[idx].dma_addr = 0; tx_ring->txbufs[idx].skb = NULL; @@ -1828,7 +1829,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget) unsigned int pkts_polled = 0; if (r_vec->tx_ring) - nfp_net_tx_complete(r_vec->tx_ring); + nfp_net_tx_complete(r_vec->tx_ring, budget); if (r_vec->rx_ring) pkts_polled = nfp_net_rx(r_vec->rx_ring, budget); @@ -2062,7 +2063,7 @@ static void nfp_ctrl_poll(unsigned long arg) struct nfp_net_r_vector *r_vec = (void *)arg; spin_lock_bh(&r_vec->lock); - nfp_net_tx_complete(r_vec->tx_ring); + nfp_net_tx_complete(r_vec->tx_ring, 0); __nfp_ctrl_tx_queued(r_vec); spin_unlock_bh(&r_vec->lock); @@ -3115,6 +3116,21 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); } +#ifdef CONFIG_NET_POLL_CONTROLLER +static void nfp_net_netpoll(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + int i; + + /* nfp_net's NAPIs are statically allocated so even if there is a race + * with reconfig path this will simply try to schedule some disabled + * NAPI instances. + */ + for (i = 0; i < nn->dp.num_stack_tx_rings; i++) + napi_schedule_irqoff(&nn->r_vecs[i].napi); +} +#endif + static void nfp_net_stat64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { @@ -3482,6 +3498,9 @@ const struct net_device_ops nfp_net_netdev_ops = { .ndo_get_stats64 = nfp_net_stat64, .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = nfp_net_netpoll, +#endif .ndo_set_vf_mac = nfp_app_set_vf_mac, .ndo_set_vf_vlan = nfp_app_set_vf_vlan, .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 26d1cc4e2906..6a79c8e4a7a4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -233,12 +233,10 @@ nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) static void nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { - struct nfp_app *app; - - app = nfp_app_from_netdev(netdev); - if (!app) - return; + struct nfp_app *app = nfp_app_from_netdev(netdev); + strlcpy(drvinfo->bus_info, pci_name(app->pdev), + sizeof(drvinfo->bus_info)); nfp_get_drvinfo(app, app->pdev, "*", drvinfo); } @@ -452,7 +450,7 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); - return NN_RVEC_GATHER_STATS + nn->dp.num_r_vecs * NN_RVEC_PER_Q_STATS; + return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS; } static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data) @@ -460,7 +458,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data) struct nfp_net *nn = netdev_priv(netdev); int i; - for (i = 0; i < nn->dp.num_r_vecs; i++) { + for (i = 0; i < nn->max_r_vecs; i++) { data = 
nfp_pr_et(data, "rvec_%u_rx_pkts", i); data = nfp_pr_et(data, "rvec_%u_tx_pkts", i); data = nfp_pr_et(data, "rvec_%u_tx_busy", i); @@ -486,7 +484,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) u64 tmp[NN_RVEC_GATHER_STATS]; unsigned int i, j; - for (i = 0; i < nn->dp.num_r_vecs; i++) { + for (i = 0; i < nn->max_r_vecs; i++) { unsigned int start; do { @@ -521,15 +519,13 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) return data; } -static unsigned int -nfp_vnic_get_hw_stats_count(unsigned int rx_rings, unsigned int tx_rings) +static unsigned int nfp_vnic_get_hw_stats_count(unsigned int num_vecs) { - return NN_ET_GLOBAL_STATS_LEN + (rx_rings + tx_rings) * 2; + return NN_ET_GLOBAL_STATS_LEN + num_vecs * 4; } static u8 * -nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings, - unsigned int tx_rings, bool repr) +nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr) { int swap_off, i; @@ -549,36 +545,29 @@ nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings, for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++) data = nfp_pr_et(data, nfp_net_et_stats[i].name); - for (i = 0; i < tx_rings; i++) { - data = nfp_pr_et(data, "txq_%u_pkts", i); - data = nfp_pr_et(data, "txq_%u_bytes", i); - } - - for (i = 0; i < rx_rings; i++) { + for (i = 0; i < num_vecs; i++) { data = nfp_pr_et(data, "rxq_%u_pkts", i); data = nfp_pr_et(data, "rxq_%u_bytes", i); + data = nfp_pr_et(data, "txq_%u_pkts", i); + data = nfp_pr_et(data, "txq_%u_bytes", i); } return data; } static u64 * -nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, - unsigned int rx_rings, unsigned int tx_rings) +nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs) { unsigned int i; for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) *data++ = readq(mem + nfp_net_et_stats[i].off); - for (i = 0; i < tx_rings; i++) { - *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i)); - *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8); - } - - for (i = 0; i < rx_rings; i++) { + for (i = 0; i < num_vecs; i++) { *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i)); *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8); + *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i)); + *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8); } return data; @@ -633,8 +622,7 @@ static void nfp_net_get_strings(struct net_device *netdev, switch (stringset) { case ETH_SS_STATS: data = nfp_vnic_get_sw_stats_strings(netdev, data); - data = nfp_vnic_get_hw_stats_strings(data, nn->dp.num_rx_rings, - nn->dp.num_tx_rings, + data = nfp_vnic_get_hw_stats_strings(data, nn->max_r_vecs, false); data = nfp_mac_get_stats_strings(netdev, data); data = nfp_app_port_get_stats_strings(nn->port, data); @@ -649,8 +637,7 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats, struct nfp_net *nn = netdev_priv(netdev); data = nfp_vnic_get_sw_stats(netdev, data); - data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, - nn->dp.num_rx_rings, nn->dp.num_tx_rings); + data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->max_r_vecs); data = nfp_mac_get_stats(netdev, data); data = nfp_app_port_get_stats(nn->port, data); } @@ -662,8 +649,7 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset) switch (sset) { case ETH_SS_STATS: return nfp_vnic_get_sw_stats_count(netdev) + - nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings, - nn->dp.num_tx_rings) + + nfp_vnic_get_hw_stats_count(nn->max_r_vecs) + nfp_mac_get_stats_count(netdev) + nfp_app_port_get_stats_count(nn->port); 
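The ethtool rework above replaces separate rxq/txq loops sized by num_rx_rings/num_tx_rings with a single walk over max_r_vecs, interleaving the four per-vector entries so the string table and the value dump cannot drift apart when ring counts change at runtime. A toy model of the resulting layout, names invented:

#include <stdio.h>

#define GLOBAL_STATS 1 /* stand-in for NN_ET_GLOBAL_STATS_LEN */

/* count must match the strings emitted below: 4 entries per vector */
static unsigned int hw_stats_count(unsigned int num_vecs)
{
    return GLOBAL_STATS + num_vecs * 4;
}

int main(void)
{
    unsigned int i, vecs = 2;

    puts("dev_total"); /* the single 'global' stat of this toy model */
    for (i = 0; i < vecs; i++)
        printf("rxq_%u_pkts\nrxq_%u_bytes\ntxq_%u_pkts\ntxq_%u_bytes\n",
               i, i, i, i);
    printf("count=%u\n", hw_stats_count(vecs)); /* 9 strings, count=9 */
    return 0;
}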
default: @@ -679,7 +665,7 @@ static void nfp_port_get_strings(struct net_device *netdev, switch (stringset) { case ETH_SS_STATS: if (nfp_port_is_vnic(port)) - data = nfp_vnic_get_hw_stats_strings(data, 0, 0, true); + data = nfp_vnic_get_hw_stats_strings(data, 0, true); else data = nfp_mac_get_stats_strings(netdev, data); data = nfp_app_port_get_stats_strings(port, data); @@ -694,7 +680,7 @@ nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats, struct nfp_port *port = nfp_port_from_netdev(netdev); if (nfp_port_is_vnic(port)) - data = nfp_vnic_get_hw_stats(data, port->vnic, 0, 0); + data = nfp_vnic_get_hw_stats(data, port->vnic, 0); else data = nfp_mac_get_stats(netdev, data); data = nfp_app_port_get_stats(port, data); @@ -708,7 +694,7 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset) switch (sset) { case ETH_SS_STATS: if (nfp_port_is_vnic(port)) - count = nfp_vnic_get_hw_stats_count(0, 0); + count = nfp_vnic_get_hw_stats_count(0); else count = nfp_mac_get_stats_count(netdev); count += nfp_app_port_get_stats_count(port); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 749655c329b2..c8d0b1016a64 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -1248,7 +1248,7 @@ static void nfp6000_free(struct nfp_cpp *cpp) kfree(nfp); } -static void nfp6000_read_serial(struct device *dev, u8 *serial) +static int nfp6000_read_serial(struct device *dev, u8 *serial) { struct pci_dev *pdev = to_pci_dev(dev); int pos; @@ -1256,25 +1256,29 @@ static void nfp6000_read_serial(struct device *dev, u8 *serial) pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); if (!pos) { - memset(serial, 0, NFP_SERIAL_LEN); - return; + dev_err(dev, "can't find PCIe Serial Number Capability\n"); + return -EINVAL; } pci_read_config_dword(pdev, pos + 4, ®); put_unaligned_be16(reg >> 16, serial + 4); pci_read_config_dword(pdev, pos + 8, ®); put_unaligned_be32(reg, serial); + + return 0; } -static u16 nfp6000_get_interface(struct device *dev) +static int nfp6000_get_interface(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); int pos; u32 reg; pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); - if (!pos) - return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 0xff); + if (!pos) { + dev_err(dev, "can't find PCIe Serial Number Capability\n"); + return -EINVAL; + } pci_read_config_dword(pdev, pos + 4, ®); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index b0da3d436850..c338d539fa96 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -364,8 +364,8 @@ struct nfp_cpp_operations { int (*init)(struct nfp_cpp *cpp); void (*free)(struct nfp_cpp *cpp); - void (*read_serial)(struct device *dev, u8 *serial); - u16 (*get_interface)(struct device *dev); + int (*read_serial)(struct device *dev, u8 *serial); + int (*get_interface)(struct device *dev); int (*area_init)(struct nfp_cpp_area *area, u32 dest, unsigned long long address, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index ef30597aa319..73de57a09800 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -1163,10 +1163,10 @@ 
nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, { const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0); struct nfp_cpp *cpp; + int ifc, err; u32 mask[2]; u32 xpbaddr; size_t tgt; - int err; cpp = kzalloc(sizeof(*cpp), GFP_KERNEL); if (!cpp) { @@ -1176,9 +1176,19 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, cpp->op = ops; cpp->priv = priv; - cpp->interface = ops->get_interface(parent); - if (ops->read_serial) - ops->read_serial(parent, cpp->serial); + + ifc = ops->get_interface(parent); + if (ifc < 0) { + err = ifc; + goto err_free_cpp; + } + cpp->interface = ifc; + if (ops->read_serial) { + err = ops->read_serial(parent, cpp->serial); + if (err) + goto err_free_cpp; + } + rwlock_init(&cpp->resource_lock); init_waitqueue_head(&cpp->waitq); lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key); @@ -1191,7 +1201,7 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, err = device_register(&cpp->dev); if (err < 0) { put_device(&cpp->dev); - goto err_dev; + goto err_free_cpp; } dev_set_drvdata(&cpp->dev, cpp); @@ -1238,7 +1248,7 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, err_out: device_unregister(&cpp->dev); -err_dev: +err_free_cpp: kfree(cpp); err_malloc: return ERR_PTR(err); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Makefile b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile index 31288d4ad248..862de0f3bc41 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/Makefile +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_PCH_GBE) += pch_gbe.o pch_gbe-y := pch_gbe_phy.o pch_gbe_ethtool.o pch_gbe_param.o -pch_gbe-y += pch_gbe_api.o pch_gbe_main.o +pch_gbe-y += pch_gbe_main.o diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 697e29dd4bd3..44c2f291e766 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -326,32 +326,6 @@ struct pch_gbe_regs { #define PCH_GBE_FC_FULL 3 #define PCH_GBE_FC_DEFAULT PCH_GBE_FC_FULL - -struct pch_gbe_hw; -/** - * struct pch_gbe_functions - HAL APi function pointer - * @get_bus_info: for pch_gbe_hal_get_bus_info - * @init_hw: for pch_gbe_hal_init_hw - * @read_phy_reg: for pch_gbe_hal_read_phy_reg - * @write_phy_reg: for pch_gbe_hal_write_phy_reg - * @reset_phy: for pch_gbe_hal_phy_hw_reset - * @sw_reset_phy: for pch_gbe_hal_phy_sw_reset - * @power_up_phy: for pch_gbe_hal_power_up_phy - * @power_down_phy: for pch_gbe_hal_power_down_phy - * @read_mac_addr: for pch_gbe_hal_read_mac_addr - */ -struct pch_gbe_functions { - void (*get_bus_info) (struct pch_gbe_hw *); - s32 (*init_hw) (struct pch_gbe_hw *); - s32 (*read_phy_reg) (struct pch_gbe_hw *, u32, u16 *); - s32 (*write_phy_reg) (struct pch_gbe_hw *, u32, u16); - void (*reset_phy) (struct pch_gbe_hw *); - void (*sw_reset_phy) (struct pch_gbe_hw *); - void (*power_up_phy) (struct pch_gbe_hw *hw); - void (*power_down_phy) (struct pch_gbe_hw *hw); - s32 (*read_mac_addr) (struct pch_gbe_hw *); -}; - /** * struct pch_gbe_mac_info - MAC information * @addr[6]: Store the MAC address @@ -394,17 +368,6 @@ struct pch_gbe_phy_info { /*! * @ingroup Gigabit Ether driver Layer - * @struct pch_gbe_bus_info - * @brief Bus information - */ -struct pch_gbe_bus_info { - u8 type; - u8 speed; - u8 width; -}; - -/*! 
- * @ingroup Gigabit Ether driver Layer * @struct pch_gbe_hw * @brief Hardware information */ @@ -414,10 +377,8 @@ struct pch_gbe_hw { struct pch_gbe_regs __iomem *reg; spinlock_t miim_lock; - const struct pch_gbe_functions *func; struct pch_gbe_mac_info mac; struct pch_gbe_phy_info phy; - struct pch_gbe_bus_info bus; }; /** @@ -680,7 +641,6 @@ void pch_gbe_set_ethtool_ops(struct net_device *netdev); /* pch_gbe_mac.c */ s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw); -s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw); u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg, u16 data); #endif /* _PCH_GBE_H_ */ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c deleted file mode 100644 index 51250363566b..000000000000 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c +++ /dev/null @@ -1,262 +0,0 @@ -/* - * Copyright (C) 1999 - 2010 Intel Corporation. - * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. - * - * This code was derived from the Intel e1000e Linux driver. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - */ -#include "pch_gbe.h" -#include "pch_gbe_phy.h" -#include "pch_gbe_api.h" - -/* bus type values */ -#define pch_gbe_bus_type_unknown 0 -#define pch_gbe_bus_type_pci 1 -#define pch_gbe_bus_type_pcix 2 -#define pch_gbe_bus_type_pci_express 3 -#define pch_gbe_bus_type_reserved 4 - -/* bus speed values */ -#define pch_gbe_bus_speed_unknown 0 -#define pch_gbe_bus_speed_33 1 -#define pch_gbe_bus_speed_66 2 -#define pch_gbe_bus_speed_100 3 -#define pch_gbe_bus_speed_120 4 -#define pch_gbe_bus_speed_133 5 -#define pch_gbe_bus_speed_2500 6 -#define pch_gbe_bus_speed_reserved 7 - -/* bus width values */ -#define pch_gbe_bus_width_unknown 0 -#define pch_gbe_bus_width_pcie_x1 1 -#define pch_gbe_bus_width_pcie_x2 2 -#define pch_gbe_bus_width_pcie_x4 4 -#define pch_gbe_bus_width_32 5 -#define pch_gbe_bus_width_64 6 -#define pch_gbe_bus_width_reserved 7 - -/** - * pch_gbe_plat_get_bus_info - Obtain bus information for adapter - * @hw: Pointer to the HW structure - */ -static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw) -{ - hw->bus.type = pch_gbe_bus_type_pci_express; - hw->bus.speed = pch_gbe_bus_speed_2500; - hw->bus.width = pch_gbe_bus_width_pcie_x1; -} - -/** - * pch_gbe_plat_init_hw - Initialize hardware - * @hw: Pointer to the HW structure - * Returns: - * 0: Successfully - * Negative value: Failed-EBUSY - */ -static s32 pch_gbe_plat_init_hw(struct pch_gbe_hw *hw) -{ - s32 ret_val; - - ret_val = pch_gbe_phy_get_id(hw); - if (ret_val) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n"); - return ret_val; - } - pch_gbe_phy_init_setting(hw); - /* Setup Mac interface option RGMII */ -#ifdef PCH_GBE_MAC_IFOP_RGMII - pch_gbe_phy_set_rgmii(hw); -#endif - return ret_val; -} - -static const struct pch_gbe_functions pch_gbe_ops = { - .get_bus_info = 
pch_gbe_plat_get_bus_info, - .init_hw = pch_gbe_plat_init_hw, - .read_phy_reg = pch_gbe_phy_read_reg_miic, - .write_phy_reg = pch_gbe_phy_write_reg_miic, - .reset_phy = pch_gbe_phy_hw_reset, - .sw_reset_phy = pch_gbe_phy_sw_reset, - .power_up_phy = pch_gbe_phy_power_up, - .power_down_phy = pch_gbe_phy_power_down, - .read_mac_addr = pch_gbe_mac_read_mac_addr -}; - -/** - * pch_gbe_plat_init_function_pointers - Init func ptrs - * @hw: Pointer to the HW structure - */ -static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw) -{ - /* Set PHY parameter */ - hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US; - /* Set function pointers */ - hw->func = &pch_gbe_ops; -} - -/** - * pch_gbe_hal_setup_init_funcs - Initializes function pointers - * @hw: Pointer to the HW structure - * Returns: - * 0: Successfully - * ENOSYS: Function is not registered - */ -s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw) -{ - if (!hw->reg) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: Registers not mapped\n"); - return -ENOSYS; - } - pch_gbe_plat_init_function_pointers(hw); - return 0; -} - -/** - * pch_gbe_hal_get_bus_info - Obtain bus information for adapter - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw) -{ - if (!hw->func->get_bus_info) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return; - } - hw->func->get_bus_info(hw); -} - -/** - * pch_gbe_hal_init_hw - Initialize hardware - * @hw: Pointer to the HW structure - * Returns: - * 0: Successfully - * ENOSYS: Function is not registered - */ -s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw) -{ - if (!hw->func->init_hw) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return -ENOSYS; - } - return hw->func->init_hw(hw); -} - -/** - * pch_gbe_hal_read_phy_reg - Reads PHY register - * @hw: Pointer to the HW structure - * @offset: The register to read - * @data: The buffer to store the 16-bit read. - * Returns: - * 0: Successfully - * Negative value: Failed - */ -s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, - u16 *data) -{ - if (!hw->func->read_phy_reg) - return 0; - return hw->func->read_phy_reg(hw, offset, data); -} - -/** - * pch_gbe_hal_write_phy_reg - Writes PHY register - * @hw: Pointer to the HW structure - * @offset: The register to read - * @data: The value to write. 
- * Returns: - * 0: Successfully - * Negative value: Failed - */ -s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, - u16 data) -{ - if (!hw->func->write_phy_reg) - return 0; - return hw->func->write_phy_reg(hw, offset, data); -} - -/** - * pch_gbe_hal_phy_hw_reset - Hard PHY reset - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw) -{ - if (!hw->func->reset_phy) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return; - } - hw->func->reset_phy(hw); -} - -/** - * pch_gbe_hal_phy_sw_reset - Soft PHY reset - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw) -{ - if (!hw->func->sw_reset_phy) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return; - } - hw->func->sw_reset_phy(hw); -} - -/** - * pch_gbe_hal_read_mac_addr - Reads MAC address - * @hw: Pointer to the HW structure - * Returns: - * 0: Successfully - * ENOSYS: Function is not registered - */ -s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw) -{ - if (!hw->func->read_mac_addr) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return -ENOSYS; - } - return hw->func->read_mac_addr(hw); -} - -/** - * pch_gbe_hal_power_up_phy - Power up PHY - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw) -{ - if (hw->func->power_up_phy) - hw->func->power_up_phy(hw); -} - -/** - * pch_gbe_hal_power_down_phy - Power down PHY - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw) -{ - if (hw->func->power_down_phy) - hw->func->power_down_phy(hw); -} diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h deleted file mode 100644 index 91ce07c8306c..000000000000 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (C) 1999 - 2010 Intel Corporation. - * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. - * - * This code was derived from the Intel e1000e Linux driver. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. 
- */ -#ifndef _PCH_GBE_API_H_ -#define _PCH_GBE_API_H_ - -#include "pch_gbe_phy.h" - -s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw); -void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw); -s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw); -s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 *data); -s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data); -void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw); -void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw); -s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw); -void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw); -void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw); - -#endif diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index 731ce1e419e4..adaa0024adfe 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -17,7 +17,7 @@ * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include "pch_gbe.h" -#include "pch_gbe_api.h" +#include "pch_gbe_phy.h" /** * pch_gbe_stats - Stats item information @@ -125,7 +125,7 @@ static int pch_gbe_set_link_ksettings(struct net_device *netdev, u32 advertising; int ret; - pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET); + pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET); memcpy(&copy_ecmd, ecmd, sizeof(*ecmd)); @@ -204,7 +204,7 @@ static void pch_gbe_get_regs(struct net_device *netdev, *regs_buff++ = ioread32(&hw->reg->INT_ST + i); /* PHY register */ for (i = 0; i < PCH_GBE_PHY_REGS_LEN; i++) { - pch_gbe_hal_read_phy_reg(&adapter->hw, i, &tmp); + pch_gbe_phy_read_reg_miic(&adapter->hw, i, &tmp); *regs_buff++ = tmp; } } @@ -349,25 +349,12 @@ static int pch_gbe_set_ringparam(struct net_device *netdev, err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring); if (err) goto err_setup_tx; - /* save the new, restore the old in order to free it, * then restore the new back again */ -#ifdef RINGFREE - adapter->rx_ring = rx_old; - adapter->tx_ring = tx_old; - pch_gbe_free_rx_resources(adapter, adapter->rx_ring); - pch_gbe_free_tx_resources(adapter, adapter->tx_ring); - kfree(tx_old); - kfree(rx_old); - adapter->rx_ring = rxdr; - adapter->tx_ring = txdr; -#else pch_gbe_free_rx_resources(adapter, rx_old); pch_gbe_free_tx_resources(adapter, tx_old); kfree(tx_old); kfree(rx_old); adapter->rx_ring = rxdr; adapter->tx_ring = txdr; -#endif err = pch_gbe_up(adapter); } return err; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 34a1581eda95..43c0c10dfeb7 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -18,7 +18,7 @@ */ #include "pch_gbe.h" -#include "pch_gbe_api.h" +#include "pch_gbe_phy.h" #include <linux/module.h> #include <linux/net_tstamp.h> #include <linux/ptp_classify.h> @@ -34,7 +34,6 @@ const char pch_driver_version[] = DRV_VERSION; #define PCH_GBE_DMA_ALIGN 0 #define PCH_GBE_DMA_PADDING 2 #define PCH_GBE_WATCHDOG_PERIOD (5 * HZ) /* watchdog time */ -#define PCH_GBE_COPYBREAK_DEFAULT 256 #define PCH_GBE_PCI_BAR 1 #define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */ @@ -113,8 +112,6 @@ const char pch_driver_version[] = DRV_VERSION; #define MINNOW_PHY_RESET_GPIO 13 -static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; - static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); static void
pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, int data); @@ -290,7 +287,7 @@ static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) * Returns: * 0: Successful. */ -s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw) +static s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw) { struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); u32 adr1a, adr1b; @@ -369,9 +366,7 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw) /* Read the MAC address. and store to the private data */ pch_gbe_mac_read_mac_addr(hw); iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET); -#ifdef PCH_GBE_MAC_IFOP_RGMII iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE); -#endif pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST); /* Setup the receive addresses */ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); @@ -416,44 +411,6 @@ static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count) pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY); } - -/** - * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses - * @hw: Pointer to the HW structure - * @mc_addr_list: Array of multicast addresses to program - * @mc_addr_count: Number of multicast addresses to program - * @mar_used_count: The first MAC Address register free to program - * @mar_total_num: Total number of supported MAC Address Registers - */ -static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw, - u8 *mc_addr_list, u32 mc_addr_count, - u32 mar_used_count, u32 mar_total_num) -{ - u32 i, adrmask; - - /* Load the first set of multicast addresses into the exact - * filters (RAR). If there are not enough to fill the RAR - * array, clear the filters. - */ - for (i = mar_used_count; i < mar_total_num; i++) { - if (mc_addr_count) { - pch_gbe_mac_mar_set(hw, mc_addr_list, i); - mc_addr_count--; - mc_addr_list += ETH_ALEN; - } else { - /* Clear MAC address mask */ - adrmask = ioread32(&hw->reg->ADDR_MASK); - iowrite32((adrmask | (0x0001 << i)), - &hw->reg->ADDR_MASK); - /* wait busy */ - pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY); - /* Clear MAC address */ - iowrite32(0, &hw->reg->mac_adr[i].high); - iowrite32(0, &hw->reg->mac_adr[i].low); - } - } -} - /** * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings * @hw: Pointer to the HW structure @@ -763,14 +720,23 @@ void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) void pch_gbe_reset(struct pch_gbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; + struct pch_gbe_hw *hw = &adapter->hw; + s32 ret_val; - pch_gbe_mac_reset_hw(&adapter->hw); + pch_gbe_mac_reset_hw(hw); /* reprogram multicast address register after reset */ pch_gbe_set_multi(netdev); /* Setup the receive address. */ - pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES); - if (pch_gbe_hal_init_hw(&adapter->hw)) - netdev_err(netdev, "Hardware Error\n"); + pch_gbe_mac_init_rx_addrs(hw, PCH_GBE_MAR_ENTRIES); + + ret_val = pch_gbe_phy_get_id(hw); + if (ret_val) { + netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n"); + return; + } + pch_gbe_phy_init_setting(hw); + /* Setup Mac interface option RGMII */ + pch_gbe_phy_set_rgmii(hw); } /** @@ -1036,7 +1002,6 @@ static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed, unsigned long rgmii = 0; /* Set the RGMII control. 
*/ -#ifdef PCH_GBE_MAC_IFOP_RGMII switch (speed) { case SPEED_10: rgmii = (PCH_GBE_RGMII_RATE_2_5M | @@ -1052,10 +1017,6 @@ static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed, break; } iowrite32(rgmii, &hw->reg->RGMII_CTRL); -#else /* GMII */ - rgmii = 0; - iowrite32(rgmii, &hw->reg->RGMII_CTRL); -#endif } static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed, u16 duplex) @@ -2029,12 +1990,8 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter) adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048; hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US; - /* Initialize the hardware-specific values */ - if (pch_gbe_hal_setup_init_funcs(hw)) { - netdev_err(netdev, "Hardware Initialization Failure\n"); - return -EIO; - } if (pch_gbe_alloc_queues(adapter)) { netdev_err(netdev, "Unable to allocate memory for queues\n"); return -ENOMEM; @@ -2075,7 +2032,7 @@ static int pch_gbe_open(struct net_device *netdev) err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring); if (err) goto err_setup_rx; - pch_gbe_hal_power_up_phy(hw); + pch_gbe_phy_power_up(hw); err = pch_gbe_up(adapter); if (err) goto err_up; @@ -2084,7 +2041,7 @@ static int pch_gbe_open(struct net_device *netdev) err_up: if (!adapter->wake_up_evt) - pch_gbe_hal_power_down_phy(hw); + pch_gbe_phy_power_down(hw); pch_gbe_free_rx_resources(adapter, adapter->rx_ring); err_setup_rx: pch_gbe_free_tx_resources(adapter, adapter->tx_ring); @@ -2107,7 +2064,7 @@ static int pch_gbe_stop(struct net_device *netdev) pch_gbe_down(adapter); if (!adapter->wake_up_evt) - pch_gbe_hal_power_down_phy(hw); + pch_gbe_phy_power_down(hw); pch_gbe_free_tx_resources(adapter, adapter->tx_ring); pch_gbe_free_rx_resources(adapter, adapter->rx_ring); return 0; @@ -2148,50 +2105,52 @@ static void pch_gbe_set_multi(struct net_device *netdev) struct pch_gbe_adapter *adapter = netdev_priv(netdev); struct pch_gbe_hw *hw = &adapter->hw; struct netdev_hw_addr *ha; - u8 *mta_list; - u32 rctl; - int i; - int mc_count; + u32 rctl, adrmask; + int mc_count, i; netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags); - /* Check for Promiscuous and All Multicast modes */ + /* By default enable address & multicast filtering */ rctl = ioread32(&hw->reg->RX_MODE); + rctl |= PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN; + + /* Promiscuous mode disables all hardware address filtering */ + if (netdev->flags & IFF_PROMISC) + rctl &= ~(PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN); + + /* If we want to monitor more multicast addresses than the hardware can + * support then disable hardware multicast filtering. 
+ */ mc_count = netdev_mc_count(netdev); - if ((netdev->flags & IFF_PROMISC)) { - rctl &= ~PCH_GBE_ADD_FIL_EN; - rctl &= ~PCH_GBE_MLT_FIL_EN; - } else if ((netdev->flags & IFF_ALLMULTI)) { - /* all the multicasting receive permissions */ - rctl |= PCH_GBE_ADD_FIL_EN; + if ((netdev->flags & IFF_ALLMULTI) || mc_count >= PCH_GBE_MAR_ENTRIES) rctl &= ~PCH_GBE_MLT_FIL_EN; - } else { - if (mc_count >= PCH_GBE_MAR_ENTRIES) { - /* all the multicasting receive permissions */ - rctl |= PCH_GBE_ADD_FIL_EN; - rctl &= ~PCH_GBE_MLT_FIL_EN; - } else { - rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN); - } - } + iowrite32(rctl, &hw->reg->RX_MODE); - if (mc_count >= PCH_GBE_MAR_ENTRIES) - return; - mta_list = kmalloc_array(ETH_ALEN, mc_count, GFP_ATOMIC); - if (!mta_list) + /* If we're not using multicast filtering then there's no point + * configuring the unused MAC address registers. + */ + if (!(rctl & PCH_GBE_MLT_FIL_EN)) return; - /* The shared function expects a packed array of only addresses. */ - i = 0; - netdev_for_each_mc_addr(ha, netdev) { - if (i == mc_count) - break; - memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN); + /* Load the first set of multicast addresses into MAC address registers + * for use by hardware filtering. + */ + i = 1; + netdev_for_each_mc_addr(ha, netdev) + pch_gbe_mac_mar_set(hw, ha->addr, i++); + + /* If there are spare MAC registers, mask & clear them */ + for (; i < PCH_GBE_MAR_ENTRIES; i++) { + /* Clear MAC address mask */ + adrmask = ioread32(&hw->reg->ADDR_MASK); + iowrite32(adrmask | BIT(i), &hw->reg->ADDR_MASK); + /* wait busy */ + pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY); + /* Clear MAC address */ + iowrite32(0, &hw->reg->mac_adr[i].high); + iowrite32(0, &hw->reg->mac_adr[i].low); } - pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1, - PCH_GBE_MAR_ENTRIES); - kfree(mta_list); netdev_dbg(netdev, "RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n", @@ -2437,7 +2396,7 @@ static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev) } pci_set_master(pdev); pci_enable_wake(pdev, PCI_D0, 0); - pch_gbe_hal_power_up_phy(hw); + pch_gbe_phy_power_up(hw); pch_gbe_reset(adapter); /* Clear wake up status */ pch_gbe_mac_set_wol_event(hw, 0); @@ -2482,7 +2441,7 @@ static int __pch_gbe_suspend(struct pci_dev *pdev) pch_gbe_mac_set_wol_event(hw, wufc); pci_disable_device(pdev); } else { - pch_gbe_hal_power_down_phy(hw); + pch_gbe_phy_power_down(hw); pch_gbe_mac_set_wol_event(hw, wufc); pci_disable_device(pdev); } @@ -2511,7 +2470,7 @@ static int pch_gbe_resume(struct device *device) return err; } pci_set_master(pdev); - pch_gbe_hal_power_up_phy(hw); + pch_gbe_phy_power_up(hw); pch_gbe_reset(adapter); /* Clear wake on lan control and status */ pch_gbe_mac_set_wol_event(hw, 0); @@ -2541,7 +2500,7 @@ static void pch_gbe_remove(struct pci_dev *pdev) cancel_work_sync(&adapter->reset_task); unregister_netdev(netdev); - pch_gbe_hal_phy_hw_reset(&adapter->hw); + pch_gbe_phy_hw_reset(&adapter->hw); free_netdev(netdev); } @@ -2627,10 +2586,9 @@ static int pch_gbe_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "PHY initialize error\n"); goto err_free_adapter; } - pch_gbe_hal_get_bus_info(&adapter->hw); /* Read the MAC address. 
and store to the private data */ - ret = pch_gbe_hal_read_mac_addr(&adapter->hw); + ret = pch_gbe_mac_read_mac_addr(&adapter->hw); if (ret) { dev_err(&pdev->dev, "MAC address Read Error\n"); goto err_free_adapter; @@ -2677,7 +2635,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, return 0; err_free_adapter: - pch_gbe_hal_phy_hw_reset(&adapter->hw); + pch_gbe_phy_hw_reset(&adapter->hw); err_free_netdev: free_netdev(netdev); return ret; @@ -2776,32 +2734,7 @@ static struct pci_driver pch_gbe_driver = { .shutdown = pch_gbe_shutdown, .err_handler = &pch_gbe_err_handler }; - - -static int __init pch_gbe_init_module(void) -{ - int ret; - - pr_info("EG20T PCH Gigabit Ethernet Driver - version %s\n",DRV_VERSION); - ret = pci_register_driver(&pch_gbe_driver); - if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) { - if (copybreak == 0) { - pr_info("copybreak disabled\n"); - } else { - pr_info("copybreak enabled for packets <= %u bytes\n", - copybreak); - } - } - return ret; -} - -static void __exit pch_gbe_exit_module(void) -{ - pci_unregister_driver(&pch_gbe_driver); -} - -module_init(pch_gbe_init_module); -module_exit(pch_gbe_exit_module); +module_pci_driver(pch_gbe_driver); MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver"); MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>"); @@ -2809,8 +2742,4 @@ MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id); -module_param(copybreak, uint, 0644); -MODULE_PARM_DESC(copybreak, - "Maximum size of packet that is copied to a new buffer on receive"); - /* pch_gbe_main.c */ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c index a5cad5ea9436..6b35b573beef 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c @@ -184,7 +184,7 @@ s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data) * pch_gbe_phy_sw_reset - PHY software reset * @hw: Pointer to the HW structure */ -void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw) +static void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw) { u16 phy_ctrl; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h index 95ad0151ad02..23ac38711619 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h @@ -21,12 +21,10 @@ #define PCH_GBE_PHY_REGS_LEN 32 #define PCH_GBE_PHY_RESET_DELAY_US 10 -#define PCH_GBE_MAC_IFOP_RGMII s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw); s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data); s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data); -void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw); void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw); void pch_gbe_phy_power_up(struct pch_gbe_hw *hw); void pch_gbe_phy_power_down(struct pch_gbe_hw *hw); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 0c744b9c6e0a..77e386ebff09 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -212,7 +212,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) vp->max_tx_bw = MAX_BW; vp->min_tx_bw = MIN_BW; vp->spoofchk = false; - random_ether_addr(vp->mac); + eth_random_addr(vp->mac); dev_info(&adapter->pdev->dev, "MAC Address %pM is configured for VF %d\n", vp->mac, 
i); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index b9a7548ec6a0..0afc3d335d56 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -210,7 +210,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev) rmnet_dev->netdev_ops = &rmnet_vnd_ops; rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE; rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM; - random_ether_addr(rmnet_dev->dev_addr); + eth_random_addr(rmnet_dev->dev_addr); rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN; /* Raw IP mode */ diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index f4cae2be0fda..f80ac894ef92 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -25,7 +25,6 @@ #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/firmware.h> -#include <linux/pci-aspm.h> #include <linux/prefetch.h> #include <linux/ipv6.h> #include <net/ip6_checksum.h> @@ -35,7 +34,6 @@ #define RTL8169_VERSION "2.3LK-NAPI" #define MODULENAME "r8169" -#define PFX MODULENAME ": " #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" @@ -57,19 +55,6 @@ #define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" #define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" -#ifdef RTL8169_DEBUG -#define assert(expr) \ - if (!(expr)) { \ - printk( "Assertion failed! %s,%s,%s,line=%d\n", \ - #expr,__FILE__,__func__,__LINE__); \ - } -#define dprintk(fmt, args...) \ - do { printk(KERN_DEBUG PFX fmt, ## args); } while (0) -#else -#define assert(expr) do {} while (0) -#define dprintk(fmt, args...) do {} while (0) -#endif /* RTL8169_DEBUG */ - #define R8169_MSG_DEFAULT \ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) @@ -95,7 +80,6 @@ static const int multicast_filter_limit = 32; #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) #define RTL8169_TX_TIMEOUT (6*HZ) -#define RTL8169_PHY_TIMEOUT (10*HZ) /* write/read MMIO register */ #define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) @@ -399,12 +383,6 @@ enum rtl_registers { FuncForceEvent = 0xfc, }; -enum rtl8110_registers { - TBICSR = 0x64, - TBI_ANAR = 0x68, - TBI_LPAR = 0x6a, -}; - enum rtl8168_8101_registers { CSIDR = 0x64, CSIAR = 0x68, @@ -571,14 +549,6 @@ enum rtl_register_content { PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ ASPM_en = (1 << 0), /* ASPM enable */ - /* TBICSR p.28 */ - TBIReset = 0x80000000, - TBILoopback = 0x40000000, - TBINwEnable = 0x20000000, - TBINwRestart = 0x10000000, - TBILinkOk = 0x02000000, - TBINwComplete = 0x01000000, - /* CPlusCmd p.31 */ EnableBist = (1 << 15), // 8168 8101 Mac_dbgo_oe = (1 << 14), // 8168 8101 @@ -732,7 +702,6 @@ enum rtl_flag { RTL_FLAG_TASK_ENABLED, RTL_FLAG_TASK_SLOW_PENDING, RTL_FLAG_TASK_RESET_PENDING, - RTL_FLAG_TASK_PHY_PENDING, RTL_FLAG_MAX }; @@ -760,7 +729,6 @@ struct rtl8169_private { dma_addr_t RxPhyAddr; void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ - struct timer_list timer; u16 cp_cmd; u16 event_slow; @@ -776,14 +744,7 @@ struct rtl8169_private { void (*disable)(struct rtl8169_private *); } jumbo_ops; - int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); - int (*get_link_ksettings)(struct net_device *, - struct ethtool_link_ksettings *); - void (*phy_reset_enable)(struct rtl8169_private *tp); void (*hw_start)(struct rtl8169_private *tp); - 
unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); - unsigned int (*link_ok)(struct rtl8169_private *tp); - int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd); bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *); struct { @@ -1478,31 +1439,16 @@ static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) RTL_R8(tp, ChipCmd); } -static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp) -{ - return RTL_R32(tp, TBICSR) & TBIReset; -} - static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp) { return rtl_readphy(tp, MII_BMCR) & BMCR_RESET; } -static unsigned int rtl8169_tbi_link_ok(struct rtl8169_private *tp) -{ - return RTL_R32(tp, TBICSR) & TBILinkOk; -} - static unsigned int rtl8169_xmii_link_ok(struct rtl8169_private *tp) { return RTL_R8(tp, PHYstatus) & LinkStatus; } -static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp) -{ - RTL_W32(tp, TBICSR, RTL_R32(tp, TBICSR) | TBIReset); -} - static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp) { unsigned int val; @@ -1572,7 +1518,7 @@ static void rtl8169_check_link_status(struct net_device *dev, { struct device *d = tp_to_dev(tp); - if (tp->link_ok(tp)) { + if (rtl8169_xmii_link_ok(tp)) { rtl_link_chg_patch(tp); /* This is to cancel a scheduled suspend if there's one. */ pm_request_resume(d); @@ -1588,6 +1534,12 @@ static void rtl8169_check_link_status(struct net_device *dev, #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) +/* Currently we only enable WoL if explicitly told by userspace to circumvent + * issues on certain platforms, see commit bde135a672bf ("r8169: only enable + * PCI wakeups when WOL is active"). Let's keep __rtl8169_get_wol() for the + * case that we want to respect BIOS settings again. 
+ */ +#if 0 static u32 __rtl8169_get_wol(struct rtl8169_private *tp) { u8 options; @@ -1622,25 +1574,16 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp) return wolopts; } +#endif static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = tp_to_dev(tp); - - pm_runtime_get_noresume(d); rtl_lock_work(tp); - wol->supported = WAKE_ANY; - if (pm_runtime_active(d)) - wol->wolopts = __rtl8169_get_wol(tp); - else - wol->wolopts = tp->saved_wolopts; - + wol->wolopts = tp->saved_wolopts; rtl_unlock_work(tp); - - pm_runtime_put_noidle(d); } static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) @@ -1716,18 +1659,21 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) struct rtl8169_private *tp = netdev_priv(dev); struct device *d = tp_to_dev(tp); + if (wol->wolopts & ~WAKE_ANY) + return -EINVAL; + pm_runtime_get_noresume(d); rtl_lock_work(tp); + tp->saved_wolopts = wol->wolopts; + if (pm_runtime_active(d)) - __rtl8169_set_wol(tp, wol->wolopts); - else - tp->saved_wolopts = wol->wolopts; + __rtl8169_set_wol(tp, tp->saved_wolopts); rtl_unlock_work(tp); - device_set_wakeup_enable(d, wol->wolopts); + device_set_wakeup_enable(d, tp->saved_wolopts); pm_runtime_put_noidle(d); @@ -1759,28 +1705,6 @@ static int rtl8169_get_regs_len(struct net_device *dev) return R8169_REGS_SIZE; } -static int rtl8169_set_speed_tbi(struct net_device *dev, - u8 autoneg, u16 speed, u8 duplex, u32 ignored) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int ret = 0; - u32 reg; - - reg = RTL_R32(tp, TBICSR); - if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) && - (duplex == DUPLEX_FULL)) { - RTL_W32(tp, TBICSR, reg & ~(TBINwEnable | TBINwRestart)); - } else if (autoneg == AUTONEG_ENABLE) - RTL_W32(tp, TBICSR, reg | TBINwEnable | TBINwRestart); - else { - netif_warn(tp, link, dev, - "incorrect speed setting refused in TBI mode\n"); - ret = -EOPNOTSUPP; - } - - return ret; -} - static int rtl8169_set_speed_xmii(struct net_device *dev, u8 autoneg, u16 speed, u8 duplex, u32 adv) { @@ -1861,20 +1785,7 @@ out: static int rtl8169_set_speed(struct net_device *dev, u8 autoneg, u16 speed, u8 duplex, u32 advertising) { - struct rtl8169_private *tp = netdev_priv(dev); - int ret; - - ret = tp->set_speed(dev, autoneg, speed, duplex, advertising); - if (ret < 0) - goto out; - - if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) && - (advertising & ADVERTISED_1000baseT_Full) && - !pci_is_pcie(tp->pci_dev)) { - mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); - } -out: - return ret; + return rtl8169_set_speed_xmii(dev, autoneg, speed, duplex, advertising); } static netdev_features_t rtl8169_fix_features(struct net_device *dev, @@ -1940,53 +1851,14 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); } -static int rtl8169_get_link_ksettings_tbi(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - u32 status; - u32 supported, advertising; - - supported = - SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE; - cmd->base.port = PORT_FIBRE; - - status = RTL_R32(tp, TBICSR); - advertising = (status & TBINwEnable) ? 
ADVERTISED_Autoneg : 0; - cmd->base.autoneg = !!(status & TBINwEnable); - - cmd->base.speed = SPEED_1000; - cmd->base.duplex = DUPLEX_FULL; /* Always set */ - - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - advertising); - - return 0; -} - -static int rtl8169_get_link_ksettings_xmii(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - mii_ethtool_get_link_ksettings(&tp->mii, cmd); - - return 0; -} - static int rtl8169_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct rtl8169_private *tp = netdev_priv(dev); - int rc; - rtl_lock_work(tp); - rc = tp->get_link_ksettings(dev, cmd); - rtl_unlock_work(tp); + mii_ethtool_get_link_ksettings(&tp->mii, cmd); - return rc; + return 0; } static int rtl8169_set_link_ksettings(struct net_device *dev, @@ -2000,8 +1872,6 @@ static int rtl8169_set_link_ksettings(struct net_device *dev, cmd->link_modes.advertising)) return -EINVAL; - del_timer_sync(&tp->timer); - rtl_lock_work(tp); rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed, cmd->base.duplex, advertising); @@ -2553,7 +2423,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, static void rtl8169_print_mac_version(struct rtl8169_private *tp) { - dprintk("mac_version = 0x%02x\n", tp->mac_version); + netif_dbg(tp, drv, tp->dev, "mac_version = 0x%02x\n", tp->mac_version); } struct phy_reg { @@ -4405,62 +4275,28 @@ static void rtl_hw_phy_config(struct net_device *dev) } } -static void rtl_phy_work(struct rtl8169_private *tp) -{ - struct timer_list *timer = &tp->timer; - unsigned long timeout = RTL8169_PHY_TIMEOUT; - - assert(tp->mac_version > RTL_GIGA_MAC_VER_01); - - if (tp->phy_reset_pending(tp)) { - /* - * A busy loop could burn quite a few cycles on nowadays CPU. - * Let's delay the execution of the timer for a few ticks. 
- */ - timeout = HZ/10; - goto out_mod_timer; - } - - if (tp->link_ok(tp)) - return; - - netif_dbg(tp, link, tp->dev, "PHY reset until link up\n"); - - tp->phy_reset_enable(tp); - -out_mod_timer: - mod_timer(timer, jiffies + timeout); -} - static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) { if (!test_and_set_bit(flag, tp->wk.flags)) schedule_work(&tp->wk.work); } -static void rtl8169_phy_timer(struct timer_list *t) -{ - struct rtl8169_private *tp = from_timer(tp, t, timer); - - rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING); -} - DECLARE_RTL_COND(rtl_phy_reset_cond) { - return tp->phy_reset_pending(tp); + return rtl8169_xmii_reset_pending(tp); } static void rtl8169_phy_reset(struct net_device *dev, struct rtl8169_private *tp) { - tp->phy_reset_enable(tp); + rtl8169_xmii_reset_enable(tp); rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100); } static bool rtl_tbi_enabled(struct rtl8169_private *tp) { return (tp->mac_version == RTL_GIGA_MAC_VER_01) && - (RTL_R8(tp, PHYstatus) & TBI_Enable); + (RTL_R8(tp, PHYstatus) & TBI_Enable); } static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) @@ -4468,7 +4304,8 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) rtl_hw_phy_config(dev); if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { - dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + netif_dbg(tp, drv, dev, + "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); RTL_W8(tp, 0x82, 0x01); } @@ -4478,9 +4315,11 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); if (tp->mac_version == RTL_GIGA_MAC_VER_02) { - dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + netif_dbg(tp, drv, dev, + "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); RTL_W8(tp, 0x82, 0x01); - dprintk("Set PHY Reg 0x0bh = 0x00h\n"); + netif_dbg(tp, drv, dev, + "Set PHY Reg 0x0bh = 0x00h\n"); rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 } @@ -4492,9 +4331,6 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) (tp->mii.supports_gmii ? ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full : 0)); - - if (rtl_tbi_enabled(tp)) - netif_info(tp, link, dev, "TBI auto-negotiating\n"); } static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) @@ -4537,14 +4373,6 @@ static int rtl_set_mac_address(struct net_device *dev, void *p) return 0; } -static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - struct mii_ioctl_data *data = if_mii(ifr); - - return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV; -} - static int rtl_xmii_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd) { @@ -4564,9 +4392,12 @@ static int rtl_xmii_ioctl(struct rtl8169_private *tp, return -EOPNOTSUPP; } -static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd) +static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - return -EOPNOTSUPP; + struct rtl8169_private *tp = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(ifr); + + return netif_running(dev) ? 
rtl_xmii_ioctl(tp, data, cmd) : -ENODEV; } static void rtl_init_mdio_ops(struct rtl8169_private *tp) @@ -4639,7 +4470,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) { - if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) + if (!netif_running(tp->dev) || !tp->saved_wolopts) return false; rtl_speed_down(tp); @@ -5172,8 +5003,8 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp) if (tp->mac_version == RTL_GIGA_MAC_VER_02 || tp->mac_version == RTL_GIGA_MAC_VER_03) { - dprintk("Set MAC Reg C+CR Offset 0xe0. " - "Bit-3 and bit-14 MUST be 1\n"); + netif_dbg(tp, drv, tp->dev, + "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n"); tp->cp_cmd |= (1 << 14); } @@ -5236,12 +5067,7 @@ static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val) rtl_csi_write(tp, 0x070c, csi | val << 24); } -static void rtl_csi_access_enable_1(struct rtl8169_private *tp) -{ - rtl_csi_access_enable(tp, 0x17); -} - -static void rtl_csi_access_enable_2(struct rtl8169_private *tp) +static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp) { rtl_csi_access_enable(tp, 0x27); } @@ -5290,6 +5116,17 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) RTL_W8(tp, Config3, data); } +static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) +{ + if (enable) { + RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); + } else { + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + } +} + static void rtl_hw_start_8168bb(struct rtl8169_private *tp) { RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); @@ -5337,7 +5174,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) { 0x07, 0, 0x2000 } }; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); @@ -5346,7 +5183,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); @@ -5359,7 +5196,7 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); @@ -5383,7 +5220,7 @@ static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) { 0x06, 0x0080, 0x0000 } }; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); @@ -5399,7 +5236,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) { 0x03, 0x0400, 0x0220 } }; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); @@ -5413,14 +5250,14 @@ static void rtl_hw_start_8168c_3(struct rtl8169_private *tp) static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); __rtl_hw_start_8168cp(tp); } static void rtl_hw_start_8168d(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_disable_clock_request(tp); @@ -5435,7 +5272,7 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp) static void rtl_hw_start_8168dp(struct rtl8169_private 
*tp) { - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); if (tp->dev->mtu <= ETH_DATA_LEN) rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5453,7 +5290,7 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) { 0x0c, 0x0100, 0x0020 } }; - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5482,7 +5319,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) { 0x0a, 0x0000, 0x0040 } }; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); @@ -5507,7 +5344,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) { 0x19, 0x0000, 0x0224 } }; - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); @@ -5536,11 +5373,13 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168f(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5611,7 +5450,7 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp) rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5646,9 +5485,9 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) rtl_hw_start_8168g(tp); /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1)); + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) @@ -5681,9 +5520,9 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp) rtl_hw_start_8168g(tp); /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2)); + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) @@ -5700,8 +5539,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) }; /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1)); RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); @@ -5711,7 +5549,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5780,6 +5618,8 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) r8168_mac_ocp_write(tp, 0xe63e, 0x0000); 
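/*
 * Editor's sketch (annotation, not part of the patch): the recurring
 * change in these r8169 hunks is a single bracketing pattern around
 * EPHY programming, built on the rtl_hw_aspm_clkreq_enable() helper
 * introduced above. In outline, using only symbols from this diff:
 *
 *	rtl_hw_aspm_clkreq_enable(tp, false);	// quiesce ASPM/ClkReq first
 *	rtl_ephy_init(tp, e_info, ARRAY_SIZE(e_info));	// EPHY now safe to touch
 *	rtl_hw_aspm_clkreq_enable(tp, true);	// re-enable once setup is done
 */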
r8168_mac_ocp_write(tp, 0xc094, 0x0000); r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168ep(struct rtl8169_private *tp) @@ -5793,7 +5633,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5831,11 +5671,12 @@ static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp) }; /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1)); rtl_hw_start_8168ep(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) @@ -5847,14 +5688,15 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) }; /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2)); rtl_hw_start_8168ep(tp); RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) @@ -5868,8 +5710,7 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) }; /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3)); rtl_hw_start_8168ep(tp); @@ -5889,6 +5730,8 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) data = r8168_mac_ocp_read(tp, 0xe860); data |= 0x0080; r8168_mac_ocp_write(tp, 0xe860, data); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168(struct rtl8169_private *tp) @@ -6006,8 +5849,9 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp) break; default: - printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", - tp->dev->name, tp->mac_version); + netif_err(tp, drv, tp->dev, + "unknown chipset (mac_version = %d)\n", + tp->mac_version); break; } } @@ -6026,7 +5870,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) }; u8 cfg1; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); RTL_W8(tp, DBG_REG, FIX_NAK_1); @@ -6045,7 +5889,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -6100,7 +5944,7 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) { 0x1e, 0, 0x4000 } }; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); /* Force LAN exit from ASPM if Rx/Tx are not idle */ RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); @@ -7015,7 +6859,6 @@ static void rtl_task(struct work_struct *work) /* XXX - keep rtl_slow_event_work() as first element. 
*/ { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work }, { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work }, - { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work } }; struct rtl8169_private *tp = container_of(work, struct rtl8169_private, wk.work); @@ -7088,8 +6931,6 @@ static void rtl8169_down(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); - del_timer_sync(&tp->timer); - napi_disable(&tp->napi); netif_stop_queue(dev); @@ -7208,7 +7049,6 @@ static int rtl_open(struct net_device *dev) rtl_unlock_work(tp); - tp->saved_wolopts = 0; pm_runtime_put_sync(&pdev->dev); rtl8169_check_link_status(dev, tp); @@ -7323,6 +7163,7 @@ static void __rtl8169_resume(struct net_device *dev) netif_device_attach(dev); rtl_pll_power_up(tp); + rtl8169_init_phy(dev, tp); rtl_lock_work(tp); napi_enable(&tp->napi); @@ -7336,9 +7177,6 @@ static int rtl8169_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = netdev_priv(dev); - - rtl8169_init_phy(dev, tp); if (netif_running(dev)) __rtl8169_resume(dev); @@ -7358,7 +7196,6 @@ static int rtl8169_runtime_suspend(struct device *device) } rtl_lock_work(tp); - tp->saved_wolopts = __rtl8169_get_wol(tp); __rtl8169_set_wol(tp, WAKE_ANY); rtl_unlock_work(tp); @@ -7383,11 +7220,8 @@ static int rtl8169_runtime_resume(struct device *device) rtl_lock_work(tp); __rtl8169_set_wol(tp, tp->saved_wolopts); - tp->saved_wolopts = 0; rtl_unlock_work(tp); - rtl8169_init_phy(dev, tp); - __rtl8169_resume(dev); return 0; @@ -7455,7 +7289,7 @@ static void rtl_shutdown(struct pci_dev *pdev) rtl8169_hw_reset(tp); if (system_state == SYSTEM_POWER_OFF) { - if (__rtl8169_get_wol(tp) & WAKE_ANY) { + if (tp->saved_wolopts) { rtl_wol_suspend_quirk(tp); rtl_wol_shutdown_quirk(tp); } @@ -7647,11 +7481,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) mii->reg_num_mask = 0x1f; mii->supports_gmii = cfg->has_gmii; - /* disable ASPM completely as that cause random device stop working - * problems as well as full system hangs for some PCIe devices users */ - pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | - PCIE_LINK_STATE_CLKPM); - /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ rc = pcim_enable_device(pdev); if (rc < 0) { @@ -7689,6 +7518,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Identify chip attached to board */ rtl8169_get_mac_version(tp, cfg->default_ver); + if (rtl_tbi_enabled(tp)) { + dev_err(&pdev->dev, "TBI fiber mode not supported\n"); + return -ENODEV; + } + tp->cp_cmd = RTL_R16(tp, CPlusCmd); if ((sizeof(dma_addr_t) > 4) && @@ -7737,22 +7571,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* override BIOS settings, use userspace tools to enable WOL */ __rtl8169_set_wol(tp, 0); - if (rtl_tbi_enabled(tp)) { - tp->set_speed = rtl8169_set_speed_tbi; - tp->get_link_ksettings = rtl8169_get_link_ksettings_tbi; - tp->phy_reset_enable = rtl8169_tbi_reset_enable; - tp->phy_reset_pending = rtl8169_tbi_reset_pending; - tp->link_ok = rtl8169_tbi_link_ok; - tp->do_ioctl = rtl_tbi_ioctl; - } else { - tp->set_speed = rtl8169_set_speed_xmii; - tp->get_link_ksettings = rtl8169_get_link_ksettings_xmii; - tp->phy_reset_enable = rtl8169_xmii_reset_enable; - tp->phy_reset_pending = rtl8169_xmii_reset_pending; - tp->link_ok = rtl8169_xmii_link_ok; - tp->do_ioctl = rtl_xmii_ioctl; - } - mutex_init(&tp->wk.mutex); u64_stats_init(&tp->rx_stats.syncp); u64_stats_init(&tp->tx_stats.syncp); @@ -7823,8 +7641,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->event_slow = cfg->event_slow; tp->coalesce_info = cfg->coalesce_info; - timer_setup(&tp->timer, rtl8169_phy_timer, 0); - tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters), diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index e9007b613f17..71651e47660a 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -622,7 +622,6 @@ static struct sh_eth_cpu_data r7s72100_data = { .tpauser = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@ -672,7 +671,6 @@ static struct sh_eth_cpu_data r8a7740_data = { .bculr = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@ -798,7 +796,6 @@ static struct sh_eth_cpu_data r8a77980_data = { .hw_swap = 1, .nbst = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@ -851,7 +848,6 @@ static struct sh_eth_cpu_data sh7724_data = { .tpauser = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ }; static void sh_eth_set_rate_sh7757(struct net_device *ndev) @@ -898,7 +894,6 @@ static struct sh_eth_cpu_data sh7757_data = { .hw_swap = 1, .no_ade = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .rtrate = 1, .dual_port = 1, }; @@ -978,7 +973,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = { .bculr = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@ -1467,7 +1461,7 @@ static int sh_eth_dev_init(struct net_device *ndev) /* Descriptor format */ sh_eth_ring_format(ndev); if (mdp->cd->rpadir) - sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR); + sh_eth_write(ndev, NET_IP_ALIGN << 16, RPADIR); /* all sh_eth int mask */ sh_eth_write(ndev, 0, EESIPR); @@ -1527,9 +1521,9 @@ static int sh_eth_dev_init(struct net_device *ndev) /* mask reset */ if (mdp->cd->apr) - sh_eth_write(ndev, APR_AP, APR); + sh_eth_write(ndev, 1, APR); if (mdp->cd->mpr) - sh_eth_write(ndev, MPR_MP, MPR); + sh_eth_write(ndev, 1, 
MPR); if (mdp->cd->tpauser) sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 726c55a82dd7..140ad2c57095 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -383,12 +383,12 @@ enum ECSIPR_STATUS_MASK_BIT { /* APR */ enum APR_BIT { - APR_AP = 0x00000001, + APR_AP = 0x0000ffff, }; /* MPR */ enum MPR_BIT { - MPR_MP = 0x00000001, + MPR_MP = 0x0000ffff, }; /* TRSCER */ @@ -403,8 +403,7 @@ enum DESC_I_BIT { /* RPADIR */ enum RPADIR_BIT { - RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000, - RPADIR_PADR = 0x0003f, + RPADIR_PADS = 0x1f0000, RPADIR_PADR = 0xffff, }; /* FDR */ @@ -488,7 +487,6 @@ struct sh_eth_cpu_data { u32 ecsipr_value; u32 fdr_value; u32 fcftr_value; - u32 rpadir_value; /* interrupt checking mask */ u32 tx_check; diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 019cef1d3cf7..8820be83ce85 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -199,7 +199,7 @@ static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx) return -ENOMEM; for (i = 0; i < efx->vf_count; i++) { - random_ether_addr(nic_data->vf[i].mac); + eth_random_addr(nic_data->vf[i].mac); nic_data->vf[i].efx = NULL; nic_data->vf[i].vlan = EFX_EF10_NO_VLAN; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index f08625a02cea..7b923362ee55 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -61,6 +61,7 @@ struct rk_priv_data { struct clk *mac_clk_tx; struct clk *clk_mac_ref; struct clk *clk_mac_refout; + struct clk *clk_mac_speed; struct clk *aclk_mac; struct clk *pclk_mac; struct clk *clk_phy; @@ -83,6 +84,64 @@ struct rk_priv_data { (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \ ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE)) +#define PX30_GRF_GMAC_CON1 0x0904 + +/* PX30_GRF_GMAC_CON1 */ +#define PX30_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \ + GRF_BIT(6)) +#define PX30_GMAC_SPEED_10M GRF_CLR_BIT(2) +#define PX30_GMAC_SPEED_100M GRF_BIT(2) + +static void px30_set_to_rmii(struct rk_priv_data *bsp_priv) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); + return; + } + + regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, + PX30_GMAC_PHY_INTF_SEL_RMII); +} + +static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + int ret; + + if (IS_ERR(bsp_priv->clk_mac_speed)) { + dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__); + return; + } + + if (speed == 10) { + regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, + PX30_GMAC_SPEED_10M); + + ret = clk_set_rate(bsp_priv->clk_mac_speed, 2500000); + if (ret) + dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n", + __func__, ret); + } else if (speed == 100) { + regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, + PX30_GMAC_SPEED_100M); + + ret = clk_set_rate(bsp_priv->clk_mac_speed, 25000000); + if (ret) + dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n", + __func__, ret); + + } else { + dev_err(dev, "unknown speed value for RMII! 
speed=%d", speed); + } +} + +static const struct rk_gmac_ops px30_ops = { + .set_to_rmii = px30_set_to_rmii, + .set_rmii_speed = px30_set_rmii_speed, +}; + #define RK3128_GRF_MAC_CON0 0x0168 #define RK3128_GRF_MAC_CON1 0x016c @@ -1042,6 +1101,10 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat) } } + bsp_priv->clk_mac_speed = devm_clk_get(dev, "clk_mac_speed"); + if (IS_ERR(bsp_priv->clk_mac_speed)) + dev_err(dev, "cannot get clock %s\n", "clk_mac_speed"); + if (bsp_priv->clock_input) { dev_info(dev, "clock input from PHY\n"); } else { @@ -1094,6 +1157,9 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable) if (!IS_ERR(bsp_priv->mac_clk_tx)) clk_prepare_enable(bsp_priv->mac_clk_tx); + if (!IS_ERR(bsp_priv->clk_mac_speed)) + clk_prepare_enable(bsp_priv->clk_mac_speed); + /** * if (!IS_ERR(bsp_priv->clk_mac)) * clk_prepare_enable(bsp_priv->clk_mac); @@ -1118,6 +1184,8 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable) clk_disable_unprepare(bsp_priv->pclk_mac); clk_disable_unprepare(bsp_priv->mac_clk_tx); + + clk_disable_unprepare(bsp_priv->clk_mac_speed); /** * if (!IS_ERR(bsp_priv->clk_mac)) * clk_disable_unprepare(bsp_priv->clk_mac); @@ -1414,6 +1482,7 @@ static int rk_gmac_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume); static const struct of_device_id rk_gmac_dwmac_match[] = { + { .compatible = "rockchip,px30-gmac", .data = &px30_ops }, { .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops }, { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops }, { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 65bc3556bd8f..edb6053bd980 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -407,6 +407,19 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan) } } +static void dwmac4_qmode(void __iomem *ioaddr, u32 channel, u8 qmode) +{ + u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel)); + + mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK; + if (qmode != MTL_QUEUE_AVB) + mtl_tx_op |= MTL_OP_MODE_TXQEN; + else + mtl_tx_op |= MTL_OP_MODE_TXQEN_AV; + + writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel)); +} + static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) { u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); @@ -441,6 +454,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = { .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, .enable_tso = dwmac4_enable_tso, + .qmode = dwmac4_qmode, .set_bfsize = dwmac4_set_bfsize, }; @@ -468,5 +482,6 @@ const struct stmmac_dma_ops dwmac410_dma_ops = { .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, .enable_tso = dwmac4_enable_tso, + .qmode = dwmac4_qmode, .set_bfsize = dwmac4_set_bfsize, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index fe8b536b13f8..79911eefc2a7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -183,6 +183,7 @@ struct stmmac_dma_ops { void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); + void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode); 
void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan); }; @@ -236,6 +237,8 @@ struct stmmac_dma_ops { stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args) #define stmmac_enable_tso(__priv, __args...) \ stmmac_do_void_callback(__priv, dma, enable_tso, __args) +#define stmmac_dma_qmode(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, qmode, __args) #define stmmac_set_dma_bfsize(__priv, __args...) \ stmmac_do_void_callback(__priv, dma, set_bfsize, __args) @@ -444,17 +447,22 @@ struct stmmac_mode_ops { struct stmmac_priv; struct tc_cls_u32_offload; +struct tc_cbs_qopt_offload; struct stmmac_tc_ops { int (*init)(struct stmmac_priv *priv); int (*setup_cls_u32)(struct stmmac_priv *priv, struct tc_cls_u32_offload *cls); + int (*setup_cbs)(struct stmmac_priv *priv, + struct tc_cbs_qopt_offload *qopt); }; #define stmmac_tc_init(__priv, __args...) \ stmmac_do_callback(__priv, tc, init, __args) #define stmmac_tc_setup_cls_u32(__priv, __args...) \ stmmac_do_callback(__priv, tc, setup_cls_u32, __args) +#define stmmac_tc_setup_cbs(__priv, __args...) \ + stmmac_do_callback(__priv, tc, setup_cbs, __args) struct stmmac_regs_off { u32 ptp_off; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 60f59abab009..d9e60cfd8a85 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3778,7 +3778,7 @@ static int stmmac_setup_tc_block(struct stmmac_priv *priv, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb, - priv, priv); + priv, priv, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv); return 0; @@ -3795,6 +3795,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, switch (type) { case TC_SETUP_BLOCK: return stmmac_setup_tc_block(priv, type_data); + case TC_SETUP_QDISC_CBS: + return stmmac_tc_setup_cbs(priv, priv, type_data); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 2258cd8cc844..0b0fca0200b2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -289,7 +289,69 @@ static int tc_init(struct stmmac_priv *priv) return 0; } +static int tc_setup_cbs(struct stmmac_priv *priv, + struct tc_cbs_qopt_offload *qopt) +{ + u32 tx_queues_count = priv->plat->tx_queues_to_use; + u32 queue = qopt->queue; + u32 ptr, speed_div; + u32 mode_to_use; + u64 value; + int ret; + + /* Queue 0 is not AVB capable */ + if (queue <= 0 || queue >= tx_queues_count) + return -EINVAL; + if (priv->speed != SPEED_100 && priv->speed != SPEED_1000) + return -EOPNOTSUPP; + + mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; + if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) { + ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB); + if (ret) + return ret; + + priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB; + } else if (!qopt->enable) { + return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB); + } + + /* Port Transmit Rate and Speed Divider */ + ptr = (priv->speed == SPEED_100) ? 4 : 8; + speed_div = (priv->speed == SPEED_100) ? 
100000 : 1000000; + + /* Final adjustments for HW */ + value = qopt->idleslope * 1024 * ptr; + do_div(value, speed_div); + priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0); + + value = -qopt->sendslope * 1024UL * ptr; + do_div(value, speed_div); + priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0); + + value = qopt->hicredit * 1024 * 8; + priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0); + + value = qopt->locredit * 1024 * 8; + priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0); + + ret = stmmac_config_cbs(priv, priv->hw, + priv->plat->tx_queues_cfg[queue].send_slope, + priv->plat->tx_queues_cfg[queue].idle_slope, + priv->plat->tx_queues_cfg[queue].high_credit, + priv->plat->tx_queues_cfg[queue].low_credit, + queue); + if (ret) + return ret; + + dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n", + queue, qopt->sendslope, qopt->idleslope, + qopt->hicredit, qopt->locredit); + return 0; +} + const struct stmmac_tc_ops dwmac510_tc_ops = { .init = tc_init, .setup_cls_u32 = tc_setup_cls_u32, + .setup_cbs = tc_setup_cbs, }; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 358edab9e72e..093998124149 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2927,7 +2927,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv) dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr); } else { - random_ether_addr(priv_sl2->mac_addr); + eth_random_addr(priv_sl2->mac_addr); dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr); } diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index e40aa3e31af2..6ebf110cd594 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -2052,7 +2052,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device, if (is_valid_ether_addr(efuse_mac_addr)) ether_addr_copy(ndev->dev_addr, efuse_mac_addr); else - random_ether_addr(ndev->dev_addr); + eth_random_addr(ndev->dev_addr); devm_iounmap(dev, efuse); devm_release_mem_region(dev, res.start, size); @@ -2061,7 +2061,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device, if (mac_addr) ether_addr_copy(ndev->dev_addr, mac_addr); else - random_ether_addr(ndev->dev_addr); + eth_random_addr(ndev->dev_addr); } ret = of_property_read_string(node_interface, "rx-channel", diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 2a0c06e0f730..42f1f518dad6 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -70,7 +70,8 @@ #define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */ #define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit * only. This is not documented - * in the HW spec */ + * in the HW spec + */ /* Define for programming the MAC address into the EmacLite */ #define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK) @@ -94,11 +95,11 @@ -#define TX_TIMEOUT (60*HZ) /* Tx timeout is 60 seconds. */ +#define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. */ #define ALIGNMENT 4 /* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. 
*/ -#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT) +#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32)adr)) % ALIGNMENT) #ifdef __BIG_ENDIAN #define xemaclite_readl ioread32be @@ -238,8 +239,8 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr, /* Set up to output the remaining data */ align_buffer = 0; - to_u8_ptr = (u8 *) &align_buffer; - from_u8_ptr = (u8 *) from_u16_ptr; + to_u8_ptr = (u8 *)&align_buffer; + from_u8_ptr = (u8 *)from_u16_ptr; /* Output the remaining data */ for (; length > 0; length--) @@ -272,7 +273,7 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr, u32 align_buffer; from_u32_ptr = src_ptr; - to_u16_ptr = (u16 *) dest_ptr; + to_u16_ptr = (u16 *)dest_ptr; for (; length > 3; length -= 4) { /* Copy each word into the temporary buffer */ @@ -288,9 +289,9 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr, u8 *to_u8_ptr, *from_u8_ptr; /* Set up to read the remaining data */ - to_u8_ptr = (u8 *) to_u16_ptr; + to_u8_ptr = (u8 *)to_u16_ptr; align_buffer = *from_u32_ptr++; - from_u8_ptr = (u8 *) &align_buffer; + from_u8_ptr = (u8 *)&align_buffer; /* Read the remaining data */ for (; length > 0; length--) @@ -336,7 +337,8 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET; } else if (drvdata->tx_ping_pong != 0) { /* If the expected buffer is full, try the other buffer, - * if it is configured in HW */ + * if it is configured in HW + */ addr = (void __iomem __force *)((u32 __force)addr ^ XEL_BUFFER_OFFSET); @@ -349,7 +351,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, return -1; /* Buffer was full, return failure */ /* Write the frame to the buffer */ - xemaclite_aligned_write(data, (u32 __force *) addr, byte_count); + xemaclite_aligned_write(data, (u32 __force *)addr, byte_count); xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK), addr + XEL_TPLR_OFFSET); @@ -357,7 +359,8 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, /* Update the Tx Status Register to indicate that there is a * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which * is used by the interrupt handler to check whether a frame - * has been transmitted */ + * has been transmitted + */ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK); xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET); @@ -369,6 +372,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, * xemaclite_recv_data - Receive a frame * @drvdata: Pointer to the Emaclite device private data * @data: Address where the data is to be received + * @maxlen: Maximum supported ethernet packet length * * This function is intended to be called from the interrupt context or * with a wrapper which waits for the receive frame to be available. @@ -394,7 +398,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) /* The instance is out of sync, try other buffer if other * buffer is configured, return 0 otherwise. 
If the instance is * out of sync, do not update the 'next_rx_buf_to_use' since it - * will correct on subsequent calls */ + * will correct on subsequent calls + */ if (drvdata->rx_ping_pong != 0) addr = (void __iomem __force *)((u32 __force)addr ^ XEL_BUFFER_OFFSET); @@ -408,13 +413,15 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) return 0; /* No data was available */ } - /* Get the protocol type of the ethernet frame that arrived */ + /* Get the protocol type of the ethernet frame that arrived + */ proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET + XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & XEL_RPLR_LENGTH_MASK); /* Check if received ethernet frame is a raw ethernet frame - * or an IP packet or an ARP packet */ + * or an IP packet or an ARP packet + */ if (proto_type > ETH_DATA_LEN) { if (proto_type == ETH_P_IP) { @@ -430,7 +437,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN; else /* Field contains type other than IP or ARP, use max - * frame size and let user parse it */ + * frame size and let user parse it + */ length = ETH_FRAME_LEN + ETH_FCS_LEN; } else /* Use the length in the frame, plus the header and trailer */ @@ -440,7 +448,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) length = maxlen; /* Read from the EmacLite device */ - xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET), + xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET), data, length); /* Acknowledge the frame */ @@ -471,7 +479,7 @@ static void xemaclite_update_address(struct net_local *drvdata, /* Determine the expected Tx buffer address */ addr = drvdata->base_addr + drvdata->next_tx_buf_to_use; - xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN); + xemaclite_aligned_write(address_ptr, (u32 __force *)addr, ETH_ALEN); xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET); @@ -488,7 +496,7 @@ static void xemaclite_update_address(struct net_local *drvdata, /** * xemaclite_set_mac_address - Set the MAC address for this device * @dev: Pointer to the network device instance - * @addr: Void pointer to the sockaddr structure + * @address: Void pointer to the sockaddr structure * * This function copies the HW address from the sockaddr strucutre to the * net_device structure and updates the address in HW. @@ -564,19 +572,19 @@ static void xemaclite_tx_handler(struct net_device *dev) struct net_local *lp = netdev_priv(dev); dev->stats.tx_packets++; - if (lp->deferred_skb) { - if (xemaclite_send_data(lp, - (u8 *) lp->deferred_skb->data, - lp->deferred_skb->len) != 0) - return; - else { - dev->stats.tx_bytes += lp->deferred_skb->len; - dev_kfree_skb_irq(lp->deferred_skb); - lp->deferred_skb = NULL; - netif_trans_update(dev); /* prevent tx timeout */ - netif_wake_queue(dev); - } - } + + if (!lp->deferred_skb) + return; + + if (xemaclite_send_data(lp, (u8 *)lp->deferred_skb->data, + lp->deferred_skb->len)) + return; + + dev->stats.tx_bytes += lp->deferred_skb->len; + dev_kfree_skb_irq(lp->deferred_skb); + lp->deferred_skb = NULL; + netif_trans_update(dev); /* prevent tx timeout */ + netif_wake_queue(dev); } /** @@ -602,18 +610,18 @@ static void xemaclite_rx_handler(struct net_device *dev) return; } - /* - * A new skb should have the data halfword aligned, but this code is + /* A new skb should have the data halfword aligned, but this code is * here just in case that isn't true. 
Calculate how many * bytes we should reserve to get the data to start on a word - * boundary */ + * boundary + */ align = BUFFER_ALIGN(skb->data); if (align) skb_reserve(skb, align); skb_reserve(skb, 2); - len = xemaclite_recv_data(lp, (u8 *) skb->data, len); + len = xemaclite_recv_data(lp, (u8 *)skb->data, len); if (!len) { dev->stats.rx_errors++; @@ -639,6 +647,8 @@ static void xemaclite_rx_handler(struct net_device *dev) * @dev_id: Void pointer to the network device instance used as callback * reference * + * Return: IRQ_HANDLED + * * This function handles the Tx and Rx interrupts of the EmacLite device. */ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) @@ -706,8 +716,8 @@ static int xemaclite_mdio_wait(struct net_local *lp) unsigned long end = jiffies + 2; /* wait for the MDIO interface to not be busy or timeout - after some time. - */ + * after some time. + */ while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & XEL_MDIOCTRL_MDIOSTS_MASK) { if (time_before_eq(end, jiffies)) { @@ -757,7 +767,7 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg) rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET); dev_dbg(&lp->ndev->dev, - "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n", + "%s(phy_id=%i, reg=%x) == %x\n", __func__, phy_id, reg, rc); return rc; @@ -772,6 +782,8 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg) * * This function waits till the device is ready to accept a new MDIO * request and then writes the val to the MDIO Write Data register. + * + * Return: 0 upon success or a negative error upon failure */ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) @@ -780,7 +792,7 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, u32 ctrl_reg; dev_dbg(&lp->ndev->dev, - "xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n", + "%s(phy_id=%i, reg=%x, val=%x)\n", __func__, phy_id, reg, val); if (xemaclite_mdio_wait(lp)) @@ -805,7 +817,7 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, /** * xemaclite_mdio_setup - Register mii_bus for the Emaclite device * @lp: Pointer to the Emaclite device private data - * @ofdev: Pointer to OF device structure + * @dev: Pointer to OF device structure * * This function enables MDIO bus in the Emaclite device and registers a * mii_bus. @@ -905,6 +917,9 @@ static void xemaclite_adjust_link(struct net_device *ndev) * This function sets the MAC address, requests an IRQ and enables interrupts * for the Emaclite device and starts the Tx queue. * It also connects to the phy device, if MDIO is included in Emaclite device. + * + * Return: 0 on success. -ENODEV, if PHY cannot be connected. + * Non-zero error value on failure. */ static int xemaclite_open(struct net_device *dev) { @@ -975,6 +990,8 @@ static int xemaclite_open(struct net_device *dev) * This function stops the Tx queue, disables interrupts and frees the IRQ for * the Emaclite device. * It also disconnects the phy device associated with the Emaclite device. + * + * Return: 0, always. 
*/ static int xemaclite_close(struct net_device *dev) { @@ -1017,10 +1034,11 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) new_skb = orig_skb; spin_lock_irqsave(&lp->reset_lock, flags); - if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) { + if (xemaclite_send_data(lp, (u8 *)new_skb->data, len) != 0) { /* If the Emaclite Tx buffer is busy, stop the Tx queue and * defer the skb for transmission during the ISR, after the - * current transmission is complete */ + * current transmission is complete + */ netif_stop_queue(dev); lp->deferred_skb = new_skb; /* Take the time stamp now, since we can't do this in an ISR. */ @@ -1052,13 +1070,12 @@ static bool get_bool(struct platform_device *ofdev, const char *s) { u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL); - if (p) { - return (bool)*p; - } else { - dev_warn(&ofdev->dev, "Parameter %s not found," - "defaulting to false\n", s); + if (!p) { + dev_warn(&ofdev->dev, "Parameter %s not found, defaulting to false\n", s); return false; } + + return (bool)*p; } static const struct net_device_ops xemaclite_netdev_ops; @@ -1066,7 +1083,6 @@ static const struct net_device_ops xemaclite_netdev_ops; /** * xemaclite_of_probe - Probe method for the Emaclite device. * @ofdev: Pointer to OF device structure - * @match: Pointer to the structure used for matching a device * * This function probes for the Emaclite device in the device tree. * It initializes the driver data structure and the hardware, sets the MAC diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index ada33c2d9ac2..6acb6b5718b9 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -236,7 +236,8 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, } /* Update tunnel dst according to Geneve options. */ ip_tunnel_info_opts_set(&tun_dst->u.tun_info, - gnvh->options, gnvh->opt_len * 4); + gnvh->options, gnvh->opt_len * 4, + TUNNEL_GENEVE_OPT); } else { /* Drop packets w/ critical options, * since we don't support any... 
@@ -418,11 +419,12 @@ static int geneve_hlen(struct genevehdr *gh) return sizeof(*gh) + gh->opt_len * 4; } -static struct sk_buff **geneve_gro_receive(struct sock *sk, - struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *geneve_gro_receive(struct sock *sk, + struct list_head *head, + struct sk_buff *skb) { - struct sk_buff *p, **pp = NULL; + struct sk_buff *pp = NULL; + struct sk_buff *p; struct genevehdr *gh, *gh2; unsigned int hlen, gh_len, off_gnv; const struct packet_offload *ptype; @@ -449,7 +451,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk, goto out; } - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { if (!NAPI_GRO_CB(p)->same_flow) continue; @@ -674,7 +676,8 @@ static void geneve_build_header(struct genevehdr *geneveh, geneveh->proto_type = htons(ETH_P_TEB); geneveh->rsvd2 = 0; - ip_tunnel_info_opts_get(geneveh->options, info); + if (info->key.tun_flags & TUNNEL_GENEVE_OPT) + ip_tunnel_info_opts_get(geneveh->options, info); } static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb, diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile index 449b2a1a1800..0fee1d06c084 100644 --- a/drivers/net/netdevsim/Makefile +++ b/drivers/net/netdevsim/Makefile @@ -13,3 +13,7 @@ endif ifneq ($(CONFIG_NET_DEVLINK),) netdevsim-objs += devlink.o fib.o endif + +ifneq ($(CONFIG_XFRM_OFFLOAD),) +netdevsim-objs += ipsec.o +endif diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c new file mode 100644 index 000000000000..2dcf6cc269d0 --- /dev/null +++ b/drivers/net/netdevsim/ipsec.c @@ -0,0 +1,297 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. */ + +#include <crypto/aead.h> +#include <linux/debugfs.h> +#include <net/xfrm.h> + +#include "netdevsim.h" + +#define NSIM_IPSEC_AUTH_BITS 128 + +static ssize_t nsim_dbg_netdev_ops_read(struct file *filp, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct netdevsim *ns = filp->private_data; + struct nsim_ipsec *ipsec = &ns->ipsec; + size_t bufsize; + char *buf, *p; + int len; + int i; + + /* the buffer needed is + * (num SAs * 3 lines each * ~60 bytes per line) + one more line + */ + bufsize = (ipsec->count * 4 * 60) + 60; + buf = kzalloc(bufsize, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + p = buf; + p += snprintf(p, bufsize - (p - buf), + "SA count=%u tx=%u\n", + ipsec->count, ipsec->tx); + + for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) { + struct nsim_sa *sap = &ipsec->sa[i]; + + if (!sap->used) + continue; + + p += snprintf(p, bufsize - (p - buf), + "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n", + i, (sap->rx ? 
'r' : 't'), sap->ipaddr[0], + sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]); + p += snprintf(p, bufsize - (p - buf), + "sa[%i] spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n", + i, be32_to_cpu(sap->xs->id.spi), + sap->xs->id.proto, sap->salt, sap->crypt); + p += snprintf(p, bufsize - (p - buf), + "sa[%i] key=0x%08x %08x %08x %08x\n", + i, sap->key[0], sap->key[1], + sap->key[2], sap->key[3]); + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, p - buf); + + kfree(buf); + return len; +} + +static const struct file_operations ipsec_dbg_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = nsim_dbg_netdev_ops_read, +}; + +static int nsim_ipsec_find_empty_idx(struct nsim_ipsec *ipsec) +{ + u32 i; + + if (ipsec->count == NSIM_IPSEC_MAX_SA_COUNT) + return -ENOSPC; + + /* search sa table */ + for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) { + if (!ipsec->sa[i].used) + return i; + } + + return -ENOSPC; +} + +static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs, + u32 *mykey, u32 *mysalt) +{ + const char aes_gcm_name[] = "rfc4106(gcm(aes))"; + struct net_device *dev = xs->xso.dev; + unsigned char *key_data; + char *alg_name = NULL; + int key_len; + + if (!xs->aead) { + netdev_err(dev, "Unsupported IPsec algorithm\n"); + return -EINVAL; + } + + if (xs->aead->alg_icv_len != NSIM_IPSEC_AUTH_BITS) { + netdev_err(dev, "IPsec offload requires %d bit authentication\n", + NSIM_IPSEC_AUTH_BITS); + return -EINVAL; + } + + key_data = &xs->aead->alg_key[0]; + key_len = xs->aead->alg_key_len; + alg_name = xs->aead->alg_name; + + if (strcmp(alg_name, aes_gcm_name)) { + netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n", + aes_gcm_name); + return -EINVAL; + } + + /* 160 accounts for 16 byte key and 4 byte salt */ + if (key_len > NSIM_IPSEC_AUTH_BITS) { + *mysalt = ((u32 *)key_data)[4]; + } else if (key_len == NSIM_IPSEC_AUTH_BITS) { + *mysalt = 0; + } else { + netdev_err(dev, "IPsec hw offload only supports 128 bit keys with optional 32 bit salt\n"); + return -EINVAL; + } + memcpy(mykey, key_data, 16); + + return 0; +} + +static int nsim_ipsec_add_sa(struct xfrm_state *xs) +{ + struct nsim_ipsec *ipsec; + struct net_device *dev; + struct netdevsim *ns; + struct nsim_sa sa; + u16 sa_idx; + int ret; + + dev = xs->xso.dev; + ns = netdev_priv(dev); + ipsec = &ns->ipsec; + + if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { + netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n", + xs->id.proto); + return -EINVAL; + } + + if (xs->calg) { + netdev_err(dev, "Compression offload not supported\n"); + return -EINVAL; + } + + /* find the first unused index */ + ret = nsim_ipsec_find_empty_idx(ipsec); + if (ret < 0) { + netdev_err(dev, "No space for SA in Rx table!\n"); + return ret; + } + sa_idx = (u16)ret; + + memset(&sa, 0, sizeof(sa)); + sa.used = true; + sa.xs = xs; + + if (sa.xs->id.proto & IPPROTO_ESP) + sa.crypt = xs->ealg || xs->aead; + + /* get the key and salt */ + ret = nsim_ipsec_parse_proto_keys(xs, sa.key, &sa.salt); + if (ret) { + netdev_err(dev, "Failed to get key data for SA table\n"); + return ret; + } + + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + sa.rx = true; + + if (xs->props.family == AF_INET6) + memcpy(sa.ipaddr, &xs->id.daddr.a6, 16); + else + memcpy(&sa.ipaddr[3], &xs->id.daddr.a4, 4); + } + + /* the preparations worked, so save the info */ + memcpy(&ipsec->sa[sa_idx], &sa, sizeof(sa)); + + /* the XFRM stack doesn't like offload_handle == 0, + * so add a bitflag in case our array index is 0 + */ + xs->xso.offload_handle = 
sa_idx | NSIM_IPSEC_VALID; + ipsec->count++; + + return 0; +} + +static void nsim_ipsec_del_sa(struct xfrm_state *xs) +{ + struct netdevsim *ns = netdev_priv(xs->xso.dev); + struct nsim_ipsec *ipsec = &ns->ipsec; + u16 sa_idx; + + sa_idx = xs->xso.offload_handle & ~NSIM_IPSEC_VALID; + if (!ipsec->sa[sa_idx].used) { + netdev_err(ns->netdev, "Invalid SA for delete sa_idx=%d\n", + sa_idx); + return; + } + + memset(&ipsec->sa[sa_idx], 0, sizeof(struct nsim_sa)); + ipsec->count--; +} + +static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) +{ + struct netdevsim *ns = netdev_priv(xs->xso.dev); + struct nsim_ipsec *ipsec = &ns->ipsec; + + ipsec->ok++; + + return true; +} + +static const struct xfrmdev_ops nsim_xfrmdev_ops = { + .xdo_dev_state_add = nsim_ipsec_add_sa, + .xdo_dev_state_delete = nsim_ipsec_del_sa, + .xdo_dev_offload_ok = nsim_ipsec_offload_ok, +}; + +bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb) +{ + struct nsim_ipsec *ipsec = &ns->ipsec; + struct xfrm_state *xs; + struct nsim_sa *tsa; + u32 sa_idx; + + /* do we even need to check this packet? */ + if (!skb->sp) + return true; + + if (unlikely(!skb->sp->len)) { + netdev_err(ns->netdev, "no xfrm state len = %d\n", + skb->sp->len); + return false; + } + + xs = xfrm_input_state(skb); + if (unlikely(!xs)) { + netdev_err(ns->netdev, "no xfrm_input_state() xs = %p\n", xs); + return false; + } + + sa_idx = xs->xso.offload_handle & ~NSIM_IPSEC_VALID; + if (unlikely(sa_idx >= NSIM_IPSEC_MAX_SA_COUNT)) { + netdev_err(ns->netdev, "bad sa_idx=%d max=%d\n", + sa_idx, NSIM_IPSEC_MAX_SA_COUNT); + return false; + } + + tsa = &ipsec->sa[sa_idx]; + if (unlikely(!tsa->used)) { + netdev_err(ns->netdev, "unused sa_idx=%d\n", sa_idx); + return false; + } + + if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { + netdev_err(ns->netdev, "unexpected proto=%d\n", xs->id.proto); + return false; + } + + ipsec->tx++; + + return true; +} + +void nsim_ipsec_init(struct netdevsim *ns) +{ + ns->netdev->xfrmdev_ops = &nsim_xfrmdev_ops; + +#define NSIM_ESP_FEATURES (NETIF_F_HW_ESP | \ + NETIF_F_HW_ESP_TX_CSUM | \ + NETIF_F_GSO_ESP) + + ns->netdev->features |= NSIM_ESP_FEATURES; + ns->netdev->hw_enc_features |= NSIM_ESP_FEATURES; + + ns->ipsec.pfile = debugfs_create_file("ipsec", 0400, ns->ddir, ns, + &ipsec_dbg_fops); +} + +void nsim_ipsec_teardown(struct netdevsim *ns) +{ + struct nsim_ipsec *ipsec = &ns->ipsec; + + if (ipsec->count) + netdev_err(ns->netdev, "tearing down IPsec offload with %d SAs left\n", + ipsec->count); + debugfs_remove_recursive(ipsec->pfile); +} diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index ec68f38213d9..b2f9d0df93b0 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -171,6 +171,8 @@ static int nsim_init(struct net_device *dev) if (err) goto err_unreg_dev; + nsim_ipsec_init(ns); + return 0; err_unreg_dev: @@ -186,6 +188,7 @@ static void nsim_uninit(struct net_device *dev) { struct netdevsim *ns = netdev_priv(dev); + nsim_ipsec_teardown(ns); nsim_devlink_teardown(ns); debugfs_remove_recursive(ns->ddir); nsim_bpf_uninit(ns); @@ -203,11 +206,15 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct netdevsim *ns = netdev_priv(dev); + if (!nsim_ipsec_tx(ns, skb)) + goto out; + u64_stats_update_begin(&ns->syncp); ns->tx_packets++; ns->tx_bytes += skb->len; u64_stats_update_end(&ns->syncp); +out: dev_kfree_skb(skb); return NETDEV_TX_OK; @@ -260,7 +267,7 @@ nsim_setup_tc_block(struct net_device 
*dev, struct tc_block_offload *f) switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, nsim_setup_tc_block_cb, - ns, ns); + ns, ns, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, nsim_setup_tc_block_cb, ns); return 0; diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 8ca50b72c328..d8a7cc995e88 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -29,6 +29,27 @@ struct bpf_prog; struct dentry; struct nsim_vf_config; +#define NSIM_IPSEC_MAX_SA_COUNT 33 +#define NSIM_IPSEC_VALID BIT(31) + +struct nsim_sa { + struct xfrm_state *xs; + __be32 ipaddr[4]; + u32 key[4]; + u32 salt; + bool used; + bool crypt; + bool rx; +}; + +struct nsim_ipsec { + struct nsim_sa sa[NSIM_IPSEC_MAX_SA_COUNT]; + struct dentry *pfile; + u32 count; + u32 tx; + u32 ok; +}; + struct netdevsim { struct net_device *netdev; @@ -67,6 +88,7 @@ struct netdevsim { #if IS_ENABLED(CONFIG_NET_DEVLINK) struct devlink *devlink; #endif + struct nsim_ipsec ipsec; }; extern struct dentry *nsim_ddir; @@ -148,6 +170,25 @@ static inline void nsim_devlink_exit(void) } #endif +#if IS_ENABLED(CONFIG_XFRM_OFFLOAD) +void nsim_ipsec_init(struct netdevsim *ns); +void nsim_ipsec_teardown(struct netdevsim *ns); +bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb); +#else +static inline void nsim_ipsec_init(struct netdevsim *ns) +{ +} + +static inline void nsim_ipsec_teardown(struct netdevsim *ns) +{ +} + +static inline bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb) +{ + return true; +} +#endif + static inline struct netdevsim *to_nsim(struct device *ptr) { return container_of(ptr, struct netdevsim, dev); diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index 9f6f7ccd44f7..b12023bc2cab 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -430,7 +430,7 @@ static int ntb_netdev_probe(struct device *client_dev) ndev->hw_features = ndev->features; ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS); - random_ether_addr(ndev->perm_addr); + eth_random_addr(ndev->perm_addr); memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len); ndev->netdev_ops = &ntb_netdev_ops; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 343989f9f9d9..ceede09a2845 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -92,7 +92,8 @@ config MDIO_CAVIUM config MDIO_GPIO tristate "GPIO lib-based bitbanged MDIO buses" - depends on MDIO_BITBANG && GPIOLIB + depends on MDIO_BITBANG + depends on GPIOLIB || COMPILE_TEST ---help--- Supports GPIO lib-based MDIO busses. 
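A few hunks above, the new netdevsim IPsec code is wired in behind CONFIG_XFRM_OFFLOAD: the Makefile compiles ipsec.o only when the option is set, and netdevsim.h supplies static inline no-op stubs so callers such as nsim_start_xmit() need no #ifdefs of their own. Much like the COMPILE_TEST relaxation in the Kconfig hunk just above, the point is that code stays buildable and callable in every configuration. A minimal sketch of the stub idiom follows; CONFIG_FOO_OFFLOAD, struct foo_dev, and the foo_* functions are hypothetical stand-ins, not names from the patch.

#include <linux/kconfig.h>
#include <linux/skbuff.h>

struct foo_dev;

#if IS_ENABLED(CONFIG_FOO_OFFLOAD)
/* Real implementations live in foo.c, built only when enabled. */
void foo_init(struct foo_dev *fd);
bool foo_tx(struct foo_dev *fd, struct sk_buff *skb);
#else
static inline void foo_init(struct foo_dev *fd)
{
}

static inline bool foo_tx(struct foo_dev *fd, struct sk_buff *skb)
{
	return true;	/* offload absent: let every packet pass */
}
#endif

With this shape the transmit hot path calls foo_tx() unconditionally, and in the disabled configuration the inline stub costs nothing at run time.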
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index 001fe1df7557..67b260877f30 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -259,10 +259,8 @@ static int __init fixed_mdio_bus_init(void) int ret; pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0); - if (IS_ERR(pdev)) { - ret = PTR_ERR(pdev); - goto err_pdev; - } + if (IS_ERR(pdev)) + return PTR_ERR(pdev); fmb->mii_bus = mdiobus_alloc(); if (fmb->mii_bus == NULL) { @@ -287,7 +285,6 @@ err_mdiobus_alloc: mdiobus_free(fmb->mii_bus); err_mdiobus_reg: platform_device_unregister(pdev); -err_pdev: return ret; } module_init(fixed_mdio_bus_init); diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c index 082ffef0dec4..bc90764a8b8d 100644 --- a/drivers/net/phy/mdio-mux-gpio.c +++ b/drivers/net/phy/mdio-mux-gpio.c @@ -20,23 +20,23 @@ struct mdio_mux_gpio_state { struct gpio_descs *gpios; void *mux_handle; + int values[]; }; static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, void *data) { struct mdio_mux_gpio_state *s = data; - int values[s->gpios->ndescs]; unsigned int n; if (current_child == desired_child) return 0; for (n = 0; n < s->gpios->ndescs; n++) - values[n] = (desired_child >> n) & 1; + s->values[n] = (desired_child >> n) & 1; gpiod_set_array_value_cansleep(s->gpios->ndescs, s->gpios->desc, - values); + s->values); return 0; } @@ -44,15 +44,21 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, static int mdio_mux_gpio_probe(struct platform_device *pdev) { struct mdio_mux_gpio_state *s; + struct gpio_descs *gpios; int r; - s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); - if (!s) + gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW); + if (IS_ERR(gpios)) + return PTR_ERR(gpios); + + s = devm_kzalloc(&pdev->dev, struct_size(s, values, gpios->ndescs), + GFP_KERNEL); + if (!s) { + gpiod_put_array(gpios); return -ENOMEM; + } - s->gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW); - if (IS_ERR(s->gpios)) - return PTR_ERR(s->gpios); + s->gpios = gpios; r = mdio_mux_init(&pdev->dev, pdev->dev.of_node, mdio_mux_gpio_switch_fn, &s->mux_handle, s, NULL); diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 082fb40c656d..f8f127831a92 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -128,6 +128,37 @@ static int rtl8211f_config_intr(struct phy_device *phydev) return phy_write_paged(phydev, 0xa42, RTL821x_INER, val); } +static int rtl8211_config_aneg(struct phy_device *phydev) +{ + int ret; + + ret = genphy_config_aneg(phydev); + if (ret < 0) + return ret; + + /* Quirk was copied from vendor driver. Unfortunately it includes no + * description of the magic numbers. 
+ */ + if (phydev->speed == SPEED_100 && phydev->autoneg == AUTONEG_DISABLE) { + phy_write(phydev, 0x17, 0x2138); + phy_write(phydev, 0x0e, 0x0260); + } else { + phy_write(phydev, 0x17, 0x2108); + phy_write(phydev, 0x0e, 0x0000); + } + + return 0; +} + +static int rtl8211c_config_init(struct phy_device *phydev) +{ + /* RTL8211C has an issue when operating in Gigabit slave mode */ + phy_set_bits(phydev, MII_CTRL1000, + CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER); + + return genphy_config_init(phydev); +} + static int rtl8211f_config_init(struct phy_device *phydev) { int ret; @@ -179,6 +210,14 @@ static struct phy_driver realtek_drvs[] = { .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, }, { + .phy_id = 0x001cc910, + .name = "RTL8211 Gigabit Ethernet", + .phy_id_mask = 0x001fffff, + .features = PHY_GBIT_FEATURES, + .config_aneg = rtl8211_config_aneg, + .read_mmd = &genphy_read_mmd_unsupported, + .write_mmd = &genphy_write_mmd_unsupported, + }, { .phy_id = 0x001cc912, .name = "RTL8211B Gigabit Ethernet", .phy_id_mask = 0x001fffff, @@ -191,6 +230,14 @@ static struct phy_driver realtek_drvs[] = { .suspend = rtl8211b_suspend, .resume = rtl8211b_resume, }, { + .phy_id = 0x001cc913, + .name = "RTL8211C Gigabit Ethernet", + .phy_id_mask = 0x001fffff, + .features = PHY_GBIT_FEATURES, + .config_init = rtl8211c_config_init, + .read_mmd = &genphy_read_mmd_unsupported, + .write_mmd = &genphy_write_mmd_unsupported, + }, { .phy_id = 0x001cc914, .name = "RTL8211DN Gigabit Ethernet", .phy_id_mask = 0x001fffff, @@ -230,6 +277,7 @@ module_phy_driver(realtek_drvs); static struct mdio_device_id __maybe_unused realtek_tbl[] = { { 0x001cc816, 0x001fffff }, + { 0x001cc910, 0x001fffff }, { 0x001cc912, 0x001fffff }, { 0x001cc914, 0x001fffff }, { 0x001cc915, 0x001fffff }, diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index 2e5150b0b8d5..74a8782313cf 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -33,17 +33,22 @@ struct gmii2rgmii { struct phy_device *phy_dev; struct phy_driver *phy_drv; struct phy_driver conv_phy_drv; - int addr; + struct mdio_device *mdio; }; static int xgmiitorgmii_read_status(struct phy_device *phydev) { struct gmii2rgmii *priv = phydev->priv; + struct mii_bus *bus = priv->mdio->bus; + int addr = priv->mdio->addr; u16 val = 0; + int err; - priv->phy_drv->read_status(phydev); + err = priv->phy_drv->read_status(phydev); + if (err < 0) + return err; - val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG); + val = mdiobus_read(bus, addr, XILINX_GMII2RGMII_REG); val &= ~XILINX_GMII2RGMII_SPEED_MASK; if (phydev->speed == SPEED_1000) @@ -53,7 +58,7 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) else val |= BMCR_SPEED10; - mdiobus_write(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG, val); + mdiobus_write(bus, addr, XILINX_GMII2RGMII_REG, val); return 0; } @@ -81,7 +86,12 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev) return -EPROBE_DEFER; } - priv->addr = mdiodev->addr; + if (!priv->phy_dev->drv) { + dev_info(dev, "Attached phy not ready\n"); + return -EPROBE_DEFER; + } + + priv->mdio = mdiodev; priv->phy_drv = priv->phy_dev->drv; memcpy(&priv->conv_phy_drv, priv->phy_dev->drv, sizeof(struct phy_driver)); diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 18d36dff97ea..424053bd8b21 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -869,6 +869,7 @@ static int catc_probe(struct usb_interface *intf, const struct 
usb_device_id *id default: dev_warn(&intf->dev, "Couldn't detect memory size, assuming 32k\n"); + /* fall through */ case 0x87654321: catc_set_reg(catc, TxBufCount, 4); catc_set_reg(catc, RxBufCount, 16); diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 288ecd999171..78b16eb9e58c 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -99,6 +99,7 @@ static void tx_complete(struct urb *req) struct net_device *dev = skb->dev; struct usbpn_dev *pnd = netdev_priv(dev); int status = req->status; + unsigned long flags; switch (status) { case 0: @@ -109,16 +110,17 @@ static void tx_complete(struct urb *req) case -ECONNRESET: case -ESHUTDOWN: dev->stats.tx_aborted_errors++; + /* fall through */ default: dev->stats.tx_errors++; dev_dbg(&dev->dev, "TX error (%d)\n", status); } dev->stats.tx_packets++; - spin_lock(&pnd->tx_lock); + spin_lock_irqsave(&pnd->tx_lock, flags); pnd->tx_queue--; netif_wake_queue(dev); - spin_unlock(&pnd->tx_lock); + spin_unlock_irqrestore(&pnd->tx_lock, flags); dev_kfree_skb_any(skb); usb_free_urb(req); diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index e53883ad6107..de305ead32e6 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -999,6 +999,7 @@ static void read_bulk_callback(struct urb *urb) struct hso_net *odev = urb->context; struct net_device *net; int result; + unsigned long flags; int status = urb->status; /* is al ok? (Filip: Who's Al ?) */ @@ -1028,11 +1029,11 @@ static void read_bulk_callback(struct urb *urb) if (urb->actual_length) { /* Handle the IP stream, add header and push it onto network * stack if the packet is complete. */ - spin_lock(&odev->net_lock); + spin_lock_irqsave(&odev->net_lock, flags); packetizeRx(odev, urb->transfer_buffer, urb->actual_length, (urb->transfer_buffer_length > urb->actual_length) ? 1 : 0); - spin_unlock(&odev->net_lock); + spin_unlock_irqrestore(&odev->net_lock, flags); } /* We are done with this URB, resubmit it. 
Prep the USB to wait for @@ -1193,6 +1194,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb) { struct hso_serial *serial = urb->context; int status = urb->status; + unsigned long flags; hso_dbg(0x8, "--- Got serial_read_bulk callback %02x ---\n", status); @@ -1216,10 +1218,10 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb) if (serial->parent->port_spec & HSO_INFO_CRC_BUG) fix_crc_bug(urb, serial->in_endp->wMaxPacketSize); /* Valid data, handle RX data */ - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1; put_rxbuf_data_and_resubmit_bulk_urb(serial); - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); } /* @@ -1502,12 +1504,13 @@ static void tiocmget_intr_callback(struct urb *urb) DUMP(serial_state_notification, sizeof(struct hso_serial_state_notification)); } else { + unsigned long flags; UART_state_bitmap = le16_to_cpu(serial_state_notification-> UART_state_bitmap); prev_UART_state_bitmap = tiocmget->prev_UART_state_bitmap; icount = &tiocmget->icount; - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); if ((UART_state_bitmap & B_OVERRUN) != (prev_UART_state_bitmap & B_OVERRUN)) icount->parity++; @@ -1530,7 +1533,7 @@ static void tiocmget_intr_callback(struct urb *urb) (prev_UART_state_bitmap & B_RX_CARRIER)) icount->dcd++; tiocmget->prev_UART_state_bitmap = UART_state_bitmap; - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); tiocmget->intr_completed = 1; wake_up_interruptible(&tiocmget->waitq); } @@ -1852,6 +1855,7 @@ static void intr_callback(struct urb *urb) struct hso_serial *serial; unsigned char *port_req; int status = urb->status; + unsigned long flags; int i; usb_mark_last_busy(urb->dev); @@ -1879,7 +1883,7 @@ static void intr_callback(struct urb *urb) if (serial != NULL) { hso_dbg(0x1, "Pending read interrupt on port %d\n", i); - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); if (serial->rx_state == RX_IDLE && serial->port.count > 0) { /* Setup and send a ctrl req read on @@ -1893,7 +1897,8 @@ static void intr_callback(struct urb *urb) hso_dbg(0x1, "Already a read pending on port %d or port not open\n", i); } - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, + flags); } } } @@ -1920,6 +1925,7 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb) { struct hso_serial *serial = urb->context; int status = urb->status; + unsigned long flags; /* sanity check */ if (!serial) { @@ -1927,9 +1933,9 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb) return; } - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); serial->tx_urb_used = 0; - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); if (status) { handle_usb_error(status, __func__, serial->parent); return; @@ -1971,14 +1977,15 @@ static void ctrl_callback(struct urb *urb) struct hso_serial *serial = urb->context; struct usb_ctrlrequest *req; int status = urb->status; + unsigned long flags; /* sanity check */ if (!serial) return; - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); serial->tx_urb_used = 0; - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); if (status) { handle_usb_error(status, __func__, serial->parent); return; @@ -1994,9 +2001,9 @@ static void 
ctrl_callback(struct urb *urb) (USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE)) { /* response to a read command */ serial->rx_urb_filled[0] = 1; - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); put_rxbuf_data_and_resubmit_ctrl_urb(serial); - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); } else { hso_put_activity(serial->parent); tty_port_tty_wakeup(&serial->port); diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index f1605833c5cf..913e50bab0a2 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -587,7 +587,7 @@ static void kaweth_usb_receive(struct urb *urb) struct kaweth_device *kaweth = urb->context; struct net_device *net = kaweth->net; int status = urb->status; - + unsigned long flags; int count = urb->actual_length; int count2 = urb->transfer_buffer_length; @@ -619,12 +619,12 @@ static void kaweth_usb_receive(struct urb *urb) net->stats.rx_errors++; dev_dbg(dev, "Status was -EOVERFLOW.\n"); } - spin_lock(&kaweth->device_lock); + spin_lock_irqsave(&kaweth->device_lock, flags); if (IS_BLOCKED(kaweth->status)) { - spin_unlock(&kaweth->device_lock); + spin_unlock_irqrestore(&kaweth->device_lock, flags); return; } - spin_unlock(&kaweth->device_lock); + spin_unlock_irqrestore(&kaweth->device_lock, flags); if(status && status != -EREMOTEIO && count != 1) { dev_err(&kaweth->intf->dev, diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 2e4130746c40..6f2ea84bf0b2 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1721,7 +1721,7 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev) "MAC address read from EEPROM"); } else { /* generate random MAC */ - random_ether_addr(addr); + eth_random_addr(addr); netif_dbg(dev, ifup, dev->net, "MAC address set to random addr"); } diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 2a58607a6aea..124211afb023 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1252,6 +1252,7 @@ static void read_bulk_callback(struct urb *urb) int status = urb->status; struct rx_agg *agg; struct r8152 *tp; + unsigned long flags; agg = urb->context; if (!agg) @@ -1281,9 +1282,9 @@ static void read_bulk_callback(struct urb *urb) if (urb->actual_length < ETH_ZLEN) break; - spin_lock(&tp->rx_lock); + spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&agg->list, &tp->rx_done); - spin_unlock(&tp->rx_lock); + spin_unlock_irqrestore(&tp->rx_lock, flags); napi_schedule(&tp->napi); return; case -ESHUTDOWN: @@ -1311,6 +1312,7 @@ static void write_bulk_callback(struct urb *urb) struct net_device *netdev; struct tx_agg *agg; struct r8152 *tp; + unsigned long flags; int status = urb->status; agg = urb->context; @@ -1332,9 +1334,9 @@ static void write_bulk_callback(struct urb *urb) stats->tx_bytes += agg->skb_len; } - spin_lock(&tp->tx_lock); + spin_lock_irqsave(&tp->tx_lock, flags); list_add_tail(&agg->list, &tp->tx_free); - spin_unlock(&tp->tx_lock); + spin_unlock_irqrestore(&tp->tx_lock, flags); usb_autopm_put_interface_async(tp->intf); @@ -1374,6 +1376,7 @@ static void intr_callback(struct urb *urb) case -ECONNRESET: /* unlink */ case -ESHUTDOWN: netif_device_detach(tp->netdev); + /* fall through */ case -ENOENT: case -EPROTO: netif_info(tp, intr, tp->netdev, @@ -2739,6 +2742,7 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable) r8152_mdio_write(tp, MII_BMCR, data); data = r8153_phy_status(tp, PHY_STAT_LAN_ON); + /* fall through */ default: if (data != 
PHY_STAT_LAN_ON) diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 5f565bd574da..0e81d4c441d9 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -391,6 +391,7 @@ static void read_bulk_callback(struct urb *urb) u16 rx_stat; int status = urb->status; int result; + unsigned long flags; dev = urb->context; if (!dev) return; @@ -432,9 +433,9 @@ static void read_bulk_callback(struct urb *urb) netdev->stats.rx_packets++; netdev->stats.rx_bytes += pkt_len; - spin_lock(&dev->rx_pool_lock); + spin_lock_irqsave(&dev->rx_pool_lock, flags); skb = pull_skb(dev); - spin_unlock(&dev->rx_pool_lock); + spin_unlock_irqrestore(&dev->rx_pool_lock, flags); if (!skb) goto resched; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index f6bb1d54d4bd..601ae176808d 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -568,11 +568,12 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, return vh; } -static struct sk_buff **vxlan_gro_receive(struct sock *sk, - struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *vxlan_gro_receive(struct sock *sk, + struct list_head *head, + struct sk_buff *skb) { - struct sk_buff *p, **pp = NULL; + struct sk_buff *pp = NULL; + struct sk_buff *p; struct vxlanhdr *vh, *vh2; unsigned int hlen, off_vx; int flush = 1; @@ -607,7 +608,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk, skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */ - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { if (!NAPI_GRO_CB(p)->same_flow) continue; @@ -2119,7 +2120,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, vni = tunnel_id_to_key32(info->key.tun_id); ifindex = 0; dst_cache = &info->dst_cache; - if (info->options_len) + if (info->options_len && + info->key.tun_flags & TUNNEL_VXLAN_OPT) md = ip_tunnel_info_opts(info); ttl = info->key.ttl; tos = info->key.tos; diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index e60bea4604e4..1665066f4e24 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -496,7 +496,7 @@ static void ath9k_hw_init_macaddr(struct ath_hw *ah) ath_err(common, "eeprom contains invalid mac address: %pM\n", common->macaddr); - random_ether_addr(common->macaddr); + eth_random_addr(common->macaddr); ath_err(common, "random mac address will be used: %pM\n", common->macaddr); diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 39c817eddd78..31bd6f714052 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -1904,7 +1904,7 @@ void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv) reject_agg, ctrl_agg_size, agg_size); rtlpriv->hw->max_rx_aggregation_subframes = (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF_HT); } EXPORT_SYMBOL(rtl_rx_ampdu_apply);
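A pattern repeated across the USB driver hunks above (cdc-phonet, hso, kaweth, r8152, rtl8150) is the conversion of plain spin_lock()/spin_unlock() calls inside URB completion callbacks to the _irqsave/_irqrestore variants, together with the new `unsigned long flags;` local each conversion needs. This lines up with the effort to let the USB core give back URBs without first disabling interrupts: once a completion handler can run with interrupts enabled, any lock it shares with other contexts must disable interrupts itself. A condensed before/after sketch follows; struct my_dev and my_read_bulk_callback() are hypothetical stand-ins, not code from the patch.

#include <linux/spinlock.h>
#include <linux/usb.h>

struct my_dev {
	spinlock_t lock;	/* also taken outside the completion path */
	unsigned int rx_done;
};

static void my_read_bulk_callback(struct urb *urb)
{
	struct my_dev *dev = urb->context;
	unsigned long flags;

	/* Was: spin_lock(&dev->lock); that is safe only when the
	 * handler is guaranteed to run with interrupts already off.
	 */
	spin_lock_irqsave(&dev->lock, flags);
	dev->rx_done++;
	spin_unlock_irqrestore(&dev->lock, flags);
}

The irqsave form is also harmless when interrupts happen to be disabled already, which is why it is the safe default for code that may be reached from either hard-IRQ or task context.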