-rw-r--r--	drivers/net/ethernet/intel/e1000e/ethtool.c  |   2
-rw-r--r--	drivers/net/ethernet/intel/e1000e/hw.h       |   5
-rw-r--r--	drivers/net/ethernet/intel/e1000e/ich8lan.c  |   7
-rw-r--r--	drivers/net/ethernet/intel/e1000e/netdev.c   |   6
-rw-r--r--	drivers/net/ethernet/intel/e1000e/ptp.c      |   1
-rw-r--r--	drivers/net/ethernet/intel/igb/igb.h         |  80
-rw-r--r--	drivers/net/ethernet/intel/igb/igb_ethtool.c |   4
-rw-r--r--	drivers/net/ethernet/intel/igb/igb_main.c    | 437
-rw-r--r--	drivers/net/ethernet/intel/igc/igc.h         |   3
-rw-r--r--	drivers/net/ethernet/intel/igc/igc_base.c    |   5
-rw-r--r--	drivers/net/ethernet/intel/igc/igc_defines.h |  16
-rw-r--r--	drivers/net/ethernet/intel/igc/igc_ethtool.c |   3
-rw-r--r--	drivers/net/ethernet/intel/igc/igc_hw.h      |  11
-rw-r--r--	drivers/net/ethernet/intel/igc/igc_main.c    |  39
-rw-r--r--	drivers/net/ethernet/intel/igc/igc_ptp.c     |  58
15 files changed, 591 insertions, 86 deletions
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index a8fc9208382c..03215b0aee4b 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -895,6 +895,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
 	case e1000_pch_cnp:
 	case e1000_pch_tgp:
 	case e1000_pch_adp:
+	case e1000_pch_mtp:
 		mask |= BIT(18);
 		break;
 	default:
@@ -1560,6 +1561,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
 	case e1000_pch_cnp:
 	case e1000_pch_tgp:
 	case e1000_pch_adp:
+	case e1000_pch_mtp:
 		fext_nvm11 = er32(FEXTNVM11);
 		fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
 		ew32(FEXTNVM11, fext_nvm11);
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index b1447221669e..69a2329ea463 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -102,6 +102,10 @@ struct e1000_hw;
 #define E1000_DEV_ID_PCH_ADP_I219_V16		0x1A1F
 #define E1000_DEV_ID_PCH_ADP_I219_LM17		0x1A1C
 #define E1000_DEV_ID_PCH_ADP_I219_V17		0x1A1D
+#define E1000_DEV_ID_PCH_MTP_I219_LM18		0x550A
+#define E1000_DEV_ID_PCH_MTP_I219_V18		0x550B
+#define E1000_DEV_ID_PCH_MTP_I219_LM19		0x550C
+#define E1000_DEV_ID_PCH_MTP_I219_V19		0x550D
 
 #define E1000_REVISION_4	4
 
@@ -127,6 +131,7 @@ enum e1000_mac_type {
 	e1000_pch_cnp,
 	e1000_pch_tgp,
 	e1000_pch_adp,
+	e1000_pch_mtp,
 };
 
 enum e1000_media_type {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index ded74304e8cf..9aa6fad8ed47 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -320,6 +320,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 	case e1000_pch_cnp:
 	case e1000_pch_tgp:
 	case e1000_pch_adp:
+	case e1000_pch_mtp:
 		if (e1000_phy_is_accessible_pchlan(hw))
 			break;
 
@@ -464,6 +465,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 	case e1000_pch_cnp:
 	case e1000_pch_tgp:
 	case e1000_pch_adp:
+	case e1000_pch_mtp:
 		/* In case the PHY needs to be in mdio slow mode,
 		 * set slow mode and try to get the PHY id again.
 		 */
@@ -708,6 +710,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
 	case e1000_pch_cnp:
 	case e1000_pch_tgp:
 	case e1000_pch_adp:
+	case e1000_pch_mtp:
 	case e1000_pchlan:
 		/* check management mode */
 		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -1648,6 +1651,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
 	case e1000_pch_cnp:
 	case e1000_pch_tgp:
 	case e1000_pch_adp:
+	case e1000_pch_mtp:
 		rc = e1000_init_phy_params_pchlan(hw);
 		break;
 	default:
@@ -2102,6 +2106,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
 	case e1000_pch_cnp:
 	case e1000_pch_tgp:
 	case e1000_pch_adp:
+	case e1000_pch_mtp:
 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
 		break;
 	default:
@@ -3145,6 +3150,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
 	case e1000_pch_cnp:
 	case e1000_pch_tgp:
 	case e1000_pch_adp:
+	case e1000_pch_mtp:
 		bank1_offset = nvm->flash_bank_size;
 		act_offset = E1000_ICH_NVM_SIG_WORD;
 
@@ -4090,6 +4096,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
 	case e1000_pch_cnp:
 	case e1000_pch_tgp:
 	case e1000_pch_adp:
+	case e1000_pch_mtp:
 		word = NVM_COMPAT;
 		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
 		break;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 99f4ec9b5696..b30f00891c03 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3587,6 +3587,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
 	case e1000_pch_cnp:
 	case e1000_pch_tgp:
 	case e1000_pch_adp:
+	case e1000_pch_mtp:
 		if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
 			/* Stable 24MHz frequency */
 			incperiod = INCPERIOD_24MHZ;
@@ -4104,6 +4105,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 	case e1000_pch_cnp:
 	case e1000_pch_tgp:
 	case e1000_pch_adp:
+	case e1000_pch_mtp:
 		fc->refresh_time = 0xFFFF;
 		fc->pause_time = 0xFFFF;
 
@@ -7877,6 +7879,10 @@ static const struct pci_device_id e1000_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp },
 
 	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */
 };
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 8d21bcb427ec..f3f671311855 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -297,6 +297,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
 	case e1000_pch_cnp:
 	case e1000_pch_tgp:
 	case e1000_pch_adp:
+	case e1000_pch_mtp:
 		if ((hw->mac.type < e1000_pch_lpt) ||
 		    (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
 			adapter->ptp_clock_info.max_adj = 24000000 - 1;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 2f015b60a995..0286d2fceee4 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -19,6 +19,8 @@
 #include <linux/pci.h>
 #include <linux/mdio.h>
 
+#include <net/xdp.h>
+
 struct igb_adapter;
 
 #define E1000_PCS_CFG_IGN_SD	1
@@ -79,6 +81,12 @@ struct igb_adapter;
 #define IGB_I210_RX_LATENCY_100		2213
 #define IGB_I210_RX_LATENCY_1000	448
 
+/* XDP */
+#define IGB_XDP_PASS		0
+#define IGB_XDP_CONSUMED	BIT(0)
+#define IGB_XDP_TX		BIT(1)
+#define IGB_XDP_REDIR		BIT(2)
+
 struct vf_data_storage {
 	unsigned char vf_mac_addresses[ETH_ALEN];
 	u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
@@ -132,17 +140,62 @@ struct vf_mac_filter {
 /* Supported Rx Buffer Sizes */
 #define IGB_RXBUFFER_256	256
+#define IGB_RXBUFFER_1536	1536
 #define IGB_RXBUFFER_2048	2048
 #define IGB_RXBUFFER_3072	3072
 #define IGB_RX_HDR_LEN		IGB_RXBUFFER_256
 #define IGB_TS_HDR_LEN		16
 
-#define IGB_SKB_PAD		(NET_SKB_PAD + NET_IP_ALIGN)
+/* Attempt to maximize the headroom available for incoming frames. We
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
+ * the frame. This leaves us with 512 bytes of room. From that we need
+ * to deduct the space needed for the shared info and the padding needed
+ * to IP align the frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ *	 up negative. In these cases we should fall back to the 3K
+ *	 buffers.
+ */
 #if (PAGE_SIZE < 8192)
-#define IGB_MAX_FRAME_BUILD_SKB \
-	(SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048) - IGB_SKB_PAD - IGB_TS_HDR_LEN)
+#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_1536 - NET_IP_ALIGN)
+#define IGB_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + IGB_TS_HDR_LEN + IGB_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048))
+
+static inline int igb_compute_pad(int rx_buf_len)
+{
+	int page_size, pad_size;
+
+	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+	return pad_size;
+}
+
+static inline int igb_skb_pad(void)
+{
+	int rx_buf_len;
+
+	/* If a 2K buffer cannot handle a standard Ethernet frame then
+	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
+	 *
+	 * For a 3K buffer we need to add enough padding to allow for
+	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
+	 * cache-line alignment.
+	 */
+	if (IGB_2K_TOO_SMALL_WITH_PADDING)
+		rx_buf_len = IGB_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+	else
+		rx_buf_len = IGB_RXBUFFER_1536;
+
+	/* if needed make room for NET_IP_ALIGN */
+	rx_buf_len -= NET_IP_ALIGN;
+
+	return igb_compute_pad(rx_buf_len);
+}
+
+#define IGB_SKB_PAD	igb_skb_pad()
 #else
-#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_2048 - IGB_TS_HDR_LEN)
+#define IGB_SKB_PAD	(NET_SKB_PAD + NET_IP_ALIGN)
 #endif
 
 /* How many Rx Buffers do we bundle into one write to the hardware ?
 */
@@ -194,13 +247,22 @@ enum igb_tx_flags {
 #define IGB_SFF_ADDRESSING_MODE		0x4
 #define IGB_SFF_8472_UNSUP		0x00
 
+enum igb_tx_buf_type {
+	IGB_TYPE_SKB = 0,
+	IGB_TYPE_XDP,
+};
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer
  */
 struct igb_tx_buffer {
 	union e1000_adv_tx_desc *next_to_watch;
 	unsigned long time_stamp;
-	struct sk_buff *skb;
+	enum igb_tx_buf_type type;
+	union {
+		struct sk_buff *skb;
+		struct xdp_frame *xdpf;
+	};
 	unsigned int bytecount;
 	u16 gso_segs;
 	__be16 protocol;
@@ -248,6 +310,7 @@ struct igb_ring_container {
 struct igb_ring {
 	struct igb_q_vector *q_vector;	/* backlink to q_vector */
 	struct net_device *netdev;	/* back pointer to net_device */
+	struct bpf_prog *xdp_prog;
 	struct device *dev;		/* device pointer for dma mapping */
 	union {				/* array of buffer info structs */
 		struct igb_tx_buffer *tx_buffer_info;
@@ -288,6 +351,7 @@ struct igb_ring {
 			struct u64_stats_sync rx_syncp;
 		};
 	};
+	struct xdp_rxq_info xdp_rxq;
 } ____cacheline_internodealigned_in_smp;
 
 struct igb_q_vector {
@@ -339,7 +403,7 @@ static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
 		return IGB_RXBUFFER_3072;
 
 	if (ring_uses_build_skb(ring))
-		return IGB_MAX_FRAME_BUILD_SKB + IGB_TS_HDR_LEN;
+		return IGB_MAX_FRAME_BUILD_SKB;
 #endif
 	return IGB_RXBUFFER_2048;
 }
@@ -467,6 +531,7 @@ struct igb_adapter {
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 
 	struct net_device *netdev;
+	struct bpf_prog *xdp_prog;
 
 	unsigned long state;
 	unsigned int flags;
@@ -643,6 +708,9 @@ enum igb_boards {
 
 extern char igb_driver_name[];
 
+int igb_xmit_xdp_ring(struct igb_adapter *adapter,
+		      struct igb_ring *ring,
+		      struct xdp_frame *xdpf);
 int igb_open(struct net_device *netdev);
 int igb_close(struct net_device *netdev);
 int igb_up(struct igb_adapter *);
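The headroom arithmetic added to igb.h above can be checked in isolation. The following standalone sketch (not part of the patch) mirrors igb_compute_pad()/igb_skb_pad() with assumed typical x86-64 values: PAGE_SIZE 4096, SMP_CACHE_BYTES 64, NET_SKB_PAD 64, NET_IP_ALIGN 2, and sizeof(struct skb_shared_info) taken as 320 bytes. Under those assumptions a 2K buffer is not "too small with padding" (64 + 16 + 1536 = 1616 <= 1728), so IGB_SKB_PAD works out to 194 bytes of headroom instead of the old fixed NET_SKB_PAD + NET_IP_ALIGN.

/* worked_pad.c - userspace sketch of the igb headroom math (assumed constants) */
#include <stdio.h>

#define PAGE_SIZE		4096	/* assumption */
#define SMP_CACHE_BYTES		64	/* assumption */
#define NET_SKB_PAD		64	/* assumption */
#define NET_IP_ALIGN		2	/* assumption */
#define SHINFO_SIZE		320	/* assumed sizeof(struct skb_shared_info) */

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define SKB_DATA_ALIGN(x)	ALIGN(x, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(x)	((x) - SKB_DATA_ALIGN(SHINFO_SIZE))

#define IGB_RXBUFFER_1536	1536
#define IGB_RXBUFFER_2048	2048
#define IGB_RXBUFFER_3072	3072
#define IGB_TS_HDR_LEN		16

static int igb_compute_pad(int rx_buf_len)
{
	/* round the buffer up to a half page, then deduct data + shared info */
	int page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);

	return SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
}

int main(void)
{
	int too_small = (NET_SKB_PAD + IGB_TS_HDR_LEN + IGB_RXBUFFER_1536) >
			SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048);
	int rx_buf_len = too_small ?
		IGB_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN) :
		IGB_RXBUFFER_1536;

	rx_buf_len -= NET_IP_ALIGN;	/* make room for NET_IP_ALIGN */

	/* ALIGN(1534, 2048) = 2048; 2048 - 320 - 1534 = 194 bytes headroom */
	printf("IGB_SKB_PAD = %d\n", igb_compute_pad(rx_buf_len));
	return 0;
}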
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 6e8231c1ddf0..28baf203459a 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -961,6 +961,10 @@ static int igb_set_ringparam(struct net_device *netdev,
 			memcpy(&temp_ring[i], adapter->rx_ring[i],
 			       sizeof(struct igb_ring));
 
+			/* Clear copied XDP RX-queue info */
+			memset(&temp_ring[i].xdp_rxq, 0,
+			       sizeof(temp_ring[i].xdp_rxq));
+
 			temp_ring[i].count = new_rx_count;
 			err = igb_setup_rx_resources(&temp_ring[i]);
 			if (err) {
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 44157fcd3cf7..368f950f68b1 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -30,6 +30,8 @@
 #include <linux/if_ether.h>
 #include <linux/aer.h>
 #include <linux/prefetch.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 #include <linux/pm_runtime.h>
 #include <linux/etherdevice.h>
 #ifdef CONFIG_IGB_DCA
@@ -2823,6 +2825,147 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
 	}
 }
 
+static int igb_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
+{
+	int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+	struct igb_adapter *adapter = netdev_priv(dev);
+	bool running = netif_running(dev);
+	struct bpf_prog *old_prog;
+	bool need_reset;
+
+	/* verify igb ring attributes are sufficient for XDP */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct igb_ring *ring = adapter->rx_ring[i];
+
+		if (frame_size > igb_rx_bufsz(ring))
+			return -EINVAL;
+	}
+
+	old_prog = xchg(&adapter->xdp_prog, prog);
+	need_reset = (!!prog != !!old_prog);
+
+	/* device is up and bpf is added/removed, must setup the RX queues */
+	if (need_reset && running) {
+		igb_close(dev);
+	} else {
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			(void)xchg(&adapter->rx_ring[i]->xdp_prog,
+			    adapter->xdp_prog);
+	}
+
+	if (old_prog)
+		bpf_prog_put(old_prog);
+
+	/* bpf is just replaced, RXQ and MTU are already setup */
+	if (!need_reset)
+		return 0;
+
+	if (running)
+		igb_open(dev);
+
+	return 0;
+}
+
+static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+	switch (xdp->command) {
+	case XDP_SETUP_PROG:
+		return igb_xdp_setup(dev, xdp->prog);
+	default:
+		return -EINVAL;
+	}
+}
+
+static void igb_xdp_ring_update_tail(struct igb_ring *ring)
+{
+	/* Force memory writes to complete before letting h/w know there
+	 * are new descriptors to fetch.
+	 */
+	wmb();
+	writel(ring->next_to_use, ring->tail);
+}
+
+static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
+{
+	unsigned int r_idx = smp_processor_id();
+
+	if (r_idx >= adapter->num_tx_queues)
+		r_idx = r_idx % adapter->num_tx_queues;
+
+	return adapter->tx_ring[r_idx];
+}
+
+static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
+{
+	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+	int cpu = smp_processor_id();
+	struct igb_ring *tx_ring;
+	struct netdev_queue *nq;
+	u32 ret;
+
+	if (unlikely(!xdpf))
+		return IGB_XDP_CONSUMED;
+
+	/* During program transitions its possible adapter->xdp_prog is assigned
+	 * but ring has not been configured yet. In this case simply abort xmit.
+	 */
+	tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+	if (unlikely(!tx_ring))
+		return -ENXIO;
+
+	nq = txring_txq(tx_ring);
+	__netif_tx_lock(nq, cpu);
+	ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
+	__netif_tx_unlock(nq);
+
+	return ret;
+}
+
+static int igb_xdp_xmit(struct net_device *dev, int n,
+			struct xdp_frame **frames, u32 flags)
+{
+	struct igb_adapter *adapter = netdev_priv(dev);
+	int cpu = smp_processor_id();
+	struct igb_ring *tx_ring;
+	struct netdev_queue *nq;
+	int drops = 0;
+	int i;
+
+	if (unlikely(test_bit(__IGB_DOWN, &adapter->state)))
+		return -ENETDOWN;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	/* During program transitions its possible adapter->xdp_prog is assigned
+	 * but ring has not been configured yet. In this case simply abort xmit.
+	 */
+	tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+	if (unlikely(!tx_ring))
+		return -ENXIO;
+
+	nq = txring_txq(tx_ring);
+	__netif_tx_lock(nq, cpu);
+
+	for (i = 0; i < n; i++) {
+		struct xdp_frame *xdpf = frames[i];
+		int err;
+
+		err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
+		if (err != IGB_XDP_TX) {
+			xdp_return_frame_rx_napi(xdpf);
+			drops++;
+		}
+	}
+
+	__netif_tx_unlock(nq);
+
+	if (unlikely(flags & XDP_XMIT_FLUSH))
+		igb_xdp_ring_update_tail(tx_ring);
+
+	return n - drops;
+}
+
 static const struct net_device_ops igb_netdev_ops = {
 	.ndo_open		= igb_open,
 	.ndo_stop		= igb_close,
@@ -2847,6 +2990,8 @@ static const struct net_device_ops igb_netdev_ops = {
 	.ndo_fdb_add		= igb_ndo_fdb_add,
 	.ndo_features_check	= igb_features_check,
 	.ndo_setup_tc		= igb_setup_tc,
+	.ndo_bpf		= igb_xdp,
+	.ndo_xdp_xmit		= igb_xdp_xmit,
 };
 
 /**
@@ -3387,7 +3532,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			   "Width x1" : "unknown"), netdev->dev_addr);
 	}
 
-	if ((hw->mac.type >= e1000_i210 ||
+	if ((hw->mac.type == e1000_82576 &&
+	     rd32(E1000_EECD) & E1000_EECD_PRES) ||
+	    (hw->mac.type >= e1000_i210 ||
 	     igb_get_flash_presence_i210(hw))) {
 		ret_val = igb_read_part_string(hw, part_str,
 					       E1000_PBANUM_LENGTH);
@@ -4179,6 +4326,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
  **/
 int igb_setup_rx_resources(struct igb_ring *rx_ring)
 {
+	struct igb_adapter *adapter = netdev_priv(rx_ring->netdev);
 	struct device *dev = rx_ring->dev;
 	int size;
 
@@ -4201,6 +4349,13 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
+	rx_ring->xdp_prog = adapter->xdp_prog;
+
+	/* XDP RX-queue info */
+	if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+			     rx_ring->queue_index) < 0)
+		goto err;
+
 	return 0;
 
 err:
@@ -4505,6 +4660,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
 	int reg_idx = ring->reg_idx;
 	u32 rxdctl = 0;
 
+	xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+	WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+					   MEM_TYPE_PAGE_SHARED, NULL));
+
 	/* disable the queue */
 	wr32(E1000_RXDCTL(reg_idx), 0);
 
@@ -4709,6 +4868,8 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
 {
 	igb_clean_rx_ring(rx_ring);
 
+	rx_ring->xdp_prog = NULL;
+	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
 	vfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
 
@@ -6078,6 +6239,80 @@ dma_error:
 	return -1;
 }
 
+int igb_xmit_xdp_ring(struct igb_adapter *adapter,
+		      struct igb_ring *tx_ring,
+		      struct xdp_frame *xdpf)
+{
+	union e1000_adv_tx_desc *tx_desc;
+	u32 len, cmd_type, olinfo_status;
+	struct igb_tx_buffer *tx_buffer;
+	dma_addr_t dma;
+	u16 i;
+
+	len = xdpf->len;
+
+	if (unlikely(!igb_desc_unused(tx_ring)))
+		return IGB_XDP_CONSUMED;
+
+	dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(tx_ring->dev, dma))
+		return IGB_XDP_CONSUMED;
+
+	/* record the location of the first descriptor for this packet */
+	tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+	tx_buffer->bytecount = len;
+	tx_buffer->gso_segs = 1;
+	tx_buffer->protocol = 0;
+
+	i = tx_ring->next_to_use;
+	tx_desc = IGB_TX_DESC(tx_ring, i);
+
+	dma_unmap_len_set(tx_buffer, len, len);
+	dma_unmap_addr_set(tx_buffer, dma, dma);
+	tx_buffer->type = IGB_TYPE_XDP;
+	tx_buffer->xdpf = xdpf;
+
+	tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+	/* put descriptor type bits */
+	cmd_type = E1000_ADVTXD_DTYP_DATA |
+		   E1000_ADVTXD_DCMD_DEXT |
+		   E1000_ADVTXD_DCMD_IFCS;
+	cmd_type |= len | IGB_TXD_DCMD;
+	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+
+	olinfo_status = cpu_to_le32(len << E1000_ADVTXD_PAYLEN_SHIFT);
+	/* 82575 requires a unique index per ring */
+	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+		olinfo_status |= tx_ring->reg_idx << 4;
+
+	tx_desc->read.olinfo_status = olinfo_status;
+
+	netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount);
+
+	/* set the timestamp */
+	tx_buffer->time_stamp = jiffies;
+
+	/* Avoid any potential race with xdp_xmit and cleanup */
+	smp_wmb();
+
+	/* set next_to_watch value indicating a packet is present */
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+
+	tx_buffer->next_to_watch = tx_desc;
+	tx_ring->next_to_use = i;
+
+	/* Make sure there is space in the ring for the next send. */
+	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
+		writel(i, tx_ring->tail);
+
+	return IGB_XDP_TX;
+}
+
 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 				struct igb_ring *tx_ring)
 {
@@ -6106,6 +6341,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 
 	/* record the location of the first descriptor for this packet */
 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+	first->type = IGB_TYPE_SKB;
 	first->skb = skb;
 	first->bytecount = skb->len;
 	first->gso_segs = 1;
@@ -6258,6 +6494,19 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
 
+	if (adapter->xdp_prog) {
+		int i;
+
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			struct igb_ring *ring = adapter->rx_ring[i];
+
+			if (max_frame > igb_rx_bufsz(ring)) {
+				netdev_warn(adapter->netdev, "Requested MTU size is not supported with XDP\n");
+				return -EINVAL;
+			}
+		}
+	}
+
 	/* adjust max frame to be at least the size of a standard frame */
 	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
 		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
@@ -7811,7 +8060,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
 		total_packets += tx_buffer->gso_segs;
 
 		/* free the skb */
-		napi_consume_skb(tx_buffer->skb, napi_budget);
+		if (tx_buffer->type == IGB_TYPE_SKB)
+			napi_consume_skb(tx_buffer->skb, napi_budget);
+		else
+			xdp_return_frame(tx_buffer->xdpf);
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,
@@ -7995,8 +8247,8 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
 	 * the pagecnt_bias and page count so that we fully restock the
 	 * number of references the driver holds.
 	 */
-	if (unlikely(!pagecnt_bias)) {
-		page_ref_add(page, USHRT_MAX);
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX - 1);
 		rx_buffer->pagecnt_bias = USHRT_MAX;
 	}
@@ -8035,20 +8287,21 @@ static void igb_add_rx_frag(struct igb_ring *rx_ring,
 
 static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
 					 struct igb_rx_buffer *rx_buffer,
-					 union e1000_adv_rx_desc *rx_desc,
-					 unsigned int size)
+					 struct xdp_buff *xdp,
+					 union e1000_adv_rx_desc *rx_desc)
 {
-	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
 #else
-	unsigned int truesize = SKB_DATA_ALIGN(size);
+	unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
+					       xdp->data_hard_start);
 #endif
+	unsigned int size = xdp->data_end - xdp->data;
 	unsigned int headlen;
 	struct sk_buff *skb;
 
 	/* prefetch first cache line of first page */
-	net_prefetch(va);
+	net_prefetch(xdp->data);
 
 	/* allocate a skb to store the frags */
 	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
@@ -8056,24 +8309,24 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
 		return NULL;
 
 	if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
-		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
-		va += IGB_TS_HDR_LEN;
+		igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb);
+		xdp->data += IGB_TS_HDR_LEN;
 		size -= IGB_TS_HDR_LEN;
 	}
 
 	/* Determine available headroom for copy */
 	headlen = size;
 	if (headlen > IGB_RX_HDR_LEN)
-		headlen = eth_get_headlen(skb->dev, va, IGB_RX_HDR_LEN);
+		headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN);
 
 	/* align pull length to size of long to optimize memcpy performance */
-	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long)));
 
 	/* update all of the pointers */
 	size -= headlen;
 	if (size) {
 		skb_add_rx_frag(skb, 0, rx_buffer->page,
-				(va + headlen) - page_address(rx_buffer->page),
+				(xdp->data + headlen) - page_address(rx_buffer->page),
 				size, truesize);
 #if (PAGE_SIZE < 8192)
 		rx_buffer->page_offset ^= truesize;
@@ -8089,29 +8342,29 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
 
 static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
 				     struct igb_rx_buffer *rx_buffer,
-				     union e1000_adv_rx_desc *rx_desc,
-				     unsigned int size)
+				     struct xdp_buff *xdp,
+				     union e1000_adv_rx_desc *rx_desc)
 {
-	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
 #else
 	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-				SKB_DATA_ALIGN(IGB_SKB_PAD + size);
+				SKB_DATA_ALIGN(xdp->data_end -
+					       xdp->data_hard_start);
 #endif
 	struct sk_buff *skb;
 
 	/* prefetch first cache line of first page */
-	net_prefetch(va);
+	net_prefetch(xdp->data_meta);
 
 	/* build an skb around the page buffer */
-	skb = build_skb(va - IGB_SKB_PAD, truesize);
+	skb = build_skb(xdp->data_hard_start, truesize);
 	if (unlikely(!skb))
 		return NULL;
 
 	/* update pointers within the skb to store the data */
-	skb_reserve(skb, IGB_SKB_PAD);
-	__skb_put(skb, size);
+	skb_reserve(skb, xdp->data - xdp->data_hard_start);
+	__skb_put(skb, xdp->data_end - xdp->data);
 
 	/* pull timestamp out of packet data */
 	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
@@ -8129,6 +8382,79 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
 	return skb;
 }
 
+static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
+				   struct igb_ring *rx_ring,
+				   struct xdp_buff *xdp)
+{
+	int err, result = IGB_XDP_PASS;
+	struct bpf_prog *xdp_prog;
+	u32 act;
+
+	rcu_read_lock();
+	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
+	if (!xdp_prog)
+		goto xdp_out;
+
+	prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+	act = bpf_prog_run_xdp(xdp_prog, xdp);
+	switch (act) {
+	case XDP_PASS:
+		break;
+	case XDP_TX:
+		result = igb_xdp_xmit_back(adapter, xdp);
+		break;
+	case XDP_REDIRECT:
+		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
+		if (!err)
+			result = IGB_XDP_REDIR;
+		else
+			result = IGB_XDP_CONSUMED;
+		break;
+	default:
+		bpf_warn_invalid_xdp_action(act);
+		fallthrough;
+	case XDP_ABORTED:
+		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+		fallthrough;
+	case XDP_DROP:
+		result = IGB_XDP_CONSUMED;
+		break;
+	}
+xdp_out:
+	rcu_read_unlock();
+	return ERR_PTR(-result);
+}
+
+static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring,
+					  unsigned int size)
+{
+	unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+	truesize = igb_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+	truesize = ring_uses_build_skb(rx_ring) ?
+		SKB_DATA_ALIGN(IGB_SKB_PAD + size) +
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+		SKB_DATA_ALIGN(size);
+#endif
+	return truesize;
+}
+
+static void igb_rx_buffer_flip(struct igb_ring *rx_ring,
+			       struct igb_rx_buffer *rx_buffer,
+			       unsigned int size)
+{
+	unsigned int truesize = igb_rx_frame_truesize(rx_ring, size);
+#if (PAGE_SIZE < 8192)
+	rx_buffer->page_offset ^= truesize;
+#else
+	rx_buffer->page_offset += truesize;
+#endif
+}
+
 static inline void igb_rx_checksum(struct igb_ring *ring,
 				   union e1000_adv_rx_desc *rx_desc,
 				   struct sk_buff *skb)
@@ -8224,6 +8550,10 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
 				union e1000_adv_rx_desc *rx_desc,
 				struct sk_buff *skb)
 {
+	/* XDP packets use error pointer so abort at this point */
+	if (IS_ERR(skb))
+		return true;
+
 	if (unlikely((igb_test_staterr(rx_desc,
 				       E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
 		struct net_device *netdev = rx_ring->netdev;
@@ -8282,6 +8612,11 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 }
 
+static unsigned int igb_rx_offset(struct igb_ring *rx_ring)
+{
+	return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
+}
+
 static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
 					       const unsigned int size)
 {
@@ -8325,10 +8660,20 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
 
 static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 {
+	struct igb_adapter *adapter = q_vector->adapter;
 	struct igb_ring *rx_ring = q_vector->rx.ring;
 	struct sk_buff *skb = rx_ring->skb;
 	unsigned int total_bytes = 0, total_packets = 0;
 	u16 cleaned_count = igb_desc_unused(rx_ring);
+	unsigned int xdp_xmit = 0;
+	struct xdp_buff xdp;
+
+	xdp.rxq = &rx_ring->xdp_rxq;
+
+	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+	xdp.frame_sz = igb_rx_frame_truesize(rx_ring, 0);
+#endif
 
 	while (likely(total_packets < budget)) {
 		union e1000_adv_rx_desc *rx_desc;
@@ -8355,13 +8700,38 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 		rx_buffer = igb_get_rx_buffer(rx_ring, size);
 
 		/* retrieve a buffer from the ring */
-		if (skb)
+		if (!skb) {
+			xdp.data = page_address(rx_buffer->page) +
+				   rx_buffer->page_offset;
+			xdp.data_meta = xdp.data;
+			xdp.data_hard_start = xdp.data -
+					      igb_rx_offset(rx_ring);
+			xdp.data_end = xdp.data + size;
+#if (PAGE_SIZE > 4096)
+			/* At larger PAGE_SIZE, frame_sz depend on len size */
+			xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
+#endif
+			skb = igb_run_xdp(adapter, rx_ring, &xdp);
+		}
+
+		if (IS_ERR(skb)) {
+			unsigned int xdp_res = -PTR_ERR(skb);
+
+			if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {
+				xdp_xmit |= xdp_res;
+				igb_rx_buffer_flip(rx_ring, rx_buffer, size);
+			} else {
+				rx_buffer->pagecnt_bias++;
+			}
+			total_packets++;
+			total_bytes += size;
+		} else if (skb)
 			igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
 		else if (ring_uses_build_skb(rx_ring))
-			skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
+			skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc);
 		else
 			skb = igb_construct_skb(rx_ring, rx_buffer,
-						rx_desc, size);
+						&xdp, rx_desc);
 
 		/* exit if we failed to retrieve a buffer */
 		if (!skb) {
@@ -8401,6 +8771,15 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 	/* place incomplete frames back on ring for completion */
 	rx_ring->skb = skb;
 
+	if (xdp_xmit & IGB_XDP_REDIR)
+		xdp_do_flush_map();
+
+	if (xdp_xmit & IGB_XDP_TX) {
+		struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+
+		igb_xdp_ring_update_tail(tx_ring);
+	}
+
 	u64_stats_update_begin(&rx_ring->rx_syncp);
 	rx_ring->rx_stats.packets += total_packets;
 	rx_ring->rx_stats.bytes += total_bytes;
@@ -8414,11 +8793,6 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 	return total_packets;
 }
 
-static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
-{
-	return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
-}
-
 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 				  struct igb_rx_buffer *bi)
 {
@@ -8455,7 +8829,8 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 	bi->dma = dma;
 	bi->page = page;
 	bi->page_offset = igb_rx_offset(rx_ring);
-	bi->pagecnt_bias = 1;
+	page_ref_add(page, USHRT_MAX - 1);
+	bi->pagecnt_bias = USHRT_MAX;
 
 	return true;
 }
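With ndo_bpf and ndo_xdp_xmit wired up above, the new igb paths can be exercised from userspace. A minimal sketch (not part of the patch; it assumes a clang -target bpf toolchain and libbpf's conventional "xdp" section naming):

// xdp_pass.c - trivial XDP program to exercise igb_run_xdp()
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_pass(struct xdp_md *ctx)
{
	return XDP_PASS;	/* hand every frame on to the normal skb path */
}

char _license[] SEC("license") = "GPL";

Attached with something like `ip link set dev <iface> xdp obj xdp_pass.o sec xdp`, every received frame then flows through igb_run_xdp(); returning XDP_TX instead would loop frames back out via igb_xdp_xmit_back(). Note that igb_xdp_setup() above rejects attachment with -EINVAL if the MTU-derived frame size exceeds igb_rx_bufsz() for any RX ring.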
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 2d566f3c827b..35baae900c1f 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -215,6 +215,8 @@ struct igc_adapter {
 	spinlock_t tmreg_lock;
 	struct cyclecounter cc;
 	struct timecounter tc;
+	struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */
+	ktime_t ptp_reset_start; /* Reset time in clock mono */
 };
 
 void igc_up(struct igc_adapter *adapter);
@@ -548,6 +550,7 @@ void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
 int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
 void igc_ptp_tx_hang(struct igc_adapter *adapter);
+void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
 
 #define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index cc5a6cf531c7..fd37d2c203af 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -215,6 +215,11 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
 	case IGC_DEV_ID_I225_K2:
 	case IGC_DEV_ID_I225_LMVP:
 	case IGC_DEV_ID_I225_IT:
+	case IGC_DEV_ID_I226_LM:
+	case IGC_DEV_ID_I226_V:
+	case IGC_DEV_ID_I226_IT:
+	case IGC_DEV_ID_I221_V:
+	case IGC_DEV_ID_I226_BLANK_NVM:
 	case IGC_DEV_ID_I225_BLANK_NVM:
 		mac->type = igc_i225;
 		break;
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index f1f464967f87..32f5fd684139 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -324,22 +324,10 @@
 /* Advanced Receive Descriptor bit definitions */
 #define IGC_RXDADV_STAT_TSIP	0x08000 /* timestamp in packet */
 
-#define IGC_RXDEXT_STATERR_CE		0x01000000
-#define IGC_RXDEXT_STATERR_SE		0x02000000
-#define IGC_RXDEXT_STATERR_SEQ		0x04000000
-#define IGC_RXDEXT_STATERR_CXE		0x10000000
-#define IGC_RXDEXT_STATERR_TCPE		0x20000000
+#define IGC_RXDEXT_STATERR_L4E		0x20000000
 #define IGC_RXDEXT_STATERR_IPE		0x40000000
 #define IGC_RXDEXT_STATERR_RXE		0x80000000
 
-/* Same mask, but for extended and packet split descriptors */
-#define IGC_RXDEXT_ERR_FRAME_ERR_MASK ( \
-	IGC_RXDEXT_STATERR_CE  |	\
-	IGC_RXDEXT_STATERR_SE  |	\
-	IGC_RXDEXT_STATERR_SEQ |	\
-	IGC_RXDEXT_STATERR_CXE |	\
-	IGC_RXDEXT_STATERR_RXE)
-
 #define IGC_MRQC_RSS_FIELD_IPV4_TCP	0x00010000
 #define IGC_MRQC_RSS_FIELD_IPV4	0x00020000
 #define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX	0x00040000
@@ -409,7 +397,7 @@
 #define IGC_IMIREXT_SIZE_BP	0x00001000 /* Packet size bypass */
 
 /* Time Sync Transmit Control bit definitions */
-#define IGC_TSYNCTXCTL_VALID			0x00000001 /* Tx timestamp valid */
+#define IGC_TSYNCTXCTL_TXTT_0			0x00000001 /* Tx timestamp reg 0 valid */
 #define IGC_TSYNCTXCTL_ENABLED			0x00000010 /* enable Tx timestamping */
 #define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK	0x0000F000 /* max delay */
 #define IGC_TSYNCTXCTL_SYNC_COMP_ERR		0x20000000 /* sync err */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 44410c2265d6..61d331ce38cd 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -321,6 +321,9 @@ static void igc_ethtool_get_regs(struct net_device *netdev,
 
 	for (i = 0; i < 8; i++)
 		regs_buff[205 + i] = rd32(IGC_ETQF(i));
+
+	regs_buff[213] = adapter->stats.tlpic;
+	regs_buff[214] = adapter->stats.rlpic;
 }
 
 static void igc_ethtool_get_wol(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index b9fe51b91c47..55dae7c4703f 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -24,6 +24,11 @@
 #define IGC_DEV_ID_I225_K2			0x3101
 #define IGC_DEV_ID_I225_LMVP			0x5502
 #define IGC_DEV_ID_I225_IT			0x0D9F
+#define IGC_DEV_ID_I226_LM			0x125B
+#define IGC_DEV_ID_I226_V			0x125C
+#define IGC_DEV_ID_I226_IT			0x125D
+#define IGC_DEV_ID_I221_V			0x125E
+#define IGC_DEV_ID_I226_BLANK_NVM		0x125F
 #define IGC_DEV_ID_I225_BLANK_NVM		0x15FD
 
 /* Function pointers for the MAC. */
@@ -125,9 +130,6 @@ struct igc_nvm_info {
 	struct igc_nvm_operations ops;
 	enum igc_nvm_type type;
 
-	u32 flash_bank_size;
-	u32 flash_base_addr;
-
 	u16 word_size;
 	u16 delay_usec;
 	u16 address_bits;
@@ -153,7 +155,6 @@ struct igc_phy_info {
 	u8 mdix;
 
 	bool is_mdix;
-	bool reset_disable;
 	bool speed_downgraded;
 	bool autoneg_wait_to_complete;
 };
@@ -239,6 +240,8 @@ struct igc_hw_stats {
 	u64 prc511;
 	u64 prc1023;
 	u64 prc1522;
+	u64 tlpic;
+	u64 rlpic;
 	u64 gprc;
 	u64 bprc;
 	u64 mprc;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 3183150c7995..569747bbefd8 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -47,6 +47,11 @@ static const struct pci_device_id igc_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
+	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
+	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
+	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
+	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
+	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
 	/* required last entry */
 	{0, }
@@ -1428,7 +1433,7 @@ static void igc_rx_checksum(struct igc_ring *ring,
 
 	/* TCP/UDP checksum error bit is set */
 	if (igc_test_staterr(rx_desc,
-			     IGC_RXDEXT_STATERR_TCPE |
+			     IGC_RXDEXT_STATERR_L4E |
 			     IGC_RXDEXT_STATERR_IPE)) {
 		/* work around errata with sctp packets where the TCPE aka
 		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
@@ -1737,8 +1742,7 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,
 				union igc_adv_rx_desc *rx_desc,
 				struct sk_buff *skb)
 {
-	if (unlikely((igc_test_staterr(rx_desc,
-				       IGC_RXDEXT_ERR_FRAME_ERR_MASK)))) {
+	if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
 		struct net_device *netdev = rx_ring->netdev;
 
 		if (!(netdev->features & NETIF_F_RXALL)) {
@@ -3679,6 +3683,8 @@ void igc_update_stats(struct igc_adapter *adapter)
 	adapter->stats.prc511 += rd32(IGC_PRC511);
 	adapter->stats.prc1023 += rd32(IGC_PRC1023);
 	adapter->stats.prc1522 += rd32(IGC_PRC1522);
+	adapter->stats.tlpic += rd32(IGC_TLPIC);
+	adapter->stats.rlpic += rd32(IGC_RLPIC);
 
 	mpc = rd32(IGC_MPC);
 	adapter->stats.mpc += mpc;
@@ -3772,6 +3778,8 @@ void igc_down(struct igc_adapter *adapter)
 
 	set_bit(__IGC_DOWN, &adapter->state);
 
+	igc_ptp_suspend(adapter);
+
 	/* disable receives in the hardware */
 	rctl = rd32(IGC_RCTL);
 	wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
@@ -4694,14 +4702,35 @@ static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
 	return 0;
 }
 
-static bool validate_schedule(const struct tc_taprio_qopt_offload *qopt)
+static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
+{
+	struct timespec64 b;
+
+	b = ktime_to_timespec64(base_time);
+
+	return timespec64_compare(now, &b) > 0;
+}
+
+static bool validate_schedule(struct igc_adapter *adapter,
+			      const struct tc_taprio_qopt_offload *qopt)
 {
 	int queue_uses[IGC_MAX_TX_QUEUES] = { };
+	struct timespec64 now;
 	size_t n;
 
 	if (qopt->cycle_time_extension)
 		return false;
 
+	igc_ptp_read(adapter, &now);
+
+	/* If we program the controller's BASET registers with a time
+	 * in the future, it will hold all the packets until that
+	 * time, causing a lot of TX Hangs, so to avoid that, we
+	 * reject schedules that would start in the future.
+	 */
+	if (!is_base_time_past(qopt->base_time, &now))
+		return false;
+
 	for (n = 0; n < qopt->num_entries; n++) {
 		const struct tc_taprio_sched_entry *e;
 		int i;
@@ -4756,7 +4785,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 	if (adapter->base_time)
 		return -EALREADY;
 
-	if (!validate_schedule(qopt))
+	if (!validate_schedule(adapter, qopt))
 		return -EINVAL;
 
 	adapter->cycle_time = qopt->cycle_time;
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index e4b8f312f97c..ac0b9c85da7c 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -8,6 +8,7 @@
 #include <linux/pci.h>
 #include <linux/ptp_classify.h>
 #include <linux/clocksource.h>
+#include <linux/ktime.h>
 
 #define INCVALUE_MASK		0x7fffffff
 #define ISGN			0x80000000
@@ -16,17 +17,12 @@
 #define IGC_PTP_TX_TIMEOUT		(HZ * 15)
 
 /* SYSTIM read access for I225 */
-static void igc_ptp_read_i225(struct igc_adapter *adapter,
-			      struct timespec64 *ts)
+void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts)
 {
 	struct igc_hw *hw = &adapter->hw;
 	u32 sec, nsec;
 
-	/* The timestamp latches on lowest register read. For I210/I211, the
-	 * lowest register is SYSTIMR. Since we only need to provide nanosecond
-	 * resolution, we can ignore it.
-	 */
-	rd32(IGC_SYSTIMR);
+	/* The timestamp is latched when SYSTIML is read. */
 	nsec = rd32(IGC_SYSTIML);
 	sec = rd32(IGC_SYSTIMH);
 
@@ -39,9 +35,6 @@ static void igc_ptp_write_i225(struct igc_adapter *adapter,
 {
 	struct igc_hw *hw = &adapter->hw;
 
-	/* Writing the SYSTIMR register is not necessary as it only
-	 * provides sub-nanosecond resolution.
-	 */
 	wr32(IGC_SYSTIML, ts->tv_nsec);
 	wr32(IGC_SYSTIMH, ts->tv_sec);
 }
@@ -81,7 +74,7 @@ static int igc_ptp_adjtime_i225(struct ptp_clock_info *ptp, s64 delta)
 
 	spin_lock_irqsave(&igc->tmreg_lock, flags);
 
-	igc_ptp_read_i225(igc, &now);
+	igc_ptp_read(igc, &now);
 	now = timespec64_add(now, then);
 	igc_ptp_write_i225(igc, (const struct timespec64 *)&now);
 
@@ -102,10 +95,9 @@ static int igc_ptp_gettimex64_i225(struct ptp_clock_info *ptp,
 	spin_lock_irqsave(&igc->tmreg_lock, flags);
 
 	ptp_read_system_prets(sts);
-	rd32(IGC_SYSTIMR);
-	ptp_read_system_postts(sts);
 	ts->tv_nsec = rd32(IGC_SYSTIML);
 	ts->tv_sec = rd32(IGC_SYSTIMH);
+	ptp_read_system_postts(sts);
 
 	spin_unlock_irqrestore(&igc->tmreg_lock, flags);
 
@@ -422,18 +414,11 @@ static void igc_ptp_tx_work(struct work_struct *work)
 	if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state))
 		return;
 
-	if (time_is_before_jiffies(adapter->ptp_tx_start +
-				   IGC_PTP_TX_TIMEOUT)) {
-		igc_ptp_tx_timeout(adapter);
+	tsynctxctl = rd32(IGC_TSYNCTXCTL);
+	if (WARN_ON_ONCE(!(tsynctxctl & IGC_TSYNCTXCTL_TXTT_0)))
 		return;
-	}
 
-	tsynctxctl = rd32(IGC_TSYNCTXCTL);
-	if (tsynctxctl & IGC_TSYNCTXCTL_VALID)
-		igc_ptp_tx_hwtstamp(adapter);
-	else
-		/* reschedule to check later */
-		schedule_work(&adapter->ptp_tx_work);
+	igc_ptp_tx_hwtstamp(adapter);
 }
 
 /**
@@ -515,6 +500,9 @@ void igc_ptp_init(struct igc_adapter *adapter)
 	adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
 	adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
 
+	adapter->prev_ptp_time = ktime_to_timespec64(ktime_get_real());
+	adapter->ptp_reset_start = ktime_get();
+
 	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
 						&adapter->pdev->dev);
 	if (IS_ERR(adapter->ptp_clock)) {
@@ -526,6 +514,24 @@ void igc_ptp_init(struct igc_adapter *adapter)
 	}
 }
 
+static void igc_ptp_time_save(struct igc_adapter *adapter)
+{
+	igc_ptp_read(adapter, &adapter->prev_ptp_time);
+	adapter->ptp_reset_start = ktime_get();
+}
+
+static void igc_ptp_time_restore(struct igc_adapter *adapter)
+{
+	struct timespec64 ts = adapter->prev_ptp_time;
+	ktime_t delta;
+
+	delta = ktime_sub(ktime_get(), adapter->ptp_reset_start);
+
+	timespec64_add_ns(&ts, ktime_to_ns(delta));
+
+	igc_ptp_write_i225(adapter, &ts);
+}
+
 /**
  * igc_ptp_suspend - Disable PTP work items and prepare for suspend
  * @adapter: Board private structure
@@ -542,6 +548,8 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
 	dev_kfree_skb_any(adapter->ptp_tx_skb);
 	adapter->ptp_tx_skb = NULL;
 	clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+
+	igc_ptp_time_save(adapter);
 }
 
 /**
@@ -591,9 +599,7 @@ void igc_ptp_reset(struct igc_adapter *adapter)
 
 	/* Re-initialize the timer. */
 	if (hw->mac.type == igc_i225) {
-		struct timespec64 ts64 = ktime_to_timespec64(ktime_get_real());
-
-		igc_ptp_write_i225(adapter, &ts64);
+		igc_ptp_time_restore(adapter);
 	} else {
 		timecounter_init(&adapter->tc, &adapter->cc,
 				 ktime_to_ns(ktime_get_real()));
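The save/restore scheme that igc_ptp_time_save()/igc_ptp_time_restore() introduce above — latch the PTP clock together with a monotonic anchor, then replay the monotonic delta on top of the saved value instead of re-seeding from wall time — can be illustrated with a userspace analogue. A sketch only, not from the patch; CLOCK_MONOTONIC stands in for ktime_get(), and the struct timespec globals stand in for the new adapter fields:

/* ptp_restore_demo.c - userspace analogue of the igc PTP save/restore */
#include <stdio.h>
#include <time.h>

static struct timespec saved_ptp;	/* plays the role of adapter->prev_ptp_time */
static struct timespec reset_start;	/* plays the role of adapter->ptp_reset_start */

static void ptp_time_save(const struct timespec *ptp_now)
{
	saved_ptp = *ptp_now;
	clock_gettime(CLOCK_MONOTONIC, &reset_start);	/* ktime_get() analogue */
}

static void ptp_time_restore(struct timespec *ptp_out)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);

	/* saved PTP time + time spent in "reset" (monotonic delta) */
	ptp_out->tv_sec = saved_ptp.tv_sec + (now.tv_sec - reset_start.tv_sec);
	ptp_out->tv_nsec = saved_ptp.tv_nsec + (now.tv_nsec - reset_start.tv_nsec);
	if (ptp_out->tv_nsec >= 1000000000L) {
		ptp_out->tv_sec++;
		ptp_out->tv_nsec -= 1000000000L;
	} else if (ptp_out->tv_nsec < 0) {
		ptp_out->tv_sec--;
		ptp_out->tv_nsec += 1000000000L;
	}
}

int main(void)
{
	struct timespec ptp = { .tv_sec = 1000, .tv_nsec = 0 };	/* fake PTP clock */
	struct timespec restored;

	ptp_time_save(&ptp);
	/* ... device reset would happen here ... */
	ptp_time_restore(&restored);
	printf("restored: %lld.%09ld\n",
	       (long long)restored.tv_sec, restored.tv_nsec);
	return 0;
}

The point of the pattern, as the patch applies it from igc_ptp_suspend()/igc_ptp_reset(), is that the PTP clock resumes from where it left off plus the true elapsed time, rather than jumping to the (possibly user-adjusted) system wall clock on every reset.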