Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r-- | drivers/net/ixgbe/ixgbe.h | 122
-rw-r--r-- | drivers/net/ixgbe/ixgbe_82598.c | 115
-rw-r--r-- | drivers/net/ixgbe/ixgbe_82599.c | 1132
-rw-r--r-- | drivers/net/ixgbe/ixgbe_common.c | 272
-rw-r--r-- | drivers/net/ixgbe/ixgbe_common.h | 8
-rw-r--r-- | drivers/net/ixgbe/ixgbe_dcb_82599.c | 2
-rw-r--r-- | drivers/net/ixgbe/ixgbe_ethtool.c | 907
-rw-r--r-- | drivers/net/ixgbe/ixgbe_fcoe.c | 6
-rw-r--r-- | drivers/net/ixgbe/ixgbe_fcoe.h | 1
-rw-r--r-- | drivers/net/ixgbe/ixgbe_main.c | 686
-rw-r--r-- | drivers/net/ixgbe/ixgbe_phy.c | 1
-rw-r--r-- | drivers/net/ixgbe/ixgbe_type.h | 168
12 files changed, 2928 insertions, 492 deletions
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 05a24055ac2f..cd22323cfd22 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -121,17 +121,18 @@ struct ixgbe_queue_stats { struct ixgbe_ring { void *desc; /* descriptor ring memory */ - dma_addr_t dma; /* phys. address of descriptor ring */ - unsigned int size; /* length in bytes */ - unsigned int count; /* amount of descriptors */ - unsigned int next_to_use; - unsigned int next_to_clean; - - int queue_index; /* needed for multiqueue queue management */ union { struct ixgbe_tx_buffer *tx_buffer_info; struct ixgbe_rx_buffer *rx_buffer_info; }; + u8 atr_sample_rate; + u8 atr_count; + u16 count; /* amount of descriptors */ + u16 rx_buf_len; + u16 next_to_use; + u16 next_to_clean; + + u8 queue_index; /* needed for multiqueue queue management */ u16 head; u16 tail; @@ -139,23 +140,24 @@ struct ixgbe_ring { unsigned int total_bytes; unsigned int total_packets; - u16 reg_idx; /* holds the special value that gets the hardware register - * offset associated with this ring, which is different - * for DCB and RSS modes */ - #ifdef CONFIG_IXGBE_DCA /* cpu for tx queue */ int cpu; #endif - struct ixgbe_queue_stats stats; - u64 v_idx; /* maps directly to the index for this ring in the hardware - * vector array, can also be used for finding the bit in EICR - * and friends that represents the vector for this ring */ + u16 work_limit; /* max work per interrupt */ + u16 reg_idx; /* holds the special value that gets + * the hardware register offset + * associated with this ring, which is + * different for DCB and RSS modes + */ - u16 work_limit; /* max work per interrupt */ - u16 rx_buf_len; - u64 rsc_count; /* stat for coalesced packets */ + struct ixgbe_queue_stats stats; + unsigned long reinit_state; + u64 rsc_count; /* stat for coalesced packets */ + + unsigned int size; /* length in bytes */ + dma_addr_t dma; /* phys. address of descriptor ring */ }; enum ixgbe_ring_f_enum { @@ -163,6 +165,7 @@ enum ixgbe_ring_f_enum { RING_F_DCB, RING_F_VMDQ, RING_F_RSS, + RING_F_FDIR, #ifdef IXGBE_FCOE RING_F_FCOE, #endif /* IXGBE_FCOE */ @@ -173,6 +176,7 @@ enum ixgbe_ring_f_enum { #define IXGBE_MAX_DCB_INDICES 8 #define IXGBE_MAX_RSS_INDICES 16 #define IXGBE_MAX_VMDQ_INDICES 16 +#define IXGBE_MAX_FDIR_INDICES 64 #ifdef IXGBE_FCOE #define IXGBE_MAX_FCOE_INDICES 8 #endif /* IXGBE_FCOE */ @@ -193,6 +197,9 @@ struct ixgbe_ring_feature { */ struct ixgbe_q_vector { struct ixgbe_adapter *adapter; + unsigned int v_idx; /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring */ struct napi_struct napi; DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */ DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */ @@ -201,7 +208,6 @@ struct ixgbe_q_vector { u8 tx_itr; u8 rx_itr; u32 eitr; - u32 v_idx; /* vector index in list */ }; /* Helper macros to switch between ints/sec and what the register uses. 
@@ -223,6 +229,10 @@ struct ixgbe_q_vector { #define IXGBE_TX_CTXTDESC_ADV(R, i) \ (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i])) +#define IXGBE_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define IXGBE_TX_DESC(R, i) IXGBE_GET_DESC(R, i, ixgbe_legacy_tx_desc) +#define IXGBE_RX_DESC(R, i) IXGBE_GET_DESC(R, i, ixgbe_legacy_rx_desc) + #define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 #ifdef IXGBE_FCOE /* Use 3K as the baby jumbo frame size for FCoE */ @@ -315,10 +325,13 @@ struct ixgbe_adapter { #define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23) #define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 24) #define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25) -#define IXGBE_FLAG_RSC_CAPABLE (u32)(1 << 26) -#define IXGBE_FLAG_RSC_ENABLED (u32)(1 << 27) +#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 26) +#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27) #define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29) + u32 flags2; +#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1) +#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) /* default to trying for four seconds */ #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) @@ -327,6 +340,10 @@ struct ixgbe_adapter { struct pci_dev *pdev; struct net_device_stats net_stats; + u32 test_icr; + struct ixgbe_ring test_tx_ring; + struct ixgbe_ring test_rx_ring; + /* structs defined in ixgbe_hw.h */ struct ixgbe_hw hw; u16 msg_enable; @@ -349,6 +366,10 @@ struct ixgbe_adapter { struct timer_list sfp_timer; struct work_struct multispeed_fiber_task; struct work_struct sfp_config_module_task; + u32 fdir_pballoc; + u32 atr_sample_rate; + spinlock_t fdir_perfect_lock; + struct work_struct fdir_reinit_task; #ifdef IXGBE_FCOE struct ixgbe_fcoe fcoe; #endif /* IXGBE_FCOE */ @@ -361,6 +382,7 @@ enum ixbge_state_t { __IXGBE_TESTING, __IXGBE_RESETTING, __IXGBE_DOWN, + __IXGBE_FDIR_INIT_DONE, __IXGBE_SFP_MODULE_NOT_FOUND }; @@ -393,7 +415,63 @@ extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *) extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); -extern void ixgbe_write_eitr(struct ixgbe_adapter *, int, u32); +extern void ixgbe_write_eitr(struct ixgbe_q_vector *); +extern int ethtool_ioctl(struct ifreq *ifr); +extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); +extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc); +extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc); +extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + struct ixgbe_atr_input *input, + u8 queue); +extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, + struct ixgbe_atr_input *input, + u16 soft_id, + u8 queue); +extern u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *input, u32 key); +extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, + u16 vlan_id); +extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, + u32 src_addr); +extern s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, + u32 dst_addr); +extern s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input, + u32 src_addr_1, u32 src_addr_2, + u32 src_addr_3, u32 src_addr_4); +extern s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input, + u32 dst_addr_1, u32 dst_addr_2, + u32 dst_addr_3, u32 dst_addr_4); +extern s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, + u16 src_port); +extern s32 ixgbe_atr_set_dst_port_82599(struct 
ixgbe_atr_input *input, + u16 dst_port); +extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, + u16 flex_byte); +extern s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, + u8 vm_pool); +extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, + u8 l4type); +extern s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, + u16 *vlan_id); +extern s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, + u32 *src_addr); +extern s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, + u32 *dst_addr); +extern s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input, + u32 *src_addr_1, u32 *src_addr_2, + u32 *src_addr_3, u32 *src_addr_4); +extern s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input, + u32 *dst_addr_1, u32 *dst_addr_2, + u32 *dst_addr_3, u32 *dst_addr_4); +extern s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, + u16 *src_port); +extern s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, + u16 *dst_port); +extern s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, + u16 *flex_byte); +extern s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, + u8 *vm_pool); +extern s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, + u8 *l4type); #ifdef IXGBE_FCOE extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); extern int ixgbe_fso(struct ixgbe_adapter *adapter, diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index 88e8350aa786..b9923047ce11 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -293,6 +293,17 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) u32 rmcs_reg; u32 reg; +#ifdef CONFIG_DCB + if (hw->fc.requested_mode == ixgbe_fc_pfc) + goto out; + +#endif /* CONFIG_DCB */ + /* Negotiate the fc mode to use */ + ret_val = ixgbe_fc_autoneg(hw); + if (ret_val) + goto out; + + /* Disable any previous flow control settings */ fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); @@ -304,14 +315,20 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause frames, * but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames but + * 2: Tx flow control is enabled (we can send pause frames but * we do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. * other: Invalid. +#ifdef CONFIG_DCB + * 4: Priority Flow Control is enabled. +#endif */ switch (hw->fc.current_mode) { case ixgbe_fc_none: - /* Flow control completely disabled by software override. */ + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ break; case ixgbe_fc_rx_pause: /* @@ -336,6 +353,11 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) fctrl_reg |= IXGBE_FCTRL_RFCE; rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; break; +#ifdef CONFIG_DCB + case ixgbe_fc_pfc: + goto out; + break; +#endif /* CONFIG_DCB */ default: hw_dbg(hw, "Flow control param set incorrectly\n"); ret_val = -IXGBE_ERR_CONFIG; @@ -343,7 +365,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) break; } - /* Enable 802.3x based flow control settings. */ + /* Set 802.3x based flow control settings. 
*/ fctrl_reg |= IXGBE_FCTRL_DPF; IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); @@ -377,79 +399,6 @@ out: } /** - * ixgbe_setup_fc_82598 - Configure flow control settings - * @hw: pointer to hardware structure - * @packetbuf_num: packet buffer number (0-7) - * - * Configures the flow control settings based on SW configuration. This - * function is used for 802.3x flow control configuration only. - **/ -static s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num) -{ - s32 ret_val = 0; - ixgbe_link_speed speed; - bool link_up; - - /* Validate the packetbuf configuration */ - if (packetbuf_num < 0 || packetbuf_num > 7) { - hw_dbg(hw, "Invalid packet buffer number [%d], expected range is" - " 0-7\n", packetbuf_num); - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; - goto out; - } - - /* - * Validate the water mark configuration. Zero water marks are invalid - * because it causes the controller to just blast out fc packets. - */ - if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { - if (hw->fc.requested_mode != ixgbe_fc_none) { - hw_dbg(hw, "Invalid water mark configuration\n"); - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; - goto out; - } - } - - /* - * Validate the requested mode. Strict IEEE mode does not allow - * ixgbe_fc_rx_pause because it will cause testing anomalies. - */ - if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { - hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; - goto out; - } - - /* - * 10gig parts do not have a word in the EEPROM to determine the - * default flow control setting, so we explicitly set it to full. - */ - if (hw->fc.requested_mode == ixgbe_fc_default) - hw->fc.requested_mode = ixgbe_fc_full; - - /* - * Save off the requested flow control mode for use later. Depending - * on the link partner's capabilities, we may or may not use this mode. - */ - - hw->fc.current_mode = hw->fc.requested_mode; - - /* Decide whether to use autoneg or not. */ - hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (!hw->fc.disable_fc_autoneg && hw->phy.multispeed_fiber && - (speed == IXGBE_LINK_SPEED_1GB_FULL)) - ret_val = ixgbe_fc_autoneg(hw); - - if (ret_val) - goto out; - - ret_val = ixgbe_fc_enable_82598(hw, packetbuf_num); - -out: - return ret_val; -} - -/** * ixgbe_setup_mac_link_82598 - Configures MAC link settings * @hw: pointer to hardware structure * @@ -488,13 +437,6 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw) } } - /* - * We want to save off the original Flow Control configuration just in - * case we get disconnected and then reconnected into a different hub - * or switch with different Flow Control capabilities. 
- */ - ixgbe_setup_fc_82598(hw, 0); - /* Add delay to filter out noises during initial link setup */ msleep(50); @@ -581,6 +523,11 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, else *speed = IXGBE_LINK_SPEED_1GB_FULL; + /* if link is down, zero out the current_mode */ + if (*link_up == false) { + hw->fc.current_mode = ixgbe_fc_none; + hw->fc.fc_was_autonegged = false; + } out: return 0; } @@ -1168,7 +1115,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = { .disable_mc = &ixgbe_disable_mc_generic, .clear_vfta = &ixgbe_clear_vfta_82598, .set_vfta = &ixgbe_set_vfta_82598, - .setup_fc = &ixgbe_setup_fc_82598, + .fc_enable = &ixgbe_fc_enable_82598, }; static struct ixgbe_eeprom_operations eeprom_ops_82598 = { diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index 5d2783081a94..1984cab7d48b 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -71,10 +71,10 @@ s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw); s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw); s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); -s32 ixgbe_start_hw_rev_0_82599(struct ixgbe_hw *hw); s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw); s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw); u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw); +static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) { @@ -122,10 +122,9 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) IXGBE_WRITE_FLUSH(hw); hw->eeprom.ops.read(hw, ++data_offset, &data_value); } - /* Now restart DSP */ - IXGBE_WRITE_REG(hw, IXGBE_CORECTL, 0x00000102); - IXGBE_WRITE_REG(hw, IXGBE_CORECTL, 0x00000b1d); - IXGBE_WRITE_FLUSH(hw); + /* Now restart DSP by setting Restart_AN */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, + (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART)); /* Release the semaphore */ ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); @@ -414,9 +413,6 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw) } } - /* Set up flow control */ - status = ixgbe_setup_fc_generic(hw, 0); - /* Add delay to filter out noises during initial link setup */ msleep(50); @@ -462,11 +458,31 @@ s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw, u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); bool link_up = false; bool negotiation; + int i; /* Mask off requested but non-supported speeds */ hw->mac.ops.get_link_capabilities(hw, &phy_link_speed, &negotiation); speed &= phy_link_speed; + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + /* + * When the driver changes the link speeds that it can support, + * it sets autotry_restart to true to indicate that we need to + * initiate a new autotry session with the link partner. To do + * so, we set the speed then disable and re-enable the tx laser, to + * alert the link partner that it also needs to restart autotry on its + * end. This is consistent with true clause 37 autoneg, which also + * involves a loss of signal. + */ + /* * Try each speed one by one, highest priority first. We do this in * software because 10gb fiber doesn't support speed autonegotiation. 
@@ -475,21 +491,52 @@ s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw, speedcnt++; highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; - /* Set hardware SDP's */ + /* If we already have link at this speed, just jump out */ + hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); + + if ((phy_link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) + goto out; + + /* Set the module link speed */ esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - ixgbe_setup_mac_link_speed_82599(hw, - IXGBE_LINK_SPEED_10GB_FULL, - autoneg, - autoneg_wait_to_complete); + /* Allow module to change analog characteristics (1G->10G) */ + msleep(40); - msleep(50); - - /* If we have link, just jump out */ - hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); - if (link_up) + status = ixgbe_setup_mac_link_speed_82599(hw, + IXGBE_LINK_SPEED_10GB_FULL, + autoneg, + autoneg_wait_to_complete); + if (status != 0) goto out; + + /* Flap the tx laser if it has not already been done */ + if (hw->mac.autotry_restart) { + /* Disable tx laser; allow 100us to go dark per spec */ + esdp_reg |= IXGBE_ESDP_SDP3; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + udelay(100); + + /* Enable tx laser; allow 2ms to light up per spec */ + esdp_reg &= ~IXGBE_ESDP_SDP3; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + msleep(2); + + hw->mac.autotry_restart = false; + } + + /* The controller may take up to 500ms at 10g to acquire link */ + for (i = 0; i < 5; i++) { + /* Wait for the link partner to also set speed */ + msleep(100); + + /* If we have link, just jump out */ + hw->mac.ops.check_link(hw, &phy_link_speed, + &link_up, false); + if (link_up) + goto out; + } } if (speed & IXGBE_LINK_SPEED_1GB_FULL) { @@ -497,16 +544,44 @@ s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw, if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; - /* Set hardware SDP's */ + /* If we already have link at this speed, just jump out */ + hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); + + if ((phy_link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) + goto out; + + /* Set the module link speed */ esdp_reg &= ~IXGBE_ESDP_SDP5; esdp_reg |= IXGBE_ESDP_SDP5_DIR; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - ixgbe_setup_mac_link_speed_82599( - hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg, - autoneg_wait_to_complete); + /* Allow module to change analog characteristics (10G->1G) */ + msleep(40); - msleep(50); + status = ixgbe_setup_mac_link_speed_82599(hw, + IXGBE_LINK_SPEED_1GB_FULL, + autoneg, + autoneg_wait_to_complete); + if (status != 0) + goto out; + + /* Flap the tx laser if it has not already been done */ + if (hw->mac.autotry_restart) { + /* Disable tx laser; allow 100us to go dark per spec */ + esdp_reg |= IXGBE_ESDP_SDP3; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + udelay(100); + + /* Enable tx laser; allow 2ms to light up per spec */ + esdp_reg &= ~IXGBE_ESDP_SDP3; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + msleep(2); + + hw->mac.autotry_restart = false; + } + + /* Wait for the link partner to also set speed */ + msleep(100); /* If we have link, just jump out */ hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); @@ -572,6 +647,11 @@ s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed *speed, else *speed = IXGBE_LINK_SPEED_100_FULL; + /* if link is down, zero out the current_mode */ + if (*link_up == false) { + hw->fc.current_mode = ixgbe_fc_none; + hw->fc.fc_was_autonegged = 
false; + } + return 0; } @@ -592,6 +672,7 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw, s32 status = 0; u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + u32 start_autoc = autoc; u32 orig_autoc = 0; u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; @@ -604,6 +685,11 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw, hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg); speed &= link_capabilities; + if (speed == IXGBE_LINK_SPEED_UNKNOWN) { + status = IXGBE_ERR_LINK_SETUP; + goto out; + } + /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ if (hw->mac.orig_link_settings_stored) orig_autoc = hw->mac.orig_autoc; @@ -611,11 +697,9 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw, orig_autoc = autoc; - if (speed == IXGBE_LINK_SPEED_UNKNOWN) { - status = IXGBE_ERR_LINK_SETUP; - } else if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || - link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || - link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { + if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { /* Set KX4/KX/KR support according to speed requested */ autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); if (speed & IXGBE_LINK_SPEED_10GB_FULL) @@ -647,7 +731,7 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw, } } - if (status == 0) { + if (autoc != start_autoc) { /* Restart link */ autoc |= IXGBE_AUTOC_AN_RESTART; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); @@ -674,13 +758,11 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw, } } - /* Set up flow control */ - status = ixgbe_setup_fc_generic(hw, 0); - /* Add delay to filter out noises during initial link setup */ msleep(50); } +out: return status; } @@ -1083,6 +1165,931 @@ s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw) } /** + * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) +{ + int i; + u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; + + /* + * Before starting reinitialization process, + * FDIRCMD.CMD must be zero. + */ + for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { + if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & + IXGBE_FDIRCMD_CMD_MASK)) + break; + udelay(10); + } + if (i >= IXGBE_FDIRCMD_CMD_POLL) { + hw_dbg(hw, "Flow Director previous command isn't complete, " + "aborting table re-initialization.\n"); + return IXGBE_ERR_FDIR_REINIT_FAILED; + } + + IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); + IXGBE_WRITE_FLUSH(hw); + /* + * 82599 adapters flow director init flow cannot be restarted, + * Workaround 82599 silicon errata by performing the following steps + * before re-writing the FDIRCTRL control register with the same value. + * - write 1 to bit 8 of FDIRCMD register & + * - write 0 to bit 8 of FDIRCMD register + */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | + IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & + ~IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + /* + * Clear FDIR Hash register to clear any leftover hashes + * waiting to be programmed.
+ */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00); + IXGBE_WRITE_FLUSH(hw); + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll init-done after we write FDIRCTRL register */ + for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & + IXGBE_FDIRCTRL_INIT_DONE) + break; + udelay(10); + } + if (i >= IXGBE_FDIR_INIT_DONE_POLL) { + hw_dbg(hw, "Flow Director Signature poll time exceeded!\n"); + return IXGBE_ERR_FDIR_REINIT_FAILED; + } + + /* Clear FDIR statistics registers (read to clear) */ + IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); + IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT); + IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); + IXGBE_READ_REG(hw, IXGBE_FDIRMISS); + IXGBE_READ_REG(hw, IXGBE_FDIRLEN); + + return 0; +} + +/** + * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters + * @hw: pointer to hardware structure + * @pballoc: which mode to allocate filters with + **/ +s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc) +{ + u32 fdirctrl = 0; + u32 pbsize; + int i; + + /* + * Before enabling Flow Director, the Rx Packet Buffer size + * must be reduced. The new value is the current size minus + * flow director memory usage size. + */ + pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc)); + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), + (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize)); + + /* + * The defaults in the HW for RX PB 1-7 are not zero and so should be + * initialized to zero for non-DCB mode, otherwise actual total RX PB + * would be bigger than programmed and filter space would run into + * the PB 0 region. + */ + for (i = 1; i < 8; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); + + /* Send interrupt when 64 filters are left */ + fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT; + + /* Set the maximum length per hash bucket to 0xA filters */ + fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT; + + switch (pballoc) { + case IXGBE_FDIR_PBALLOC_64K: + /* 8k - 1 signature filters */ + fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K; + break; + case IXGBE_FDIR_PBALLOC_128K: + /* 16k - 1 signature filters */ + fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K; + break; + case IXGBE_FDIR_PBALLOC_256K: + /* 32k - 1 signature filters */ + fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; + break; + default: + /* bad value */ + return IXGBE_ERR_CONFIG; + } + + /* Move the flexible bytes to use the ethertype - shift 6 words */ + fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); + + fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS; + + /* Prime the keys for hashing */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, + htonl(IXGBE_ATR_BUCKET_HASH_KEY)); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, + htonl(IXGBE_ATR_SIGNATURE_HASH_KEY)); + + /* + * Poll init-done after we write the register. Estimated times: + * 10G: PBALLOC = 11b, timing is 60us + * 1G: PBALLOC = 11b, timing is 600us + * 100M: PBALLOC = 11b, timing is 6ms + * + * Multiply these timings by 4 if under full Rx load + * + * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for + * 1 msec per poll time. If we're at line rate and drop to 100M, then + * this might not finish in our poll time, but we can live with that + * for now.
+ */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); + IXGBE_WRITE_FLUSH(hw); + for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & + IXGBE_FDIRCTRL_INIT_DONE) + break; + msleep(1); + } + if (i >= IXGBE_FDIR_INIT_DONE_POLL) + hw_dbg(hw, "Flow Director Signature poll time exceeded!\n"); + + return 0; +} + +/** + * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters + * @hw: pointer to hardware structure + * @pballoc: which mode to allocate filters with + **/ +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc) +{ + u32 fdirctrl = 0; + u32 pbsize; + int i; + + /* + * Before enabling Flow Director, the Rx Packet Buffer size + * must be reduced. The new value is the current size minus + * flow director memory usage size. + */ + pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc)); + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), + (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize)); + + /* + * The defaults in the HW for RX PB 1-7 are not zero and so should be + * initialized to zero for non-DCB mode, otherwise actual total RX PB + * would be bigger than programmed and filter space would run into + * the PB 0 region. + */ + for (i = 1; i < 8; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); + + /* Send interrupt when 64 filters are left */ + fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT; + + switch (pballoc) { + case IXGBE_FDIR_PBALLOC_64K: + /* 2k - 1 perfect filters */ + fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K; + break; + case IXGBE_FDIR_PBALLOC_128K: + /* 4k - 1 perfect filters */ + fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K; + break; + case IXGBE_FDIR_PBALLOC_256K: + /* 8k - 1 perfect filters */ + fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; + break; + default: + /* bad value */ + return IXGBE_ERR_CONFIG; + } + + /* Turn perfect match filtering on */ + fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH; + fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS; + + /* Move the flexible bytes to use the ethertype - shift 6 words */ + fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); + + /* Prime the keys for hashing */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, + htonl(IXGBE_ATR_BUCKET_HASH_KEY)); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, + htonl(IXGBE_ATR_SIGNATURE_HASH_KEY)); + + /* + * Poll init-done after we write the register. Estimated times: + * 10G: PBALLOC = 11b, timing is 60us + * 1G: PBALLOC = 11b, timing is 600us + * 100M: PBALLOC = 11b, timing is 6ms + * + * Multiply these timings by 4 if under full Rx load + * + * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for + * 1 msec per poll time. If we're at line rate and drop to 100M, then + * this might not finish in our poll time, but we can live with that + * for now.
+ */ + + /* Set the maximum length per hash bucket to 0xA filters */ + fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT); + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); + IXGBE_WRITE_FLUSH(hw); + for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & + IXGBE_FDIRCTRL_INIT_DONE) + break; + msleep(1); + } + if (i >= IXGBE_FDIR_INIT_DONE_POLL) + hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n"); + + return 0; +} + + +/** + * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR + * @stream: input bitstream to compute the hash on + * @key: 32-bit hash key + **/ +u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key) +{ + /* + * The algorithm is as follows: + * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350 + * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n] + * and A[n] x B[n] is bitwise AND between same length strings + * + * K[n] is 16 bits, defined as: + * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15] + * for n modulo 32 < 15, K[n] = + * K[(n % 32:0) | (31:31 - (14 - (n % 32)))] + * + * S[n] is 16 bits, defined as: + * for n >= 15, S[n] = S[n:n - 15] + * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))] + * + * To simplify for programming, the algorithm is implemented + * in software this way: + * + * Key[31:0], Stream[335:0] + * + * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times + * int_key[350:0] = tmp_key[351:1] + * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321] + * + * hash[15:0] = 0; + * for (i = 0; i < 351; i++) { + * if (int_key[i]) + * hash ^= int_stream[(i + 15):i]; + * } + */ + + union { + u64 fill[6]; + u32 key[11]; + u8 key_stream[44]; + } tmp_key; + + u8 *stream = (u8 *)atr_input; + u8 int_key[44]; /* upper-most bit unused */ + u8 hash_str[46]; /* upper-most 2 bits unused */ + u16 hash_result = 0; + int i, j, k, h; + + /* + * Initialize the fill member to prevent warnings + * on some compilers + */ + tmp_key.fill[0] = 0; + + /* First load the temporary key stream */ + for (i = 0; i < 6; i++) { + u64 fillkey = ((u64)key << 32) | key; + tmp_key.fill[i] = fillkey; + } + + /* + * Set the interim key for the hashing. Bit 352 is unused, so we must + * shift and compensate when building the key. + */ + + int_key[0] = tmp_key.key_stream[0] >> 1; + for (i = 1, j = 0; i < 44; i++) { + unsigned int this_key = tmp_key.key_stream[j] << 7; + j++; + int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1)); + } + + /* + * Set the interim bit string for the hashing. Bits 368 and 367 are + * unused, so shift and compensate when building the string. + */ + hash_str[0] = (stream[40] & 0x7f) >> 1; + for (i = 1, j = 40; i < 46; i++) { + unsigned int this_str = stream[j] << 7; + j++; + if (j > 41) + j = 0; + hash_str[i] = (u8)(this_str | (stream[j] >> 1)); + } + + /* + * Now compute the hash. i is the index into hash_str, j is into our + * key stream, k is counting the number of bits, and h iterates within + * each byte. + */ + for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) { + for (h = 0; h < 8 && k < 351; h++, k++) { + if (int_key[j] & (1 << h)) { + /* + * Key bit is set, XOR in the current 16-bit + * string.
Example of processing: + * h = 0, + * tmp = (hash_str[i - 2] & 0 << 16) | + * (hash_str[i - 1] & 0xff << 8) | + * (hash_str[i] & 0xff >> 0) + * So tmp = hash_str[15 + k:k], since the + * i + 2 clause rolls off the 16-bit value + * h = 7, + * tmp = (hash_str[i - 2] & 0x7f << 9) | + * (hash_str[i - 1] & 0xff << 1) | + * (hash_str[i] & 0x80 >> 7) + */ + int tmp = (hash_str[i] >> h); + tmp |= (hash_str[i - 1] << (8 - h)); + tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1)) + << (16 - h); + hash_result ^= (u16)tmp; + } + } + } + + return hash_result; +} + +/** + * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream + * @input: input stream to modify + * @vlan: the VLAN id to load + **/ +s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan) +{ + input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8; + input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff; + + return 0; +} + +/** + * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address + * @input: input stream to modify + * @src_addr: the IP address to load + **/ +s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr) +{ + input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24; + input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] = + (src_addr >> 16) & 0xff; + input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] = + (src_addr >> 8) & 0xff; + input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff; + + return 0; +} + +/** + * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address + * @input: input stream to modify + * @dst_addr: the IP address to load + **/ +s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr) +{ + input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24; + input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] = + (dst_addr >> 16) & 0xff; + input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] = + (dst_addr >> 8) & 0xff; + input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff; + + return 0; +} + +/** + * ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address + * @input: input stream to modify + * @src_addr_1: the first 4 bytes of the IP address to load + * @src_addr_2: the second 4 bytes of the IP address to load + * @src_addr_3: the third 4 bytes of the IP address to load + * @src_addr_4: the fourth 4 bytes of the IP address to load + **/ +s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input, + u32 src_addr_1, u32 src_addr_2, + u32 src_addr_3, u32 src_addr_4) +{ + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff; + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] = + (src_addr_4 >> 8) & 0xff; + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] = + (src_addr_4 >> 16) & 0xff; + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24; + + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff; + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] = + (src_addr_3 >> 8) & 0xff; + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] = + (src_addr_3 >> 16) & 0xff; + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24; + + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff; + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] = + (src_addr_2 >> 8) & 0xff; + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] = + (src_addr_2 >> 16) & 0xff; + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24; + + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff; + 
input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] = + (src_addr_1 >> 8) & 0xff; + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] = + (src_addr_1 >> 16) & 0xff; + input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24; + + return 0; +} + +/** + * ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address + * @input: input stream to modify + * @dst_addr_1: the first 4 bytes of the IP address to load + * @dst_addr_2: the second 4 bytes of the IP address to load + * @dst_addr_3: the third 4 bytes of the IP address to load + * @dst_addr_4: the fourth 4 bytes of the IP address to load + **/ +s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input, + u32 dst_addr_1, u32 dst_addr_2, + u32 dst_addr_3, u32 dst_addr_4) +{ + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff; + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] = + (dst_addr_4 >> 8) & 0xff; + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] = + (dst_addr_4 >> 16) & 0xff; + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24; + + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff; + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] = + (dst_addr_3 >> 8) & 0xff; + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] = + (dst_addr_3 >> 16) & 0xff; + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24; + + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff; + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] = + (dst_addr_2 >> 8) & 0xff; + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] = + (dst_addr_2 >> 16) & 0xff; + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24; + + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff; + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] = + (dst_addr_1 >> 8) & 0xff; + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] = + (dst_addr_1 >> 16) & 0xff; + input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24; + + return 0; +} + +/** + * ixgbe_atr_set_src_port_82599 - Sets the source port + * @input: input stream to modify + * @src_port: the source port to load + **/ +s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port) +{ + input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8; + input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff; + + return 0; +} + +/** + * ixgbe_atr_set_dst_port_82599 - Sets the destination port + * @input: input stream to modify + * @dst_port: the destination port to load + **/ +s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port) +{ + input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8; + input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff; + + return 0; +} + +/** + * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes + * @input: input stream to modify + * @flex_bytes: the flexible bytes to load + **/ +s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte) +{ + input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8; + input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff; + + return 0; +} + +/** + * ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool + * @input: input stream to modify + * @vm_pool: the Virtual Machine pool to load + **/ +s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool) +{ + input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool; + + return 0; +} + +/** + * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet 
type + * @input: input stream to modify + * @l4type: the layer 4 type value to load + **/ +s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type) +{ + input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type; + + return 0; +} + +/** + * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream + * @input: input stream to search + * @vlan: the VLAN id to load + **/ +s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan) +{ + *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET]; + *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8; + + return 0; +} + +/** + * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address + * @input: input stream to search + * @src_addr: the IP address to load + **/ +s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr) +{ + *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET]; + *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8; + *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16; + *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24; + + return 0; +} + +/** + * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address + * @input: input stream to search + * @dst_addr: the IP address to load + **/ +s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr) +{ + *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET]; + *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8; + *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16; + *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24; + + return 0; +} + +/** + * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address + * @input: input stream to search + * @src_addr_1: the first 4 bytes of the IP address to load + * @src_addr_2: the second 4 bytes of the IP address to load + * @src_addr_3: the third 4 bytes of the IP address to load + * @src_addr_4: the fourth 4 bytes of the IP address to load + **/ +s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input, + u32 *src_addr_1, u32 *src_addr_2, + u32 *src_addr_3, u32 *src_addr_4) +{ + *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12]; + *src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8; + *src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16; + *src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24; + + *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8]; + *src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8; + *src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16; + *src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24; + + *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4]; + *src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8; + *src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16; + *src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24; + + *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET]; + *src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8; + *src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16; + *src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24; + + return 0; +} + +/** + * ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address + * @input: input stream to search + * @dst_addr_1: the first 4 bytes of the IP address to load + * @dst_addr_2: the second 4 bytes of the IP address
to load + * @dst_addr_3: the third 4 bytes of the IP address to load + * @dst_addr_4: the fourth 4 bytes of the IP address to load + **/ +s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input, + u32 *dst_addr_1, u32 *dst_addr_2, + u32 *dst_addr_3, u32 *dst_addr_4) +{ + *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12]; + *dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8; + *dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16; + *dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24; + + *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8]; + *dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8; + *dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16; + *dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24; + + *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4]; + *dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8; + *dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16; + *dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24; + + *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET]; + *dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8; + *dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16; + *dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24; + + return 0; +} + +/** + * ixgbe_atr_get_src_port_82599 - Gets the source port + * @input: input stream to modify + * @src_port: the source port to load + * + * Even though the input is given in big-endian, the FDIRPORT registers + * expect the ports to be programmed in little-endian. Hence the need to swap + * endianness when retrieving the data. This can be confusing since the + * internal hash engine expects it to be big-endian. + **/ +s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port) +{ + *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8; + *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1]; + + return 0; +} + +/** + * ixgbe_atr_get_dst_port_82599 - Gets the destination port + * @input: input stream to modify + * @dst_port: the destination port to load + * + * Even though the input is given in big-endian, the FDIRPORT registers + * expect the ports to be programmed in little-endian. Hence the need to swap + * endianness when retrieving the data. This can be confusing since the + * internal hash engine expects it to be big-endian.
+ **/ +s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port) +{ + *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8; + *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1]; + + return 0; +} + +/** + * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes + * @input: input stream to modify + * @flex_bytes: the flexible bytes to load + **/ +s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, u16 *flex_byte) +{ + *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET]; + *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8; + + return 0; +} + +/** + * ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool + * @input: input stream to modify + * @vm_pool: the Virtual Machine pool to load + **/ +s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool) +{ + *vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET]; + + return 0; +} + +/** + * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type + * @input: input stream to modify + * @l4type: the layer 4 type value to load + **/ +s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type) +{ + *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET]; + + return 0; +} + +/** + * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter + * @hw: pointer to hardware structure + * @stream: input bitstream + * @queue: queue index to direct traffic to + **/ +s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + struct ixgbe_atr_input *input, + u8 queue) +{ + u64 fdirhashcmd; + u64 fdircmd; + u32 fdirhash; + u16 bucket_hash, sig_hash; + u8 l4type; + + bucket_hash = ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_BUCKET_HASH_KEY); + + /* bucket_hash is only 15 bits */ + bucket_hash &= IXGBE_ATR_HASH_MASK; + + sig_hash = ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_SIGNATURE_HASH_KEY); + + /* Get the l4type in order to program FDIRCMD properly */ + /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */ + ixgbe_atr_get_l4type_82599(input, &l4type); + + /* + * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits + * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. + */ + fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash; + + fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN); + + switch (l4type & IXGBE_ATR_L4TYPE_MASK) { + case IXGBE_ATR_L4TYPE_TCP: + fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP; + break; + case IXGBE_ATR_L4TYPE_UDP: + fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP; + break; + case IXGBE_ATR_L4TYPE_SCTP: + fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP; + break; + default: + hw_dbg(hw, "Error on l4type input\n"); + return IXGBE_ERR_CONFIG; + } + + if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) + fdircmd |= IXGBE_FDIRCMD_IPV6; + + fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT); + fdirhashcmd = ((fdircmd << 32) | fdirhash); + + IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); + + return 0; +} + +/** + * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter + * @hw: pointer to hardware structure + * @input: input bitstream + * @queue: queue index to direct traffic to + * + * Note that the caller to this function must lock before calling, since the + * hardware writes must be protected from one another. 
+ **/ +s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, + struct ixgbe_atr_input *input, + u16 soft_id, + u8 queue) +{ + u32 fdircmd = 0; + u32 fdirhash; + u32 src_ipv4, dst_ipv4; + u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4; + u16 src_port, dst_port, vlan_id, flex_bytes; + u16 bucket_hash; + u8 l4type; + + /* Get our input values */ + ixgbe_atr_get_l4type_82599(input, &l4type); + + /* + * Check l4type formatting, and bail out before we touch the hardware + * if there's a configuration issue + */ + switch (l4type & IXGBE_ATR_L4TYPE_MASK) { + case IXGBE_ATR_L4TYPE_TCP: + fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP; + break; + case IXGBE_ATR_L4TYPE_UDP: + fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP; + break; + case IXGBE_ATR_L4TYPE_SCTP: + fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP; + break; + default: + hw_dbg(hw, "Error on l4type input\n"); + return IXGBE_ERR_CONFIG; + } + + bucket_hash = ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_BUCKET_HASH_KEY); + + /* bucket_hash is only 15 bits */ + bucket_hash &= IXGBE_ATR_HASH_MASK; + + ixgbe_atr_get_vlan_id_82599(input, &vlan_id); + ixgbe_atr_get_src_port_82599(input, &src_port); + ixgbe_atr_get_dst_port_82599(input, &dst_port); + ixgbe_atr_get_flex_byte_82599(input, &flex_bytes); + + fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash; + + /* Now figure out if we're IPv4 or IPv6 */ + if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) { + /* IPv6 */ + ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2, + &src_ipv6_3, &src_ipv6_4); + + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3); + /* The last 4 bytes is the same register as IPv4 */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4); + + fdircmd |= IXGBE_FDIRCMD_IPV6; + fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH; + } else { + /* IPv4 */ + ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4); + IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4); + + } + + ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4); + IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4); + + IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id | + (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT))); + IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port | + (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT))); + + fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW; + fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE; + fdircmd |= IXGBE_FDIRCMD_LAST; + fdircmd |= IXGBE_FDIRCMD_QUEUE_EN; + fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); + + return 0; +} +/** * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register * @hw: pointer to hardware structure * @reg: analog register to read @@ -1135,8 +2142,9 @@ s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) { u32 q_num; + s32 ret_val; - ixgbe_start_hw_generic(hw); + ret_val = ixgbe_start_hw_generic(hw); /* Clear the rate limiters */ for (q_num = 0; q_num < hw->mac.max_tx_queues; q_num++) { @@ -1145,7 +2153,13 @@ s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) } IXGBE_WRITE_FLUSH(hw); - return 0; + /* We need to run link autotry after the driver loads */ + hw->mac.autotry_restart = true; + + if (ret_val == 0) + ret_val = ixgbe_verify_fw_version_82599(hw); + + return ret_val; } /** @@ -1397,6 +2411,54 @@ san_mac_addr_out: return 0; } +/** + * ixgbe_verify_fw_version_82599 - verify fw version for 82599 + * @hw: pointer to hardware structure + * + * Verifies 
that the installed firmware version is 0.6 or higher + * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. + * + * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or + * if the FW version is not supported. + **/ +static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_EEPROM_VERSION; + u16 fw_offset, fw_ptp_cfg_offset; + u16 fw_version = 0; + + /* firmware check is only necessary for SFI devices */ + if (hw->phy.media_type != ixgbe_media_type_fiber) { + status = 0; + goto fw_version_out; + } + + /* get the offset to the Firmware Module block */ + hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); + + if ((fw_offset == 0) || (fw_offset == 0xFFFF)) + goto fw_version_out; + + /* get the offset to the Pass Through Patch Configuration block */ + hw->eeprom.ops.read(hw, (fw_offset + + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), + &fw_ptp_cfg_offset); + + if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) + goto fw_version_out; + + /* get the firmware version */ + hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + + IXGBE_FW_PATCH_VERSION_4), + &fw_version); + + if (fw_version > 0x5) + status = 0; + +fw_version_out: + return status; +} + static struct ixgbe_mac_operations mac_ops_82599 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_82599, @@ -1432,7 +2494,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .disable_mc = &ixgbe_disable_mc_generic, .clear_vfta = &ixgbe_clear_vfta_82599, .set_vfta = &ixgbe_set_vfta_82599, - .setup_fc = &ixgbe_setup_fc_generic, + .fc_enable = &ixgbe_fc_enable_generic, .init_uta_tables = &ixgbe_init_uta_tables_82599, .setup_sfp = &ixgbe_setup_sfp_modules_82599, }; diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index 0cc3c47cb453..96a185953777 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -28,6 +28,8 @@ #include <linux/pci.h> #include <linux/delay.h> #include <linux/sched.h> +#include <linux/list.h> +#include <linux/netdevice.h> #include "ixgbe.h" #include "ixgbe_common.h" @@ -83,6 +85,9 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); IXGBE_WRITE_FLUSH(hw); + /* Setup flow control */ + ixgbe_setup_fc(hw, 0); + /* Clear adapter stopped flag */ hw->adapter_stopped = false; @@ -101,13 +106,17 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) **/ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) { + s32 status; + /* Reset the hardware */ - hw->mac.ops.reset_hw(hw); + status = hw->mac.ops.reset_hw(hw); - /* Start the HW */ - hw->mac.ops.start_hw(hw); + if (status == 0) { + /* Start the HW */ + status = hw->mac.ops.start_hw(hw); + } - return 0; + return status; } /** @@ -1356,15 +1365,14 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) * Drivers using secondary unicast addresses must set user_set_promisc when * manually putting the device into promiscuous mode.
**/ -s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, - u32 addr_count, ixgbe_mc_addr_itr next) +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, + struct list_head *uc_list) { - u8 *addr; u32 i; u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; u32 uc_addr_in_use; u32 fctrl; - u32 vmdq; + struct netdev_hw_addr *ha; /* * Clear accounting of old secondary address list, @@ -1382,10 +1390,9 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, } /* Add the new addresses */ - for (i = 0; i < addr_count; i++) { + list_for_each_entry(ha, uc_list, list) { hw_dbg(hw, " Adding the secondary addresses:\n"); - addr = next(hw, &addr_list, &vmdq); - ixgbe_add_uc_addr(hw, addr, vmdq); + ixgbe_add_uc_addr(hw, ha->addr, 0); } if (hw->addr_ctrl.overflow_promisc) { @@ -1577,17 +1584,16 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) } /** - * ixgbe_fc_enable - Enable flow control + * ixgbe_fc_enable_generic - Enable flow control * @hw: pointer to hardware structure * @packetbuf_num: packet buffer number (0-7) * * Enable flow control according to the current settings. **/ -s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num) +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) { s32 ret_val = 0; - u32 mflcn_reg; - u32 fccfg_reg; + u32 mflcn_reg, fccfg_reg; u32 reg; u32 rx_pba_size; @@ -1596,7 +1602,12 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num) goto out; #endif /* CONFIG_DCB */ + /* Negotiate the fc mode to use */ + ret_val = ixgbe_fc_autoneg(hw); + if (ret_val) + goto out; + /* Disable any previous flow control settings */ mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE); @@ -1616,7 +1627,10 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num) */ switch (hw->fc.current_mode) { case ixgbe_fc_none: - /* Flow control completely disabled by software override. */ + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ break; case ixgbe_fc_rx_pause: /* @@ -1645,7 +1659,7 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num) case ixgbe_fc_pfc: goto out; break; -#endif +#endif /* CONFIG_DCB */ default: hw_dbg(hw, "Flow control param set incorrectly\n"); ret_val = -IXGBE_ERR_CONFIG; @@ -1653,7 +1667,7 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num) break; } - /* Enable 802.3x based flow control settings. */ + /* Set 802.3x based flow control settings. */ mflcn_reg |= IXGBE_MFLCN_DPF; IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); @@ -1661,10 +1675,12 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num) reg = IXGBE_READ_REG(hw, IXGBE_MTQC); /* Thresholds are different for link flow control when in DCB mode */ if (reg & IXGBE_MTQC_RT_ENA) { + rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)); + /* Always disable XON for LFC when in DCB mode */ - IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), 0); + reg = (rx_pba_size >> 5) & 0xFFE0; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg); - rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)); reg = (rx_pba_size >> 2) & 0xFFE0; if (hw->fc.current_mode & ixgbe_fc_tx_pause) reg |= IXGBE_FCRTH_FCEN; @@ -1709,100 +1725,41 @@ out: * ixgbe_fc_autoneg - Configure flow control * @hw: pointer to hardware structure * - * Negotiates flow control capabilities with link partner using autoneg and - * applies the results. 
+ * Compares our advertised flow control capabilities to those advertised by + * our link partner, and determines the proper flow control mode to use. **/ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) { s32 ret_val = 0; - u32 i, reg, pcs_anadv_reg, pcs_lpab_reg; - - reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + ixgbe_link_speed speed; + u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; + bool link_up; /* - * The possible values of fc.current_mode are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames, - * but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames but - * we do not support receiving pause frames). - * 3: Both Rx and Tx flow control (symmetric) are enabled. - * 4: Priority Flow Control is enabled. - * other: Invalid. + * AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - we don't have multispeed fiber, or if + * - we're not running at 1G, or if + * - link is not up, or if + * - link is up but AN did not complete, or if + * - link is up and AN completed but timed out + * + * Since we're being called from an LSC, link is already know to be up. + * So use link_up_wait_to_complete=false. */ - switch (hw->fc.current_mode) { - case ixgbe_fc_none: - /* Flow control completely disabled by software override. */ - reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); - break; - case ixgbe_fc_rx_pause: - /* - * Rx Flow control is enabled and Tx Flow control is - * disabled by software override. Since there really - * isn't a way to advertise that we are capable of RX - * Pause ONLY, we will advertise that we support both - * symmetric and asymmetric Rx PAUSE. Later, we will - * disable the adapter's ability to send PAUSE frames. - */ - reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); - break; - case ixgbe_fc_tx_pause: - /* - * Tx Flow control is enabled, and Rx Flow control is - * disabled by software override. - */ - reg |= (IXGBE_PCS1GANA_ASM_PAUSE); - reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE); - break; - case ixgbe_fc_full: - /* Flow control (both Rx and Tx) is enabled by SW override. 
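/*
 * The bail-out list above collapses to one predicate (sketch; the
 * function name is hypothetical). Note that the committed test spells
 * the timeout check as "(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1",
 * which matches the plain truth test used here only if that flag
 * happens to occupy bit 0.
 */
static bool fc_autoneg_should_skip(struct ixgbe_hw *hw,
                                   ixgbe_link_speed speed,
                                   bool link_up, u32 linkstat)
{
	return hw->fc.disable_fc_autoneg ||
	       !hw->phy.multispeed_fiber ||
	       (speed != IXGBE_LINK_SPEED_1GB_FULL) ||
	       !link_up ||
	       !(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) ||
	       (linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT);
}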
*/ - reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); - break; -#ifdef CONFIG_DCB - case ixgbe_fc_pfc: - goto out; - break; -#endif - default: - hw_dbg(hw, "Flow control param set incorrectly\n"); - ret_val = -IXGBE_ERR_CONFIG; - goto out; - break; - } - - IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); - reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); - - /* Set PCS register for autoneg */ - /* Enable and restart autoneg */ - reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART; - - /* Disable AN timeout */ - if (hw->fc.strict_ieee) - reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; - - hw_dbg(hw, "Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg); - IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); - - /* See if autonegotiation has succeeded */ - hw->mac.autoneg_succeeded = 0; - for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { - msleep(10); - reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); - if ((reg & (IXGBE_PCS1GLSTA_LINK_OK | - IXGBE_PCS1GLSTA_AN_COMPLETE)) == - (IXGBE_PCS1GLSTA_LINK_OK | - IXGBE_PCS1GLSTA_AN_COMPLETE)) { - if (!(reg & IXGBE_PCS1GLSTA_AN_TIMED_OUT)) - hw->mac.autoneg_succeeded = 1; - break; - } - } - - if (!hw->mac.autoneg_succeeded) { - /* Autoneg failed to achieve a link, so we turn fc off */ - hw->fc.current_mode = ixgbe_fc_none; - hw_dbg(hw, "Flow Control = NONE.\n"); + hw->mac.ops.check_link(hw, &speed, &link_up, false); + linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); + + if (hw->fc.disable_fc_autoneg || + !hw->phy.multispeed_fiber || + (speed != IXGBE_LINK_SPEED_1GB_FULL) || + !link_up || + ((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || + ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + hw_dbg(hw, "Autoneg FC was skipped.\n"); goto out; } @@ -1845,21 +1802,23 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) hw_dbg(hw, "Flow Control = NONE.\n"); } + /* Record that current_mode is the result of a successful autoneg */ + hw->fc.fc_was_autonegged = true; + out: return ret_val; } /** - * ixgbe_setup_fc_generic - Set up flow control + * ixgbe_setup_fc - Set up flow control * @hw: pointer to hardware structure * - * Sets up flow control. + * Called at init time to set up flow control. **/ -s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num) +s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) { s32 ret_val = 0; - ixgbe_link_speed speed; - bool link_up; + u32 reg; #ifdef CONFIG_DCB if (hw->fc.requested_mode == ixgbe_fc_pfc) { @@ -1881,16 +1840,14 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num) * because it causes the controller to just blast out fc packets. */ if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { - if (hw->fc.requested_mode != ixgbe_fc_none) { - hw_dbg(hw, "Invalid water mark configuration\n"); - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; - goto out; - } + hw_dbg(hw, "Invalid water mark configuration\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; } /* * Validate the requested mode. Strict IEEE mode does not allow - * ixgbe_fc_rx_pause because it will cause testing anomalies. + * ixgbe_fc_rx_pause because it will cause us to fail at UNH. */ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict " @@ -1907,21 +1864,77 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num) hw->fc.requested_mode = ixgbe_fc_full; /* - * Save off the requested flow control mode for use later. 
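/*
 * The partner-comparison step elided between the hunks here follows
 * the standard 802.3 pause resolution over the local (PCS1GANA) and
 * link partner (PCS1GANLP) advertisement words. Condensed sketch,
 * assuming that standard logic; this is not the committed code:
 */
static enum ixgbe_fc_mode resolve_pause(u32 anadv, u32 lpab, bool want_full)
{
	u32 sym = IXGBE_PCS1GANA_SYM_PAUSE;
	u32 asm_p = IXGBE_PCS1GANA_ASM_PAUSE;

	if ((anadv & sym) && (lpab & sym))
		return want_full ? ixgbe_fc_full : ixgbe_fc_rx_pause;
	if (!(anadv & sym) && (anadv & asm_p) &&
	    (lpab & sym) && (lpab & asm_p))
		return ixgbe_fc_tx_pause;	/* partner honors our pause */
	if ((anadv & sym) && (anadv & asm_p) &&
	    !(lpab & sym) && (lpab & asm_p))
		return ixgbe_fc_rx_pause;	/* we honor partner's pause */
	return ixgbe_fc_none;
}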
Depending - * on the link partner's capabilities, we may or may not use this mode. + * Set up the 1G flow control advertisement registers so the HW will be + * able to do fc autoneg once the cable is plugged in. If we end up + * using 10g instead, this is harmless. */ - hw->fc.current_mode = hw->fc.requested_mode; - - /* Decide whether to use autoneg or not. */ - hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (!hw->fc.disable_fc_autoneg && hw->phy.multispeed_fiber && - (speed == IXGBE_LINK_SPEED_1GB_FULL)) - ret_val = ixgbe_fc_autoneg(hw); + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); - if (ret_val) + /* + * The possible values of fc.requested_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. +#ifdef CONFIG_DCB + * 4: Priority Flow Control is enabled. +#endif + * other: Invalid. + */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + /* Flow control completely disabled by software override. */ + reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + reg |= (IXGBE_PCS1GANA_ASM_PAUSE); + reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE); + break; + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. 
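/*
 * For reference, the switch being completed here programs a fixed
 * mapping onto the two PCS1GANA advertisement bits. Because Rx-only
 * pause cannot be advertised on its own, rx_pause and full advertise
 * the same thing and are told apart later by requested_mode:
 *
 *   requested_mode   SYM_PAUSE   ASM_PAUSE
 *   none                 0           0
 *   rx_pause             1           1
 *   tx_pause             0           1
 *   full                 1           1
 */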
*/ + reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + break; +#ifdef CONFIG_DCB + case ixgbe_fc_pfc: + goto out; + break; +#endif /* CONFIG_DCB */ + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + ret_val = -IXGBE_ERR_CONFIG; goto out; + break; + } - ret_val = ixgbe_fc_enable(hw, packetbuf_num); + IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); + + /* Enable and restart autoneg to inform the link partner */ + reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART; + + /* Disable AN timeout */ + if (hw->fc.strict_ieee) + reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; + + IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); + hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); out: return ret_val; @@ -2068,6 +2081,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) hw->mac.ops.check_link(hw, &speed, &link_up, false); if (!link_up) { + autoc_reg |= IXGBE_AUTOC_AN_RESTART; autoc_reg |= IXGBE_AUTOC_FLU; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); msleep(10); diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index dd260890ad0a..0d34d4d8244c 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h @@ -59,13 +59,13 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, u32 mc_addr_count, ixgbe_mc_addr_itr func); -s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, - u32 addr_count, ixgbe_mc_addr_itr func); +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, + struct list_head *uc_list); s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); -s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num); -s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packtetbuf_num); +s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packtetbuf_num); s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw); s32 ixgbe_validate_mac_addr(u8 *mac_addr); diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c index f4417fc3b0fd..589f62c7062a 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c @@ -295,7 +295,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, /* If PFC is disabled globally then fall back to LFC. 
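/*
 * Resulting split after this refactor, drawn from the hunks above:
 * ixgbe_setup_fc() runs once at init, from start_hw, and only programs
 * the 1G advertisement; the per-MAC fc_enable op runs at link/DCB
 * (re)configuration time, negotiates via ixgbe_fc_autoneg(), then
 * programs MFLCN/FCCFG and the FCRT watermarks.
 *
 *   ixgbe_start_hw_generic()
 *     -> ixgbe_setup_fc(hw, 0)                  advertise
 *   link event / ixgbe_dcb_config_pfc_82599()
 *     -> hw->mac.ops.fc_enable(hw, pb)          negotiate + apply
 */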
*/ if (!dcb_config->pfc_mode_enable) { for (i = 0; i < MAX_TRAFFIC_CLASS; i++) - hw->mac.ops.setup_fc(hw, i); + hw->mac.ops.fc_enable(hw, i); goto out; } diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 35255b8e90b7..86f4f3e36f27 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -68,6 +68,8 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = { {"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)}, {"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)}, {"hw_rsc_count", IXGBE_STAT(rsc_count)}, + {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, + {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, {"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)}, {"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)}, {"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)}, @@ -118,6 +120,13 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = { IXGBE_PB_STATS_LEN + \ IXGBE_QUEUE_STATS_LEN) +static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; +#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN + static int ixgbe_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { @@ -129,11 +138,12 @@ static int ixgbe_get_settings(struct net_device *netdev, ecmd->supported = SUPPORTED_10000baseT_Full; ecmd->autoneg = AUTONEG_ENABLE; ecmd->transceiver = XCVR_EXTERNAL; - if (hw->phy.media_type == ixgbe_media_type_copper) { + if ((hw->phy.media_type == ixgbe_media_type_copper) || + (hw->mac.type == ixgbe_mac_82599EB)) { ecmd->supported |= (SUPPORTED_1000baseT_Full | - SUPPORTED_TP | SUPPORTED_Autoneg); + SUPPORTED_Autoneg); - ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg); + ecmd->advertising = ADVERTISED_Autoneg; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) ecmd->advertising |= ADVERTISED_10000baseT_Full; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) @@ -148,7 +158,15 @@ static int ixgbe_get_settings(struct net_device *netdev, ecmd->advertising |= (ADVERTISED_10000baseT_Full | ADVERTISED_1000baseT_Full); - ecmd->port = PORT_TP; + if (hw->phy.media_type == ixgbe_media_type_copper) { + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + } else { + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + } } else if (hw->phy.media_type == ixgbe_media_type_backplane) { /* Set as FIBRE until SERDES defined in kernel */ switch (hw->device_id) { @@ -196,16 +214,10 @@ static int ixgbe_set_settings(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 advertised, old; - s32 err; + s32 err = 0; - switch (hw->phy.media_type) { - case ixgbe_media_type_fiber: - if ((ecmd->autoneg == AUTONEG_ENABLE) || - (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) - return -EINVAL; - /* in this case we currently only support 10Gb/FULL */ - break; - case ixgbe_media_type_copper: + if ((hw->phy.media_type == ixgbe_media_type_copper) || + (hw->mac.type == ixgbe_mac_82599EB)) { /* 10000/copper and 1000/copper must autoneg * this function does not support any duplex forcing, but can * limit the advertising of the adapter to only 10000 or 1000 */ @@ -221,20 +233,23 @@ static int ixgbe_set_settings(struct net_device *netdev, advertised |= IXGBE_LINK_SPEED_1GB_FULL; if (old == 
advertised) - break; + return err; /* this sets the link speed and restarts auto-neg */ + hw->mac.autotry_restart = true; err = hw->mac.ops.setup_link_speed(hw, advertised, true, true); if (err) { DPRINTK(PROBE, INFO, "setup link failed with code %d\n", err); hw->mac.ops.setup_link_speed(hw, old, true, true); } - break; - default: - break; + } else { + /* in this case we currently only support 10Gb/FULL */ + if ((ecmd->autoneg == AUTONEG_ENABLE) || + (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) + return -EINVAL; } - return 0; + return err; } static void ixgbe_get_pauseparam(struct net_device *netdev, @@ -276,6 +291,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev, { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_fc_info fc; #ifdef CONFIG_DCB if (adapter->dcb_cfg.pfc_mode_enable || @@ -284,26 +300,37 @@ static int ixgbe_set_pauseparam(struct net_device *netdev, return -EINVAL; #endif + + fc = hw->fc; + if (pause->autoneg != AUTONEG_ENABLE) - hw->fc.disable_fc_autoneg = true; + fc.disable_fc_autoneg = true; else - hw->fc.disable_fc_autoneg = false; + fc.disable_fc_autoneg = false; if (pause->rx_pause && pause->tx_pause) - hw->fc.requested_mode = ixgbe_fc_full; + fc.requested_mode = ixgbe_fc_full; else if (pause->rx_pause && !pause->tx_pause) - hw->fc.requested_mode = ixgbe_fc_rx_pause; + fc.requested_mode = ixgbe_fc_rx_pause; else if (!pause->rx_pause && pause->tx_pause) - hw->fc.requested_mode = ixgbe_fc_tx_pause; + fc.requested_mode = ixgbe_fc_tx_pause; else if (!pause->rx_pause && !pause->tx_pause) - hw->fc.requested_mode = ixgbe_fc_none; + fc.requested_mode = ixgbe_fc_none; else return -EINVAL; #ifdef CONFIG_DCB - adapter->last_lfc_mode = hw->fc.requested_mode; + adapter->last_lfc_mode = fc.requested_mode; #endif - hw->mac.ops.setup_fc(hw, 0); + + /* if the thing changed then we'll update and use new autoneg */ + if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) { + hw->fc = fc; + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + else + ixgbe_reset(adapter); + } return 0; } @@ -743,6 +770,7 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, strncpy(drvinfo->fw_version, firmware_version, 32); strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); drvinfo->n_stats = IXGBE_STATS_LEN; + drvinfo->testinfo_len = IXGBE_TEST_LEN; drvinfo->regdump_len = ixgbe_get_regs_len(netdev); } @@ -814,7 +842,6 @@ static int ixgbe_set_ringparam(struct net_device *netdev, } goto err_setup; } - temp_tx_ring[i].v_idx = adapter->tx_ring[i].v_idx; } need_update = true; } @@ -844,7 +871,6 @@ static int ixgbe_set_ringparam(struct net_device *netdev, } goto err_setup; } - temp_rx_ring[i].v_idx = adapter->rx_ring[i].v_idx; } need_update = true; } @@ -884,6 +910,8 @@ err_setup: static int ixgbe_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { + case ETH_SS_TEST: + return IXGBE_TEST_LEN; case ETH_SS_STATS: return IXGBE_STATS_LEN; default: @@ -938,6 +966,10 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, int i; switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *ixgbe_gstrings_test, + IXGBE_TEST_LEN * ETH_GSTRING_LEN); + break; case ETH_SS_STATS: for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { memcpy(p, ixgbe_gstrings_stats[i].stat_string, @@ -975,6 +1007,815 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, } } +static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data) +{ + struct ixgbe_hw *hw = &adapter->hw; + bool 
link_up; + u32 link_speed = 0; + *data = 0; + + hw->mac.ops.check_link(hw, &link_speed, &link_up, true); + if (link_up) + return *data; + else + *data = 1; + return *data; +} + +/* ethtool register test data */ +struct ixgbe_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. + */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* default 82599 register test */ +static struct ixgbe_reg_test reg_test_82599[] = { + { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, + { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, + { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, + { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, + { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, + { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, + { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* default 82598 register test */ +static struct ixgbe_reg_test reg_test_82598[] = { + { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, + { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* Enable all four RX queues before testing. */ + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, + /* RDH is read-only for 82598, only test RDT. 
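/*
 * What a PATTERN_TEST row in these tables drives, written as a plain
 * function instead of the REG_PATTERN_TEST macro defined after the
 * tables (sketch, not driver code): write each pattern masked by the
 * row's write mask and require the read-back to equal
 * (pattern & write & mask), restoring the register each time.
 */
static bool reg_pattern_ok(u8 __iomem *hw_addr, u32 reg, u32 mask, u32 write)
{
	static const u32 pat[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(pat); i++) {
		u32 before = readl(hw_addr + reg);
		u32 val;

		writel(pat[i] & write, hw_addr + reg);
		val = readl(hw_addr + reg);
		writel(before, hw_addr + reg);	/* always restore */
		if (val != (pat[i] & write & mask))
			return false;
	}
	return true;
}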
*/ + { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, + { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF }, + { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 }, + { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 }, + { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF }, + { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +#define REG_PATTERN_TEST(R, M, W) \ +{ \ + u32 pat, val, before; \ + const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \ + for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \ + before = readl(adapter->hw.hw_addr + R); \ + writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \ + val = readl(adapter->hw.hw_addr + R); \ + if (val != (_test[pat] & W & M)) { \ + DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "\ + "0x%08X expected 0x%08X\n", \ + R, val, (_test[pat] & W & M)); \ + *data = R; \ + writel(before, adapter->hw.hw_addr + R); \ + return 1; \ + } \ + writel(before, adapter->hw.hw_addr + R); \ + } \ +} + +#define REG_SET_AND_CHECK(R, M, W) \ +{ \ + u32 val, before; \ + before = readl(adapter->hw.hw_addr + R); \ + writel((W & M), (adapter->hw.hw_addr + R)); \ + val = readl(adapter->hw.hw_addr + R); \ + if ((W & M) != (val & M)) { \ + DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\ + "expected 0x%08X\n", R, (val & M), (W & M)); \ + *data = R; \ + writel(before, (adapter->hw.hw_addr + R)); \ + return 1; \ + } \ + writel(before, (adapter->hw.hw_addr + R)); \ +} + +static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) +{ + struct ixgbe_reg_test *test; + u32 value, before, after; + u32 i, toggle; + + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + toggle = 0x7FFFF30F; + test = reg_test_82599; + } else { + toggle = 0x7FFFF3FF; + test = reg_test_82598; + } + + /* + * Because the status register is such a special case, + * we handle it separately from the rest of the register + * tests. Some bits are read-only, some toggle, and some + * are writeable on newer MACs. + */ + before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS); + value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle); + after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle; + if (value != after) { + DPRINTK(DRV, ERR, "failed STATUS register test got: " + "0x%08X expected: 0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before); + + /* + * Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. 
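/*
 * The STATUS special case above, reduced to a sketch (helper name
 * hypothetical): 'toggle' holds the bits a write must NOT be able to
 * latch, so the masked read-back is required to be identical before
 * and after writing all of them.
 */
static bool status_reg_ok(struct ixgbe_hw *hw, u32 toggle)
{
	u32 before = IXGBE_READ_REG(hw, IXGBE_STATUS);
	u32 expect = before & toggle;
	u32 after;

	IXGBE_WRITE_REG(hw, IXGBE_STATUS, toggle);
	after = IXGBE_READ_REG(hw, IXGBE_STATUS) & toggle;
	IXGBE_WRITE_REG(hw, IXGBE_STATUS, before);	/* restore */

	return after == expect;
}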
+ */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + REG_PATTERN_TEST(test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + REG_SET_AND_CHECK(test->reg + (i * 0x40), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + writel(test->write, + (adapter->hw.hw_addr + test->reg) + + (i * 0x40)); + break; + case TABLE32_TEST: + REG_PATTERN_TEST(test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + REG_PATTERN_TEST(test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + REG_PATTERN_TEST((test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + } + test++; + } + + *data = 0; + return 0; +} + +static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) +{ + struct ixgbe_hw *hw = &adapter->hw; + if (hw->eeprom.ops.validate_checksum(hw, NULL)) + *data = 1; + else + *data = 0; + return *data; +} + +static irqreturn_t ixgbe_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *) data; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR); + + return IRQ_HANDLED; +} + +static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u32 mask, i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + + *data = 0; + + /* Hook up test interrupt handler just for this test */ + if (adapter->msix_entries) { + /* NOTE: we don't test MSI-X interrupts here, yet */ + return 0; + } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { + shared_int = false; + if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name, + netdev)) { + *data = 1; + return -1; + } + } else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED, + netdev->name, netdev)) { + shared_int = false; + } else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + DPRINTK(HW, INFO, "testing %s interrupt\n", + (shared_int ? "shared" : "unshared")); + + /* Disable all the interrupts */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); + msleep(10); + + /* Test each interrupt */ + for (; i < 10; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* + * Disable the interrupts to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, + ~mask & 0x00007FFF); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, + ~mask & 0x00007FFF); + msleep(10); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* + * Enable the interrupt to be reported in the cause + * register and then force the same interrupt and see + * if one gets posted. If an interrupt was not posted + * to the bus, the test failed. + */ + adapter->test_icr = 0; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); + msleep(10); + + if (!(adapter->test_icr &mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* + * Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. 
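/*
 * Summary of the per-bit interrupt scheme implemented above and
 * completed just below (the failure codes are the values written to
 * *data):
 *
 *   phase 1 (unshared only): mask the bit (EIMC), force it (EICS)
 *           -> ICR must stay clear, else 3
 *   phase 2: enable the bit (EIMS), force it
 *           -> ICR must show it, else 4
 *   phase 3 (unshared only): mask all other bits, force all others
 *           -> ICR must stay clear, else 5
 */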
+ */ + adapter->test_icr = 0; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, + ~mask & 0x00007FFF); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, + ~mask & 0x00007FFF); + msleep(10); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); + msleep(10); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; + struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; + struct ixgbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + u32 reg_ctl; + int i; + + /* shut down the DMA engines now so they can be reinitialized later */ + + /* first Rx */ + reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + reg_ctl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); + reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(0)); + reg_ctl &= ~IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(0), reg_ctl); + + /* now Tx */ + reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(0)); + reg_ctl &= ~IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(0), reg_ctl); + if (hw->mac.type == ixgbe_mac_82599EB) { + reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg_ctl &= ~IXGBE_DMATXCTL_TE; + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); + } + + ixgbe_reset(adapter); + + if (tx_ring->desc && tx_ring->tx_buffer_info) { + for (i = 0; i < tx_ring->count; i++) { + struct ixgbe_tx_buffer *buf = + &(tx_ring->tx_buffer_info[i]); + if (buf->dma) + pci_unmap_single(pdev, buf->dma, buf->length, + PCI_DMA_TODEVICE); + if (buf->skb) + dev_kfree_skb(buf->skb); + } + } + + if (rx_ring->desc && rx_ring->rx_buffer_info) { + for (i = 0; i < rx_ring->count; i++) { + struct ixgbe_rx_buffer *buf = + &(rx_ring->rx_buffer_info[i]); + if (buf->dma) + pci_unmap_single(pdev, buf->dma, + IXGBE_RXBUFFER_2048, + PCI_DMA_FROMDEVICE); + if (buf->skb) + dev_kfree_skb(buf->skb); + } + } + + if (tx_ring->desc) { + pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + tx_ring->desc = NULL; + } + if (rx_ring->desc) { + pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; + } + + kfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + kfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + return; +} + +static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; + struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + u32 rctl, reg_data; + int i, ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!tx_ring->count) + tx_ring->count = IXGBE_DEFAULT_TXD; + + tx_ring->tx_buffer_info = kcalloc(tx_ring->count, + sizeof(struct ixgbe_tx_buffer), + GFP_KERNEL); + if (!(tx_ring->tx_buffer_info)) { + ret_val = 1; + goto err_nomem; + } + + tx_ring->size = tx_ring->count * sizeof(struct ixgbe_legacy_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, + &tx_ring->dma))) { + ret_val = 2; + goto err_nomem; + } + tx_ring->next_to_use = tx_ring->next_to_clean = 0; + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0), + ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0), + ((u64) tx_ring->dma >> 32)); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0), + tx_ring->count * sizeof(struct 
ixgbe_legacy_tx_desc)); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0); + + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); + reg_data |= IXGBE_HLREG0_TXPADEN; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); + + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); + reg_data |= IXGBE_DMATXCTL_TE; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); + } + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(0)); + reg_data |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data); + + for (i = 0; i < tx_ring->count; i++) { + struct ixgbe_legacy_tx_desc *desc = IXGBE_TX_DESC(*tx_ring, i); + struct sk_buff *skb; + unsigned int size = 1024; + + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, size); + tx_ring->tx_buffer_info[i].skb = skb; + tx_ring->tx_buffer_info[i].length = skb->len; + tx_ring->tx_buffer_info[i].dma = + pci_map_single(pdev, skb->data, skb->len, + PCI_DMA_TODEVICE); + desc->buffer_addr = cpu_to_le64(tx_ring->tx_buffer_info[i].dma); + desc->lower.data = cpu_to_le32(skb->len); + desc->lower.data |= cpu_to_le32(IXGBE_TXD_CMD_EOP | + IXGBE_TXD_CMD_IFCS | + IXGBE_TXD_CMD_RS); + desc->upper.data = 0; + } + + /* Setup Rx Descriptor ring and Rx buffers */ + + if (!rx_ring->count) + rx_ring->count = IXGBE_DEFAULT_RXD; + + rx_ring->rx_buffer_info = kcalloc(rx_ring->count, + sizeof(struct ixgbe_rx_buffer), + GFP_KERNEL); + if (!(rx_ring->rx_buffer_info)) { + ret_val = 4; + goto err_nomem; + } + + rx_ring->size = rx_ring->count * sizeof(struct ixgbe_legacy_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, + &rx_ring->dma))) { + ret_val = 5; + goto err_nomem; + } + rx_ring->next_to_use = rx_ring->next_to_clean = 0; + + rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(0), + ((u64)rx_ring->dma & 0xFFFFFFFF)); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(0), + ((u64) rx_ring->dma >> 32)); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(0), rx_ring->size); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(0), 0); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), 0); + + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); + reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data); + + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); + reg_data &= ~IXGBE_HLREG0_LPBK; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); + + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RDRXCTL); +#define IXGBE_RDRXCTL_RDMTS_MASK 0x00000003 /* Receive Descriptor Minimum + Threshold Size mask */ + reg_data &= ~IXGBE_RDRXCTL_RDMTS_MASK; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDRXCTL, reg_data); + + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MCSTCTRL); +#define IXGBE_MCSTCTRL_MO_MASK 0x00000003 /* Multicast Offset mask */ + reg_data &= ~IXGBE_MCSTCTRL_MO_MASK; + reg_data |= adapter->hw.mac.mc_filter_type; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_MCSTCTRL, reg_data); + + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(0)); + reg_data |= IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data); + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + int j = adapter->rx_ring[0].reg_idx; + u32 k; + for (k = 0; k < 10; k++) { + if 
(IXGBE_READ_REG(&adapter->hw, + IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE) + break; + else + msleep(1); + } + } + + rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); + + for (i = 0; i < rx_ring->count; i++) { + struct ixgbe_legacy_rx_desc *rx_desc = + IXGBE_RX_DESC(*rx_ring, i); + struct sk_buff *skb; + + skb = alloc_skb(IXGBE_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL); + if (!skb) { + ret_val = 6; + goto err_nomem; + } + skb_reserve(skb, NET_IP_ALIGN); + rx_ring->rx_buffer_info[i].skb = skb; + rx_ring->rx_buffer_info[i].dma = + pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048, + PCI_DMA_FROMDEVICE); + rx_desc->buffer_addr = + cpu_to_le64(rx_ring->rx_buffer_info[i].dma); + memset(skb->data, 0x00, skb->len); + } + + return 0; + +err_nomem: + ixgbe_free_desc_rings(adapter); + return ret_val; +} + +static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 reg_data; + + /* right now we only support MAC loopback in the driver */ + + /* Setup MAC loopback */ + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); + reg_data |= IXGBE_HLREG0_LPBK; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); + + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC); + reg_data &= ~IXGBE_AUTOC_LMS_MASK; + reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data); + + /* Disable Atlas Tx lanes; re-enabled in reset path */ + if (hw->mac.type == ixgbe_mac_82598EB) { + u8 atlas; + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas); + atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas); + atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas); + atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas); + atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas); + } + + return 0; +} + +static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter) +{ + u32 reg_data; + + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); + reg_data &= ~IXGBE_HLREG0_LPBK; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); +} + +static void ixgbe_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); +} + +static int ixgbe_check_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(skb->data + 3) == 0xFF) { + if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && + (*(skb->data + frame_size / 2 + 12) == 0xAF)) { + return 0; + } + } + return 13; +} + +static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; + struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i, j, k, l, lc, good_cnt, ret_val = 0; + unsigned long time; + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), rx_ring->count - 1); + + /* + * Calculate the loop count based on the largest descriptor ring + * The idea is 
to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + k = l = 0; + for (j = 0; j <= lc; j++) { + for (i = 0; i < 64; i++) { + ixgbe_create_lbtest_frame( + tx_ring->tx_buffer_info[k].skb, + 1024); + pci_dma_sync_single_for_device(pdev, + tx_ring->tx_buffer_info[k].dma, + tx_ring->tx_buffer_info[k].length, + PCI_DMA_TODEVICE); + if (unlikely(++k == tx_ring->count)) + k = 0; + } + IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), k); + msleep(200); + /* set the start time for the receive */ + time = jiffies; + good_cnt = 0; + do { + /* receive the sent packets */ + pci_dma_sync_single_for_cpu(pdev, + rx_ring->rx_buffer_info[l].dma, + IXGBE_RXBUFFER_2048, + PCI_DMA_FROMDEVICE); + ret_val = ixgbe_check_lbtest_frame( + rx_ring->rx_buffer_info[l].skb, 1024); + if (!ret_val) + good_cnt++; + if (++l == rx_ring->count) + l = 0; + /* + * time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + */ + } while (good_cnt < 64 && jiffies < (time + 20)); + if (good_cnt != 64) { + /* ret_val is the same as mis-compare */ + ret_val = 13; + break; + } + if (jiffies >= (time + 20)) { + /* Error code for time out error */ + ret_val = 14; + break; + } + } + + return ret_val; +} + +static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data) +{ + *data = ixgbe_setup_desc_rings(adapter); + if (*data) + goto out; + *data = ixgbe_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = ixgbe_run_loopback_test(adapter); + ixgbe_loopback_cleanup(adapter); + +err_loopback: + ixgbe_free_desc_rings(adapter); +out: + return *data; +} + +static void ixgbe_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + + set_bit(__IXGBE_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + DPRINTK(HW, INFO, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result */ + if (ixgbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + ixgbe_reset(adapter); + + DPRINTK(HW, INFO, "register testing starting\n"); + if (ixgbe_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + ixgbe_reset(adapter); + DPRINTK(HW, INFO, "eeprom testing starting\n"); + if (ixgbe_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + ixgbe_reset(adapter); + DPRINTK(HW, INFO, "interrupt testing starting\n"); + if (ixgbe_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + ixgbe_reset(adapter); + DPRINTK(HW, INFO, "loopback testing starting\n"); + if (ixgbe_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + ixgbe_reset(adapter); + + clear_bit(__IXGBE_TESTING, &adapter->state); + if (if_running) + dev_open(netdev); + } else { + DPRINTK(HW, INFO, "online testing starting\n"); + /* Online tests */ + if (ixgbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__IXGBE_TESTING, &adapter->state); + } + 
msleep_interruptible(4 * 1000); +} static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, struct ethtool_wolinfo *wol) @@ -1146,8 +1987,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev, else /* rx only or mixed */ q_vector->eitr = adapter->eitr_param; - ixgbe_write_eitr(adapter, i, - EITR_INTS_PER_SEC_TO_REG(q_vector->eitr)); + ixgbe_write_eitr(q_vector); } return 0; @@ -1159,13 +1999,13 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) ethtool_op_set_flags(netdev, data); - if (!(adapter->flags & IXGBE_FLAG_RSC_CAPABLE)) + if (!(adapter->flags & IXGBE_FLAG2_RSC_CAPABLE)) return 0; /* if state changes we need to update adapter->flags and reset */ if ((!!(data & ETH_FLAG_LRO)) != - (!!(adapter->flags & IXGBE_FLAG_RSC_ENABLED))) { - adapter->flags ^= IXGBE_FLAG_RSC_ENABLED; + (!!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED))) { + adapter->flags ^= IXGBE_FLAG2_RSC_ENABLED; if (netif_running(netdev)) ixgbe_reinit_locked(adapter); else @@ -1201,6 +2041,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .set_msglevel = ixgbe_set_msglevel, .get_tso = ethtool_op_get_tso, .set_tso = ixgbe_set_tso, + .self_test = ixgbe_diag_test, .get_strings = ixgbe_get_strings, .phys_id = ixgbe_phys_id, .get_sset_count = ixgbe_get_sset_count, diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index d5939de8ba28..3c3bf1f07b81 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c @@ -280,7 +280,9 @@ out_noddp_unmap: * * This checks ddp status. * - * Returns : 0 for success and skb will not be delivered to ULD + * Returns : < 0 indicates an error or not a FCiE ddp, 0 indicates + * not passing the skb to ULD, > 0 indicates is the length of data + * being ddped. */ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, union ixgbe_adv_rx_desc *rx_desc, @@ -334,6 +336,8 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, /* return 0 to bypass going to ULD for DDPed data */ if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP) rc = 0; + else + rc = ddp->len; } ddp_out: diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h index b7f9b63aa49f..c5b50026a897 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ixgbe/ixgbe_fcoe.h @@ -28,6 +28,7 @@ #ifndef _IXGBE_FCOE_H #define _IXGBE_FCOE_H +#include <scsi/fc/fc_fs.h> #include <scsi/fc/fc_fcoe.h> /* shift bits within STAT fo FCSTAT */ diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index dff1da8ae5c4..a551a96ce676 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -48,7 +48,7 @@ char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = "Intel(R) 10 Gigabit PCI Express Network Driver"; -#define DRV_VERSION "2.0.24-k2" +#define DRV_VERSION "2.0.34-k2" const char ixgbe_driver_version[] = DRV_VERSION; static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation."; @@ -186,6 +186,22 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, } } +static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, + u64 qmask) +{ + u32 mask; + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + mask = (IXGBE_EIMS_RTX_QUEUE & qmask); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); + } else { + mask = (qmask & 0xFFFFFFFF); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); + mask = (qmask >> 32); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); + } +} + static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, struct 
ixgbe_tx_buffer *tx_buffer_info) @@ -248,14 +264,13 @@ static void ixgbe_tx_timeout(struct net_device *netdev); /** * ixgbe_clean_tx_irq - Reclaim resources after transmit completes - * @adapter: board private structure + * @q_vector: structure containing interrupt and ring information * @tx_ring: tx ring to clean - * - * returns true if transmit work is done **/ -static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter, +static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *tx_ring) { + struct ixgbe_adapter *adapter = q_vector->adapter; struct net_device *netdev = adapter->netdev; union ixgbe_adv_tx_desc *tx_desc, *eop_desc; struct ixgbe_tx_buffer *tx_buffer_info; @@ -278,12 +293,24 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter, if (cleaned && skb) { unsigned int segs, bytecount; + unsigned int hlen = skb_headlen(skb); /* gso_segs is currently only valid for tcp */ segs = skb_shinfo(skb)->gso_segs ?: 1; +#ifdef IXGBE_FCOE + /* adjust for FCoE Sequence Offload */ + if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + && (skb->protocol == htons(ETH_P_FCOE)) && + skb_is_gso(skb)) { + hlen = skb_transport_offset(skb) + + sizeof(struct fc_frame_header) + + sizeof(struct fcoe_crc_eof); + segs = DIV_ROUND_UP(skb->len - hlen, + skb_shinfo(skb)->gso_size); + } +#endif /* IXGBE_FCOE */ /* multiply data chunks by size of headers */ - bytecount = ((segs - 1) * skb_headlen(skb)) + - skb->len; + bytecount = ((segs - 1) * hlen) + skb->len; total_packets += segs; total_bytes += bytecount; } @@ -329,18 +356,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter, } /* re-arm the interrupt */ - if (count >= tx_ring->work_limit) { - if (adapter->hw.mac.type == ixgbe_mac_82598EB) - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, - tx_ring->v_idx); - else if (tx_ring->v_idx & 0xFFFFFFFF) - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), - tx_ring->v_idx); - else - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), - (tx_ring->v_idx >> 32)); - } - + if (count >= tx_ring->work_limit) + ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx)); tx_ring->total_bytes += total_bytes; tx_ring->total_packets += total_packets; @@ -678,6 +695,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, bool cleaned = false; int cleaned_count = 0; unsigned int total_rx_bytes = 0, total_rx_packets = 0; +#ifdef IXGBE_FCOE + int ddp_bytes = 0; +#endif /* IXGBE_FCOE */ i = rx_ring->next_to_clean; rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); @@ -708,7 +728,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, prefetch(skb->data - NET_IP_ALIGN); rx_buffer_info->skb = NULL; - if (len && !skb_shinfo(skb)->nr_frags) { + if (rx_buffer_info->dma) { pci_unmap_single(pdev, rx_buffer_info->dma, rx_ring->rx_buf_len, PCI_DMA_FROMDEVICE); @@ -743,7 +763,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, prefetch(next_rxd); cleaned_count++; - if (adapter->flags & IXGBE_FLAG_RSC_CAPABLE) + if (adapter->flags & IXGBE_FLAG2_RSC_CAPABLE) rsc_count = ixgbe_get_rsc_count(rx_desc); if (rsc_count) { @@ -788,9 +808,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, skb->protocol = eth_type_trans(skb, adapter->netdev); #ifdef IXGBE_FCOE /* if ddp, not passing to ULD unless for FCP_RSP or error */ - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) - if (!ixgbe_fcoe_ddp(adapter, rx_desc, skb)) + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); + if (!ddp_bytes) goto next_desc; + } #endif /* 
IXGBE_FCOE */ ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); @@ -816,6 +838,21 @@ next_desc: if (cleaned_count) ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); +#ifdef IXGBE_FCOE + /* include DDPed FCoE data */ + if (ddp_bytes > 0) { + unsigned int mss; + + mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) - + sizeof(struct fc_frame_header) - + sizeof(struct fcoe_crc_eof); + if (mss > 512) + mss &= ~511; + total_rx_bytes += ddp_bytes; + total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss); + } +#endif /* IXGBE_FCOE */ + rx_ring->total_packets += total_rx_packets; rx_ring->total_bytes += total_rx_bytes; adapter->net_stats.rx_bytes += total_rx_bytes; @@ -875,12 +912,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) /* rx only */ q_vector->eitr = adapter->eitr_param; - /* - * since this is initial set up don't need to call - * ixgbe_write_eitr helper - */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), - EITR_INTS_PER_SEC_TO_REG(q_vector->eitr)); + ixgbe_write_eitr(q_vector); } if (adapter->hw.mac.type == ixgbe_mac_82598EB) @@ -965,17 +997,19 @@ update_itr_done: /** * ixgbe_write_eitr - write EITR register in hardware specific way - * @adapter: pointer to adapter struct - * @v_idx: vector index into q_vector array - * @itr_reg: new value to be written in *register* format, not ints/s + * @q_vector: structure containing interrupt and ring information * * This function is made to be called by ethtool and by the driver * when it needs to update EITR registers at runtime. Hardware * specific quirks/differences are taken care of here. */ -void ixgbe_write_eitr(struct ixgbe_adapter *adapter, int v_idx, u32 itr_reg) +void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) { + struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_hw *hw = &adapter->hw; + int v_idx = q_vector->v_idx; + u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr); + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { /* must write high and low 16 bits to reset counter */ itr_reg |= (itr_reg << 16); @@ -994,7 +1028,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) struct ixgbe_adapter *adapter = q_vector->adapter; u32 new_itr; u8 current_itr, ret_itr; - int i, r_idx, v_idx = q_vector->v_idx; + int i, r_idx; struct ixgbe_ring *rx_ring, *tx_ring; r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); @@ -1044,14 +1078,13 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) } if (new_itr != q_vector->eitr) { - u32 itr_reg; + /* do an exponential smoothing */ + new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); /* save the algorithm value here, not the smoothed one */ q_vector->eitr = new_itr; - /* do an exponential smoothing */ - new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); - itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); - ixgbe_write_eitr(adapter, v_idx, itr_reg); + + ixgbe_write_eitr(q_vector); } return; @@ -1122,14 +1155,64 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) if (hw->mac.type == ixgbe_mac_82598EB) ixgbe_check_fan_failure(adapter, eicr); - if (hw->mac.type == ixgbe_mac_82599EB) + if (hw->mac.type == ixgbe_mac_82599EB) { ixgbe_check_sfp_event(adapter, eicr); + + /* Handle Flow Director Full threshold interrupt */ + if (eicr & IXGBE_EICR_FLOW_DIR) { + int i; + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR); + /* Disable transmits before FDIR Re-initialization */ + netif_tx_stop_all_queues(netdev); + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *tx_ring = + 
&adapter->tx_ring[i]; + if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE, + &tx_ring->reinit_state)) + schedule_work(&adapter->fdir_reinit_task); + } + } + } if (!test_bit(__IXGBE_DOWN, &adapter->state)) IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); return IRQ_HANDLED; } +static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, + u64 qmask) +{ + u32 mask; + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + mask = (IXGBE_EIMS_RTX_QUEUE & qmask); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); + } else { + mask = (qmask & 0xFFFFFFFF); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask); + mask = (qmask >> 32); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask); + } + /* skip the flush */ +} + +static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, + u64 qmask) +{ + u32 mask; + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + mask = (IXGBE_EIMS_RTX_QUEUE & qmask); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask); + } else { + mask = (qmask & 0xFFFFFFFF); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask); + mask = (qmask >> 32); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask); + } + /* skip the flush */ +} + static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) { struct ixgbe_q_vector *q_vector = data; @@ -1143,17 +1226,16 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); for (i = 0; i < q_vector->txr_count; i++) { tx_ring = &(adapter->tx_ring[r_idx]); -#ifdef CONFIG_IXGBE_DCA - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - ixgbe_update_tx_dca(adapter, tx_ring); -#endif tx_ring->total_bytes = 0; tx_ring->total_packets = 0; - ixgbe_clean_tx_irq(adapter, tx_ring); r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, r_idx + 1); } + /* disable interrupts on this vector only */ + ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx)); + napi_schedule(&q_vector->napi); + return IRQ_HANDLED; } @@ -1185,13 +1267,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); rx_ring = &(adapter->rx_ring[r_idx]); /* disable interrupts on this vector only */ - if (adapter->hw.mac.type == ixgbe_mac_82598EB) - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx); - else if (rx_ring->v_idx & 0xFFFFFFFF) - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), rx_ring->v_idx); - else - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), - (rx_ring->v_idx >> 32)); + ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx)); napi_schedule(&q_vector->napi); return IRQ_HANDLED; @@ -1199,27 +1275,38 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) { - ixgbe_msix_clean_rx(irq, data); - ixgbe_msix_clean_tx(irq, data); + struct ixgbe_q_vector *q_vector = data; + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *ring; + int r_idx; + int i; - return IRQ_HANDLED; -} + if (!q_vector->txr_count && !q_vector->rxr_count) + return IRQ_HANDLED; -static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, - u64 qmask) -{ - u32 mask; + r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); + for (i = 0; i < q_vector->txr_count; i++) { + ring = &(adapter->tx_ring[r_idx]); + ring->total_bytes = 0; + ring->total_packets = 0; + r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, + r_idx + 1); + } - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { - 
mask = (IXGBE_EIMS_RTX_QUEUE & qmask); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); - } else { - mask = (qmask & 0xFFFFFFFF); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask); - mask = (qmask >> 32); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask); + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); + for (i = 0; i < q_vector->rxr_count; i++) { + ring = &(adapter->rx_ring[r_idx]); + ring->total_bytes = 0; + ring->total_packets = 0; + r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, + r_idx + 1); } - /* skip the flush */ + + /* disable interrupts on this vector only */ + ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx)); + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; } /** @@ -1254,29 +1341,42 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) if (adapter->itr_setting & 1) ixgbe_set_itr_msix(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable_queues(adapter, rx_ring->v_idx); + ixgbe_irq_enable_queues(adapter, + ((u64)1 << q_vector->v_idx)); } return work_done; } /** - * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine + * ixgbe_clean_rxtx_many - msix (aka one shot) rx clean routine * @napi: napi struct with our devices info in it * @budget: amount of work driver is allowed to do this pass, in packets * * This function will clean more than one rx queue associated with a * q_vector. **/ -static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) +static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) { struct ixgbe_q_vector *q_vector = container_of(napi, struct ixgbe_q_vector, napi); struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_ring *rx_ring = NULL; + struct ixgbe_ring *ring = NULL; int work_done = 0, i; long r_idx; - u64 enable_mask = 0; + bool tx_clean_complete = true; + + r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); + for (i = 0; i < q_vector->txr_count; i++) { + ring = &(adapter->tx_ring[r_idx]); +#ifdef CONFIG_IXGBE_DCA + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + ixgbe_update_tx_dca(adapter, ring); +#endif + tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); + r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, + r_idx + 1); + } /* attempt to distribute budget to each queue fairly, but don't allow * the budget to go below 1 because we'll exit polling */ @@ -1284,31 +1384,71 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) budget = max(budget, 1); r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); for (i = 0; i < q_vector->rxr_count; i++) { - rx_ring = &(adapter->rx_ring[r_idx]); + ring = &(adapter->rx_ring[r_idx]); #ifdef CONFIG_IXGBE_DCA if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - ixgbe_update_rx_dca(adapter, rx_ring); + ixgbe_update_rx_dca(adapter, ring); #endif - ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); - enable_mask |= rx_ring->v_idx; + ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, r_idx + 1); } r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); - rx_ring = &(adapter->rx_ring[r_idx]); + ring = &(adapter->rx_ring[r_idx]); /* If all Rx work done, exit the polling mode */ if (work_done < budget) { napi_complete(napi); if (adapter->itr_setting & 1) ixgbe_set_itr_msix(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable_queues(adapter, enable_mask); + ixgbe_irq_enable_queues(adapter, + 
+
+/**
+ * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function is optimized for cleaning one queue only on a single
+ * q_vector!!!
+ **/
+static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
+{
+	struct ixgbe_q_vector *q_vector =
+	                       container_of(napi, struct ixgbe_q_vector, napi);
+	struct ixgbe_adapter *adapter = q_vector->adapter;
+	struct ixgbe_ring *tx_ring = NULL;
+	int work_done = 0;
+	long r_idx;
+
+	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+	tx_ring = &(adapter->tx_ring[r_idx]);
+#ifdef CONFIG_IXGBE_DCA
+	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+		ixgbe_update_tx_dca(adapter, tx_ring);
+#endif
+
+	if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
+		work_done = budget;
+
+	/* If all Tx work done, exit the polling mode */
+	if (work_done < budget) {
+		napi_complete(napi);
+		if (adapter->itr_setting & 1)
+			ixgbe_set_itr_msix(q_vector);
+		if (!test_bit(__IXGBE_DOWN, &adapter->state))
+			ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+	}
+
+	return work_done;
+}
+
 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
                                      int r_idx)
 {
@@ -1316,7 +1456,6 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
 
 	set_bit(r_idx, q_vector->rxr_idx);
 	q_vector->rxr_count++;
-	a->rx_ring[r_idx].v_idx = (u64)1 << v_idx;
 }
 
 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
@@ -1326,7 +1465,6 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
 
 	set_bit(t_idx, q_vector->txr_idx);
 	q_vector->txr_count++;
-	a->tx_ring[t_idx].v_idx = (u64)1 << v_idx;
 }
 
 /**
@@ -1505,14 +1643,13 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 	}
 
 	if (new_itr != q_vector->eitr) {
-		u32 itr_reg;
+		/* do an exponential smoothing */
+		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
 
 		/* save the algorithm value here, not the smoothed one */
 		q_vector->eitr = new_itr;
-		/* do an exponential smoothing */
-		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
-		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
-		ixgbe_write_eitr(adapter, 0, itr_reg);
+
+		ixgbe_write_eitr(q_vector);
 	}
 
 	return;
@@ -1534,6 +1671,9 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
 		mask |= IXGBE_EIMS_GPI_SDP1;
 		mask |= IXGBE_EIMS_GPI_SDP2;
 	}
+	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+		mask |= IXGBE_EIMS_FLOW_DIR;
 
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
 	ixgbe_irq_enable_queues(adapter, ~0);
@@ -1879,7 +2019,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 			IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
 		}
 	} else {
-		if (!(adapter->flags & IXGBE_FLAG_RSC_ENABLED) &&
+		if (!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED) &&
 		    (netdev->mtu <= ETH_DATA_LEN))
 			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 		else
@@ -2008,7 +2148,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
 	}
 
-	if (adapter->flags & IXGBE_FLAG_RSC_ENABLED) {
+	if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) {
 		/* Enable 82599 HW-RSC */
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			j = adapter->rx_ring[i].reg_idx;
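The reordered ITR logic above means the value written to hardware is now the smoothed one. With integer math, an old q_vector->eitr of 8000 ints/sec and a freshly computed 20000 ints/sec yield (8000*90)/100 + (20000*10)/100 = 9200. Note that the surviving context comment ("save the algorithm value here, not the smoothed one") describes the previous ordering; after this hunk it is the smoothed value that gets stored. A minimal sketch of the arithmetic, not part of the patch:

static u32 example_smooth_itr(u32 old_itr, u32 new_itr)
{
	/* e.g. old_itr = 8000, new_itr = 20000 -> returns 9200 ints/sec */
	return ((old_itr * 90) / 100) + ((new_itr * 10) / 100);
}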
@@ -2181,11 +2321,7 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 
 	/* reprogram secondary unicast list */
-	addr_count = netdev->uc_count;
-	if (addr_count)
-		addr_list = netdev->uc_list->dmi_addr;
-	hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count,
-	                                ixgbe_addr_list_itr);
+	hw->mac.ops.update_uc_addr_list(hw, &netdev->uc_list);
 
 	/* reprogram multicast list */
 	addr_count = netdev->mc_count;
@@ -2208,12 +2344,15 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 		struct napi_struct *napi;
 		q_vector = adapter->q_vector[q_idx];
-		if (!q_vector->rxr_count)
-			continue;
 		napi = &q_vector->napi;
-		if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) &&
-		    (q_vector->rxr_count > 1))
-			napi->poll = &ixgbe_clean_rxonly_many;
+		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+			if (!q_vector->rxr_count || !q_vector->txr_count) {
+				if (q_vector->txr_count == 1)
+					napi->poll = &ixgbe_clean_txonly;
+				else if (q_vector->rxr_count == 1)
+					napi->poll = &ixgbe_clean_rxonly;
+			}
+		}
 
 		napi_enable(napi);
 	}
@@ -2231,8 +2370,6 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 		q_vector = adapter->q_vector[q_idx];
-		if (!q_vector->rxr_count)
-			continue;
 		napi_disable(&q_vector->napi);
 	}
 }
@@ -2290,6 +2427,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 static void ixgbe_configure(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_hw *hw = &adapter->hw;
 	int i;
 
 	ixgbe_set_rx_mode(netdev);
@@ -2311,6 +2449,15 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 	ixgbe_configure_fcoe(adapter);
 
 #endif /* IXGBE_FCOE */
+	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			adapter->tx_ring[i].atr_sample_rate =
+			                               adapter->atr_sample_rate;
+		ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
+	} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
+		ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
+	}
+
 	ixgbe_configure_tx(adapter);
 	ixgbe_configure_rx(adapter);
 	for (i = 0; i < adapter->num_rx_queues; i++)
@@ -2567,6 +2714,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 			DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
 	}
 
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		set_bit(__IXGBE_FDIR_INIT_DONE,
+		        &(adapter->tx_ring[i].reinit_state));
+
 	/* enable transmits */
 	netif_tx_start_all_queues(netdev);
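ixgbe_init_fdir_signature_82599()/ixgbe_init_fdir_perfect_82599() receive the packet-buffer allocation selector that ixgbe_sw_init() (later in this patch) leaves at 0, the 64 KB minimum. A sketch, under the assumption that the init helpers (they live in ixgbe_82599.c and are not shown here) fold this selector into FDIRCTRL using the constants this patch adds to ixgbe_type.h; the function below is illustrative, not the driver's:

static u32 example_fdirctrl_pballoc(enum ixgbe_fdir_pballoc_type pballoc)
{
	u32 fdirctrl = 0;

	switch (pballoc) {
	case IXGBE_FDIR_PBALLOC_256K:
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
		break;
	case IXGBE_FDIR_PBALLOC_128K:
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
		break;
	default:
		/* adapter->fdir_pballoc defaults to 0, i.e. 64 KB */
		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
		break;
	}
	return fdirctrl;
}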
@@ -2602,12 +2753,28 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 	int err;
 
 	err = hw->mac.ops.init_hw(hw);
-	if (err && (err != IXGBE_ERR_SFP_NOT_PRESENT))
-		dev_err(&adapter->pdev->dev, "Hardware Error\n");
+	switch (err) {
+	case 0:
+	case IXGBE_ERR_SFP_NOT_PRESENT:
+		break;
+	case IXGBE_ERR_MASTER_REQUESTS_PENDING:
+		dev_err(&adapter->pdev->dev, "master disable timed out\n");
+		break;
+	case IXGBE_ERR_EEPROM_VERSION:
+		/* We are running on a pre-production device, log a warning */
+		dev_warn(&adapter->pdev->dev, "This device is a pre-production "
+		         "adapter/LOM. Please be aware there may be issues "
+		         "associated with your hardware. If you are "
+		         "experiencing problems please contact your Intel or "
+		         "hardware representative who provided you with this "
+		         "hardware.\n");
+		break;
+	default:
+		dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
+	}
 
 	/* reprogram the RAR[0] in case user changed it. */
 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-
 }
 
 /**
@@ -2755,6 +2922,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	del_timer_sync(&adapter->watchdog_timer);
 	cancel_work_sync(&adapter->watchdog_task);
 
+	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+		cancel_work_sync(&adapter->fdir_reinit_task);
+
 	/* disable transmits in the hardware now that interrupts are off */
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		j = adapter->tx_ring[i].reg_idx;
@@ -2802,7 +2973,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
 	}
 #endif
 
-	tx_clean_complete = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
+	tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
 	ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
 
 	if (!tx_clean_complete)
@@ -2889,6 +3060,38 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
 	return ret;
 }
 
+/**
+ * ixgbe_set_fdir_queues: Allocate queues for Flow Director
+ * @adapter: board private structure to initialize
+ *
+ * Flow Director is an advanced Rx filter, attempting to get Rx flows back
+ * to the original CPU that initiated the Tx session. This runs in addition
+ * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
+ * Rx load across CPUs using RSS.
+ *
+ **/
+static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
+{
+	bool ret = false;
+	struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
+
+	f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
+	f_fdir->mask = 0;
+
+	/* Flow Director must have RSS enabled */
+	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
+	    ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+	     (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
+		adapter->num_tx_queues = f_fdir->indices;
+		adapter->num_rx_queues = f_fdir->indices;
+		ret = true;
+	} else {
+		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+	}
+	return ret;
+}
+
 #ifdef IXGBE_FCOE
 /**
 * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
@@ -2953,6 +3156,9 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 		goto done;
 
 #endif
+	if (ixgbe_set_fdir_queues(adapter))
+		goto done;
+
 	if (ixgbe_set_rss_queues(adapter))
 		goto done;
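The sizing rule in ixgbe_set_fdir_queues() gives one Tx/Rx queue pair per online CPU, capped at IXGBE_MAX_FDIR_INDICES (64); that 1:1 CPU-to-queue mapping is what lets ATR steer a flow's Rx traffic back to the CPU that transmits it. A worked illustration (not in the patch): on a 16-CPU machine the expression below yields 16, so the adapter runs 16 Tx and 16 Rx queues.

static int example_fdir_queue_count(struct ixgbe_ring_feature *f_fdir)
{
	/* min(online CPUs, 64), with the defaults set in ixgbe_sw_init() */
	return min((int)num_online_cpus(), f_fdir->indices);
}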
@@ -3123,6 +3329,31 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 }
 #endif
 
+/**
+ * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for Flow Director to the assigned rings.
+ *
+ **/
+static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
+{
+	int i;
+	bool ret = false;
+
+	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
+	    ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+	     (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i].reg_idx = i;
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			adapter->tx_ring[i].reg_idx = i;
+		ret = true;
+	}
+
+	return ret;
+}
+
 #ifdef IXGBE_FCOE
 /**
 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
@@ -3183,6 +3414,9 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 		return;
 
 #endif
+	if (ixgbe_cache_ring_fdir(adapter))
+		return;
+
 	if (ixgbe_cache_ring_rss(adapter))
 		return;
 }
@@ -3276,6 +3510,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
 	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+	adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+	adapter->atr_sample_rate = 0;
 	ixgbe_set_num_queues(adapter);
 
 	err = pci_enable_msi(adapter->pdev);
@@ -3309,7 +3546,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 		napi_vectors = adapter->num_rx_queues;
-		poll = &ixgbe_clean_rxonly;
+		poll = &ixgbe_clean_rxtx_many;
 	} else {
 		num_q_vectors = 1;
 		napi_vectors = 1;
@@ -3321,11 +3558,9 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 		if (!q_vector)
 			goto err_out;
 		q_vector->adapter = adapter;
-		q_vector->v_idx = q_idx;
 		q_vector->eitr = adapter->eitr_param;
-		if (q_idx < napi_vectors)
-			netif_napi_add(adapter->netdev, &q_vector->napi,
-			               (*poll), 64);
+		q_vector->v_idx = q_idx;
+		netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
 		adapter->q_vector[q_idx] = q_vector;
 	}
 
@@ -3353,22 +3588,16 @@ err_out:
 static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
 {
 	int q_idx, num_q_vectors;
-	int napi_vectors;
 
-	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
 		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-		napi_vectors = adapter->num_rx_queues;
-	} else {
+	else
 		num_q_vectors = 1;
-		napi_vectors = 1;
-	}
 
 	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
 		struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
-
 		adapter->q_vector[q_idx] = NULL;
-		if (q_idx < napi_vectors)
-			netif_napi_del(&q_vector->napi);
+		netif_napi_del(&q_vector->napi);
 		kfree(q_vector);
 	}
 }
@@ -3547,8 +3776,13 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
 	} else if (hw->mac.type == ixgbe_mac_82599EB) {
 		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
-		adapter->flags |= IXGBE_FLAG_RSC_CAPABLE;
-		adapter->flags |= IXGBE_FLAG_RSC_ENABLED;
+		adapter->flags |= IXGBE_FLAG2_RSC_CAPABLE;
+		adapter->flags |= IXGBE_FLAG2_RSC_ENABLED;
+		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+		adapter->ring_feature[RING_F_FDIR].indices =
+		                                         IXGBE_MAX_FDIR_INDICES;
+		adapter->atr_sample_rate = 20;
+		adapter->fdir_pballoc = 0;
 #ifdef IXGBE_FCOE
 		adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
 		adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
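With every vector now getting a NAPI context, the default poll handler registered in ixgbe_alloc_q_vectors() above is ixgbe_clean_rxtx_many; ixgbe_napi_enable_all() (earlier in this patch) then swaps in the cheaper single-ring routines where they apply. A sketch of the resulting dispatch (not in the patch; the typedef is a hypothetical alias for the NAPI poll signature):

typedef int (*ixgbe_poll_fn)(struct napi_struct *, int);

static ixgbe_poll_fn example_pick_poll(struct ixgbe_q_vector *q_vector)
{
	if (!q_vector->rxr_count && q_vector->txr_count == 1)
		return ixgbe_clean_txonly;	/* exactly one Tx ring */
	if (!q_vector->txr_count && q_vector->rxr_count == 1)
		return ixgbe_clean_rxonly;	/* exactly one Rx ring */
	return ixgbe_clean_rxtx_many;		/* mixed or multi-ring */
}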
@@ -4138,6 +4372,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
 		adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
 		adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+		adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+		adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
 #ifdef IXGBE_FCOE
 		adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
 		adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
@@ -4213,57 +4449,43 @@ static void ixgbe_watchdog(unsigned long data)
 {
 	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
 	struct ixgbe_hw *hw = &adapter->hw;
+	u64 eics = 0;
+	int i;
 
-	/* Do the watchdog outside of interrupt context due to the lovely
-	 * delays that some of the newer hardware requires */
-	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
-		u64 eics = 0;
-		int i;
+	/*
+	 * Do the watchdog outside of interrupt context due to the lovely
+	 * delays that some of the newer hardware requires
+	 */
 
-		for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++)
-			eics |= ((u64)1 << i);
+	if (test_bit(__IXGBE_DOWN, &adapter->state))
+		goto watchdog_short_circuit;
 
-		/* Cause software interrupt to ensure rx rings are cleaned */
-		switch (hw->mac.type) {
-		case ixgbe_mac_82598EB:
-			if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-				IXGBE_WRITE_REG(hw, IXGBE_EICS, (u32)eics);
-			} else {
-				/*
-				 * for legacy and MSI interrupts don't set any
-				 * bits that are enabled for EIAM, because this
-				 * operation would set *both* EIMS and EICS for
-				 * any bit in EIAM
-				 */
-				IXGBE_WRITE_REG(hw, IXGBE_EICS,
-				     (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
-			}
-			break;
-		case ixgbe_mac_82599EB:
-			if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-				IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(0),
-				                (u32)(eics & 0xFFFFFFFF));
-				IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(1),
-				                (u32)(eics >> 32));
-			} else {
-				/*
-				 * for legacy and MSI interrupts don't set any
-				 * bits that are enabled for EIAM, because this
-				 * operation would set *both* EIMS and EICS for
-				 * any bit in EIAM
-				 */
-				IXGBE_WRITE_REG(hw, IXGBE_EICS,
-				     (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
-			}
-			break;
-		default:
-			break;
-		}
-		/* Reset the timer */
-		mod_timer(&adapter->watchdog_timer,
-		          round_jiffies(jiffies + 2 * HZ));
+	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+		/*
+		 * for legacy and MSI interrupts don't set any bits
+		 * that are enabled for EIAM, because this operation
+		 * would set *both* EIMS and EICS for any bit in EIAM
+		 */
+		IXGBE_WRITE_REG(hw, IXGBE_EICS,
+			(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
+		goto watchdog_reschedule;
 	}
 
+	/* get one bit for every active tx/rx interrupt vector */
+	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+		struct ixgbe_q_vector *qv = adapter->q_vector[i];
+		if (qv->rxr_count || qv->txr_count)
+			eics |= ((u64)1 << i);
+	}
+
+	/* Cause software interrupt to ensure rx rings are cleaned */
+	ixgbe_irq_rearm_queues(adapter, eics);
+
+watchdog_reschedule:
+	/* Reset the timer */
+	mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
+
+watchdog_short_circuit:
 	schedule_work(&adapter->watchdog_task);
 }
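ixgbe_irq_rearm_queues() is introduced elsewhere in this patch; presumably it mirrors the open-coded EICS writes removed above, split by MAC type. A sketch under that assumption (the helper body is not part of this hunk):

static void example_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(hw, IXGBE_EICS,
		                (u32)(IXGBE_EIMS_RTX_QUEUE & qmask));
	} else {
		/* 82599: the software-interrupt bits span two registers */
		IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(0), (u32)(qmask & 0xFFFFFFFF));
		IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(1), (u32)(qmask >> 32));
	}
}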
@@ -4317,6 +4539,30 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
 }
 
 /**
+ * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbe_fdir_reinit_task(struct work_struct *work)
+{
+	struct ixgbe_adapter *adapter = container_of(work,
+	                                             struct ixgbe_adapter,
+	                                             fdir_reinit_task);
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i;
+
+	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			set_bit(__IXGBE_FDIR_INIT_DONE,
+			        &(adapter->tx_ring[i].reinit_state));
+	} else {
+		DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
+		        "ignored adding FDIR ATR filters\n");
+	}
+	/* Done FDIR Re-initialization, enable transmits */
+	netif_tx_start_all_queues(adapter->netdev);
+}
+
+/**
 * ixgbe_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
@@ -4341,12 +4587,12 @@ static void ixgbe_watchdog_task(struct work_struct *work)
 
 #ifdef CONFIG_DCB
 		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 			for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-				hw->mac.ops.setup_fc(hw, i);
+				hw->mac.ops.fc_enable(hw, i);
 		} else {
-			hw->mac.ops.setup_fc(hw, 0);
+			hw->mac.ops.fc_enable(hw, 0);
 		}
 #else
-		hw->mac.ops.setup_fc(hw, 0);
+		hw->mac.ops.fc_enable(hw, 0);
 #endif
 	}
 
@@ -4623,7 +4869,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 
 		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 		tx_buffer_info->length = size;
-		tx_buffer_info->dma = map[0] + offset;
+		tx_buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
 		tx_buffer_info->time_stamp = jiffies;
 		tx_buffer_info->next_to_watch = i;
 
@@ -4655,7 +4901,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 
 			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 			tx_buffer_info->length = size;
-			tx_buffer_info->dma = map[f + 1] + offset;
+			tx_buffer_info->dma = map[f] + offset;
 			tx_buffer_info->time_stamp = jiffies;
 			tx_buffer_info->next_to_watch = i;
 
@@ -4743,6 +4989,58 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
 	writel(i, adapter->hw.hw_addr + tx_ring->tail);
 }
 
+static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
+                      int queue, u32 tx_flags)
+{
+	/* Right now, we support IPv4 only */
+	struct ixgbe_atr_input atr_input;
+	struct tcphdr *th;
+	struct udphdr *uh;
+	struct iphdr *iph = ip_hdr(skb);
+	struct ethhdr *eth = (struct ethhdr *)skb->data;
+	u16 vlan_id, src_port, dst_port, flex_bytes;
+	u32 src_ipv4_addr, dst_ipv4_addr;
+	u8 l4type = 0;
+
+	/* check if we're UDP or TCP */
+	if (iph->protocol == IPPROTO_TCP) {
+		th = tcp_hdr(skb);
+		src_port = th->source;
+		dst_port = th->dest;
+		l4type |= IXGBE_ATR_L4TYPE_TCP;
+		/* l4type IPv4 type is 0, no need to assign */
+	} else if (iph->protocol == IPPROTO_UDP) {
+		uh = udp_hdr(skb);
+		src_port = uh->source;
+		dst_port = uh->dest;
+		l4type |= IXGBE_ATR_L4TYPE_UDP;
+		/* l4type IPv4 type is 0, no need to assign */
+	} else {
+		/* Unsupported L4 header, just bail here */
+		return;
+	}
+
+	memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
+
+	vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
+	          IXGBE_TX_FLAGS_VLAN_SHIFT;
+	src_ipv4_addr = iph->saddr;
+	dst_ipv4_addr = iph->daddr;
+	flex_bytes = eth->h_proto;
+
+	ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
+	ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
+	ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
+	ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
+	ixgbe_atr_set_l4type_82599(&atr_input, l4type);
+	/* src and dst are inverted, think how the receiver sees them */
+	ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
+	ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
+
+	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
+	ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
+}
+
 static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
                                  struct ixgbe_ring *tx_ring, int size)
 {
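The source/destination swap in ixgbe_atr() is the crux of the technique: the filter must describe the packet as the receiver will see it. A worked example (not in the patch): for a transmitted segment 10.0.0.1:5000 -> 10.0.0.2:80, the return traffic arrives as 10.0.0.2:80 -> 10.0.0.1:5000, so that inverted tuple is what gets programmed:

struct example_flow {
	__be32 saddr, daddr;
	__be16 sport, dport;
};

static struct example_flow example_atr_key(const struct example_flow *tx)
{
	struct example_flow rx_key = {
		.saddr = tx->daddr,	/* our destination is the rx source */
		.daddr = tx->saddr,
		.sport = tx->dport,
		.dport = tx->sport,
	};
	return rx_key;
}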
@@ -4777,6 +5075,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 
+	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+		return smp_processor_id();
+
 	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
 		return 0;  /* All traffic should default to class 0 */
 
@@ -4861,9 +5162,19 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
 	if (count) {
+		/* add the ATR filter if ATR is on */
+		if (tx_ring->atr_sample_rate) {
+			++tx_ring->atr_count;
+			if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
+			    test_bit(__IXGBE_FDIR_INIT_DONE,
+			             &tx_ring->reinit_state)) {
+				ixgbe_atr(adapter, skb, tx_ring->queue_index,
+				          tx_flags);
+				tx_ring->atr_count = 0;
+			}
+		}
 		ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
 		               hdr_len);
-		netdev->trans_start = jiffies;
 		ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
 
 	} else {
@@ -5244,6 +5555,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 			netdev->features |= NETIF_F_FCOE_CRC;
 			netdev->features |= NETIF_F_FSO;
 			netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+			DPRINTK(DRV, INFO, "FCoE enabled, "
+			        "disabling Flow Director\n");
+			adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+			adapter->flags &=
+			        ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+			adapter->atr_sample_rate = 0;
 		} else {
 			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
 		}
@@ -5253,7 +5570,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
-	if (adapter->flags & IXGBE_FLAG_RSC_ENABLED)
+	if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED)
 		netdev->features |= NETIF_F_LRO;
 
 	/* make sure the EEPROM is good */
@@ -5287,6 +5604,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	case IXGBE_DEV_ID_82599_KX4:
 		adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
 		                IXGBE_WUFC_MC | IXGBE_WUFC_BC);
+		/* Enable ACPI wakeup in GRC */
+		IXGBE_WRITE_REG(hw, IXGBE_GRC,
+		             (IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME));
 		break;
 	default:
 		adapter->wol = 0;
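With ixgbe_select_queue() returning smp_processor_id() whenever ATR is active, the Tx queue index equals the sending CPU, so the queue passed to ixgbe_atr() doubles as the Rx steering target; this relies on ixgbe_set_fdir_queues() having sized the queue count to the number of online CPUs. Sampling is then rate-limited per ring: with the default atr_sample_rate of 20 set in ixgbe_sw_init(), only every 20th packet on a ring triggers a filter add. A minimal sketch of that counter logic (not in the patch):

static bool example_should_sample(struct ixgbe_ring *tx_ring)
{
	/* packets 1..19 just bump the counter; packet 20 samples */
	if (++tx_ring->atr_count >= tx_ring->atr_sample_rate) {
		tx_ring->atr_count = 0;
		return true;
	}
	return false;
}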
 	/* reset the hardware with the new settings */
-	hw->mac.ops.start_hw(hw);
+	err = hw->mac.ops.start_hw(hw);
+	if (err == IXGBE_ERR_EEPROM_VERSION) {
+		/* We are running on a pre-production device, log a warning */
+		dev_warn(&pdev->dev, "This device is a pre-production "
+		         "adapter/LOM. Please be aware there may be issues "
+		         "associated with your hardware. If you are "
+		         "experiencing problems please contact your Intel or "
+		         "hardware representative who provided you with this "
+		         "hardware.\n");
+	}
 
 	strcpy(netdev->name, "eth%d");
 	err = register_netdev(netdev);
 	if (err)
@@ -5339,6 +5668,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	/* carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);
 
+	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+		INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
+
 #ifdef CONFIG_IXGBE_DCA
 	if (dca_add_requester(&pdev->dev) == 0) {
 		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
@@ -5401,6 +5734,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 	cancel_work_sync(&adapter->sfp_task);
 	cancel_work_sync(&adapter->multispeed_fiber_task);
 	cancel_work_sync(&adapter->sfp_config_module_task);
+	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+		cancel_work_sync(&adapter->fdir_reinit_task);
 	flush_scheduled_work();
 
 #ifdef CONFIG_IXGBE_DCA
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index e43d6248d7d4..453e966762f0 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -606,6 +606,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 				hw->phy.sfp_setup_needed = true;
 
 		/* Determine if the SFP+ PHY is dual speed or not. */
+		hw->phy.multispeed_fiber = false;
 		if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
 		     (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
 		    ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index df1f7034c284..fa87309dc087 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -30,6 +30,7 @@
 
 #include <linux/types.h>
 #include <linux/mdio.h>
+#include <linux/list.h>
 
 /* Vendor ID */
 #define IXGBE_INTEL_VENDOR_ID   0x8086
@@ -230,6 +231,34 @@
 #define IXGBE_RETA(_i)  (0x05C00 + ((_i) * 4))  /* 32 of these (0-31) */
 #define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4))  /* 10 of these (0-9) */
 
+/* Flow Director registers */
+#define IXGBE_FDIRCTRL  0x0EE00
+#define IXGBE_FDIRHKEY  0x0EE68
+#define IXGBE_FDIRSKEY  0x0EE6C
+#define IXGBE_FDIRDIP4M 0x0EE3C
+#define IXGBE_FDIRSIP4M 0x0EE40
+#define IXGBE_FDIRTCPM  0x0EE44
+#define IXGBE_FDIRUDPM  0x0EE48
+#define IXGBE_FDIRIP6M  0x0EE74
+#define IXGBE_FDIRM     0x0EE70
+
+/* Flow Director Stats registers */
+#define IXGBE_FDIRFREE  0x0EE38
+#define IXGBE_FDIRLEN   0x0EE4C
+#define IXGBE_FDIRUSTAT 0x0EE50
+#define IXGBE_FDIRFSTAT 0x0EE54
+#define IXGBE_FDIRMATCH 0x0EE58
+#define IXGBE_FDIRMISS  0x0EE5C
+
+/* Flow Director Programming registers */
+#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */
+#define IXGBE_FDIRIPSA      0x0EE18
+#define IXGBE_FDIRIPDA      0x0EE1C
+#define IXGBE_FDIRPORT      0x0EE20
+#define IXGBE_FDIRVLAN      0x0EE24
+#define IXGBE_FDIRHASH      0x0EE28
+#define IXGBE_FDIRCMD       0x0EE2C
+
 /* Transmit DMA registers */
 #define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/
 #define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
@@ -1264,8 +1293,10 @@
 #define IXGBE_STATUS_LAN_ID_1   0x00000004 /* LAN ID 1 */
 
 /* ESDP Bit Masks */
-#define IXGBE_ESDP_SDP0 0x00000001
-#define IXGBE_ESDP_SDP1 0x00000002
+#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */
+#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */
+#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */
+#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */
 #define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
 #define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
 #define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
@@ -1365,8 +1396,6 @@
 #define IXGBE_LINK_UP_TIME      90  /* 9.0 Seconds */
 #define IXGBE_AUTO_NEG_TIME     45  /* 4.5 Seconds */
 
-#define FIBER_LINK_UP_LIMIT     50
-
 /* PCS1GLSTA Bit Masks */
 #define IXGBE_PCS1GLSTA_LINK_OK         1
 #define IXGBE_PCS1GLSTA_SYNK_OK         0x10
@@ -1487,6 +1516,8 @@
 #define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET  0x3
 #define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP  0x1
 #define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS  0x2
+#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR   0x4
+#define IXGBE_FW_PATCH_VERSION_4   0x7
 
 /* PCI Bus Info */
 #define IXGBE_PCI_LINK_STATUS     0xB2
@@ -1651,6 +1682,9 @@
 #define IXGBE_RXDADV_ERR_SHIFT          20 /* RDESC.ERRORS shift */
 #define IXGBE_RXDADV_ERR_FCEOFE         0x80000000 /* FCoEFe/IPE */
 #define IXGBE_RXDADV_ERR_FCERR          0x00700000 /* FCERR/FDIRERR */
+#define IXGBE_RXDADV_ERR_FDIR_LEN       0x00100000 /* FDIR Length error */
+#define IXGBE_RXDADV_ERR_FDIR_DROP      0x00200000 /* FDIR Drop error */
+#define IXGBE_RXDADV_ERR_FDIR_COLL      0x00400000 /* FDIR Collision error */
 #define IXGBE_RXDADV_ERR_HBO    0x00800000 /*Header Buffer Overflow */
 #define IXGBE_RXDADV_ERR_CE     0x01000000 /* CRC Error */
 #define IXGBE_RXDADV_ERR_LE     0x02000000 /* Length Error */
@@ -1783,6 +1817,82 @@
 
 #endif
 
+enum ixgbe_fdir_pballoc_type {
+	IXGBE_FDIR_PBALLOC_64K = 0,
+	IXGBE_FDIR_PBALLOC_128K,
+	IXGBE_FDIR_PBALLOC_256K,
+};
+#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT           16
+
+/* Flow Director register values */
+#define IXGBE_FDIRCTRL_PBALLOC_64K              0x00000001
+#define IXGBE_FDIRCTRL_PBALLOC_128K             0x00000002
+#define IXGBE_FDIRCTRL_PBALLOC_256K             0x00000003
+#define IXGBE_FDIRCTRL_INIT_DONE                0x00000008
+#define IXGBE_FDIRCTRL_PERFECT_MATCH            0x00000010
+#define IXGBE_FDIRCTRL_REPORT_STATUS            0x00000020
+#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS     0x00000080
+#define IXGBE_FDIRCTRL_DROP_Q_SHIFT             8
+#define IXGBE_FDIRCTRL_FLEX_SHIFT               16
+#define IXGBE_FDIRCTRL_SEARCHLIM                0x00800000
+#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT         24
+#define IXGBE_FDIRCTRL_FULL_THRESH_MASK         0xF0000000
+#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT        28
+
+#define IXGBE_FDIRTCPM_DPORTM_SHIFT             16
+#define IXGBE_FDIRUDPM_DPORTM_SHIFT             16
+#define IXGBE_FDIRIP6M_DIPM_SHIFT               16
+#define IXGBE_FDIRM_VLANID                      0x00000001
+#define IXGBE_FDIRM_VLANP                       0x00000002
+#define IXGBE_FDIRM_POOL                        0x00000004
+#define IXGBE_FDIRM_L3P                         0x00000008
+#define IXGBE_FDIRM_L4P                         0x00000010
+#define IXGBE_FDIRM_FLEX                        0x00000020
+#define IXGBE_FDIRM_DIPv6                       0x00000040
+
+#define IXGBE_FDIRFREE_FREE_MASK                0xFFFF
+#define IXGBE_FDIRFREE_FREE_SHIFT               0
+#define IXGBE_FDIRFREE_COLL_MASK                0x7FFF0000
+#define IXGBE_FDIRFREE_COLL_SHIFT               16
+#define IXGBE_FDIRLEN_MAXLEN_MASK               0x3F
+#define IXGBE_FDIRLEN_MAXLEN_SHIFT              0
+#define IXGBE_FDIRLEN_MAXHASH_MASK              0x7FFF0000
+#define IXGBE_FDIRLEN_MAXHASH_SHIFT             16
+#define IXGBE_FDIRUSTAT_ADD_MASK                0xFFFF
+#define IXGBE_FDIRUSTAT_ADD_SHIFT               0
+#define IXGBE_FDIRUSTAT_REMOVE_MASK             0xFFFF0000
+#define IXGBE_FDIRUSTAT_REMOVE_SHIFT            16
+#define IXGBE_FDIRFSTAT_FADD_MASK               0x00FF
+#define IXGBE_FDIRFSTAT_FADD_SHIFT              0
+#define IXGBE_FDIRFSTAT_FREMOVE_MASK            0xFF00
+#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT           8
+#define IXGBE_FDIRPORT_DESTINATION_SHIFT        16
+#define IXGBE_FDIRVLAN_FLEX_SHIFT               16
+#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT       15
+#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT       16
+
+#define IXGBE_FDIRCMD_CMD_MASK                  0x00000003
+#define IXGBE_FDIRCMD_CMD_ADD_FLOW              0x00000001
+#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW           0x00000002
+#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT        0x00000003
+#define IXGBE_FDIRCMD_CMD_QUERY_REM_HASH        0x00000007
+#define IXGBE_FDIRCMD_FILTER_UPDATE             0x00000008
+#define IXGBE_FDIRCMD_IPv6DMATCH                0x00000010
+#define IXGBE_FDIRCMD_L4TYPE_UDP                0x00000020
+#define IXGBE_FDIRCMD_L4TYPE_TCP                0x00000040
+#define IXGBE_FDIRCMD_L4TYPE_SCTP               0x00000060
+#define IXGBE_FDIRCMD_IPV6                      0x00000080
+#define IXGBE_FDIRCMD_CLEARHT                   0x00000100
+#define IXGBE_FDIRCMD_DROP                      0x00000200
+#define IXGBE_FDIRCMD_INT                       0x00000400
+#define IXGBE_FDIRCMD_LAST                      0x00000800
+#define IXGBE_FDIRCMD_COLLISION                 0x00001000
+#define IXGBE_FDIRCMD_QUEUE_EN                  0x00008000
+#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT            16
+#define IXGBE_FDIRCMD_VT_POOL_SHIFT             24
+#define IXGBE_FDIR_INIT_DONE_POLL               10
+#define IXGBE_FDIRCMD_CMD_POLL                  10
+
 /* Transmit Descriptor - Legacy */
 struct ixgbe_legacy_tx_desc {
 	u64 buffer_addr; /* Address of the descriptor's data buffer */
@@ -1956,6 +2066,45 @@ typedef u32 ixgbe_physical_layer;
 #define IXGBE_PHYSICAL_LAYER_10GBASE_KR  0x0800
 #define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
 
+/* Software ATR hash keys */
+#define IXGBE_ATR_BUCKET_HASH_KEY    0xE214AD3D
+#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
+
+/* Software ATR input stream offsets and masks */
+#define IXGBE_ATR_VLAN_OFFSET       0
+#define IXGBE_ATR_SRC_IPV6_OFFSET   2
+#define IXGBE_ATR_SRC_IPV4_OFFSET  14
+#define IXGBE_ATR_DST_IPV6_OFFSET  18
+#define IXGBE_ATR_DST_IPV4_OFFSET  30
+#define IXGBE_ATR_SRC_PORT_OFFSET  34
+#define IXGBE_ATR_DST_PORT_OFFSET  36
+#define IXGBE_ATR_FLEX_BYTE_OFFSET 38
+#define IXGBE_ATR_VM_POOL_OFFSET   40
+#define IXGBE_ATR_L4TYPE_OFFSET    41
+
+#define IXGBE_ATR_L4TYPE_MASK      0x3
+#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+#define IXGBE_ATR_L4TYPE_UDP       0x1
+#define IXGBE_ATR_L4TYPE_TCP       0x2
+#define IXGBE_ATR_L4TYPE_SCTP      0x3
+#define IXGBE_ATR_HASH_MASK     0x7fff
+
+/* Flow Director ATR input struct. */
+struct ixgbe_atr_input {
+	/* Byte layout in order, all values with MSB first:
+	 *
+	 * vlan_id    - 2 bytes
+	 * src_ip     - 16 bytes
+	 * dst_ip     - 16 bytes
+	 * src_port   - 2 bytes
+	 * dst_port   - 2 bytes
+	 * flex_bytes - 2 bytes
+	 * vm_pool    - 1 byte
+	 * l4type     - 1 byte
+	 */
+	u8 byte_stream[42];
+};
+
 enum ixgbe_eeprom_type {
 	ixgbe_eeprom_uninitialized = 0,
 	ixgbe_eeprom_spi,
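The ixgbe_atr_set_*_82599() accessors used by ixgbe_atr() live in ixgbe_82599.c and are not shown in this diff; presumably each one packs its field into byte_stream[] at the offset defined above. A sketch under that assumption (the exact byte order within a field is the helpers' business, not guaranteed by this header):

static s32 example_atr_set_src_port(struct ixgbe_atr_input *input,
                                    u16 src_port)
{
	/* place the 16-bit port at its fixed offset in the input stream */
	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
	return 0;
}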
@@ -2091,7 +2240,8 @@ struct ixgbe_fc_info {
 	u16 pause_time; /* Flow Control Pause timer */
 	bool send_xon; /* Flow control send XON */
 	bool strict_ieee; /* Strict IEEE mode */
-	bool disable_fc_autoneg; /* Turn off autoneg FC mode */
+	bool disable_fc_autoneg; /* Do not autonegotiate FC */
+	bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
 	enum ixgbe_fc_mode current_mode; /* FC mode in effect */
 	enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
 };
@@ -2223,8 +2373,7 @@ struct ixgbe_mac_operations {
 	s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
 	s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
 	s32 (*init_rx_addrs)(struct ixgbe_hw *);
-	s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
-	                           ixgbe_mc_addr_itr);
+	s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct list_head *);
 	s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
 	                           ixgbe_mc_addr_itr);
 	s32 (*enable_mc)(struct ixgbe_hw *);
@@ -2234,7 +2383,7 @@ struct ixgbe_mac_operations {
 	s32 (*init_uta_tables)(struct ixgbe_hw *);
 
 	/* Flow Control */
-	s32 (*setup_fc)(struct ixgbe_hw *, s32);
+	s32 (*fc_enable)(struct ixgbe_hw *, s32);
 };
 
 struct ixgbe_phy_operations {
@@ -2281,6 +2430,7 @@ struct ixgbe_mac_info {
 	bool orig_link_settings_stored;
 	bool autoneg;
 	bool autoneg_succeeded;
+	bool autotry_restart;
 };
 
 struct ixgbe_phy_info {
@@ -2346,6 +2496,8 @@ struct ixgbe_info {
 #define IXGBE_ERR_SFP_NOT_SUPPORTED             -19
 #define IXGBE_ERR_SFP_NOT_PRESENT               -20
 #define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT       -21
+#define IXGBE_ERR_FDIR_REINIT_FAILED            -23
+#define IXGBE_ERR_EEPROM_VERSION                -24
 #define IXGBE_NOT_IMPLEMENTED                   0x7FFFFFFF
 
 #endif /* _IXGBE_TYPE_H_ */